diff --git a/.github/dependabot.yml b/.github/dependabot.yml
new file mode 100644
index 00000000..851d7a07
--- /dev/null
+++ b/.github/dependabot.yml
@@ -0,0 +1,11 @@
+# To get started with Dependabot version updates, you'll need to specify which
+# package ecosystems to update and where the package manifests are located.
+# Please see the documentation for all configuration options:
+# https://docs.github.com/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file
+
+version: 2
+updates:
+ - package-ecosystem: "gomod"
+ directory: "/"
+ schedule:
+ interval: "daily"
diff --git a/.github/workflows/build-and-push.yml b/.github/workflows/build-and-push.yml
index b6c37a71..8226039f 100644
--- a/.github/workflows/build-and-push.yml
+++ b/.github/workflows/build-and-push.yml
@@ -1,14 +1,16 @@
-name: "Build GARM images"
+name: "Build and push GARM images"
on:
- workflow_dispatch:
+ workflow_call:
inputs:
push_to_project:
description: "Project to build images for"
- required: true
+ required: false
+ type: string
default: "ghcr.io/cloudbase"
ref:
description: "Ref to build"
- required: true
+ required: false
+ type: string
default: "main"
permissions:
@@ -19,10 +21,10 @@ jobs:
permissions:
packages: write
name: "Build GARM images"
- runs-on: ubuntu-22.04
+ runs-on: ubuntu-latest
steps:
- name: "Checkout"
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
with:
path: src/github.com/cloudbase/garm
fetch-depth: 0
@@ -37,13 +39,23 @@ jobs:
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- - name: Build and push
+ - name: Build and push image
+ env:
+ IMAGE_REGISTRY: ${{ inputs.push_to_project }}
+ GH_REF: ${{ inputs.ref }}
+ working-directory: src/github.com/cloudbase/garm
run: |
- cd src/github.com/cloudbase/garm
- VERSION=$(git describe --tags --match='v[0-9]*' --always ${{ github.event.inputs.ref }})
+ if [ "$GH_REF" == "main" ]; then
+ IMAGE_TAG="nightly"
+ else
+ IMAGE_TAG=$(git describe --tags --match='v[0-9]*' --always ${GH_REF})
+ fi
docker buildx build \
--provenance=false \
--platform linux/amd64,linux/arm64 \
- --build-arg="GARM_REF=${{ github.event.inputs.ref }}" \
- -t ${{ github.event.inputs.push_to_project }}/garm:"${VERSION}" \
- --push .
\ No newline at end of file
+ --label "org.opencontainers.image.source=https://github.com/cloudbase/garm/tree/${GH_REF}" \
+ --label "org.opencontainers.image.description=GARM ${GH_REF}" \
+ --label "org.opencontainers.image.licenses=Apache 2.0" \
+ --build-arg="GARM_REF=${GH_REF}" \
+ -t ${IMAGE_REGISTRY}/garm:"${IMAGE_TAG}" \
+ --push .
diff --git a/.github/workflows/go-tests.yml b/.github/workflows/go-tests.yml
index 148c60de..bde4f0f0 100644
--- a/.github/workflows/go-tests.yml
+++ b/.github/workflows/go-tests.yml
@@ -4,9 +4,11 @@ on:
push:
branches:
- main
+ - 'release/**'
pull_request:
branches:
- main
+ - 'release/**'
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.ref_name }}
@@ -17,23 +19,22 @@ permissions: {}
jobs:
linters:
name: Linters
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-latest
timeout-minutes: 10
steps:
- name: Install dependencies
run: |
sudo apt-get update
- sudo apt-get install -y libbtrfs-dev build-essential
+ sudo apt-get install -y libbtrfs-dev build-essential apg jq
- - uses: actions/setup-go@v3
- with:
- go-version: 'stable'
- uses: actions/checkout@v3
- - uses: golangci/golangci-lint-action@v3
+ - uses: actions/setup-go@v5
with:
- skip-cache: true
- args: --timeout=8m --build-tags testing
+ go-version-file: go.mod
+
+ - name: make lint
+ run: make golangci-lint && GOLANGCI_LINT_EXTRA_ARGS="--timeout=8m --build-tags=testing,integration" make lint
- name: Verify go vendor, go modules and gofmt
run: |
sudo apt-get install -y jq
@@ -43,15 +44,39 @@ jobs:
runs-on: ubuntu-latest
needs: [linters]
steps:
+ - name: Install dependencies
+ run: |
+ sudo apt-get update
+ sudo apt-get install -y libbtrfs-dev build-essential apg jq default-jre
+
+ - uses: actions/setup-node@v4
+ with:
+ node-version: '>=v24.5.0'
+
+ - name: Set up openapi-generator-cli
+ run: |
+ mkdir -p $HOME/openapi-generator
+ cd $HOME/openapi-generator
+ npm install @openapitools/openapi-generator-cli
+ echo "$HOME/openapi-generator/node_modules/.bin" >> $GITHUB_PATH
+
- name: Checkout
uses: actions/checkout@v3
- name: Setup Golang
- uses: actions/setup-go@v3
+ uses: actions/setup-go@v5
with:
go-version-file: go.mod
- run: go version
+ - name: Run go generate
+ run: |
+ GOTOOLCHAIN=go1.24.6 make generate
+
- name: Run GARM Go Tests
run: make go-test
+
+ - name: Run web UI tests
+ run: |
+ make webui-test
diff --git a/.github/workflows/integration-tests.yml b/.github/workflows/integration-tests.yml
new file mode 100644
index 00000000..04072b20
--- /dev/null
+++ b/.github/workflows/integration-tests.yml
@@ -0,0 +1,122 @@
+name: Integration Tests
+on:
+ workflow_dispatch: {}
+ schedule:
+ - cron: "0 0 * * *"
+
+jobs:
+ integration-tests:
+ runs-on: ubuntu-noble-garm
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v4
+
+ - name: Setup Golang
+ uses: actions/setup-go@v3
+ with:
+ go-version-file: go.mod
+
+ - name: Setup LXD
+ uses: canonical/setup-lxd@main
+ with:
+ channel: latest/stable
+
+ - name: Install dependencies
+ run: |
+ sudo rm -f /etc/apt/sources.list.d/microsoft-prod.list
+ sudo apt-get -qq update && sudo apt-get -qq install -y apg coreutils make jq build-essential libsqlite3-dev libsqlite3-0
+
+ - name: Set up tunnel
+ shell: bash
+ run: |
+ mkdir -p /home/runner/.ssh
+ echo "${{ secrets.SSH_PRIVATE_KEY }}" > /home/runner/.ssh/ssh_key
+ sudo chown -R runner:runner /home/runner/.ssh
+ sudo chmod 500 /home/runner/.ssh
+ sudo chmod 400 /home/runner/.ssh/ssh_key
+
+ SUBDOMAIN=$(apg -a 0 -M l -m 12 -n 1)
+ echo "::add-mask::$SUBDOMAIN"
+
+ BASE_URL="${{ secrets.TUNNEL_BASE_URL }}"
+ GARM_BASE_URL="https://$SUBDOMAIN.$BASE_URL"
+ echo "::add-mask::$GARM_BASE_URL"
+
+ echo "GARM_BASE_URL=$GARM_BASE_URL" >> $GITHUB_ENV
+
+ REPO_WEBHOOK_SECRET=$(apg -a 0 -M l -m 12 -n 1)
+ ORG_WEBHOOK_SECRET=$(apg -a 0 -M l -m 12 -n 1)
+ echo "REPO_WEBHOOK_SECRET=$REPO_WEBHOOK_SECRET" >> $GITHUB_ENV
+ echo "ORG_WEBHOOK_SECRET=$ORG_WEBHOOK_SECRET" >> $GITHUB_ENV
+ echo "GARM_CHECKOUT_DIR=$GITHUB_WORKSPACE" >> $GITHUB_ENV
+
+ - name: Create logs directory
+ if: always()
+ run: sudo mkdir -p /artifacts-logs && sudo chmod 777 /artifacts-logs
+
+ - name: Run integration tests
+ run: |
+ set -o pipefail
+ set -o errexit
+ make integration 2>&1
+ env:
+ ORG_NAME: gsamfira
+ REPO_NAME: garm-testing
+ CREDENTIALS_NAME: test-garm-creds
+ WORKFLOW_FILE_NAME: test.yml
+ GH_TOKEN: ${{ secrets.GH_OAUTH_TOKEN }}
+ LXD_REMOTE_SERVER: ${{ secrets.LXD_REMOTE_SERVER }}
+
+ - name: Show GARM logs
+ if: always()
+ run: |
+ sudo systemctl status garm@runner || true
+ sudo journalctl --no-pager 2>&1 > /artifacts-logs/system.log
+ sudo journalctl -u garm@runner --no-pager 2>&1 > /artifacts-logs/garm.log
+
+ - name: Upload GARM and e2e logs
+ if: always()
+ uses: actions/upload-artifact@v4
+ with:
+ name: garm-logs
+ path: /artifacts-logs
+
+ - name: Cleanup orphan GARM resources via GitHub API
+ if: always()
+ run: |
+ set -o pipefail
+ set -o errexit
+
+ sudo systemctl stop garm@runner || true
+ go run ./test/integration/gh_cleanup/main.go || true
+ env:
+ ORG_NAME: gsamfira
+ REPO_NAME: garm-testing
+ GH_TOKEN: ${{ secrets.GH_OAUTH_TOKEN }}
diff --git a/.github/workflows/trigger-manual.yml b/.github/workflows/trigger-manual.yml
new file mode 100644
index 00000000..faf166d4
--- /dev/null
+++ b/.github/workflows/trigger-manual.yml
@@ -0,0 +1,19 @@
+name: Manual build of GARM images
+on:
+ workflow_dispatch:
+ inputs:
+ push_to_project:
+ description: "Project to build images for"
+ required: true
+ default: "ghcr.io/cloudbase"
+ ref:
+ description: "Ref to build"
+ required: true
+ default: "main"
+
+jobs:
+ call-build-and-push:
+ uses: ./.github/workflows/build-and-push.yml
+ with:
+ push_to_project: ${{ inputs.push_to_project }}
+ ref: ${{ inputs.ref }}
\ No newline at end of file
diff --git a/.github/workflows/trigger-nightly.yml b/.github/workflows/trigger-nightly.yml
new file mode 100644
index 00000000..e0b83856
--- /dev/null
+++ b/.github/workflows/trigger-nightly.yml
@@ -0,0 +1,10 @@
+name: Nightly build of GARM images
+on:
+ schedule:
+ - cron: "0 2 * * *"
+
+jobs:
+ call-build-and-push:
+ uses: ./.github/workflows/build-and-push.yml
+ with:
+ ref: "main"
diff --git a/.gitignore b/.gitignore
index c4a5d98b..54c931c8 100644
--- a/.gitignore
+++ b/.gitignore
@@ -4,6 +4,7 @@
*.dll
*.so
*.dylib
+*.DS_Store
# Test binary, built with `go test -c`
*.test
@@ -16,3 +17,11 @@ bin/
# vendor/
.vscode
cmd/temp
+build/
+release/
+node_modules/
+.svelte-kit/
+debug.html
+git_push.sh
+webapp/src/lib/api/generated/docs
+.env
diff --git a/.golangci.yml b/.golangci.yml
new file mode 100644
index 00000000..8dee07f5
--- /dev/null
+++ b/.golangci.yml
@@ -0,0 +1,44 @@
+# SPDX-License-Identifier: MIT
+linters:
+ disable-all: true
+ fast: false
+ enable:
+ - gci
+ - goconst
+ - gocritic
+ - gocyclo
+ - gofmt
+ - gofumpt
+ - goimports
+ - godox
+ - govet
+ - gosec
+ - gosimple
+ - importas
+ - ineffassign
+ - loggercheck
+ - misspell
+ - nakedret
+ - nilerr
+ - predeclared
+ - promlinter
+ - revive
+ - staticcheck
+ - unconvert
+ - unused
+ - wastedassign
+ - whitespace
+
+linters-settings:
+ gci:
+ sections:
+ - standard
+ - default
+ - prefix(github.com/cloudbase/garm)
+
+ goimports:
+ local-prefixes: github.com/cloudbase/garm
+
+ gosec:
+ excludes:
+ - G115
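
As a side note on the `gci` settings above: they enforce a three-group import order (standard library, then third-party modules, then `github.com/cloudbase/garm` packages). A minimal sketch of a file that satisfies it; the package name and function are purely illustrative:

```go
// Illustrative only: shows the import grouping gci enforces with the
// sections configured above. Groups are separated by blank lines:
// standard library, third-party modules, then the local prefix.
package docexample

import (
	"fmt"
	"net/http"

	"github.com/gorilla/mux"

	"github.com/cloudbase/garm/params"
)

func routes() http.Handler {
	r := mux.NewRouter()
	r.HandleFunc("/event", func(w http.ResponseWriter, _ *http.Request) {
		fmt.Fprintf(w, "event: %s", params.WorkflowJobEvent)
	})
	return r
}
```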
diff --git a/.mockery.yaml b/.mockery.yaml
new file mode 100644
index 00000000..b7858821
--- /dev/null
+++ b/.mockery.yaml
@@ -0,0 +1,27 @@
+with-expecter: true
+dir: "mocks"
+mockname: "{{ .InterfaceName }}"
+outpkg: "mocks"
+filename: "{{ .InterfaceName }}.go"
+# V3 compatibility settings
+resolve-type-alias: false
+disable-version-string: true
+issue-845-fix: true
+packages:
+ # Database store interfaces
+ github.com/cloudbase/garm/database/common:
+ interfaces:
+ Store:
+ config:
+ dir: "{{ .InterfaceDir }}/mocks"
+ # Runner interfaces
+ github.com/cloudbase/garm/runner:
+ interfaces:
+ PoolManagerController:
+ config:
+ dir: "{{ .InterfaceDir }}/mocks"
+ # Runner common interfaces (generate all interfaces in this package)
+ github.com/cloudbase/garm/runner/common:
+ config:
+ dir: "{{ .InterfaceDir }}/mocks"
+ all: true
\ No newline at end of file
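
With `with-expecter: true`, the generated mocks expose a type-safe `EXPECT()` builder in addition to the classic `On(...)` API. A rough sketch of a test using the `Store` mock generated into `database/common/mocks`; the `GetController` method is hypothetical and stands in for whatever the real interface defines:

```go
package runner_test

import (
	"testing"

	"github.com/cloudbase/garm/database/common/mocks"
)

func TestStoreMock(t *testing.T) {
	// NewStore is generated by mockery; passing t wires up
	// expectation assertions via t.Cleanup.
	store := mocks.NewStore(t)

	// GetController is a hypothetical method used only to show the
	// EXPECT() builder; substitute a real Store method.
	store.EXPECT().GetController().Return(nil, nil).Once()

	if _, err := store.GetController(); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
}
```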
diff --git a/Dockerfile b/Dockerfile
index 80d9b2a7..81033292 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -3,27 +3,75 @@ ARG GARM_REF
LABEL stage=builder
-RUN apk add musl-dev gcc libtool m4 autoconf g++ make libblkid util-linux-dev git linux-headers
-RUN git config --global --add safe.directory /build
+RUN apk add --no-cache musl-dev gcc libtool m4 autoconf g++ make libblkid util-linux-dev git linux-headers upx curl jq
+RUN git config --global --add safe.directory /build && git config --global --add advice.detachedHead false
+RUN echo ${GARM_REF}
ADD . /build/garm
-RUN cd /build/garm && git checkout ${GARM_REF}
-RUN git clone https://github.com/cloudbase/garm-provider-azure /build/garm-provider-azure
-RUN git clone https://github.com/cloudbase/garm-provider-openstack /build/garm-provider-openstack
-RUN cd /build/garm && go build -o /bin/garm \
- -tags osusergo,netgo,sqlite_omit_load_extension \
- -ldflags "-linkmode external -extldflags '-static' -s -w -X main.Version=$(git describe --tags --match='v[0-9]*' --dirty --always)" \
- /build/garm/cmd/garm
-RUN mkdir -p /opt/garm/providers.d
-RUN cd /build/garm-provider-azure && go build -ldflags="-linkmode external -extldflags '-static' -s -w" -o /opt/garm/providers.d/garm-provider-azure .
-RUN cd /build/garm-provider-openstack && go build -ldflags="-linkmode external -extldflags '-static' -s -w" -o /opt/garm/providers.d/garm-provider-openstack .
+RUN git -C /build/garm checkout ${GARM_REF}
+RUN cd /build/garm \
+ && go build -o /bin/garm \
+ -tags osusergo,netgo,sqlite_omit_load_extension \
+ -ldflags "-linkmode external -extldflags '-static' -s -w -X github.com/cloudbase/garm/util/appdefaults.Version=$(git describe --tags --match='v[0-9]*' --dirty --always)" \
+ /build/garm/cmd/garm && upx /bin/garm
+RUN cd /build/garm/cmd/garm-cli \
+ && go build -o /bin/garm-cli \
+ -tags osusergo,netgo,sqlite_omit_load_extension \
+ -ldflags "-linkmode external -extldflags '-static' -s -w -X github.com/cloudbase/garm/util/appdefaults.Version=$(git describe --tags --match='v[0-9]*' --dirty --always)" \
+ . && upx /bin/garm-cli
+RUN set -ex; \
+ mkdir -p /opt/garm/providers.d; \
+ for repo in \
+ cloudbase/garm-provider-azure \
+ cloudbase/garm-provider-openstack \
+ cloudbase/garm-provider-lxd \
+ cloudbase/garm-provider-incus \
+ cloudbase/garm-provider-aws \
+ cloudbase/garm-provider-gcp \
+ cloudbase/garm-provider-equinix \
+ flatcar/garm-provider-linode \
+ mercedes-benz/garm-provider-k8s; \
+ do \
+ export PROVIDER_NAME="$(basename $repo)"; \
+ export PROVIDER_SUBDIR=""; \
+ if [ "$GARM_REF" == "main" ]; then \
+ export PROVIDER_REF="main"; \
+ else \
+ export PROVIDER_REF="$(curl -s -L https://api.github.com/repos/$repo/releases/latest | jq -r '.tag_name')"; \
+ fi; \
+ git clone --branch "$PROVIDER_REF" "https://github.com/$repo" "/build/$PROVIDER_NAME"; \
+ case $PROVIDER_NAME in \
+ "garm-provider-k8s") \
+ export PROVIDER_SUBDIR="cmd/garm-provider-k8s"; \
+ export PROVIDER_LDFLAGS="-linkmode external -extldflags \"-static\" -s -w"; \
+ ;; \
+ "garm-provider-linode") \
+ export PROVIDER_LDFLAGS="-linkmode external -extldflags \"-static\" -s -w"; \
+ ;; \
+ *) \
+ export PROVIDER_VERSION=$(git -C /build/$PROVIDER_NAME describe --tags --match='v[0-9]*' --dirty --always); \
+ export PROVIDER_LDFLAGS="-linkmode external -extldflags \"-static\" -s -w -X main.Version=$PROVIDER_VERSION"; \
+ ;; \
+ esac; \
+ cd "/build/$PROVIDER_NAME/$PROVIDER_SUBDIR" \
+ && go build -ldflags="$PROVIDER_LDFLAGS" -o /opt/garm/providers.d/$PROVIDER_NAME . \
+ && upx /opt/garm/providers.d/$PROVIDER_NAME; \
+ done
-FROM scratch
+FROM busybox
COPY --from=builder /bin/garm /bin/garm
+COPY --from=builder /bin/garm-cli /bin/garm-cli
COPY --from=builder /opt/garm/providers.d/garm-provider-openstack /opt/garm/providers.d/garm-provider-openstack
+COPY --from=builder /opt/garm/providers.d/garm-provider-lxd /opt/garm/providers.d/garm-provider-lxd
+COPY --from=builder /opt/garm/providers.d/garm-provider-incus /opt/garm/providers.d/garm-provider-incus
COPY --from=builder /opt/garm/providers.d/garm-provider-azure /opt/garm/providers.d/garm-provider-azure
+COPY --from=builder /opt/garm/providers.d/garm-provider-aws /opt/garm/providers.d/garm-provider-aws
+COPY --from=builder /opt/garm/providers.d/garm-provider-gcp /opt/garm/providers.d/garm-provider-gcp
+COPY --from=builder /opt/garm/providers.d/garm-provider-equinix /opt/garm/providers.d/garm-provider-equinix
+
+COPY --from=builder /opt/garm/providers.d/garm-provider-k8s /opt/garm/providers.d/garm-provider-k8s
COPY --from=builder /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
ENTRYPOINT ["/bin/garm", "-config", "/etc/garm/config.toml"]
diff --git a/Dockerfile.build-static b/Dockerfile.build-static
index ae023b71..2ed27168 100644
--- a/Dockerfile.build-static
+++ b/Dockerfile.build-static
@@ -12,4 +12,6 @@ RUN wget http://musl.cc/aarch64-linux-musl-cross.tgz -O /tmp/aarch64-linux-musl-
ADD ./scripts/build-static.sh /build-static.sh
RUN chmod +x /build-static.sh
+ADD . /build/garm
+
CMD ["/bin/sh"]
diff --git a/Makefile b/Makefile
index b965d598..714d2465 100644
--- a/Makefile
+++ b/Makefile
@@ -1,53 +1,139 @@
-SHELL := bash
+SHELL := /bin/bash
+export SHELLOPTS:=$(if $(SHELLOPTS),$(SHELLOPTS):)pipefail:errexit
+.ONESHELL:
+
+GEN_PASSWORD=$(shell (/usr/bin/apg -n1 -m32))
IMAGE_TAG = garm-build
-USER_ID=$(shell ((docker --version | grep -q podman) && echo "0" || id -u))
-USER_GROUP=$(shell ((docker --version | grep -q podman) && echo "0" || id -g))
+IMAGE_BUILDER=$(shell (which docker || which podman))
+IS_PODMAN=$(shell (($(IMAGE_BUILDER) --version | grep -q podman) && echo "yes" || echo "no"))
+USER_ID=$(if $(filter yes,$(IS_PODMAN)),0,$(shell id -u))
+USER_GROUP=$(if $(filter yes,$(IS_PODMAN)),0,$(shell id -g))
ROOTDIR=$(dir $(abspath $(lastword $(MAKEFILE_LIST))))
GOPATH ?= $(shell go env GOPATH)
VERSION ?= $(shell git describe --tags --match='v[0-9]*' --dirty --always)
+GARM_REF ?= $(shell git rev-parse --abbrev-ref HEAD)
GO ?= go
+export GARM_PASSWORD ?= ${GEN_PASSWORD}
+export REPO_WEBHOOK_SECRET = ${GEN_PASSWORD}
+export ORG_WEBHOOK_SECRET = ${GEN_PASSWORD}
+export CREDENTIALS_NAME ?= test-garm-creds
+export WORKFLOW_FILE_NAME ?= test.yml
+export GARM_ADMIN_USERNAME ?= admin
+
+ifeq ($(IS_PODMAN),yes)
+ EXTRA_ARGS := -v /etc/ssl/certs/ca-certificates.crt:/etc/ssl/certs/ca-certificates.crt
+endif
+
+
+.PHONY: help
+help: ## Display this help.
+ @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m<target>\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-20s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)
default: build
-.PHONY : build-static test install-lint-deps lint go-test fmt fmtcheck verify-vendor verify
-build-static:
- @echo Building garm
- docker build --tag $(IMAGE_TAG) -f Dockerfile.build-static .
- docker run --rm -e USER_ID=$(USER_ID) -e USER_GROUP=$(USER_GROUP) -v $(PWD):/build/garm:z $(IMAGE_TAG) /build-static.sh
- @echo Binaries are available in $(PWD)/bin
+##@ Build
-build:
+.PHONY : build-static test install-lint-deps lint go-test fmt fmtcheck verify-vendor verify create-release-files release
+build-static: ## Build garm statically
+ @echo Building garm
+ $(IMAGE_BUILDER) build $(EXTRA_ARGS) --tag $(IMAGE_TAG) -f Dockerfile.build-static .
+ mkdir -p build
+ $(IMAGE_BUILDER) run --rm -e USER_ID=$(USER_ID) -e GARM_REF=$(GARM_REF) -e USER_GROUP=$(USER_GROUP) -v $(PWD)/build:/build/output:z $(IMAGE_TAG) /build-static.sh
+ @echo Binaries are available in $(PWD)/build
+
+clean: ## Clean up build artifacts
+ @rm -rf ./bin ./build ./release
+
+.PHONY: build
+build: ## Build garm
@echo Building garm ${VERSION}
$(shell mkdir -p ./bin)
- @$(GO) build -ldflags "-s -w -X main.Version=${VERSION}" -tags osusergo,netgo,sqlite_omit_load_extension -o bin/garm ./cmd/garm
- @$(GO) build -ldflags "-s -w -X github.com/cloudbase/garm/cmd/garm-cli/cmd.Version=${VERSION}" -tags osusergo,netgo,sqlite_omit_load_extension -o bin/garm-cli ./cmd/garm-cli
+ @$(GO) build -ldflags "-s -w -X github.com/cloudbase/garm/util/appdefaults.Version=${VERSION}" -tags osusergo,netgo,sqlite_omit_load_extension -o bin/garm ./cmd/garm
+ @$(GO) build -ldflags "-s -w -X github.com/cloudbase/garm/util/appdefaults.Version=${VERSION}" -tags osusergo,netgo,sqlite_omit_load_extension -o bin/garm-cli ./cmd/garm-cli
@echo Binaries are available in $(PWD)/bin
-test: verify go-test
+.PHONY: build-webui
+build-webui:
+ @echo Building GARM web ui
+ ./build-webapp.sh
+ rm -rf webapp/assets/_app
+ cp -r webapp/build/* webapp/assets/
-install-lint-deps:
- @$(GO) install github.com/golangci/golangci-lint/cmd/golangci-lint@latest
+.PHONY: generate
+generate: ## Run go generate after checking required tools are in PATH
+ @echo Checking required tools...
+ @which openapi-generator-cli > /dev/null || (echo "Error: openapi-generator-cli not found in PATH" && exit 1)
+ @echo Running go generate
+ @$(GO) generate ./...
-lint:
- @golangci-lint run --timeout=8m --build-tags testing
+test: verify go-test ## Run tests
-go-test:
- @$(GO) test -race -mod=vendor -tags testing -v $(TEST_ARGS) -timeout=15m -parallel=4 -count=1 ./...
+##@ Release
+create-release-files:
+ ./scripts/make-release.sh
-fmt:
- @$(GO) fmt $$(go list ./...)
+release: build-static create-release-files ## Create a release
-fmtcheck:
- @gofmt -l -s $$(go list ./... | sed 's|github.com/cloudbase/garm/||g') | grep ".*\.go"; if [ "$$?" -eq 0 ]; then echo "gofmt check failed; please run gofmt -w -s"; exit 1;fi
+##@ Lint / Verify
+.PHONY: lint
+lint: golangci-lint $(GOLANGCI_LINT) ## Run linting.
+ $(GOLANGCI_LINT) run -v --build-tags=testing,integration $(GOLANGCI_LINT_EXTRA_ARGS)
+
+.PHONY: lint-fix
+lint-fix: golangci-lint $(GOLANGCI_LINT) ## Lint the codebase and run auto-fixers if supported by the linter
+ GOLANGCI_LINT_EXTRA_ARGS=--fix $(MAKE) lint
verify-vendor: ## verify if all the go.mod/go.sum files are up-to-date
$(eval TMPDIR := $(shell mktemp -d))
- @cp -R ${ROOTDIR} ${TMPDIR}
+ @cp -R ${ROOTDIR} ${TMPDIR}/.
@(cd ${TMPDIR}/garm && ${GO} mod tidy)
@diff -r -u -q ${ROOTDIR} ${TMPDIR}/garm >/dev/null 2>&1; if [ "$$?" -ne 0 ];then echo "please run: go mod tidy && go mod vendor"; exit 1; fi
@rm -rf ${TMPDIR}
-verify: verify-vendor lint fmtcheck
+verify: verify-vendor lint fmtcheck ## Run all verify-* targets
+
+integration: build ## Run integration tests
+ function cleanup {
+ if [ -e "$$GITHUB_ENV" ];then
+ source $$GITHUB_ENV
+ fi
+ ./test/integration/scripts/taredown_garm.sh
+ $(GO) run ./test/integration/gh_cleanup/main.go
+ }
+ trap cleanup EXIT
+ @./test/integration/scripts/setup-garm.sh
+ @$(GO) test -v ./test/integration/. -timeout=30m -tags=integration
+
+##@ Development
+
+go-test: ## Run tests
+ @$(GO) test -race -mod=vendor -tags testing -v $(TEST_ARGS) -timeout=15m -parallel=4 -count=1 ./...
+
+fmt: ## Run go fmt against code.
+ @$(GO) fmt $$(go list ./...)
+
+webui-test:
+ (cd webapp && npm install)
+ (cd webapp && npm run test:run)
+
+##@ Build Dependencies
+
+## Location to install dependencies to
+LOCALBIN ?= $(shell pwd)/bin
+$(LOCALBIN):
+ mkdir -p $(LOCALBIN)
+
+## Tool Binaries
+GOLANGCI_LINT ?= $(LOCALBIN)/golangci-lint
+
+## Tool Versions
+GOLANGCI_LINT_VERSION ?= v1.64.8
+
+.PHONY: golangci-lint
+golangci-lint: $(GOLANGCI_LINT) ## Download golangci-lint locally if necessary. If wrong version is installed, it will be overwritten.
+$(GOLANGCI_LINT): $(LOCALBIN)
+ test -s $(LOCALBIN)/golangci-lint && $(LOCALBIN)/golangci-lint --version | grep -q $(GOLANGCI_LINT_VERSION) || \
+ GOBIN=$(LOCALBIN) go install github.com/golangci/golangci-lint/cmd/golangci-lint@$(GOLANGCI_LINT_VERSION)
diff --git a/README.md b/README.md
index 8e53094e..24fbbcc4 100644
--- a/README.md
+++ b/README.md
@@ -1,14 +1,59 @@
+
+
+
+
+
+
# GitHub Actions Runner Manager (GARM)
[![Go Tests](https://github.com/cloudbase/garm/actions/workflows/go-tests.yml/badge.svg)](https://github.com/cloudbase/garm/actions/workflows/go-tests.yml)
+
+
+- [GitHub Actions Runner Manager (GARM)](#github-actions-runner-manager-garm)
+ - [About GARM](#about-garm)
+ - [Join us on slack](#join-us-on-slack)
+ - [Installing](#installing)
+ - [Quickstart](#quickstart)
+ - [Installing on Kubernetes](#installing-on-kubernetes)
+ - [Configuring GARM for GHES](#configuring-garm-for-ghes)
+ - [Configuring GARM for Gitea](#configuring-garm-for-gitea)
+ - [Enabling the web UI](#enabling-the-web-ui)
+ - [Using GARM](#using-garm)
+ - [Supported providers](#supported-providers)
+ - [Installing external providers](#installing-external-providers)
+ - [Optimizing your runners](#optimizing-your-runners)
+ - [Write your own provider](#write-your-own-provider)
+
+
+
+## About GARM
+
Welcome to GARM!
-Garm enables you to create and automatically maintain pools of [self-hosted GitHub runners](https://docs.github.com/en/actions/hosting-your-own-runners/about-self-hosted-runners), with autoscaling that can be used inside your github workflow runs.
+GARM enables you to create and automatically maintain pools of [self-hosted runners](https://docs.github.com/en/actions/hosting-your-own-runners/about-self-hosted-runners) on both GitHub and [Gitea](https://github.com/go-gitea/gitea/), with auto-scaling that can be used inside your workflow runs.
-The goal of ```GARM``` is to be simple to set up, simple to configure and simple to use. It is a single binary that can run on any GNU/Linux machine without any other requirements other than the providers it creates the runners in. It is intended to be easy to deploy in any environment and can create runners in any system you can write a provider for. There is no complicated setup process and no extremely complex concepts to understand. Once set up, it's meant to stay out of your way.
+The goal of ```GARM``` is to be simple to set up, simple to configure and simple to use. The server itself is a single binary that can run on any GNU/Linux machine with no requirements other than the providers you want to enable in your setup. It is intended to be easy to deploy in any environment and can create runners in virtually any system you can write a provider for (if one does not already exist). There is no complicated setup process and no extremely complex concepts to understand. Once set up, it's meant to stay out of your way.
-Garm supports creating pools on either GitHub itself or on your own deployment of [GitHub Enterprise Server](https://docs.github.com/en/enterprise-server@3.5/admin/overview/about-github-enterprise-server). For instructions on how to use ```GARM``` with GHE, see the [credentials](/doc/github_credentials.md) section of the documentation.
+Through the use of providers, `GARM` can create runners in a variety of environments from the same `GARM` instance. Whether you want to create runners in your OpenStack cloud, your Azure cloud or your Kubernetes cluster, that is easily achieved by installing the appropriate providers, configuring them in `GARM` and creating pools that use them. You can create zero-runner pools for instances with high costs (large VMs, GPU-enabled instances, etc.) and have them spin up on demand, or you can create large pools of eagerly created k8s-backed runners that can be used for your CI/CD pipelines at a moment's notice. You can mix and match, creating pools in any combination of providers or resource allocations you want.
+
+GARM supports two modes of operation:
+
+* Pools
+* Scale sets
+
+Here is a brief architectural diagram of how pools work and how GARM reacts to workflows triggered in GitHub:
+
+*(architecture diagram)*
+
+**Scale sets** work differently. Pools (as they are defined in GARM) rely on webhooks to learn when a job starts, and GARM must internally make the right decision about which pool should handle that job; with scale sets, much of the scheduling and decision-making logic lives in GitHub itself.
+
+> [!IMPORTANT]
+> The README and documentation in the `main` branch are relevant to the not-yet-released code present in `main`. Following the documentation from the `main` branch for a stable release of GARM may lead to errors. To view the documentation for the latest stable release, please switch to the appropriate tag. For information about setting up `v0.1.6`, please refer to the [v0.1.6 tag](https://github.com/cloudbase/garm/tree/v0.1.6).
+
+> [!CAUTION]
+> The `main` branch holds the latest code and is not guaranteed to be stable. If you are looking for a stable release, please check the releases page. If you plan to use the `main` branch, please do so on a new instance. Do not upgrade from a stable release to `main`.
## Join us on slack
@@ -18,37 +63,68 @@ Whether you're running into issues or just want to drop by and say "hi", feel fr
## Installing
+### Quickstart
+
Check out the [quickstart](/doc/quickstart.md) document for instructions on how to install ```GARM```. If you'd like to build from source, check out the [building from source](/doc/building_from_source.md) document.
-## Installing external providers
+### Installing on Kubernetes
-External providers are binaries that GARM calls into to create runners in a particular IaaS. There are currently two external providers available:
+Thanks to the efforts of the amazing folks at [@mercedes-benz](https://github.com/mercedes-benz/), GARM can now be integrated into k8s via their operator. Check out the [GARM operator](https://github.com/mercedes-benz/garm-operator/) for more details.
-* [OpenStack](https://github.com/cloudbase/garm-provider-openstack)
+## Configuring GARM for GHES
+
+GARM supports creating pools and scale sets in either GitHub itself or in your own deployment of [GitHub Enterprise Server](https://docs.github.com/en/enterprise-server@3.10/admin/overview/about-github-enterprise-server). For instructions on how to use ```GARM``` with GHE, see the [credentials](/doc/github_credentials.md) section of the documentation.
+
+## Configuring GARM for Gitea
+
+GARM now has support for Gitea (>=1.24.0). For information on getting started with Gitea, see the [Gitea quickstart](/doc/gitea.md) document.
+
+## Enabling the web UI
+
+GARM now ships with a single-page application. To enable it, add the following to your GARM config:
+
+```toml
+[apiserver.webui]
+ enable = true
+```
+
+Check the [README.md](/webapp/README.md) file for details on the web UI.
+
+## Using GARM
+
+GARM is designed with simplicity in mind. At least we try to keep it as simple as possible. We're aware that adding a new tool to your workflow can be painful, especially when you already have to deal with so many. The cognitive load for ops teams has reached a level where it feels overwhelming at times to even wrap your head around a new tool. As such, we believe that tools should be simple, should take no more than a few hours to understand and set up, and if you absolutely need to interact with the tool, it should be as intuitive as possible. Although we try our best to make this happen, we're aware that GARM has some rough edges, especially for new users. If you encounter issues or feel like the setup process was too complicated, please let us know. We're always looking to improve the user experience.
+
+We've written a short introduction into some of the commands that GARM has and some of the concepts involved in setting up GARM, managing runners and how GitHub does some of the things it does.
+
+[You can find it here](/doc/using_garm.md).
+
+Please, feel free to [open an issue](https://github.com/cloudbase/garm/issues/new) if you find the documentation lacking and would like more info. Sometimes we forget the challenges that new users face as we're so close to the code and how it works. Any feedback is welcome and we're always looking to improve the documentation.
+
+## Supported providers
+
+GARM uses providers to create runners in a particular IaaS. The providers are external executables that GARM calls into to create runners. Before you can create runners, you'll need to install at least one provider.
+
+### Installing external providers
+
+External providers are binaries that GARM calls into to create runners in a particular IaaS. There are several external providers available:
+
+* [Akamai/Linode](https://github.com/flatcar/garm-provider-linode) - Experimental
+* [Amazon EC2](https://github.com/cloudbase/garm-provider-aws)
* [Azure](https://github.com/cloudbase/garm-provider-azure)
+* [Equinix Metal](https://github.com/cloudbase/garm-provider-equinix)
+* [Google Cloud Platform (GCP)](https://github.com/cloudbase/garm-provider-gcp)
+* [Incus](https://github.com/cloudbase/garm-provider-incus)
+* [Kubernetes](https://github.com/mercedes-benz/garm-provider-k8s) - Thanks to the amazing folks at @mercedes-benz for sharing their awesome provider!
+* [LXD](https://github.com/cloudbase/garm-provider-lxd)
+* [OpenStack](https://github.com/cloudbase/garm-provider-openstack)
+* [Oracle Cloud Infrastructure (OCI)](https://github.com/cloudbase/garm-provider-oci)
Follow the instructions in the README of each provider to install them.
-## Configuration
-
-The ```GARM``` configuration is a simple ```toml```. The sample config file in [the testdata folder](/testdata/config.toml) is fairly well commented and should be enough to get you started. The configuration file is split into several sections, each of which is documented in its own page. The sections are:
-
-* [The default section](/doc/config_default.md)
-* [Database](/doc/database.md)
-* [Github credentials](/doc/github_credentials.md)
-* [Providers](/doc/providers.md)
-* [Metrics](/doc/config_metrics.md)
-* [JWT authentication](/doc/config_jwt_auth.md)
-* [API server](/doc/config_api_server.md)
-
## Optimizing your runners
If you would like to optimize the startup time of new instance, take a look at the [performance considerations](/doc/performance_considerations.md) page.
## Write your own provider
-The providers are interfaces between ```GARM``` and a particular IaaS in which we spin up GitHub Runners. These providers can be either **native** or **external**. The **native** providers are written in ```Go```, and must implement [the interface defined here](https://github.com/cloudbase/garm/blob/main/runner/common/provider.go#L22-L39). **External** providers can be written in any language, as they are in the form of an external executable that ```GARM``` calls into.
-
-There is currently one **native** provider for [LXD](https://linuxcontainers.org/lxd/) and two **external** providers for [Openstack and Azure](/contrib/providers.d/).
-
-If you want to write your own provider, you can choose to write a native one, or implement an **external** one. The easiest one to write is probably an **external** provider. Please see the [Writing an external provider](/doc/external_provider.md) document for details. Also, feel free to inspect the two available external providers in this repository.
+The providers are interfaces between ```GARM``` and a particular IaaS in which we spin up GitHub Runners. **External** providers can be written in any language, as they are in the form of an external executable that ```GARM``` calls into. Please see the [Writing an external provider](/doc/external_provider.md) document for details. Also, feel free to inspect the two available sample external providers in this repository.
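
For a taste of how small an external provider can be, here is a heavily simplified sketch in Go. It assumes the contract described in the external provider document: GARM sets a `GARM_COMMAND` environment variable naming the operation and passes JSON parameters on stdin, and the provider prints the resulting instance as JSON on stdout. Treat the command names and payload shape here as placeholders and consult [the doc](/doc/external_provider.md) for the authoritative interface.

```go
// Skeletal external provider. The GARM_COMMAND variable and the
// stdin/stdout JSON contract are assumptions based on
// /doc/external_provider.md; the real command set and payloads are
// defined there.
package main

import (
	"encoding/json"
	"fmt"
	"io"
	"os"
)

func main() {
	cmd := os.Getenv("GARM_COMMAND")
	payload, err := io.ReadAll(os.Stdin)
	if err != nil {
		fmt.Fprintf(os.Stderr, "failed to read stdin: %v\n", err)
		os.Exit(1)
	}

	switch cmd {
	case "CreateInstance":
		var bootstrap map[string]any
		if err := json.Unmarshal(payload, &bootstrap); err != nil {
			fmt.Fprintf(os.Stderr, "invalid bootstrap params: %v\n", err)
			os.Exit(1)
		}
		// Create the VM/container here, then print the resulting
		// instance as JSON on stdout for GARM to consume.
		_ = json.NewEncoder(os.Stdout).Encode(map[string]any{
			"provider_id": "example-instance-id",
			"status":      "running",
		})
	case "DeleteInstance":
		// Tear down the instance identified by the payload/environment.
	default:
		fmt.Fprintf(os.Stderr, "unknown command: %s\n", cmd)
		os.Exit(1)
	}
}
```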
diff --git a/apiserver/controllers/controllers.go b/apiserver/controllers/controllers.go
index 2c1fd42a..019671eb 100644
--- a/apiserver/controllers/controllers.go
+++ b/apiserver/controllers/controllers.go
@@ -15,26 +15,60 @@
package controllers
import (
+ "context"
"encoding/json"
+ "errors"
+ "fmt"
"io"
- "log"
+ "log/slog"
"net/http"
+ "net/url"
"strings"
+ "github.com/gorilla/mux"
+ "github.com/gorilla/websocket"
+
gErrors "github.com/cloudbase/garm-provider-common/errors"
"github.com/cloudbase/garm-provider-common/util"
"github.com/cloudbase/garm/apiserver/params"
"github.com/cloudbase/garm/auth"
+ "github.com/cloudbase/garm/config"
"github.com/cloudbase/garm/metrics"
runnerParams "github.com/cloudbase/garm/params"
- "github.com/cloudbase/garm/runner"
+ "github.com/cloudbase/garm/runner" //nolint:typecheck
+ garmUtil "github.com/cloudbase/garm/util"
wsWriter "github.com/cloudbase/garm/websocket"
-
- "github.com/gorilla/websocket"
- "github.com/pkg/errors"
+ "github.com/cloudbase/garm/workers/websocket/events"
)
-func NewAPIController(r *runner.Runner, authenticator *auth.Authenticator, hub *wsWriter.Hub) (*APIController, error) {
+func NewAPIController(r *runner.Runner, authenticator *auth.Authenticator, hub *wsWriter.Hub, apiCfg config.APIServer) (*APIController, error) {
+ controllerInfo, err := r.GetControllerInfo(auth.GetAdminContext(context.Background()))
+ if err != nil {
+ return nil, fmt.Errorf("failed to get controller info: %w", err)
+ }
+ var checkOrigin func(r *http.Request) bool
+ if len(apiCfg.CORSOrigins) > 0 {
+ checkOrigin = func(r *http.Request) bool {
+ origin := r.Header["Origin"]
+ if len(origin) == 0 {
+ return true
+ }
+ u, err := url.Parse(origin[0])
+ if err != nil {
+ return false
+ }
+ for _, val := range apiCfg.CORSOrigins {
+ corsVal, err := url.Parse(val)
+ if err != nil {
+ continue
+ }
+ if garmUtil.ASCIIEqualFold(u.Host, corsVal.Host) {
+ return true
+ }
+ }
+ return false
+ }
+ }
return &APIController{
r: r,
auth: authenticator,
@@ -42,37 +76,38 @@ func NewAPIController(r *runner.Runner, authenticator *auth.Authenticator, hub *
upgrader: websocket.Upgrader{
ReadBufferSize: 1024,
WriteBufferSize: 16384,
+ CheckOrigin: checkOrigin,
},
+ controllerID: controllerInfo.ControllerID.String(),
}, nil
}
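
The `checkOrigin` closure above compares only the host portion of the `Origin` header against the configured CORS origins, case-insensitively, and allows requests that send no `Origin` at all. A standalone sketch of that comparison, with `strings.EqualFold` standing in for garm's `ASCIIEqualFold` helper:

```go
package main

import (
	"fmt"
	"net/url"
	"strings"
)

// allowedOrigin mirrors the logic in NewAPIController: parse both the
// request Origin and each configured origin, then compare hosts
// case-insensitively, ignoring scheme and path.
func allowedOrigin(origin string, corsOrigins []string) bool {
	u, err := url.Parse(origin)
	if err != nil {
		return false
	}
	for _, val := range corsOrigins {
		corsVal, err := url.Parse(val)
		if err != nil {
			continue
		}
		if strings.EqualFold(u.Host, corsVal.Host) {
			return true
		}
	}
	return false
}

func main() {
	allowed := []string{"https://garm.example.com"}
	fmt.Println(allowedOrigin("https://GARM.example.com", allowed)) // true
	fmt.Println(allowedOrigin("https://evil.example.net", allowed)) // false
}
```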
type APIController struct {
- r *runner.Runner
- auth *auth.Authenticator
- hub *wsWriter.Hub
- upgrader websocket.Upgrader
+ r *runner.Runner
+ auth *auth.Authenticator
+ hub *wsWriter.Hub
+ upgrader websocket.Upgrader
+ controllerID string
}
-func handleError(w http.ResponseWriter, err error) {
- w.Header().Add("Content-Type", "application/json")
- origErr := errors.Cause(err)
+func handleError(ctx context.Context, w http.ResponseWriter, err error) {
+ w.Header().Set("Content-Type", "application/json")
apiErr := params.APIErrorResponse{
- Details: origErr.Error(),
+ Details: err.Error(),
}
-
- switch origErr.(type) {
- case *gErrors.NotFoundError:
+ switch {
+ case errors.Is(err, gErrors.ErrNotFound):
w.WriteHeader(http.StatusNotFound)
apiErr.Error = "Not Found"
- case *gErrors.UnauthorizedError:
+ case errors.Is(err, gErrors.ErrUnauthorized):
w.WriteHeader(http.StatusUnauthorized)
apiErr.Error = "Not Authorized"
// Don't include details on 401 errors.
apiErr.Details = ""
- case *gErrors.BadRequestError:
+ case errors.Is(err, gErrors.ErrBadRequest):
w.WriteHeader(http.StatusBadRequest)
apiErr.Error = "Bad Request"
- case *gErrors.DuplicateUserError, *gErrors.ConflictError:
+ case errors.Is(err, gErrors.ErrDuplicateEntity), errors.Is(err, &gErrors.ConflictError{}):
w.WriteHeader(http.StatusConflict)
apiErr.Error = "Conflict"
default:
@@ -83,72 +118,122 @@ func handleError(w http.ResponseWriter, err error) {
}
if err := json.NewEncoder(w).Encode(apiErr); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
}
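
Since `handleError` now matches sentinel errors with `errors.Is` instead of unwrapping a single cause via `errors.Cause`, any handler can wrap a sentinel with `fmt.Errorf("...: %w", ...)` at arbitrary depth and still get the right status code. A minimal illustration:

```go
package main

import (
	"errors"
	"fmt"

	gErrors "github.com/cloudbase/garm-provider-common/errors"
)

func main() {
	// Wrapping with %w preserves the sentinel in the chain, so
	// errors.Is matches it through any number of layers.
	err := fmt.Errorf("fetching pool: %w", gErrors.ErrNotFound)
	fmt.Println(errors.Is(err, gErrors.ErrNotFound))     // true -> 404
	fmt.Println(errors.Is(err, gErrors.ErrUnauthorized)) // false
}
```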
-func (a *APIController) webhookMetricLabelValues(valid, reason string) []string {
- controllerInfo, err := a.r.GetControllerInfo(auth.GetAdminContext())
- if err != nil {
- log.Printf("failed to get controller info: %s", err)
- // If labels are empty, not attempt will be made to record webhook.
- return []string{}
- }
- return []string{
- valid, reason,
- controllerInfo.Hostname, controllerInfo.ControllerID.String(),
- }
-}
-
-func (a *APIController) handleWorkflowJobEvent(w http.ResponseWriter, r *http.Request) {
+func (a *APIController) handleWorkflowJobEvent(ctx context.Context, w http.ResponseWriter, r *http.Request) {
defer r.Body.Close()
body, err := io.ReadAll(r.Body)
if err != nil {
- handleError(w, gErrors.NewBadRequestError("invalid post body: %s", err))
+ handleError(ctx, w, gErrors.NewBadRequestError("invalid post body: %s", err))
return
}
signature := r.Header.Get("X-Hub-Signature-256")
hookType := r.Header.Get("X-Github-Hook-Installation-Target-Type")
+ giteaTargetType := r.Header.Get("X-Gitea-Hook-Installation-Target-Type")
- var labelValues []string
- defer func() {
- if len(labelValues) == 0 {
+ forgeType := runnerParams.GithubEndpointType
+ if giteaTargetType != "" {
+ forgeType = runnerParams.GiteaEndpointType
+ hookType = giteaTargetType
+ }
+
+ if err := a.r.DispatchWorkflowJob(hookType, signature, forgeType, body); err != nil {
+ switch {
+ case errors.Is(err, gErrors.ErrNotFound):
+ metrics.WebhooksReceived.WithLabelValues(
+ "false", // label: valid
+ "owner_unknown", // label: reason
+ ).Inc()
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "got not found error from DispatchWorkflowJob. webhook not meant for us?")
return
- }
- if err := metrics.RecordWebhookWithLabels(labelValues...); err != nil {
- log.Printf("failed to record metric: %s", err)
- }
- }()
-
- if err := a.r.DispatchWorkflowJob(hookType, signature, body); err != nil {
- if errors.Is(err, gErrors.ErrNotFound) {
- labelValues = a.webhookMetricLabelValues("false", "owner_unknown")
- log.Printf("got not found error from DispatchWorkflowJob. webhook not meant for us?: %q", err)
- return
- } else if strings.Contains(err.Error(), "signature") { // TODO: check error type
- labelValues = a.webhookMetricLabelValues("false", "signature_invalid")
- } else {
- labelValues = a.webhookMetricLabelValues("false", "unknown")
+ case strings.Contains(err.Error(), "signature"):
+ // nolint:golangci-lint,godox TODO: check error type
+ metrics.WebhooksReceived.WithLabelValues(
+ "false", // label: valid
+ "signature_invalid", // label: reason
+ ).Inc()
+ default:
+ metrics.WebhooksReceived.WithLabelValues(
+ "false", // label: valid
+ "unknown", // label: reason
+ ).Inc()
}
- handleError(w, err)
+ handleError(ctx, w, err)
return
}
- labelValues = a.webhookMetricLabelValues("true", "")
+ metrics.WebhooksReceived.WithLabelValues(
+ "true", // label: valid
+ "", // label: reason
+ ).Inc()
}
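
The deferred `RecordWebhookWithLabels` helper (and its hostname/controller-ID labels) is gone; the handler now increments the counter vector inline. Assuming `metrics.WebhooksReceived` is a Prometheus `CounterVec` keyed by `valid` and `reason`, its definition would look roughly like this (the namespace and metric name are guesses; the real definition lives in the `metrics` package):

```go
package metrics

import "github.com/prometheus/client_golang/prometheus"

// WebhooksReceived counts incoming webhooks, partitioned by whether
// they were valid and, if not, why they were rejected. A sketch of
// what the diff implies, not the actual definition.
var WebhooksReceived = prometheus.NewCounterVec(
	prometheus.CounterOpts{
		Namespace: "garm",
		Name:      "webhooks_received",
		Help:      "Number of webhooks received by the controller.",
	},
	[]string{"valid", "reason"},
)
```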
-func (a *APIController) CatchAll(w http.ResponseWriter, r *http.Request) {
+func (a *APIController) WebhookHandler(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+
+ vars := mux.Vars(r)
+ controllerID, ok := vars["controllerID"]
+ // If the webhook URL includes a controller ID, we validate that it's meant for us. We still
+ // support bare webhook URLs, which are typically configured manually by the user.
+ // The controllerID suffixed webhook URL is useful when configuring the webhook for an entity
+ // via garm. We cannot tag a webhook URL on github, so there is no way to determine ownership.
+ // Using a controllerID suffix is a simple way to denote ownership.
+ if ok && controllerID != a.controllerID {
+ slog.InfoContext(ctx, "ignoring webhook meant for foreign controller", "req_controller_id", controllerID)
+ return
+ }
+
headers := r.Header.Clone()
event := runnerParams.Event(headers.Get("X-Github-Event"))
switch event {
case runnerParams.WorkflowJobEvent:
- a.handleWorkflowJobEvent(w, r)
+ a.handleWorkflowJobEvent(ctx, w, r)
+ case runnerParams.PingEvent:
+ // Ignore ping event. We may want to save the ping in the github entity table in the future.
default:
- log.Printf("ignoring unknown event %s", util.SanitizeLogEntry(string(event)))
+ slog.DebugContext(ctx, "ignoring unknown event", "gh_event", util.SanitizeLogEntry(string(event)))
+ }
+}
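
For `mux.Vars(r)["controllerID"]` to be populated, the router has to register a controller-ID-suffixed webhook route alongside the bare one. A sketch of the kind of registration this implies (the paths are illustrative; the actual routes are defined in the API server's router setup):

```go
package main

import (
	"net/http"

	"github.com/gorilla/mux"
)

func registerWebhookRoutes(r *mux.Router, handler http.HandlerFunc) {
	// Bare URL, typically configured manually by the user.
	r.HandleFunc("/webhooks", handler).Methods(http.MethodPost)
	// Suffixed URL used when GARM configures the webhook itself;
	// the path variable lets WebhookHandler check ownership.
	r.HandleFunc("/webhooks/{controllerID}", handler).Methods(http.MethodPost)
}

func main() {
	r := mux.NewRouter()
	registerWebhookRoutes(r, func(w http.ResponseWriter, _ *http.Request) {
		w.WriteHeader(http.StatusOK)
	})
	_ = http.ListenAndServe(":8080", r) // demo only
}
```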
+
+func (a *APIController) EventsHandler(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+ if !auth.IsAdmin(ctx) {
+ w.WriteHeader(http.StatusForbidden)
+ if _, err := w.Write([]byte("events are available to admin users")); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
return
}
+
+ conn, err := a.upgrader.Upgrade(w, r, nil)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "error upgrading to websockets")
+ return
+ }
+ defer conn.Close()
+
+ wsClient, err := wsWriter.NewClient(ctx, conn)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to create new client")
+ return
+ }
+ defer wsClient.Stop()
+
+ eventHandler, err := events.NewHandler(ctx, wsClient)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to create new event handler")
+ return
+ }
+
+ if err := eventHandler.Start(); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to start event handler")
+ return
+ }
+ <-eventHandler.Done()
}
func (a *APIController) WSHandler(writer http.ResponseWriter, req *http.Request) {
@@ -156,49 +241,54 @@ func (a *APIController) WSHandler(writer http.ResponseWriter, req *http.Request)
if !auth.IsAdmin(ctx) {
writer.WriteHeader(http.StatusForbidden)
if _, err := writer.Write([]byte("you need admin level access to view logs")); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
return
}
if a.hub == nil {
- handleError(writer, gErrors.NewBadRequestError("log streamer is disabled"))
+ handleError(ctx, writer, gErrors.NewBadRequestError("log streamer is disabled"))
return
}
conn, err := a.upgrader.Upgrade(writer, req, nil)
if err != nil {
- log.Printf("error upgrading to websockets: %v", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "error upgrading to websockets")
return
}
+ defer conn.Close()
- // TODO (gsamfira): Handle ExpiresAt. Right now, if a client uses
- // a valid token to authenticate, and keeps the websocket connection
- // open, it will allow that client to stream logs via websockets
- // until the connection is broken. We need to forcefully disconnect
- // the client once the token expires.
- client, err := wsWriter.NewClient(conn, a.hub)
+ client, err := wsWriter.NewClient(ctx, conn)
if err != nil {
- log.Printf("failed to create new client: %v", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to create new client")
return
}
if err := a.hub.Register(client); err != nil {
- log.Printf("failed to register new client: %v", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to register new client")
return
}
- client.Go()
+ defer a.hub.Unregister(client)
+
+ if err := client.Start(); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to start client")
+ return
+ }
+ <-client.Done()
+ slog.Info("client disconnected", "client_id", client.ID())
}
// NotFoundHandler is returned when an invalid URL is acccessed
func (a *APIController) NotFoundHandler(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
apiErr := params.APIErrorResponse{
Details: "Resource not found",
Error: "Not found",
}
- w.WriteHeader(http.StatusNotFound)
+
w.Header().Set("Content-Type", "application/json")
+ w.WriteHeader(http.StatusNotFound)
if err := json.NewEncoder(w).Encode(apiErr); err != nil {
- log.Printf("failet to write response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to write response")
}
}
@@ -213,19 +303,19 @@ func (a *APIController) MetricsTokenHandler(w http.ResponseWriter, r *http.Reque
ctx := r.Context()
if !auth.IsAdmin(ctx) {
- handleError(w, gErrors.ErrUnauthorized)
+ handleError(ctx, w, gErrors.ErrUnauthorized)
return
}
token, err := a.auth.GetJWTMetricsToken(ctx)
if err != nil {
- handleError(w, err)
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
err = json.NewEncoder(w).Encode(runnerParams.JWTResponse{Token: token})
if err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
}
@@ -246,32 +336,32 @@ func (a *APIController) MetricsTokenHandler(w http.ResponseWriter, r *http.Reque
//
// LoginHandler returns a jwt token
func (a *APIController) LoginHandler(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
var loginInfo runnerParams.PasswordLoginParams
if err := json.NewDecoder(r.Body).Decode(&loginInfo); err != nil {
- handleError(w, gErrors.ErrBadRequest)
+ handleError(ctx, w, gErrors.ErrBadRequest)
return
}
if err := loginInfo.Validate(); err != nil {
- handleError(w, err)
+ handleError(ctx, w, err)
return
}
- ctx := r.Context()
ctx, err := a.auth.AuthenticateUser(ctx, loginInfo)
if err != nil {
- handleError(w, err)
+ handleError(ctx, w, err)
return
}
tokenString, err := a.auth.GetJWTToken(ctx)
if err != nil {
- handleError(w, err)
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
if err := json.NewEncoder(w).Encode(runnerParams.JWTResponse{Token: tokenString}); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
}
@@ -290,49 +380,27 @@ func (a *APIController) LoginHandler(w http.ResponseWriter, r *http.Request) {
// 200: User
// 400: APIErrorResponse
func (a *APIController) FirstRunHandler(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
if a.auth.IsInitialized() {
err := gErrors.NewConflictError("already initialized")
- handleError(w, err)
+ handleError(ctx, w, err)
return
}
- ctx := r.Context()
-
var newUserParams runnerParams.NewUserParams
if err := json.NewDecoder(r.Body).Decode(&newUserParams); err != nil {
- handleError(w, gErrors.ErrBadRequest)
+ handleError(ctx, w, gErrors.ErrBadRequest)
return
}
newUser, err := a.auth.InitController(ctx, newUserParams)
if err != nil {
- handleError(w, err)
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
if err := json.NewEncoder(w).Encode(newUser); err != nil {
- log.Printf("failed to encode response: %q", err)
- }
-}
-
-// swagger:route GET /credentials credentials ListCredentials
-//
-// List all credentials.
-//
-// Responses:
-// 200: Credentials
-// 400: APIErrorResponse
-func (a *APIController) ListCredentials(w http.ResponseWriter, r *http.Request) {
- ctx := r.Context()
- creds, err := a.r.ListCredentials(ctx)
- if err != nil {
- handleError(w, err)
- return
- }
-
- w.Header().Set("Content-Type", "application/json")
- if err := json.NewEncoder(w).Encode(creds); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
}
@@ -347,13 +415,13 @@ func (a *APIController) ListProviders(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
providers, err := a.r.ListProviders(ctx)
if err != nil {
- handleError(w, err)
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
if err := json.NewEncoder(w).Encode(providers); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
}
@@ -368,12 +436,72 @@ func (a *APIController) ListAllJobs(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
jobs, err := a.r.ListAllJobs(ctx)
if err != nil {
- handleError(w, err)
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
if err := json.NewEncoder(w).Encode(jobs); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+}
+
+// swagger:route GET /controller-info controllerInfo ControllerInfo
+//
+// Get controller info.
+//
+// Responses:
+// 200: ControllerInfo
+// 409: APIErrorResponse
+func (a *APIController) ControllerInfoHandler(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+ info, err := a.r.GetControllerInfo(ctx)
+ if err != nil {
+ handleError(ctx, w, err)
+ return
+ }
+
+ w.Header().Set("Content-Type", "application/json")
+ if err := json.NewEncoder(w).Encode(info); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+}
+
+// swagger:route PUT /controller controller UpdateController
+//
+// Update controller.
+//
+// Parameters:
+// + name: Body
+// description: Parameters used when updating the controller.
+// type: UpdateControllerParams
+// in: body
+// required: true
+//
+// Responses:
+// 200: ControllerInfo
+// 400: APIErrorResponse
+func (a *APIController) UpdateControllerHandler(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+ var updateParams runnerParams.UpdateControllerParams
+ if err := json.NewDecoder(r.Body).Decode(&updateParams); err != nil {
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+
+ if err := updateParams.Validate(); err != nil {
+ handleError(ctx, w, err)
+ return
+ }
+
+ info, err := a.r.UpdateController(ctx, updateParams)
+ if err != nil {
+ handleError(ctx, w, err)
+ return
+ }
+
+ w.Header().Set("Content-Type", "application/json")
+ if err := json.NewEncoder(w).Encode(info); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
}
diff --git a/apiserver/controllers/enterprises.go b/apiserver/controllers/enterprises.go
index 6a015df2..b4b3e528 100644
--- a/apiserver/controllers/enterprises.go
+++ b/apiserver/controllers/enterprises.go
@@ -16,14 +16,14 @@ package controllers
import (
"encoding/json"
- "log"
+ "log/slog"
"net/http"
+ "github.com/gorilla/mux"
+
gErrors "github.com/cloudbase/garm-provider-common/errors"
"github.com/cloudbase/garm/apiserver/params"
runnerParams "github.com/cloudbase/garm/params"
-
- "github.com/gorilla/mux"
)
// swagger:route POST /enterprises enterprises CreateEnterprise
@@ -45,20 +45,20 @@ func (a *APIController) CreateEnterpriseHandler(w http.ResponseWriter, r *http.R
var enterpriseData runnerParams.CreateEnterpriseParams
if err := json.NewDecoder(r.Body).Decode(&enterpriseData); err != nil {
- handleError(w, gErrors.ErrBadRequest)
+ handleError(ctx, w, gErrors.ErrBadRequest)
return
}
enterprise, err := a.r.CreateEnterprise(ctx, enterpriseData)
if err != nil {
- log.Printf("error creating enterprise: %+v", err)
- handleError(w, err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "error creating enterprise")
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
if err := json.NewEncoder(w).Encode(enterprise); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
}
@@ -66,22 +66,39 @@ func (a *APIController) CreateEnterpriseHandler(w http.ResponseWriter, r *http.R
//
// List all enterprises.
//
+// Parameters:
+// + name: name
+// description: Exact enterprise name to filter by
+// type: string
+// in: query
+// required: false
+//
+// + name: endpoint
+// description: Exact endpoint name to filter by
+// type: string
+// in: query
+// required: false
+//
// Responses:
// 200: Enterprises
// default: APIErrorResponse
func (a *APIController) ListEnterprisesHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
- enterprise, err := a.r.ListEnterprises(ctx)
+ filter := runnerParams.EnterpriseFilter{
+ Name: r.URL.Query().Get("name"),
+ Endpoint: r.URL.Query().Get("endpoint"),
+ }
+ enterprise, err := a.r.ListEnterprises(ctx, filter)
if err != nil {
- log.Printf("listing enterprise: %s", err)
- handleError(w, err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "listing enterprise")
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
if err := json.NewEncoder(w).Encode(enterprise); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
}
@@ -110,21 +127,21 @@ func (a *APIController) GetEnterpriseByIDHandler(w http.ResponseWriter, r *http.
Error: "Bad Request",
Details: "No enterprise ID specified",
}); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
return
}
enterprise, err := a.r.GetEnterpriseByID(ctx, enterpriseID)
if err != nil {
- log.Printf("fetching enterprise: %s", err)
- handleError(w, err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "fetching enterprise")
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
if err := json.NewEncoder(w).Encode(enterprise); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
}
@@ -152,20 +169,19 @@ func (a *APIController) DeleteEnterpriseHandler(w http.ResponseWriter, r *http.R
Error: "Bad Request",
Details: "No enterprise ID specified",
}); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
return
}
if err := a.r.DeleteEnterprise(ctx, enterpriseID); err != nil {
- log.Printf("removing enterprise: %+v", err)
- handleError(w, err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "removing enterprise")
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
-
}
// swagger:route PUT /enterprises/{enterpriseID} enterprises UpdateEnterprise
@@ -198,27 +214,27 @@ func (a *APIController) UpdateEnterpriseHandler(w http.ResponseWriter, r *http.R
Error: "Bad Request",
Details: "No enterprise ID specified",
}); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
return
}
var updatePayload runnerParams.UpdateEntityParams
if err := json.NewDecoder(r.Body).Decode(&updatePayload); err != nil {
- handleError(w, gErrors.ErrBadRequest)
+ handleError(ctx, w, gErrors.ErrBadRequest)
return
}
enterprise, err := a.r.UpdateEnterprise(ctx, enterpriseID, updatePayload)
if err != nil {
- log.Printf("error updating enterprise: %s", err)
- handleError(w, err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "error updating enterprise")
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
if err := json.NewEncoder(w).Encode(enterprise); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
}
@@ -253,28 +269,84 @@ func (a *APIController) CreateEnterprisePoolHandler(w http.ResponseWriter, r *ht
Error: "Bad Request",
Details: "No enterprise ID specified",
}); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
return
}
var poolData runnerParams.CreatePoolParams
if err := json.NewDecoder(r.Body).Decode(&poolData); err != nil {
- log.Printf("failed to decode: %s", err)
- handleError(w, gErrors.ErrBadRequest)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to decode")
+ handleError(ctx, w, gErrors.ErrBadRequest)
return
}
pool, err := a.r.CreateEnterprisePool(ctx, enterpriseID, poolData)
if err != nil {
- log.Printf("error creating enterprise pool: %s", err)
- handleError(w, err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "error creating enterprise pool")
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
if err := json.NewEncoder(w).Encode(pool); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+}
+
+// swagger:route POST /enterprises/{enterpriseID}/scalesets enterprises scalesets CreateEnterpriseScaleSet
+//
+// Create enterprise scale set with the parameters given.
+//
+// Parameters:
+// + name: enterpriseID
+// description: Enterprise ID.
+// type: string
+// in: path
+// required: true
+//
+// + name: Body
+// description: Parameters used when creating the enterprise scale set.
+// type: CreateScaleSetParams
+// in: body
+// required: true
+//
+// Responses:
+// 200: ScaleSet
+// default: APIErrorResponse
+func (a *APIController) CreateEnterpriseScaleSetHandler(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+
+ vars := mux.Vars(r)
+ enterpriseID, ok := vars["enterpriseID"]
+ if !ok {
+ w.WriteHeader(http.StatusBadRequest)
+ if err := json.NewEncoder(w).Encode(params.APIErrorResponse{
+ Error: "Bad Request",
+ Details: "No enterprise ID specified",
+ }); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+ return
+ }
+
+ var scaleSetData runnerParams.CreateScaleSetParams
+ if err := json.NewDecoder(r.Body).Decode(&scaleSetData); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to decode")
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+
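+ // Scale sets share a single entity-scoped backend; the enterprise is identified by its forge entity type and ID.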
+ scaleSet, err := a.r.CreateEntityScaleSet(ctx, runnerParams.ForgeEntityTypeEnterprise, enterpriseID, scaleSetData)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "error creating enterprise scale set")
+ handleError(ctx, w, err)
+ return
+ }
+
+ w.Header().Set("Content-Type", "application/json")
+ if err := json.NewEncoder(w).Encode(scaleSet); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
}
@@ -302,23 +374,64 @@ func (a *APIController) ListEnterprisePoolsHandler(w http.ResponseWriter, r *htt
Error: "Bad Request",
Details: "No enterprise ID specified",
}); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
return
}
pools, err := a.r.ListEnterprisePools(ctx, enterpriseID)
if err != nil {
- log.Printf("listing pools: %s", err)
- handleError(w, err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "listing pools")
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
if err := json.NewEncoder(w).Encode(pools); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+}
+
+// swagger:route GET /enterprises/{enterpriseID}/scalesets enterprises scalesets ListEnterpriseScaleSets
+//
+// List enterprise scale sets.
+//
+// Parameters:
+// + name: enterpriseID
+// description: Enterprise ID.
+// type: string
+// in: path
+// required: true
+//
+// Responses:
+// 200: ScaleSets
+// default: APIErrorResponse
+func (a *APIController) ListEnterpriseScaleSetsHandler(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+ vars := mux.Vars(r)
+ enterpriseID, ok := vars["enterpriseID"]
+ if !ok {
+ w.WriteHeader(http.StatusBadRequest)
+ if err := json.NewEncoder(w).Encode(params.APIErrorResponse{
+ Error: "Bad Request",
+ Details: "No enterprise ID specified",
+ }); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+ return
}
+ scaleSets, err := a.r.ListEntityScaleSets(ctx, runnerParams.ForgeEntityTypeEnterprise, enterpriseID)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "listing scale sets")
+ handleError(ctx, w, err)
+ return
+ }
+
+ w.Header().Set("Content-Type", "application/json")
+ if err := json.NewEncoder(w).Encode(scaleSets); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
}
// swagger:route GET /enterprises/{enterpriseID}/pools/{poolID} enterprises pools GetEnterprisePool
@@ -352,21 +465,21 @@ func (a *APIController) GetEnterprisePoolHandler(w http.ResponseWriter, r *http.
Error: "Bad Request",
Details: "No enterprise or pool ID specified",
}); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
return
}
pool, err := a.r.GetEnterprisePoolByID(ctx, enterpriseID, poolID)
if err != nil {
- log.Printf("listing pools: %s", err)
- handleError(w, err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "fetching pool")
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
if err := json.NewEncoder(w).Encode(pool); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
}
@@ -401,20 +514,19 @@ func (a *APIController) DeleteEnterprisePoolHandler(w http.ResponseWriter, r *ht
Error: "Bad Request",
Details: "No enterprise or pool ID specified",
}); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
return
}
if err := a.r.DeleteEnterprisePool(ctx, enterpriseID, poolID); err != nil {
- log.Printf("removing pool: %s", err)
- handleError(w, err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "removing pool")
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
-
}
// swagger:route PUT /enterprises/{enterpriseID}/pools/{poolID} enterprises pools UpdateEnterprisePool
@@ -455,27 +567,27 @@ func (a *APIController) UpdateEnterprisePoolHandler(w http.ResponseWriter, r *ht
Error: "Bad Request",
Details: "No enterprise or pool ID specified",
}); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
return
}
var poolData runnerParams.UpdatePoolParams
if err := json.NewDecoder(r.Body).Decode(&poolData); err != nil {
- log.Printf("failed to decode: %s", err)
- handleError(w, gErrors.ErrBadRequest)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to decode")
+ handleError(ctx, w, gErrors.ErrBadRequest)
return
}
pool, err := a.r.UpdateEnterprisePool(ctx, enterpriseID, poolID, poolData)
if err != nil {
- log.Printf("error creating enterprise pool: %s", err)
- handleError(w, err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "error updating enterprise pool")
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
if err := json.NewEncoder(w).Encode(pool); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
}
diff --git a/apiserver/controllers/gitea_credentials.go b/apiserver/controllers/gitea_credentials.go
new file mode 100644
index 00000000..777be982
--- /dev/null
+++ b/apiserver/controllers/gitea_credentials.go
@@ -0,0 +1,241 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+package controllers
+
+import (
+ "encoding/json"
+ "log/slog"
+ "math"
+ "net/http"
+ "strconv"
+
+ "github.com/gorilla/mux"
+
+ gErrors "github.com/cloudbase/garm-provider-common/errors"
+ "github.com/cloudbase/garm/params"
+)
+
+// swagger:route GET /gitea/credentials credentials ListGiteaCredentials
+//
+// List all Gitea credentials.
+//
+// Responses:
+// 200: Credentials
+// 400: APIErrorResponse
+func (a *APIController) ListGiteaCredentials(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+ creds, err := a.r.ListGiteaCredentials(ctx)
+ if err != nil {
+ handleError(ctx, w, err)
+ return
+ }
+
+ w.Header().Set("Content-Type", "application/json")
+ if err := json.NewEncoder(w).Encode(creds); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+}
+
+// swagger:route POST /gitea/credentials credentials CreateGiteaCredentials
+//
+// Create a Gitea credential.
+//
+// Parameters:
+// + name: Body
+// description: Parameters used when creating a Gitea credential.
+// type: CreateGiteaCredentialsParams
+// in: body
+// required: true
+//
+// Responses:
+// 200: ForgeCredentials
+// 400: APIErrorResponse
+func (a *APIController) CreateGiteaCredential(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+
+ var params params.CreateGiteaCredentialsParams
+ if err := json.NewDecoder(r.Body).Decode(&params); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to decode request")
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+
+ cred, err := a.r.CreateGiteaCredentials(ctx, params)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to create Gitea credential")
+ handleError(ctx, w, err)
+ return
+ }
+ w.Header().Set("Content-Type", "application/json")
+ if err := json.NewEncoder(w).Encode(cred); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+}
+
+// swagger:route GET /gitea/credentials/{id} credentials GetGiteaCredentials
+//
+// Get a Gitea credential.
+//
+// Parameters:
+// + name: id
+// description: ID of the Gitea credential.
+// type: integer
+// in: path
+// required: true
+//
+// Responses:
+// 200: ForgeCredentials
+// 400: APIErrorResponse
+func (a *APIController) GetGiteaCredential(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+ vars := mux.Vars(r)
+ idParam, ok := vars["id"]
+ if !ok {
+ slog.ErrorContext(ctx, "missing id in request")
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+
+ id, err := strconv.ParseUint(idParam, 10, 64)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to parse id")
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+
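+ // Guard against overflow before converting to uint on 32-bit platforms.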
+ if id > math.MaxUint {
+ slog.ErrorContext(ctx, "id is too large")
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+
+ cred, err := a.r.GetGiteaCredentials(ctx, uint(id))
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to get Gitea credential")
+ handleError(ctx, w, err)
+ return
+ }
+
+ w.Header().Set("Content-Type", "application/json")
+ if err := json.NewEncoder(w).Encode(cred); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+}
+
+// swagger:route DELETE /gitea/credentials/{id} credentials DeleteGiteaCredentials
+//
+// Delete a Gitea credential.
+//
+// Parameters:
+// + name: id
+// description: ID of the Gitea credential.
+// type: integer
+// in: path
+// required: true
+//
+// Responses:
+// default: APIErrorResponse
+func (a *APIController) DeleteGiteaCredential(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+ vars := mux.Vars(r)
+ idParam, ok := vars["id"]
+ if !ok {
+ slog.ErrorContext(ctx, "missing id in request")
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+
+ id, err := strconv.ParseUint(idParam, 10, 64)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to parse id")
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+
+ if id > math.MaxUint {
+ slog.ErrorContext(ctx, "id is too large")
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+
+ if err := a.r.DeleteGiteaCredentials(ctx, uint(id)); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to delete Gitea credential")
+ handleError(ctx, w, err)
+ return
+ }
+
+ w.WriteHeader(http.StatusNoContent)
+}
+
+// swagger:route PUT /gitea/credentials/{id} credentials UpdateGiteaCredentials
+//
+// Update a Gitea credential.
+//
+// Parameters:
+// + name: id
+// description: ID of the Gitea credential.
+// type: integer
+// in: path
+// required: true
+// + name: Body
+// description: Parameters used when updating a Gitea credential.
+// type: UpdateGiteaCredentialsParams
+// in: body
+// required: true
+//
+// Responses:
+// 200: ForgeCredentials
+// 400: APIErrorResponse
+func (a *APIController) UpdateGiteaCredential(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+ vars := mux.Vars(r)
+ idParam, ok := vars["id"]
+ if !ok {
+ slog.ErrorContext(ctx, "missing id in request")
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+
+ id, err := strconv.ParseUint(idParam, 10, 64)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to parse id")
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+
+ if id > math.MaxUint {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "id is too large")
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+
+ var params params.UpdateGiteaCredentialsParams
+ if err := json.NewDecoder(r.Body).Decode(&params); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to decode request")
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+
+ cred, err := a.r.UpdateGiteaCredentials(ctx, uint(id), params)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to update Gitea credential")
+ handleError(ctx, w, err)
+ return
+ }
+
+ w.Header().Set("Content-Type", "application/json")
+ if err := json.NewEncoder(w).Encode(cred); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+}
diff --git a/apiserver/controllers/gitea_endpoints.go b/apiserver/controllers/gitea_endpoints.go
new file mode 100644
index 00000000..67e85178
--- /dev/null
+++ b/apiserver/controllers/gitea_endpoints.go
@@ -0,0 +1,199 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+package controllers
+
+import (
+ "encoding/json"
+ "log/slog"
+ "net/http"
+
+ "github.com/gorilla/mux"
+
+ gErrors "github.com/cloudbase/garm-provider-common/errors"
+ "github.com/cloudbase/garm/params"
+)
+
+// swagger:route POST /gitea/endpoints endpoints CreateGiteaEndpoint
+//
+// Create a Gitea Endpoint.
+//
+// Parameters:
+// + name: Body
+// description: Parameters used when creating a Gitea endpoint.
+// type: CreateGiteaEndpointParams
+// in: body
+// required: true
+//
+// Responses:
+// 200: ForgeEndpoint
+// default: APIErrorResponse
+func (a *APIController) CreateGiteaEndpoint(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+
+ var params params.CreateGiteaEndpointParams
+ if err := json.NewDecoder(r.Body).Decode(&params); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to decode request")
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+
+ endpoint, err := a.r.CreateGiteaEndpoint(ctx, params)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to create Gitea endpoint")
+ handleError(ctx, w, err)
+ return
+ }
+ w.Header().Set("Content-Type", "application/json")
+ if err := json.NewEncoder(w).Encode(endpoint); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+}
+
+// swagger:route GET /gitea/endpoints endpoints ListGiteaEndpoints
+//
+// List all Gitea Endpoints.
+//
+// Responses:
+// 200: ForgeEndpoints
+// default: APIErrorResponse
+func (a *APIController) ListGiteaEndpoints(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+
+ endpoints, err := a.r.ListGiteaEndpoints(ctx)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to list Gitea endpoints")
+ handleError(ctx, w, err)
+ return
+ }
+ w.Header().Set("Content-Type", "application/json")
+ if err := json.NewEncoder(w).Encode(endpoints); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+}
+
+// swagger:route GET /gitea/endpoints/{name} endpoints GetGiteaEndpoint
+//
+// Get a Gitea Endpoint.
+//
+// Parameters:
+// + name: name
+// description: The name of the Gitea endpoint.
+// type: string
+// in: path
+// required: true
+//
+// Responses:
+// 200: ForgeEndpoint
+// default: APIErrorResponse
+func (a *APIController) GetGiteaEndpoint(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+
+ vars := mux.Vars(r)
+ name, ok := vars["name"]
+ if !ok {
+ slog.ErrorContext(ctx, "missing name in request")
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+ endpoint, err := a.r.GetGiteaEndpoint(ctx, name)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to get Gitea endpoint")
+ handleError(ctx, w, err)
+ return
+ }
+ w.Header().Set("Content-Type", "application/json")
+ if err := json.NewEncoder(w).Encode(endpoint); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+}
+
+// swagger:route DELETE /gitea/endpoints/{name} endpoints DeleteGiteaEndpoint
+//
+// Delete a Gitea Endpoint.
+//
+// Parameters:
+// + name: name
+// description: The name of the Gitea endpoint.
+// type: string
+// in: path
+// required: true
+//
+// Responses:
+// default: APIErrorResponse
+func (a *APIController) DeleteGiteaEndpoint(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+
+ vars := mux.Vars(r)
+ name, ok := vars["name"]
+ if !ok {
+ slog.ErrorContext(ctx, "missing name in request")
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+ if err := a.r.DeleteGiteaEndpoint(ctx, name); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to delete Gitea endpoint")
+ handleError(ctx, w, err)
+ return
+ }
+ w.WriteHeader(http.StatusNoContent)
+}
+
+// swagger:route PUT /gitea/endpoints/{name} endpoints UpdateGiteaEndpoint
+//
+// Update a Gitea Endpoint.
+//
+// Parameters:
+// + name: name
+// description: The name of the Gitea endpoint.
+// type: string
+// in: path
+// required: true
+// + name: Body
+// description: Parameters used when updating a Gitea endpoint.
+// type: UpdateGiteaEndpointParams
+// in: body
+// required: true
+//
+// Responses:
+// 200: ForgeEndpoint
+// default: APIErrorResponse
+func (a *APIController) UpdateGiteaEndpoint(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+
+ vars := mux.Vars(r)
+ name, ok := vars["name"]
+ if !ok {
+ slog.ErrorContext(ctx, "missing name in request")
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+
+ var params params.UpdateGiteaEndpointParams
+ if err := json.NewDecoder(r.Body).Decode(&params); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to decode request")
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+
+ endpoint, err := a.r.UpdateGiteaEndpoint(ctx, name, params)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to update Gitea endpoint")
+ handleError(ctx, w, err)
+ return
+ }
+ w.Header().Set("Content-Type", "application/json")
+ if err := json.NewEncoder(w).Encode(endpoint); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+}
diff --git a/apiserver/controllers/github_credentials.go b/apiserver/controllers/github_credentials.go
new file mode 100644
index 00000000..04e087e5
--- /dev/null
+++ b/apiserver/controllers/github_credentials.go
@@ -0,0 +1,242 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+package controllers
+
+import (
+ "encoding/json"
+ "log/slog"
+ "math"
+ "net/http"
+ "strconv"
+
+ "github.com/gorilla/mux"
+
+ gErrors "github.com/cloudbase/garm-provider-common/errors"
+ "github.com/cloudbase/garm/params"
+)
+
+// swagger:route GET /credentials credentials ListCredentials
+// swagger:route GET /github/credentials credentials ListCredentials
+//
+// List all credentials.
+//
+// Responses:
+// 200: Credentials
+// 400: APIErrorResponse
+func (a *APIController) ListCredentials(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+ creds, err := a.r.ListCredentials(ctx)
+ if err != nil {
+ handleError(ctx, w, err)
+ return
+ }
+
+ w.Header().Set("Content-Type", "application/json")
+ if err := json.NewEncoder(w).Encode(creds); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+}
+
+// swagger:route POST /github/credentials credentials CreateCredentials
+//
+// Create a GitHub credential.
+//
+// Parameters:
+// + name: Body
+// description: Parameters used when creating a GitHub credential.
+// type: CreateGithubCredentialsParams
+// in: body
+// required: true
+//
+// Responses:
+// 200: ForgeCredentials
+// 400: APIErrorResponse
+func (a *APIController) CreateGithubCredential(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+
+ var params params.CreateGithubCredentialsParams
+ if err := json.NewDecoder(r.Body).Decode(&params); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to decode request")
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+
+ cred, err := a.r.CreateGithubCredentials(ctx, params)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to create GitHub credential")
+ handleError(ctx, w, err)
+ return
+ }
+ w.Header().Set("Content-Type", "application/json")
+ if err := json.NewEncoder(w).Encode(cred); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+}
+
+// swagger:route GET /github/credentials/{id} credentials GetCredentials
+//
+// Get a GitHub credential.
+//
+// Parameters:
+// + name: id
+// description: ID of the GitHub credential.
+// type: integer
+// in: path
+// required: true
+//
+// Responses:
+// 200: ForgeCredentials
+// 400: APIErrorResponse
+func (a *APIController) GetGithubCredential(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+ vars := mux.Vars(r)
+ idParam, ok := vars["id"]
+ if !ok {
+ slog.ErrorContext(ctx, "missing id in request")
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+
+ id, err := strconv.ParseUint(idParam, 10, 64)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to parse id")
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+
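+ // Reject IDs that would overflow uint on 32-bit platforms before converting.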
+ if id > math.MaxUint {
+ slog.ErrorContext(ctx, "id is too large")
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+
+ cred, err := a.r.GetGithubCredentials(ctx, uint(id))
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to get GitHub credential")
+ handleError(ctx, w, err)
+ return
+ }
+
+ w.Header().Set("Content-Type", "application/json")
+ if err := json.NewEncoder(w).Encode(cred); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+}
+
+// swagger:route DELETE /github/credentials/{id} credentials DeleteCredentials
+//
+// Delete a GitHub credential.
+//
+// Parameters:
+// + name: id
+// description: ID of the GitHub credential.
+// type: integer
+// in: path
+// required: true
+//
+// Responses:
+// default: APIErrorResponse
+func (a *APIController) DeleteGithubCredential(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+ vars := mux.Vars(r)
+ idParam, ok := vars["id"]
+ if !ok {
+ slog.ErrorContext(ctx, "missing id in request")
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+
+ id, err := strconv.ParseUint(idParam, 10, 64)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to parse id")
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+
+ if id > math.MaxUint {
+ slog.ErrorContext(ctx, "id is too large")
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+
+ if err := a.r.DeleteGithubCredentials(ctx, uint(id)); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to delete GitHub credential")
+ handleError(ctx, w, err)
+ return
+ }
+
+ w.WriteHeader(http.StatusNoContent)
+}
+
+// swagger:route PUT /github/credentials/{id} credentials UpdateCredentials
+//
+// Update a GitHub credential.
+//
+// Parameters:
+// + name: id
+// description: ID of the GitHub credential.
+// type: integer
+// in: path
+// required: true
+// + name: Body
+// description: Parameters used when updating a GitHub credential.
+// type: UpdateGithubCredentialsParams
+// in: body
+// required: true
+//
+// Responses:
+// 200: ForgeCredentials
+// 400: APIErrorResponse
+func (a *APIController) UpdateGithubCredential(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+ vars := mux.Vars(r)
+ idParam, ok := vars["id"]
+ if !ok {
+ slog.ErrorContext(ctx, "missing id in request")
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+
+ id, err := strconv.ParseUint(idParam, 10, 64)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to parse id")
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+
+ if id > math.MaxUint {
+ slog.ErrorContext(ctx, "id is too large")
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+
+ var params params.UpdateGithubCredentialsParams
+ if err := json.NewDecoder(r.Body).Decode(¶ms); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to decode request")
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+
+ cred, err := a.r.UpdateGithubCredentials(ctx, uint(id), params)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to update GitHub credential")
+ handleError(ctx, w, err)
+ return
+ }
+
+ w.Header().Set("Content-Type", "application/json")
+ if err := json.NewEncoder(w).Encode(cred); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+}
diff --git a/apiserver/controllers/github_endpoints.go b/apiserver/controllers/github_endpoints.go
new file mode 100644
index 00000000..482f9d03
--- /dev/null
+++ b/apiserver/controllers/github_endpoints.go
@@ -0,0 +1,199 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+package controllers
+
+import (
+ "encoding/json"
+ "log/slog"
+ "net/http"
+
+ "github.com/gorilla/mux"
+
+ gErrors "github.com/cloudbase/garm-provider-common/errors"
+ "github.com/cloudbase/garm/params"
+)
+
+// swagger:route POST /github/endpoints endpoints CreateGithubEndpoint
+//
+// Create a GitHub Endpoint.
+//
+// Parameters:
+// + name: Body
+// description: Parameters used when creating a GitHub endpoint.
+// type: CreateGithubEndpointParams
+// in: body
+// required: true
+//
+// Responses:
+// 200: ForgeEndpoint
+// default: APIErrorResponse
+func (a *APIController) CreateGithubEndpoint(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+
+ var params params.CreateGithubEndpointParams
+ if err := json.NewDecoder(r.Body).Decode(&params); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to decode request")
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+
+ endpoint, err := a.r.CreateGithubEndpoint(ctx, params)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to create GitHub endpoint")
+ handleError(ctx, w, err)
+ return
+ }
+ w.Header().Set("Content-Type", "application/json")
+ if err := json.NewEncoder(w).Encode(endpoint); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+}
+
+// swagger:route GET /github/endpoints endpoints ListGithubEndpoints
+//
+// List all GitHub Endpoints.
+//
+// Responses:
+// 200: ForgeEndpoints
+// default: APIErrorResponse
+func (a *APIController) ListGithubEndpoints(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+
+ endpoints, err := a.r.ListGithubEndpoints(ctx)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to list GitHub endpoints")
+ handleError(ctx, w, err)
+ return
+ }
+ w.Header().Set("Content-Type", "application/json")
+ if err := json.NewEncoder(w).Encode(endpoints); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+}
+
+// swagger:route GET /github/endpoints/{name} endpoints GetGithubEndpoint
+//
+// Get a GitHub Endpoint.
+//
+// Parameters:
+// + name: name
+// description: The name of the GitHub endpoint.
+// type: string
+// in: path
+// required: true
+//
+// Responses:
+// 200: ForgeEndpoint
+// default: APIErrorResponse
+func (a *APIController) GetGithubEndpoint(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+
+ vars := mux.Vars(r)
+ name, ok := vars["name"]
+ if !ok {
+ slog.ErrorContext(ctx, "missing name in request")
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+ endpoint, err := a.r.GetGithubEndpoint(ctx, name)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to get GitHub endpoint")
+ handleError(ctx, w, err)
+ return
+ }
+ w.Header().Set("Content-Type", "application/json")
+ if err := json.NewEncoder(w).Encode(endpoint); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+}
+
+// swagger:route DELETE /github/endpoints/{name} endpoints DeleteGithubEndpoint
+//
+// Delete a GitHub Endpoint.
+//
+// Parameters:
+// + name: name
+// description: The name of the GitHub endpoint.
+// type: string
+// in: path
+// required: true
+//
+// Responses:
+// default: APIErrorResponse
+func (a *APIController) DeleteGithubEndpoint(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+
+ vars := mux.Vars(r)
+ name, ok := vars["name"]
+ if !ok {
+ slog.ErrorContext(ctx, "missing name in request")
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+ if err := a.r.DeleteGithubEndpoint(ctx, name); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to delete GitHub endpoint")
+ handleError(ctx, w, err)
+ return
+ }
+ w.WriteHeader(http.StatusNoContent)
+}
+
+// swagger:route PUT /github/endpoints/{name} endpoints UpdateGithubEndpoint
+//
+// Update a GitHub Endpoint.
+//
+// Parameters:
+// + name: name
+// description: The name of the GitHub endpoint.
+// type: string
+// in: path
+// required: true
+// + name: Body
+// description: Parameters used when updating a GitHub endpoint.
+// type: UpdateGithubEndpointParams
+// in: body
+// required: true
+//
+// Responses:
+// 200: ForgeEndpoint
+// default: APIErrorResponse
+func (a *APIController) UpdateGithubEndpoint(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+
+ vars := mux.Vars(r)
+ name, ok := vars["name"]
+ if !ok {
+ slog.ErrorContext(ctx, "missing name in request")
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+
+ var params params.UpdateGithubEndpointParams
+ if err := json.NewDecoder(r.Body).Decode(&params); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to decode request")
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+
+ endpoint, err := a.r.UpdateGithubEndpoint(ctx, name, params)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to update GitHub endpoint")
+ handleError(ctx, w, err)
+ return
+ }
+ w.Header().Set("Content-Type", "application/json")
+ if err := json.NewEncoder(w).Encode(endpoint); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+}
diff --git a/apiserver/controllers/instances.go b/apiserver/controllers/instances.go
index e4011b3e..3209a5c2 100644
--- a/apiserver/controllers/instances.go
+++ b/apiserver/controllers/instances.go
@@ -16,14 +16,15 @@ package controllers
import (
"encoding/json"
- "log"
+ "log/slog"
"net/http"
+ "strconv"
+
+ "github.com/gorilla/mux"
gErrors "github.com/cloudbase/garm-provider-common/errors"
"github.com/cloudbase/garm/apiserver/params"
runnerParams "github.com/cloudbase/garm/params"
-
- "github.com/gorilla/mux"
)
// swagger:route GET /pools/{poolID}/instances instances ListPoolInstances
@@ -50,21 +51,69 @@ func (a *APIController) ListPoolInstancesHandler(w http.ResponseWriter, r *http.
Error: "Bad Request",
Details: "No pool ID specified",
}); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
return
}
instances, err := a.r.ListPoolInstances(ctx, poolID)
if err != nil {
- log.Printf("listing pool instances: %s", err)
- handleError(w, err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "listing pool instances")
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
if err := json.NewEncoder(w).Encode(instances); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+}
+
+// swagger:route GET /scalesets/{scalesetID}/instances instances ListScaleSetInstances
+//
+// List runner instances in a scale set.
+//
+// Parameters:
+// + name: scalesetID
+// description: Runner scale set ID.
+// type: string
+// in: path
+// required: true
+//
+// Responses:
+// 200: Instances
+// default: APIErrorResponse
+func (a *APIController) ListScaleSetInstancesHandler(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+ vars := mux.Vars(r)
+ scalesetID, ok := vars["scalesetID"]
+ if !ok {
+ w.WriteHeader(http.StatusBadRequest)
+ if err := json.NewEncoder(w).Encode(params.APIErrorResponse{
+ Error: "Bad Request",
+ Details: "No scale set ID specified",
+ }); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+ return
+ }
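+ // Scale set IDs are numeric; parse with a 32-bit limit so the conversion to uint below cannot overflow.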
+ id, err := strconv.ParseUint(scalesetID, 10, 32)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to parse id")
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+
+ instances, err := a.r.ListScaleSetInstances(ctx, uint(id))
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "listing scale set instances")
+ handleError(ctx, w, err)
+ return
+ }
+
+ w.Header().Set("Content-Type", "application/json")
+ if err := json.NewEncoder(w).Encode(instances); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
}
@@ -92,21 +141,21 @@ func (a *APIController) GetInstanceHandler(w http.ResponseWriter, r *http.Reques
Error: "Bad Request",
Details: "No runner name specified",
}); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
return
}
instance, err := a.r.GetInstance(ctx, instanceName)
if err != nil {
- log.Printf("listing instances: %s", err)
- handleError(w, err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "fetching instance")
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
if err := json.NewEncoder(w).Encode(instance); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
}
@@ -121,8 +170,21 @@ func (a *APIController) GetInstanceHandler(w http.ResponseWriter, r *http.Reques
// in: path
// required: true
//
-// Responses:
-// default: APIErrorResponse
+// + name: forceRemove
+// description: If true, GARM will ignore any provider error when removing the runner and will continue to remove it from GitHub and the GARM database.
+// type: boolean
+// in: query
+// required: false
+//
+// + name: bypassGHUnauthorized
+// description: If true, GARM will ignore unauthorized errors returned by GitHub when removing a runner. This is useful when you need to clean up runners after your credentials have expired.
+// type: boolean
+// in: query
+// required: false
+//
+// Responses:
+// default: APIErrorResponse
func (a *APIController) DeleteInstanceHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
vars := mux.Vars(r)
@@ -133,14 +195,16 @@ func (a *APIController) DeleteInstanceHandler(w http.ResponseWriter, r *http.Req
Error: "Bad Request",
Details: "No instance name specified",
}); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
return
}
- if err := a.r.ForceDeleteRunner(ctx, instanceName); err != nil {
- log.Printf("removing runner: %s", err)
- handleError(w, err)
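+ // Both query flags are optional; ParseBool errors are deliberately ignored so missing or malformed values default to false.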
+ forceRemove, _ := strconv.ParseBool(r.URL.Query().Get("forceRemove"))
+ bypassGHUnauthorized, _ := strconv.ParseBool(r.URL.Query().Get("bypassGHUnauthorized"))
+ if err := a.r.DeleteRunner(ctx, instanceName, forceRemove, bypassGHUnauthorized); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "removing runner")
+ handleError(ctx, w, err)
return
}
@@ -172,21 +236,21 @@ func (a *APIController) ListRepoInstancesHandler(w http.ResponseWriter, r *http.
Error: "Bad Request",
Details: "No repo ID specified",
}); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
return
}
instances, err := a.r.ListRepoInstances(ctx, repoID)
if err != nil {
- log.Printf("listing pools: %s", err)
- handleError(w, err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "listing instances")
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
if err := json.NewEncoder(w).Encode(instances); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
}
@@ -214,21 +278,21 @@ func (a *APIController) ListOrgInstancesHandler(w http.ResponseWriter, r *http.R
Error: "Bad Request",
Details: "No org ID specified",
}); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
return
}
instances, err := a.r.ListOrgInstances(ctx, orgID)
if err != nil {
- log.Printf("listing instances: %s", err)
- handleError(w, err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "listing instances")
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
if err := json.NewEncoder(w).Encode(instances); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
}
@@ -256,21 +320,21 @@ func (a *APIController) ListEnterpriseInstancesHandler(w http.ResponseWriter, r
Error: "Bad Request",
Details: "No enterprise ID specified",
}); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
return
}
instances, err := a.r.ListEnterpriseInstances(ctx, enterpriseID)
if err != nil {
- log.Printf("listing instances: %s", err)
- handleError(w, err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "listing instances")
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
if err := json.NewEncoder(w).Encode(instances); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
}
@@ -286,14 +350,14 @@ func (a *APIController) ListAllInstancesHandler(w http.ResponseWriter, r *http.R
instances, err := a.r.ListAllInstances(ctx)
if err != nil {
- log.Printf("listing instances: %s", err)
- handleError(w, err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "listing instances")
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
if err := json.NewEncoder(w).Encode(instances); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
}
@@ -302,14 +366,14 @@ func (a *APIController) InstanceStatusMessageHandler(w http.ResponseWriter, r *h
var updateMessage runnerParams.InstanceUpdateMessage
if err := json.NewDecoder(r.Body).Decode(&updateMessage); err != nil {
- log.Printf("failed to decode: %s", err)
- handleError(w, gErrors.ErrBadRequest)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to decode")
+ handleError(ctx, w, gErrors.ErrBadRequest)
return
}
if err := a.r.AddInstanceStatusMessage(ctx, updateMessage); err != nil {
- log.Printf("error saving status message: %s", err)
- handleError(w, err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "error saving status message")
+ handleError(ctx, w, err)
return
}
@@ -317,18 +381,22 @@ func (a *APIController) InstanceStatusMessageHandler(w http.ResponseWriter, r *h
w.WriteHeader(http.StatusOK)
}
-func (a *APIController) InstanceGithubRegistrationTokenHandler(w http.ResponseWriter, r *http.Request) {
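+// InstanceSystemInfoHandler decodes system info updates reported by a runner and persists them through the runner backend.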
+func (a *APIController) InstanceSystemInfoHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
- token, err := a.r.GetInstanceGithubRegistrationToken(ctx)
- if err != nil {
- handleError(w, err)
+ var updateMessage runnerParams.UpdateSystemInfoParams
+ if err := json.NewDecoder(r.Body).Decode(&updateMessage); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to decode")
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+
+ if err := a.r.UpdateSystemInfo(ctx, updateMessage); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "error updating system info")
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
- if _, err := w.Write([]byte(token)); err != nil {
- log.Printf("failed to encode response: %q", err)
- }
}
diff --git a/apiserver/controllers/metadata.go b/apiserver/controllers/metadata.go
new file mode 100644
index 00000000..4b112b17
--- /dev/null
+++ b/apiserver/controllers/metadata.go
@@ -0,0 +1,125 @@
+// Copyright 2023 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package controllers
+
+import (
+ "encoding/json"
+ "fmt"
+ "log/slog"
+ "net/http"
+
+ "github.com/gorilla/mux"
+
+ "github.com/cloudbase/garm/apiserver/params"
+)
+
+func (a *APIController) InstanceGithubRegistrationTokenHandler(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+
+ token, err := a.r.GetInstanceGithubRegistrationToken(ctx)
+ if err != nil {
+ handleError(ctx, w, err)
+ return
+ }
+
+ w.Header().Set("Content-Type", "application/json")
+ w.WriteHeader(http.StatusOK)
+ if _, err := w.Write([]byte(token)); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+}
+
+func (a *APIController) JITCredentialsFileHandler(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+ vars := mux.Vars(r)
+ fileName, ok := vars["fileName"]
+ if !ok {
+ w.WriteHeader(http.StatusNotFound)
+ if err := json.NewEncoder(w).Encode(params.APIErrorResponse{
+ Error: "Not Found",
+ Details: "Not Found",
+ }); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+ return
+ }
+
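+ // Stored JIT config files use a leading dot that the request path omits.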
+ dotFileName := fmt.Sprintf(".%s", fileName)
+
+ data, err := a.r.GetJITConfigFile(ctx, dotFileName)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "getting JIT config file")
+ handleError(ctx, w, err)
+ return
+ }
+
+ // Note the leading dot in the filename
+ name := fmt.Sprintf("attachment; filename=%s", dotFileName)
+ w.Header().Set("Content-Disposition", name)
+ w.Header().Set("Content-Type", "application/octet-stream")
+ w.Header().Set("Content-Length", fmt.Sprintf("%d", len(data)))
+ w.WriteHeader(http.StatusOK)
+ if _, err := w.Write(data); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+}
+
+func (a *APIController) SystemdServiceNameHandler(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+
+ serviceName, err := a.r.GetRunnerServiceName(ctx)
+ if err != nil {
+ handleError(ctx, w, err)
+ return
+ }
+
+ w.Header().Set("Content-Type", "text/plain")
+ w.WriteHeader(http.StatusOK)
+ if _, err := w.Write([]byte(serviceName)); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+}
+
+func (a *APIController) SystemdUnitFileHandler(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+ runAsUser := r.URL.Query().Get("runAsUser")
+
+ data, err := a.r.GenerateSystemdUnitFile(ctx, runAsUser)
+ if err != nil {
+ handleError(ctx, w, err)
+ return
+ }
+
+ w.Header().Set("Content-Type", "text/plain")
+ w.WriteHeader(http.StatusOK)
+ if _, err := w.Write(data); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+}
+
+func (a *APIController) RootCertificateBundleHandler(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+
+ bundle, err := a.r.GetRootCertificateBundle(ctx)
+ if err != nil {
+ handleError(ctx, w, err)
+ return
+ }
+
+ w.Header().Set("Content-Type", "application/json")
+ if err := json.NewEncoder(w).Encode(bundle); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+}
diff --git a/apiserver/controllers/organizations.go b/apiserver/controllers/organizations.go
index d03d1de4..9089f440 100644
--- a/apiserver/controllers/organizations.go
+++ b/apiserver/controllers/organizations.go
@@ -16,14 +16,15 @@ package controllers
import (
"encoding/json"
- "log"
+ "log/slog"
"net/http"
+ "strconv"
+
+ "github.com/gorilla/mux"
gErrors "github.com/cloudbase/garm-provider-common/errors"
"github.com/cloudbase/garm/apiserver/params"
runnerParams "github.com/cloudbase/garm/params"
-
- "github.com/gorilla/mux"
)
// swagger:route POST /organizations organizations CreateOrg
@@ -43,22 +44,22 @@ import (
func (a *APIController) CreateOrgHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
- var repoData runnerParams.CreateOrgParams
- if err := json.NewDecoder(r.Body).Decode(&repoData); err != nil {
- handleError(w, gErrors.ErrBadRequest)
+ var orgData runnerParams.CreateOrgParams
+ if err := json.NewDecoder(r.Body).Decode(&orgData); err != nil {
+ handleError(ctx, w, gErrors.ErrBadRequest)
return
}
- repo, err := a.r.CreateOrganization(ctx, repoData)
+ org, err := a.r.CreateOrganization(ctx, orgData)
if err != nil {
- log.Printf("error creating repository: %+v", err)
- handleError(w, err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "error creating organization")
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
- if err := json.NewEncoder(w).Encode(repo); err != nil {
- log.Printf("failed to encode response: %q", err)
+ if err := json.NewEncoder(w).Encode(org); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
}
@@ -66,22 +67,39 @@ func (a *APIController) CreateOrgHandler(w http.ResponseWriter, r *http.Request)
//
// List organizations.
//
+// Parameters:
+// + name: name
+// description: Exact organization name to filter by
+// type: string
+// in: query
+// required: false
+//
+// + name: endpoint
+// description: Exact endpoint name to filter by
+// type: string
+// in: query
+// required: false
+//
// Responses:
// 200: Organizations
// default: APIErrorResponse
func (a *APIController) ListOrgsHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
- orgs, err := a.r.ListOrganizations(ctx)
+ filter := runnerParams.OrganizationFilter{
+ Name: r.URL.Query().Get("name"),
+ Endpoint: r.URL.Query().Get("endpoint"),
+ }
+ orgs, err := a.r.ListOrganizations(ctx, filter)
if err != nil {
- log.Printf("listing orgs: %s", err)
- handleError(w, err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "listing orgs")
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
if err := json.NewEncoder(w).Encode(orgs); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
}
@@ -110,21 +128,21 @@ func (a *APIController) GetOrgByIDHandler(w http.ResponseWriter, r *http.Request
Error: "Bad Request",
Details: "No org ID specified",
}); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
return
}
org, err := a.r.GetOrganizationByID(ctx, orgID)
if err != nil {
- log.Printf("fetching org: %s", err)
- handleError(w, err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "fetching org")
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
if err := json.NewEncoder(w).Encode(org); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
}
@@ -139,6 +157,12 @@ func (a *APIController) GetOrgByIDHandler(w http.ResponseWriter, r *http.Request
// in: path
// required: true
//
+// + name: keepWebhook
+// description: If true and a webhook is installed for this organization, it will not be removed.
+// type: boolean
+// in: query
+// required: false
+//
// Responses:
// default: APIErrorResponse
func (a *APIController) DeleteOrgHandler(w http.ResponseWriter, r *http.Request) {
@@ -152,20 +176,21 @@ func (a *APIController) DeleteOrgHandler(w http.ResponseWriter, r *http.Request)
Error: "Bad Request",
Details: "No org ID specified",
}); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
return
}
- if err := a.r.DeleteOrganization(ctx, orgID); err != nil {
- log.Printf("removing org: %+v", err)
- handleError(w, err)
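+ // keepWebhook is optional; when absent or unparsable it defaults to false and any installed webhook is removed.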
+ keepWebhook, _ := strconv.ParseBool(r.URL.Query().Get("keepWebhook"))
+
+ if err := a.r.DeleteOrganization(ctx, orgID, keepWebhook); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "removing org")
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
-
}
// swagger:route PUT /organizations/{orgID} organizations UpdateOrg
@@ -199,27 +224,27 @@ func (a *APIController) UpdateOrgHandler(w http.ResponseWriter, r *http.Request)
Error: "Bad Request",
Details: "No org ID specified",
}); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
return
}
var updatePayload runnerParams.UpdateEntityParams
if err := json.NewDecoder(r.Body).Decode(&updatePayload); err != nil {
- handleError(w, gErrors.ErrBadRequest)
+ handleError(ctx, w, gErrors.ErrBadRequest)
return
}
org, err := a.r.UpdateOrganization(ctx, orgID, updatePayload)
if err != nil {
- log.Printf("error updating organization: %s", err)
- handleError(w, err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "error updating organization")
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
if err := json.NewEncoder(w).Encode(org); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
}
@@ -254,28 +279,84 @@ func (a *APIController) CreateOrgPoolHandler(w http.ResponseWriter, r *http.Requ
Error: "Bad Request",
Details: "No org ID specified",
}); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
return
}
var poolData runnerParams.CreatePoolParams
if err := json.NewDecoder(r.Body).Decode(&poolData); err != nil {
- log.Printf("failed to decode: %s", err)
- handleError(w, gErrors.ErrBadRequest)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to decode")
+ handleError(ctx, w, gErrors.ErrBadRequest)
return
}
pool, err := a.r.CreateOrgPool(ctx, orgID, poolData)
if err != nil {
- log.Printf("error creating organization pool: %s", err)
- handleError(w, err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "error creating organization pool")
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
if err := json.NewEncoder(w).Encode(pool); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+}
+
+// swagger:route POST /organizations/{orgID}/scalesets organizations scalesets CreateOrgScaleSet
+//
+// Create organization scale set with the parameters given.
+//
+// Parameters:
+// + name: orgID
+// description: Organization ID.
+// type: string
+// in: path
+// required: true
+//
+// + name: Body
+// description: Parameters used when creating the organization scale set.
+// type: CreateScaleSetParams
+// in: body
+// required: true
+//
+// Responses:
+// 200: ScaleSet
+// default: APIErrorResponse
+func (a *APIController) CreateOrgScaleSetHandler(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+
+ vars := mux.Vars(r)
+ orgID, ok := vars["orgID"]
+ if !ok {
+ w.WriteHeader(http.StatusBadRequest)
+ if err := json.NewEncoder(w).Encode(params.APIErrorResponse{
+ Error: "Bad Request",
+ Details: "No org ID specified",
+ }); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+ return
+ }
+
+ var scalesetData runnerParams.CreateScaleSetParams
+ if err := json.NewDecoder(r.Body).Decode(&scalesetData); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to decode")
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+
+ scaleSet, err := a.r.CreateEntityScaleSet(ctx, runnerParams.ForgeEntityTypeOrganization, orgID, scalesetData)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "error creating organization scale set")
+ handleError(ctx, w, err)
+ return
+ }
+
+ w.Header().Set("Content-Type", "application/json")
+ if err := json.NewEncoder(w).Encode(scaleSet); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
}
@@ -303,21 +384,63 @@ func (a *APIController) ListOrgPoolsHandler(w http.ResponseWriter, r *http.Reque
Error: "Bad Request",
Details: "No org ID specified",
}); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
return
}
pools, err := a.r.ListOrgPools(ctx, orgID)
if err != nil {
- log.Printf("listing pools: %s", err)
- handleError(w, err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "listing pools")
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
if err := json.NewEncoder(w).Encode(pools); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+}
+
+// swagger:route GET /organizations/{orgID}/scalesets organizations scalesets ListOrgScaleSets
+//
+// List organization scale sets.
+//
+// Parameters:
+// + name: orgID
+// description: Organization ID.
+// type: string
+// in: path
+// required: true
+//
+// Responses:
+// 200: ScaleSets
+// default: APIErrorResponse
+func (a *APIController) ListOrgScaleSetsHandler(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+ vars := mux.Vars(r)
+ orgID, ok := vars["orgID"]
+ if !ok {
+ w.WriteHeader(http.StatusBadRequest)
+ if err := json.NewEncoder(w).Encode(params.APIErrorResponse{
+ Error: "Bad Request",
+ Details: "No org ID specified",
+ }); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+ return
+ }
+
+ scaleSets, err := a.r.ListEntityScaleSets(ctx, runnerParams.ForgeEntityTypeOrganization, orgID)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "listing scale sets")
+ handleError(ctx, w, err)
+ return
+ }
+
+ w.Header().Set("Content-Type", "application/json")
+ if err := json.NewEncoder(w).Encode(scaleSets); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
}
@@ -344,29 +467,29 @@ func (a *APIController) ListOrgPoolsHandler(w http.ResponseWriter, r *http.Reque
func (a *APIController) GetOrgPoolHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
vars := mux.Vars(r)
- orgID, repoOk := vars["orgID"]
+ orgID, orgOk := vars["orgID"]
poolID, poolOk := vars["poolID"]
- if !repoOk || !poolOk {
+ if !orgOk || !poolOk {
w.WriteHeader(http.StatusBadRequest)
if err := json.NewEncoder(w).Encode(params.APIErrorResponse{
Error: "Bad Request",
Details: "No org or pool ID specified",
}); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
return
}
pool, err := a.r.GetOrgPoolByID(ctx, orgID, poolID)
if err != nil {
- log.Printf("listing pools: %s", err)
- handleError(w, err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "listing pools")
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
if err := json.NewEncoder(w).Encode(pool); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
}
@@ -401,20 +524,19 @@ func (a *APIController) DeleteOrgPoolHandler(w http.ResponseWriter, r *http.Requ
Error: "Bad Request",
Details: "No org or pool ID specified",
}); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
return
}
if err := a.r.DeleteOrgPool(ctx, orgID, poolID); err != nil {
- log.Printf("removing pool: %s", err)
- handleError(w, err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "removing pool")
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
-
}
// swagger:route PUT /organizations/{orgID}/pools/{poolID} organizations pools UpdateOrgPool
@@ -455,27 +577,166 @@ func (a *APIController) UpdateOrgPoolHandler(w http.ResponseWriter, r *http.Requ
Error: "Bad Request",
Details: "No org or pool ID specified",
}); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
return
}
var poolData runnerParams.UpdatePoolParams
if err := json.NewDecoder(r.Body).Decode(&poolData); err != nil {
- log.Printf("failed to decode: %s", err)
- handleError(w, gErrors.ErrBadRequest)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to decode")
+ handleError(ctx, w, gErrors.ErrBadRequest)
return
}
pool, err := a.r.UpdateOrgPool(ctx, orgID, poolID, poolData)
if err != nil {
- log.Printf("error creating organization pool: %s", err)
- handleError(w, err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "error creating organization pool")
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
if err := json.NewEncoder(w).Encode(pool); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+}
+
+// swagger:route POST /organizations/{orgID}/webhook organizations hooks InstallOrgWebhook
+//
+// Install the GARM webhook for an organization. The secret configured on the organization will
+// be used to validate the requests.
+//
+// Parameters:
+// + name: orgID
+// description: Organization ID.
+// type: string
+// in: path
+// required: true
+//
+// + name: Body
+// description: Parameters used when creating the organization webhook.
+// type: InstallWebhookParams
+// in: body
+// required: true
+//
+// Responses:
+// 200: HookInfo
+// default: APIErrorResponse
+func (a *APIController) InstallOrgWebhookHandler(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+
+ vars := mux.Vars(r)
+ orgID, orgOk := vars["orgID"]
+ if !orgOk {
+ w.WriteHeader(http.StatusBadRequest)
+ if err := json.NewEncoder(w).Encode(params.APIErrorResponse{
+ Error: "Bad Request",
+ Details: "No org ID specified",
+ }); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+ return
+ }
+
+ var hookParam runnerParams.InstallWebhookParams
+ if err := json.NewDecoder(r.Body).Decode(&hookParam); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to decode")
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+
+ info, err := a.r.InstallOrgWebhook(ctx, orgID, hookParam)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "installing webhook")
+ handleError(ctx, w, err)
+ return
+ }
+
+ w.Header().Set("Content-Type", "application/json")
+ if err := json.NewEncoder(w).Encode(info); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+}
+
+// swagger:route DELETE /organizations/{orgID}/webhook organizations hooks UninstallOrgWebhook
+//
+// Uninstall organization webhook.
+//
+// Parameters:
+// + name: orgID
+// description: Organization ID.
+// type: string
+// in: path
+// required: true
+//
+// Responses:
+// default: APIErrorResponse
+func (a *APIController) UninstallOrgWebhookHandler(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+
+ vars := mux.Vars(r)
+ orgID, orgOk := vars["orgID"]
+ if !orgOk {
+ w.WriteHeader(http.StatusBadRequest)
+ if err := json.NewEncoder(w).Encode(params.APIErrorResponse{
+ Error: "Bad Request",
+ Details: "No org ID specified",
+ }); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+ return
+ }
+
+ if err := a.r.UninstallOrgWebhook(ctx, orgID); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "removing webhook")
+ handleError(ctx, w, err)
+ return
+ }
+
+ w.Header().Set("Content-Type", "application/json")
+ w.WriteHeader(http.StatusOK)
+}
+
+// swagger:route GET /organizations/{orgID}/webhook organizations hooks GetOrgWebhookInfo
+//
+// Get information about the GARM installed webhook on an organization.
+//
+// Parameters:
+// + name: orgID
+// description: Organization ID.
+// type: string
+// in: path
+// required: true
+//
+// Responses:
+// 200: HookInfo
+// default: APIErrorResponse
+func (a *APIController) GetOrgWebhookInfoHandler(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+
+ vars := mux.Vars(r)
+ orgID, orgOk := vars["orgID"]
+ if !orgOk {
+ w.WriteHeader(http.StatusBadRequest)
+ if err := json.NewEncoder(w).Encode(params.APIErrorResponse{
+ Error: "Bad Request",
+ Details: "No org ID specified",
+ }); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+ return
+ }
+
+ info, err := a.r.GetOrgWebhookInfo(ctx, orgID)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "getting webhook info")
+ handleError(ctx, w, err)
+ return
+ }
+
+ w.Header().Set("Content-Type", "application/json")
+ if err := json.NewEncoder(w).Encode(info); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
}
diff --git a/apiserver/controllers/pools.go b/apiserver/controllers/pools.go
index 34403759..901be588 100644
--- a/apiserver/controllers/pools.go
+++ b/apiserver/controllers/pools.go
@@ -16,14 +16,14 @@ package controllers
import (
"encoding/json"
- "log"
+ "log/slog"
"net/http"
+ "github.com/gorilla/mux"
+
gErrors "github.com/cloudbase/garm-provider-common/errors"
"github.com/cloudbase/garm/apiserver/params"
runnerParams "github.com/cloudbase/garm/params"
-
- "github.com/gorilla/mux"
)
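
The log.Printf to slog migration in these files follows one pattern throughout: the error travels as a structured attribute, the message stays a constant string, and the request context is propagated. A standalone sketch of that pattern (the message text is illustrative):

```go
package main

import (
	"context"
	"errors"
	"log/slog"
	"os"
)

func main() {
	// A JSON handler makes the structured "error" attribute visible in the output.
	slog.SetDefault(slog.New(slog.NewJSONHandler(os.Stderr, nil)))

	ctx := context.Background()
	err := errors.New("boom")

	// Same shape as the handlers above: attribute first, constant message,
	// context-aware emit so handler middleware can pick up request metadata.
	slog.With(slog.Any("error", err)).ErrorContext(ctx, "listing pools")
}
```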
// swagger:route GET /pools pools ListPools
@@ -37,16 +37,15 @@ func (a *APIController) ListAllPoolsHandler(w http.ResponseWriter, r *http.Reque
ctx := r.Context()
pools, err := a.r.ListAllPools(ctx)
-
if err != nil {
- log.Printf("listing pools: %s", err)
- handleError(w, err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "listing pools")
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
if err := json.NewEncoder(w).Encode(pools); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
}
@@ -75,15 +74,15 @@ func (a *APIController) GetPoolByIDHandler(w http.ResponseWriter, r *http.Reques
Error: "Bad Request",
Details: "No pool ID specified",
}); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
return
}
pool, err := a.r.GetPoolByID(ctx, poolID)
if err != nil {
- log.Printf("fetching pool: %s", err)
- handleError(w, err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "fetching pool")
+ handleError(ctx, w, err)
return
}
@@ -91,7 +90,7 @@ func (a *APIController) GetPoolByIDHandler(w http.ResponseWriter, r *http.Reques
w.Header().Set("Content-Type", "application/json")
if err := json.NewEncoder(w).Encode(pool); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
}
@@ -119,14 +118,14 @@ func (a *APIController) DeletePoolByIDHandler(w http.ResponseWriter, r *http.Req
Error: "Bad Request",
Details: "No pool ID specified",
}); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
return
}
if err := a.r.DeletePoolByID(ctx, poolID); err != nil {
- log.Printf("removing pool: %s", err)
- handleError(w, err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "removing pool")
+ handleError(ctx, w, err)
return
}
@@ -165,27 +164,27 @@ func (a *APIController) UpdatePoolByIDHandler(w http.ResponseWriter, r *http.Req
Error: "Bad Request",
Details: "No pool ID specified",
}); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
return
}
var poolData runnerParams.UpdatePoolParams
if err := json.NewDecoder(r.Body).Decode(&poolData); err != nil {
- log.Printf("failed to decode: %s", err)
- handleError(w, gErrors.ErrBadRequest)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to decode")
+ handleError(ctx, w, gErrors.ErrBadRequest)
return
}
pool, err := a.r.UpdatePoolByID(ctx, poolID, poolData)
if err != nil {
- log.Printf("fetching pool: %s", err)
- handleError(w, err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "fetching pool")
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
if err := json.NewEncoder(w).Encode(pool); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
}
diff --git a/apiserver/controllers/repositories.go b/apiserver/controllers/repositories.go
index 9aae826f..f3675790 100644
--- a/apiserver/controllers/repositories.go
+++ b/apiserver/controllers/repositories.go
@@ -16,14 +16,15 @@ package controllers
import (
"encoding/json"
- "log"
+ "log/slog"
"net/http"
+ "strconv"
+
+ "github.com/gorilla/mux"
gErrors "github.com/cloudbase/garm-provider-common/errors"
"github.com/cloudbase/garm/apiserver/params"
runnerParams "github.com/cloudbase/garm/params"
-
- "github.com/gorilla/mux"
)
// swagger:route POST /repositories repositories CreateRepo
@@ -45,20 +46,20 @@ func (a *APIController) CreateRepoHandler(w http.ResponseWriter, r *http.Request
var repoData runnerParams.CreateRepoParams
if err := json.NewDecoder(r.Body).Decode(&repoData); err != nil {
- handleError(w, gErrors.ErrBadRequest)
+ handleError(ctx, w, gErrors.ErrBadRequest)
return
}
repo, err := a.r.CreateRepository(ctx, repoData)
if err != nil {
- log.Printf("error creating repository: %s", err)
- handleError(w, err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "error creating repository")
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
if err := json.NewEncoder(w).Encode(repo); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
}
@@ -66,22 +67,46 @@ func (a *APIController) CreateRepoHandler(w http.ResponseWriter, r *http.Request
//
// List repositories.
//
+// Parameters:
+// + name: owner
+// description: Exact owner name to filter by
+// type: string
+// in: query
+// required: false
+//
+// + name: name
+// description: Exact repository name to filter by
+// type: string
+// in: query
+// required: false
+//
+// + name: endpoint
+// description: Exact endpoint name to filter by
+// type: string
+// in: query
+// required: false
+//
// Responses:
// 200: Repositories
// default: APIErrorResponse
func (a *APIController) ListReposHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
- repos, err := a.r.ListRepositories(ctx)
+ filter := runnerParams.RepositoryFilter{
+ Name: r.URL.Query().Get("name"),
+ Owner: r.URL.Query().Get("owner"),
+ Endpoint: r.URL.Query().Get("endpoint"),
+ }
+ repos, err := a.r.ListRepositories(ctx, filter)
if err != nil {
- log.Printf("listing repos: %s", err)
- handleError(w, err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "listing repositories")
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
if err := json.NewEncoder(w).Encode(repos); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
}
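
A hedged sketch of passing the new repository filters from a client. The parameter names (owner, name, endpoint) come from the handler above; the base URL and bearer auth header are assumptions.

```go
package main

import (
	"fmt"
	"net/http"
	"net/url"
)

func main() {
	// Assumption: placeholder GARM API base URL.
	u, err := url.Parse("http://127.0.0.1:9997/api/v1/repositories")
	if err != nil {
		panic(err)
	}

	// Exact-match filters consumed by ListReposHandler.
	q := u.Query()
	q.Set("owner", "cloudbase")
	q.Set("name", "garm")
	q.Set("endpoint", "github.com")
	u.RawQuery = q.Encode()

	req, err := http.NewRequest(http.MethodGet, u.String(), nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "Bearer REDACTED") // assumption: bearer auth

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```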
@@ -110,21 +135,21 @@ func (a *APIController) GetRepoByIDHandler(w http.ResponseWriter, r *http.Reques
Error: "Bad Request",
Details: "No repo ID specified",
}); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
return
}
repo, err := a.r.GetRepositoryByID(ctx, repoID)
if err != nil {
- log.Printf("fetching repo: %s", err)
- handleError(w, err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "fetching repository")
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
if err := json.NewEncoder(w).Encode(repo); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
}
@@ -139,6 +164,12 @@ func (a *APIController) GetRepoByIDHandler(w http.ResponseWriter, r *http.Reques
// in: path
// required: true
//
+// + name: keepWebhook
+// description: If true and a webhook is installed for this repo, it will not be removed.
+// type: boolean
+// in: query
+// required: false
+//
// Responses:
// default: APIErrorResponse
func (a *APIController) DeleteRepoHandler(w http.ResponseWriter, r *http.Request) {
@@ -152,20 +183,20 @@ func (a *APIController) DeleteRepoHandler(w http.ResponseWriter, r *http.Request
Error: "Bad Request",
Details: "No repo ID specified",
}); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
return
}
- if err := a.r.DeleteRepository(ctx, repoID); err != nil {
- log.Printf("fetching repo: %s", err)
- handleError(w, err)
+ keepWebhook, _ := strconv.ParseBool(r.URL.Query().Get("keepWebhook"))
+ if err := a.r.DeleteRepository(ctx, repoID, keepWebhook); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "fetching repository")
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
-
}
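
The keepWebhook flag is parsed with strconv.ParseBool and the error is deliberately discarded, so anything unparsable falls back to the zero value, false. A small sketch of that behavior:

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	for _, raw := range []string{"true", "1", "T", "yes", ""} {
		// Mirrors the handler: a parse error leaves keepWebhook at false.
		keepWebhook, _ := strconv.ParseBool(raw)
		fmt.Printf("%q -> %v\n", raw, keepWebhook)
	}
	// "true", "1" and "T" parse as true; "yes" and "" fall back to false.
}
```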
// swagger:route PUT /repositories/{repoID} repositories UpdateRepo
@@ -199,27 +230,27 @@ func (a *APIController) UpdateRepoHandler(w http.ResponseWriter, r *http.Request
Error: "Bad Request",
Details: "No repo ID specified",
}); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
return
}
var updatePayload runnerParams.UpdateEntityParams
if err := json.NewDecoder(r.Body).Decode(&updatePayload); err != nil {
- handleError(w, gErrors.ErrBadRequest)
+ handleError(ctx, w, gErrors.ErrBadRequest)
return
}
repo, err := a.r.UpdateRepository(ctx, repoID, updatePayload)
if err != nil {
- log.Printf("error updating repository: %s", err)
- handleError(w, err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "error updating repository")
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
if err := json.NewEncoder(w).Encode(repo); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
}
@@ -254,28 +285,84 @@ func (a *APIController) CreateRepoPoolHandler(w http.ResponseWriter, r *http.Req
Error: "Bad Request",
Details: "No repo ID specified",
}); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
return
}
var poolData runnerParams.CreatePoolParams
if err := json.NewDecoder(r.Body).Decode(&poolData); err != nil {
- log.Printf("failed to decode: %s", err)
- handleError(w, gErrors.ErrBadRequest)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to decode")
+ handleError(ctx, w, gErrors.ErrBadRequest)
return
}
pool, err := a.r.CreateRepoPool(ctx, repoID, poolData)
if err != nil {
- log.Printf("error creating repository pool: %s", err)
- handleError(w, err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "error creating repository pool")
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
if err := json.NewEncoder(w).Encode(pool); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+}
+
+// swagger:route POST /repositories/{repoID}/scalesets repositories scalesets CreateRepoScaleSet
+//
+// Create repository scale set with the parameters given.
+//
+// Parameters:
+// + name: repoID
+// description: Repository ID.
+// type: string
+// in: path
+// required: true
+//
+// + name: Body
+// description: Parameters used when creating the repository scale set.
+// type: CreateScaleSetParams
+// in: body
+// required: true
+//
+// Responses:
+// 200: ScaleSet
+// default: APIErrorResponse
+func (a *APIController) CreateRepoScaleSetHandler(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+
+ vars := mux.Vars(r)
+ repoID, ok := vars["repoID"]
+ if !ok {
+ w.WriteHeader(http.StatusBadRequest)
+ if err := json.NewEncoder(w).Encode(params.APIErrorResponse{
+ Error: "Bad Request",
+ Details: "No repo ID specified",
+ }); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+ return
+ }
+
+ var scaleSetData runnerParams.CreateScaleSetParams
+ if err := json.NewDecoder(r.Body).Decode(&scaleSetData); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to decode")
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+
+ scaleSet, err := a.r.CreateEntityScaleSet(ctx, runnerParams.ForgeEntityTypeRepository, repoID, scaleSetData)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "error creating repository scale set")
+ handleError(ctx, w, err)
+ return
+ }
+
+ w.Header().Set("Content-Type", "application/json")
+ if err := json.NewEncoder(w).Encode(scaleSet); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
}
@@ -303,21 +390,63 @@ func (a *APIController) ListRepoPoolsHandler(w http.ResponseWriter, r *http.Requ
Error: "Bad Request",
Details: "No repo ID specified",
}); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
return
}
pools, err := a.r.ListRepoPools(ctx, repoID)
if err != nil {
- log.Printf("listing pools: %s", err)
- handleError(w, err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "listing pools")
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
if err := json.NewEncoder(w).Encode(pools); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+}
+
+// swagger:route GET /repositories/{repoID}/scalesets repositories scalesets ListRepoScaleSets
+//
+// List repository scale sets.
+//
+// Parameters:
+// + name: repoID
+// description: Repository ID.
+// type: string
+// in: path
+// required: true
+//
+// Responses:
+// 200: ScaleSets
+// default: APIErrorResponse
+func (a *APIController) ListRepoScaleSetsHandler(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+ vars := mux.Vars(r)
+ repoID, ok := vars["repoID"]
+ if !ok {
+ w.WriteHeader(http.StatusBadRequest)
+ if err := json.NewEncoder(w).Encode(params.APIErrorResponse{
+ Error: "Bad Request",
+ Details: "No repo ID specified",
+ }); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+ return
+ }
+
+ scaleSets, err := a.r.ListEntityScaleSets(ctx, runnerParams.ForgeEntityTypeRepository, repoID)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "listing scale sets")
+ handleError(ctx, w, err)
+ return
+ }
+
+ w.Header().Set("Content-Type", "application/json")
+ if err := json.NewEncoder(w).Encode(scaleSets); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
}
@@ -352,21 +481,21 @@ func (a *APIController) GetRepoPoolHandler(w http.ResponseWriter, r *http.Reques
Error: "Bad Request",
Details: "No repo or pool ID specified",
}); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
return
}
pool, err := a.r.GetRepoPoolByID(ctx, repoID, poolID)
if err != nil {
- log.Printf("listing pools: %s", err)
- handleError(w, err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "listing pools")
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
if err := json.NewEncoder(w).Encode(pool); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
}
@@ -401,20 +530,19 @@ func (a *APIController) DeleteRepoPoolHandler(w http.ResponseWriter, r *http.Req
Error: "Bad Request",
Details: "No repo or pool ID specified",
}); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
return
}
if err := a.r.DeleteRepoPool(ctx, repoID, poolID); err != nil {
- log.Printf("removing pool: %s", err)
- handleError(w, err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "removing pool")
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
-
}
// swagger:route PUT /repositories/{repoID}/pools/{poolID} repositories pools UpdateRepoPool
@@ -455,27 +583,166 @@ func (a *APIController) UpdateRepoPoolHandler(w http.ResponseWriter, r *http.Req
Error: "Bad Request",
Details: "No repo or pool ID specified",
}); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
return
}
var poolData runnerParams.UpdatePoolParams
if err := json.NewDecoder(r.Body).Decode(&poolData); err != nil {
- log.Printf("failed to decode: %s", err)
- handleError(w, gErrors.ErrBadRequest)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to decode")
+ handleError(ctx, w, gErrors.ErrBadRequest)
return
}
pool, err := a.r.UpdateRepoPool(ctx, repoID, poolID, poolData)
if err != nil {
- log.Printf("error creating repository pool: %s", err)
- handleError(w, err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "error creating repository pool")
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
if err := json.NewEncoder(w).Encode(pool); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+}
+
+// swagger:route POST /repositories/{repoID}/webhook repositories hooks InstallRepoWebhook
+//
+// Install the GARM webhook for a repository. The secret configured on the repository will
+// be used to validate the requests.
+//
+// Parameters:
+// + name: repoID
+// description: Repository ID.
+// type: string
+// in: path
+// required: true
+//
+// + name: Body
+// description: Parameters used when creating the repository webhook.
+// type: InstallWebhookParams
+// in: body
+// required: true
+//
+// Responses:
+// 200: HookInfo
+// default: APIErrorResponse
+func (a *APIController) InstallRepoWebhookHandler(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+
+ vars := mux.Vars(r)
+ repoID, orgOk := vars["repoID"]
+ if !orgOk {
+ w.WriteHeader(http.StatusBadRequest)
+ if err := json.NewEncoder(w).Encode(params.APIErrorResponse{
+ Error: "Bad Request",
+ Details: "No repository ID specified",
+ }); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+ return
+ }
+
+ var hookParam runnerParams.InstallWebhookParams
+ if err := json.NewDecoder(r.Body).Decode(&hookParam); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to decode")
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+
+ info, err := a.r.InstallRepoWebhook(ctx, repoID, hookParam)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "installing webhook")
+ handleError(ctx, w, err)
+ return
+ }
+
+ w.Header().Set("Content-Type", "application/json")
+ if err := json.NewEncoder(w).Encode(info); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+}
+
+// swagger:route DELETE /repositories/{repoID}/webhook repositories hooks UninstallRepoWebhook
+//
+// Uninstall repository webhook.
+//
+// Parameters:
+// + name: repoID
+// description: Repository ID.
+// type: string
+// in: path
+// required: true
+//
+// Responses:
+// default: APIErrorResponse
+func (a *APIController) UninstallRepoWebhookHandler(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+
+ vars := mux.Vars(r)
+ repoID, orgOk := vars["repoID"]
+ if !orgOk {
+ w.WriteHeader(http.StatusBadRequest)
+ if err := json.NewEncoder(w).Encode(params.APIErrorResponse{
+ Error: "Bad Request",
+ Details: "No repository ID specified",
+ }); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+ return
+ }
+
+ if err := a.r.UninstallRepoWebhook(ctx, repoID); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "removing webhook")
+ handleError(ctx, w, err)
+ return
+ }
+
+ w.Header().Set("Content-Type", "application/json")
+ w.WriteHeader(http.StatusOK)
+}
+
+// swagger:route GET /repositories/{repoID}/webhook repositories hooks GetRepoWebhookInfo
+//
+// Get information about the GARM installed webhook on a repository.
+//
+// Parameters:
+// + name: repoID
+// description: Repository ID.
+// type: string
+// in: path
+// required: true
+//
+// Responses:
+// 200: HookInfo
+// default: APIErrorResponse
+func (a *APIController) GetRepoWebhookInfoHandler(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+
+ vars := mux.Vars(r)
+ repoID, orgOk := vars["repoID"]
+ if !orgOk {
+ w.WriteHeader(http.StatusBadRequest)
+ if err := json.NewEncoder(w).Encode(params.APIErrorResponse{
+ Error: "Bad Request",
+ Details: "No repository ID specified",
+ }); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+ return
+ }
+
+ info, err := a.r.GetRepoWebhookInfo(ctx, repoID)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "getting webhook info")
+ handleError(ctx, w, err)
+ return
+ }
+
+ w.Header().Set("Content-Type", "application/json")
+ if err := json.NewEncoder(w).Encode(info); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
}
diff --git a/apiserver/controllers/scalesets.go b/apiserver/controllers/scalesets.go
new file mode 100644
index 00000000..1d26221b
--- /dev/null
+++ b/apiserver/controllers/scalesets.go
@@ -0,0 +1,211 @@
+// Copyright 2022 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package controllers
+
+import (
+ "encoding/json"
+ "log/slog"
+ "net/http"
+ "strconv"
+
+ "github.com/gorilla/mux"
+
+ gErrors "github.com/cloudbase/garm-provider-common/errors"
+ "github.com/cloudbase/garm/apiserver/params"
+ runnerParams "github.com/cloudbase/garm/params"
+)
+
+// swagger:route GET /scalesets scalesets ListScalesets
+//
+// List all scale sets.
+//
+// Responses:
+// 200: ScaleSets
+// default: APIErrorResponse
+func (a *APIController) ListAllScaleSetsHandler(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+
+ scalesets, err := a.r.ListAllScaleSets(ctx)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "listing scale sets")
+ handleError(ctx, w, err)
+ return
+ }
+
+ w.Header().Set("Content-Type", "application/json")
+ if err := json.NewEncoder(w).Encode(scalesets); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+}
+
+// swagger:route GET /scalesets/{scalesetID} scalesets GetScaleSet
+//
+// Get scale set by ID.
+//
+// Parameters:
+// + name: scalesetID
+// description: ID of the scale set to fetch.
+// type: string
+// in: path
+// required: true
+//
+// Responses:
+// 200: ScaleSet
+// default: APIErrorResponse
+func (a *APIController) GetScaleSetByIDHandler(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+
+ vars := mux.Vars(r)
+ scaleSetID, ok := vars["scalesetID"]
+ if !ok {
+ w.WriteHeader(http.StatusBadRequest)
+ if err := json.NewEncoder(w).Encode(params.APIErrorResponse{
+ Error: "Bad Request",
+ Details: "No scale set ID specified",
+ }); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+ return
+ }
+ id, err := strconv.ParseUint(scaleSetID, 10, 32)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to parse id")
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+
+ scaleSet, err := a.r.GetScaleSetByID(ctx, uint(id))
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "fetching scale set")
+ handleError(ctx, w, err)
+ return
+ }
+
+ scaleSet.RunnerBootstrapTimeout = scaleSet.RunnerTimeout()
+
+ w.Header().Set("Content-Type", "application/json")
+ if err := json.NewEncoder(w).Encode(scaleSet); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+}
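
Scale-set IDs arrive as path strings and are converted with strconv.ParseUint using a 32-bit size limit, matching the uint the store layer expects. A quick sketch of what that accepts and rejects:

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	for _, raw := range []string{"42", "-1", "abc", "4294967296"} {
		id, err := strconv.ParseUint(raw, 10, 32)
		if err != nil {
			// In the handler this path becomes a 400 via gErrors.ErrBadRequest.
			fmt.Printf("%q -> bad request (%v)\n", raw, err)
			continue
		}
		fmt.Printf("%q -> %d\n", raw, uint(id))
	}
	// "42" parses; "-1" and "abc" are invalid; "4294967296" overflows 32 bits.
}
```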
+
+// swagger:route DELETE /scalesets/{scalesetID} scalesets DeleteScaleSet
+//
+// Delete scale set by ID.
+//
+// Parameters:
+// + name: scalesetID
+// description: ID of the scale set to delete.
+// type: string
+// in: path
+// required: true
+//
+// Responses:
+// default: APIErrorResponse
+func (a *APIController) DeleteScaleSetByIDHandler(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+
+ vars := mux.Vars(r)
+ scalesetID, ok := vars["scalesetID"]
+ if !ok {
+ w.WriteHeader(http.StatusBadRequest)
+ if err := json.NewEncoder(w).Encode(params.APIErrorResponse{
+ Error: "Bad Request",
+ Details: "No scale set ID specified",
+ }); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+ return
+ }
+
+ id, err := strconv.ParseUint(scalesetID, 10, 32)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to parse id")
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+
+ if err := a.r.DeleteScaleSetByID(ctx, uint(id)); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "removing scale set")
+ handleError(ctx, w, err)
+ return
+ }
+
+ w.Header().Set("Content-Type", "application/json")
+ w.WriteHeader(http.StatusOK)
+}
+
+// swagger:route PUT /scalesets/{scalesetID} scalesets UpdateScaleSet
+//
+// Update scale set by ID.
+//
+// Parameters:
+// + name: scalesetID
+// description: ID of the scale set to update.
+// type: string
+// in: path
+// required: true
+//
+// + name: Body
+// description: Parameters to update the scale set with.
+// type: UpdateScaleSetParams
+// in: body
+// required: true
+//
+// Responses:
+// 200: ScaleSet
+// default: APIErrorResponse
+func (a *APIController) UpdateScaleSetByIDHandler(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+
+ vars := mux.Vars(r)
+ scalesetID, ok := vars["scalesetID"]
+ if !ok {
+ w.WriteHeader(http.StatusBadRequest)
+ if err := json.NewEncoder(w).Encode(params.APIErrorResponse{
+ Error: "Bad Request",
+ Details: "No scale set ID specified",
+ }); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+ return
+ }
+
+ id, err := strconv.ParseUint(scalesetID, 10, 32)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to parse id")
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+
+ var scaleSetData runnerParams.UpdateScaleSetParams
+ if err := json.NewDecoder(r.Body).Decode(&scaleSetData); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to decode")
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+
+ scaleSet, err := a.r.UpdateScaleSetByID(ctx, uint(id), scaleSetData)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "updating scale set")
+ handleError(ctx, w, err)
+ return
+ }
+
+ w.Header().Set("Content-Type", "application/json")
+ if err := json.NewEncoder(w).Encode(scaleSet); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+}
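
The routes feeding these handlers are registered both with and without a trailing slash, and the ID is recovered via mux.Vars. A self-contained sketch of that matching, assuming only gorilla/mux and httptest:

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	"github.com/gorilla/mux"
)

func main() {
	router := mux.NewRouter()
	handler := func(w http.ResponseWriter, r *http.Request) {
		// Same extraction the handlers above perform.
		vars := mux.Vars(r)
		fmt.Fprintf(w, "scaleset %s", vars["scalesetID"])
	}
	// Registered twice, as in routers.go, so both path forms match.
	router.HandleFunc("/scalesets/{scalesetID}/", handler).Methods("GET")
	router.HandleFunc("/scalesets/{scalesetID}", handler).Methods("GET")

	rec := httptest.NewRecorder()
	router.ServeHTTP(rec, httptest.NewRequest(http.MethodGet, "/scalesets/42", nil))
	fmt.Println(rec.Body.String()) // scaleset 42
}
```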
diff --git a/apiserver/params/params.go b/apiserver/params/params.go
index 23283a07..ec42fab6 100644
--- a/apiserver/params/params.go
+++ b/apiserver/params/params.go
@@ -14,6 +14,7 @@
package params
+// swagger:model APIErrorResponse
// APIErrorResponse holds information about an error, returned by the API
type APIErrorResponse struct {
Error string `json:"error"`
@@ -36,4 +37,9 @@ var (
Error: "init_required",
Details: "Missing superuser",
}
+ // URLsRequired is returned if the controller does not have the required URLs
+ URLsRequired = APIErrorResponse{
+ Error: "urls_required",
+ Details: "Missing required URLs. Make sure you update the metadata and callback URLs",
+ }
)
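
For reference, the wire shape of URLsRequired as the handlers encode it. The struct is redeclared locally for the sketch; the json tag on Details is an assumption inferred from the error tag shown above.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Local copy for illustration; the real type lives in apiserver/params.
// The "details" tag is assumed, mirroring the "error" tag in the diff.
type APIErrorResponse struct {
	Error   string `json:"error"`
	Details string `json:"details"`
}

func main() {
	out, err := json.Marshal(APIErrorResponse{
		Error:   "urls_required",
		Details: "Missing required URLs. Make sure you update the metadata and callback URLs",
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
	// {"error":"urls_required","details":"Missing required URLs. ..."}
}
```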
diff --git a/apiserver/routers/routers.go b/apiserver/routers/routers.go
index 55e42684..ff241165 100644
--- a/apiserver/routers/routers.go
+++ b/apiserver/routers/routers.go
@@ -40,23 +40,25 @@
// swagger:meta
package routers
-//go:generate go run github.com/go-swagger/go-swagger/cmd/swagger@v0.30.5 generate spec --input=../swagger-models.yaml --output=../swagger.yaml --include="routers|controllers"
-//go:generate go run github.com/go-swagger/go-swagger/cmd/swagger@v0.30.5 validate ../swagger.yaml
+//go:generate go run github.com/go-swagger/go-swagger/cmd/swagger@v0.31.0 generate spec --input=../swagger-models.yaml --output=../swagger.yaml --include="routers|controllers"
+//go:generate go run github.com/go-swagger/go-swagger/cmd/swagger@v0.31.0 validate ../swagger.yaml
//go:generate rm -rf ../../client
-//go:generate go run github.com/go-swagger/go-swagger/cmd/swagger@v0.30.5 generate client --target=../../ --spec=../swagger.yaml
+//go:generate go run github.com/go-swagger/go-swagger/cmd/swagger@v0.31.0 generate client --target=../../ --spec=../swagger.yaml
import (
_ "expvar" // Register the expvar handlers
- "io"
+ "log/slog"
"net/http"
- _ "net/http/pprof" // Register the pprof handlers
+ _ "net/http/pprof" //nolint:golangci-lint,gosec // Register the pprof handlers
+ "github.com/felixge/httpsnoop"
"github.com/gorilla/mux"
"github.com/prometheus/client_golang/prometheus/promhttp"
- "github.com/cloudbase/garm-provider-common/util"
"github.com/cloudbase/garm/apiserver/controllers"
"github.com/cloudbase/garm/auth"
+ "github.com/cloudbase/garm/config"
+ spaAssets "github.com/cloudbase/garm/webapp/assets"
)
func WithMetricsRouter(parentRouter *mux.Router, disableAuth bool, metricsMiddlerware auth.Middleware) *mux.Router {
@@ -82,15 +84,58 @@ func WithDebugServer(parentRouter *mux.Router) *mux.Router {
return parentRouter
}
-func NewAPIRouter(han *controllers.APIController, logWriter io.Writer, authMiddleware, initMiddleware, instanceMiddleware auth.Middleware) *mux.Router {
+func WithWebUI(parentRouter *mux.Router, apiConfig config.APIServer) *mux.Router {
+ if parentRouter == nil {
+ return nil
+ }
+
+ if apiConfig.WebUI.EnableWebUI {
+ slog.Info("WebUI is enabled, adding webapp routes")
+ webappPath := apiConfig.WebUI.GetWebappPath()
+ slog.Info("Using webapp path", "path", webappPath)
+ // Accessing / should redirect to the UI
+ parentRouter.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
+ http.Redirect(w, r, webappPath, http.StatusMovedPermanently) // 301
+ })
+ // Serve the SPA with dynamic path
+ parentRouter.PathPrefix(webappPath).HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ spaAssets.ServeSPAWithPath(w, r, webappPath)
+ }).Methods("GET")
+ } else {
+ slog.Info("WebUI is disabled, skipping webapp routes")
+ }
+
+ return parentRouter
+}
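
When the WebUI is enabled, the bare / path issues a 301 to the configured webapp path. A minimal sketch of that redirect behavior; the path below is an example only, the real value comes from apiConfig.WebUI.GetWebappPath().

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

func main() {
	webappPath := "/ui/" // assumption: illustrative path

	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		http.Redirect(w, r, webappPath, http.StatusMovedPermanently) // 301
	})

	rec := httptest.NewRecorder()
	mux.ServeHTTP(rec, httptest.NewRequest(http.MethodGet, "/", nil))
	fmt.Println(rec.Code, rec.Header().Get("Location")) // 301 /ui/
}
```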
+
+func requestLogger(h http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ // gathers metrics from the upstream handlers
+ metrics := httpsnoop.CaptureMetrics(h, w, r)
+
+ slog.Info(
+ "access_log",
+ slog.String("method", r.Method),
+ slog.String("uri", r.URL.RequestURI()),
+ slog.String("user_agent", r.Header.Get("User-Agent")),
+ slog.String("ip", r.RemoteAddr),
+ slog.Int("code", metrics.Code),
+ slog.Int64("bytes", metrics.Written),
+ slog.Duration("request_time", metrics.Duration),
+ )
+ })
+}
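
requestLogger replaces the old io.Writer-based logging middleware with httpsnoop, which wraps the ResponseWriter to capture status code, bytes written and latency without the upstream handler knowing. A standalone sketch of the same idea:

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	"github.com/felixge/httpsnoop"
)

func main() {
	upstream := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusTeapot)
		fmt.Fprint(w, "short and stout")
	})

	logged := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// CaptureMetrics runs the upstream handler and records what it did.
		m := httpsnoop.CaptureMetrics(upstream, w, r)
		fmt.Printf("code=%d bytes=%d duration=%s\n", m.Code, m.Written, m.Duration)
	})

	rec := httptest.NewRecorder()
	logged.ServeHTTP(rec, httptest.NewRequest(http.MethodGet, "/", nil))
}
```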
+
+func NewAPIRouter(han *controllers.APIController, authMiddleware, initMiddleware, urlsRequiredMiddleware, instanceMiddleware auth.Middleware, manageWebhooks bool) *mux.Router {
router := mux.NewRouter()
- logMiddleware := util.NewLoggingMiddleware(logWriter)
- router.Use(logMiddleware)
+ router.Use(requestLogger)
// Handles github webhooks
webhookRouter := router.PathPrefix("/webhooks").Subrouter()
- webhookRouter.PathPrefix("/").Handler(http.HandlerFunc(han.CatchAll))
- webhookRouter.PathPrefix("").Handler(http.HandlerFunc(han.CatchAll))
+ webhookRouter.Handle("/", http.HandlerFunc(han.WebhookHandler))
+ webhookRouter.Handle("", http.HandlerFunc(han.WebhookHandler))
+ webhookRouter.Handle("/{controllerID}/", http.HandlerFunc(han.WebhookHandler))
+ webhookRouter.Handle("/{controllerID}", http.HandlerFunc(han.WebhookHandler))
// Handles API calls
apiSubRouter := router.PathPrefix("/api/v1").Subrouter()
@@ -104,20 +149,66 @@ func NewAPIRouter(han *controllers.APIController, logWriter io.Writer, authMiddl
callbackRouter := apiSubRouter.PathPrefix("/callbacks").Subrouter()
callbackRouter.Handle("/status/", http.HandlerFunc(han.InstanceStatusMessageHandler)).Methods("POST", "OPTIONS")
callbackRouter.Handle("/status", http.HandlerFunc(han.InstanceStatusMessageHandler)).Methods("POST", "OPTIONS")
+ callbackRouter.Handle("/system-info/", http.HandlerFunc(han.InstanceSystemInfoHandler)).Methods("POST", "OPTIONS")
+ callbackRouter.Handle("/system-info", http.HandlerFunc(han.InstanceSystemInfoHandler)).Methods("POST", "OPTIONS")
callbackRouter.Use(instanceMiddleware.Middleware)
+ ///////////////////
+ // Metadata URLs //
+ ///////////////////
metadataRouter := apiSubRouter.PathPrefix("/metadata").Subrouter()
+ metadataRouter.Use(instanceMiddleware.Middleware)
+
+ // Registration token
metadataRouter.Handle("/runner-registration-token/", http.HandlerFunc(han.InstanceGithubRegistrationTokenHandler)).Methods("GET", "OPTIONS")
metadataRouter.Handle("/runner-registration-token", http.HandlerFunc(han.InstanceGithubRegistrationTokenHandler)).Methods("GET", "OPTIONS")
- metadataRouter.Use(instanceMiddleware.Middleware)
+ // JIT credential files
+ metadataRouter.Handle("/credentials/{fileName}/", http.HandlerFunc(han.JITCredentialsFileHandler)).Methods("GET", "OPTIONS")
+ metadataRouter.Handle("/credentials/{fileName}", http.HandlerFunc(han.JITCredentialsFileHandler)).Methods("GET", "OPTIONS")
+ // Systemd files
+ metadataRouter.Handle("/system/service-name/", http.HandlerFunc(han.SystemdServiceNameHandler)).Methods("GET", "OPTIONS")
+ metadataRouter.Handle("/system/service-name", http.HandlerFunc(han.SystemdServiceNameHandler)).Methods("GET", "OPTIONS")
+ metadataRouter.Handle("/systemd/unit-file/", http.HandlerFunc(han.SystemdUnitFileHandler)).Methods("GET", "OPTIONS")
+ metadataRouter.Handle("/systemd/unit-file", http.HandlerFunc(han.SystemdUnitFileHandler)).Methods("GET", "OPTIONS")
+ metadataRouter.Handle("/system/cert-bundle/", http.HandlerFunc(han.RootCertificateBundleHandler)).Methods("GET", "OPTIONS")
+ metadataRouter.Handle("/system/cert-bundle", http.HandlerFunc(han.RootCertificateBundleHandler)).Methods("GET", "OPTIONS")
+
// Login
authRouter := apiSubRouter.PathPrefix("/auth").Subrouter()
authRouter.Handle("/{login:login\\/?}", http.HandlerFunc(han.LoginHandler)).Methods("POST", "OPTIONS")
authRouter.Use(initMiddleware.Middleware)
+ //////////////////////////
+ // Controller endpoints //
+ //////////////////////////
+ controllerRouter := apiSubRouter.PathPrefix("/controller").Subrouter()
+ // The controller endpoints allow us to get information about the controller and update the URL endpoints.
+ // These endpoints must not be guarded by the urlsRequiredMiddleware, as that would prevent the user
+ // from updating the URLs.
+ controllerRouter.Use(initMiddleware.Middleware)
+ controllerRouter.Use(authMiddleware.Middleware)
+ controllerRouter.Use(auth.AdminRequiredMiddleware)
+ // Get controller info
+ controllerRouter.Handle("/", http.HandlerFunc(han.ControllerInfoHandler)).Methods("GET", "OPTIONS")
+ controllerRouter.Handle("", http.HandlerFunc(han.ControllerInfoHandler)).Methods("GET", "OPTIONS")
+ // Update controller
+ controllerRouter.Handle("/", http.HandlerFunc(han.UpdateControllerHandler)).Methods("PUT", "OPTIONS")
+ controllerRouter.Handle("", http.HandlerFunc(han.UpdateControllerHandler)).Methods("PUT", "OPTIONS")
+
+ ////////////////////////////////////
+ // API router for everything else //
+ ////////////////////////////////////
apiRouter := apiSubRouter.PathPrefix("").Subrouter()
apiRouter.Use(initMiddleware.Middleware)
+ // All endpoints, except the controller endpoints, should return an error
+ // if the required metadata, callback and webhook URLs are not set.
+ apiRouter.Use(urlsRequiredMiddleware.Middleware)
apiRouter.Use(authMiddleware.Middleware)
+ apiRouter.Use(auth.AdminRequiredMiddleware)
+
+ // Legacy controller path
+ apiRouter.Handle("/controller-info/", http.HandlerFunc(han.ControllerInfoHandler)).Methods("GET", "OPTIONS")
+ apiRouter.Handle("/controller-info", http.HandlerFunc(han.ControllerInfoHandler)).Methods("GET", "OPTIONS")
// Metrics Token
apiRouter.Handle("/metrics-token/", http.HandlerFunc(han.MetricsTokenHandler)).Methods("GET", "OPTIONS")
@@ -149,6 +240,25 @@ func NewAPIRouter(han *controllers.APIController, logWriter io.Writer, authMiddl
apiRouter.Handle("/pools/{poolID}/instances/", http.HandlerFunc(han.ListPoolInstancesHandler)).Methods("GET", "OPTIONS")
apiRouter.Handle("/pools/{poolID}/instances", http.HandlerFunc(han.ListPoolInstancesHandler)).Methods("GET", "OPTIONS")
+ ////////////////
+ // Scale sets //
+ ////////////////
+ // List all scale sets
+ apiRouter.Handle("/scalesets/", http.HandlerFunc(han.ListAllScaleSetsHandler)).Methods("GET", "OPTIONS")
+ apiRouter.Handle("/scalesets", http.HandlerFunc(han.ListAllScaleSetsHandler)).Methods("GET", "OPTIONS")
+ // Get one scale set
+ apiRouter.Handle("/scalesets/{scalesetID}/", http.HandlerFunc(han.GetScaleSetByIDHandler)).Methods("GET", "OPTIONS")
+ apiRouter.Handle("/scalesets/{scalesetID}", http.HandlerFunc(han.GetScaleSetByIDHandler)).Methods("GET", "OPTIONS")
+ // Delete one scale set
+ apiRouter.Handle("/scalesets/{scalesetID}/", http.HandlerFunc(han.DeleteScaleSetByIDHandler)).Methods("DELETE", "OPTIONS")
+ apiRouter.Handle("/scalesets/{scalesetID}", http.HandlerFunc(han.DeleteScaleSetByIDHandler)).Methods("DELETE", "OPTIONS")
+ // Update one scale set
+ apiRouter.Handle("/scalesets/{scalesetID}/", http.HandlerFunc(han.UpdateScaleSetByIDHandler)).Methods("PUT", "OPTIONS")
+ apiRouter.Handle("/scalesets/{scalesetID}", http.HandlerFunc(han.UpdateScaleSetByIDHandler)).Methods("PUT", "OPTIONS")
+ // List scale set instances
+ apiRouter.Handle("/scalesets/{scalesetID}/instances/", http.HandlerFunc(han.ListScaleSetInstancesHandler)).Methods("GET", "OPTIONS")
+ apiRouter.Handle("/scalesets/{scalesetID}/instances", http.HandlerFunc(han.ListScaleSetInstancesHandler)).Methods("GET", "OPTIONS")
+
/////////////
// Runners //
/////////////
@@ -181,6 +291,14 @@ func NewAPIRouter(han *controllers.APIController, logWriter io.Writer, authMiddl
apiRouter.Handle("/repositories/{repoID}/pools/", http.HandlerFunc(han.CreateRepoPoolHandler)).Methods("POST", "OPTIONS")
apiRouter.Handle("/repositories/{repoID}/pools", http.HandlerFunc(han.CreateRepoPoolHandler)).Methods("POST", "OPTIONS")
+ // Create scale set
+ apiRouter.Handle("/repositories/{repoID}/scalesets/", http.HandlerFunc(han.CreateRepoScaleSetHandler)).Methods("POST", "OPTIONS")
+ apiRouter.Handle("/repositories/{repoID}/scalesets", http.HandlerFunc(han.CreateRepoScaleSetHandler)).Methods("POST", "OPTIONS")
+
+ // List scale sets
+ apiRouter.Handle("/repositories/{repoID}/scalesets/", http.HandlerFunc(han.ListRepoScaleSetsHandler)).Methods("GET", "OPTIONS")
+ apiRouter.Handle("/repositories/{repoID}/scalesets", http.HandlerFunc(han.ListRepoScaleSetsHandler)).Methods("GET", "OPTIONS")
+
// Repo instances list
apiRouter.Handle("/repositories/{repoID}/instances/", http.HandlerFunc(han.ListRepoInstancesHandler)).Methods("GET", "OPTIONS")
apiRouter.Handle("/repositories/{repoID}/instances", http.HandlerFunc(han.ListRepoInstancesHandler)).Methods("GET", "OPTIONS")
@@ -201,6 +319,17 @@ func NewAPIRouter(han *controllers.APIController, logWriter io.Writer, authMiddl
apiRouter.Handle("/repositories/", http.HandlerFunc(han.CreateRepoHandler)).Methods("POST", "OPTIONS")
apiRouter.Handle("/repositories", http.HandlerFunc(han.CreateRepoHandler)).Methods("POST", "OPTIONS")
+ if manageWebhooks {
+ // Install Webhook
+ apiRouter.Handle("/repositories/{repoID}/webhook/", http.HandlerFunc(han.InstallRepoWebhookHandler)).Methods("POST", "OPTIONS")
+ apiRouter.Handle("/repositories/{repoID}/webhook", http.HandlerFunc(han.InstallRepoWebhookHandler)).Methods("POST", "OPTIONS")
+ // Uninstall Webhook
+ apiRouter.Handle("/repositories/{repoID}/webhook/", http.HandlerFunc(han.UninstallRepoWebhookHandler)).Methods("DELETE", "OPTIONS")
+ apiRouter.Handle("/repositories/{repoID}/webhook", http.HandlerFunc(han.UninstallRepoWebhookHandler)).Methods("DELETE", "OPTIONS")
+ // Get webhook info
+ apiRouter.Handle("/repositories/{repoID}/webhook/", http.HandlerFunc(han.GetRepoWebhookInfoHandler)).Methods("GET", "OPTIONS")
+ apiRouter.Handle("/repositories/{repoID}/webhook", http.HandlerFunc(han.GetRepoWebhookInfoHandler)).Methods("GET", "OPTIONS")
+ }
/////////////////////////////
// Organizations and pools //
/////////////////////////////
@@ -220,6 +349,14 @@ func NewAPIRouter(han *controllers.APIController, logWriter io.Writer, authMiddl
apiRouter.Handle("/organizations/{orgID}/pools/", http.HandlerFunc(han.CreateOrgPoolHandler)).Methods("POST", "OPTIONS")
apiRouter.Handle("/organizations/{orgID}/pools", http.HandlerFunc(han.CreateOrgPoolHandler)).Methods("POST", "OPTIONS")
+ // Create org scale set
+ apiRouter.Handle("/organizations/{orgID}/scalesets/", http.HandlerFunc(han.CreateOrgScaleSetHandler)).Methods("POST", "OPTIONS")
+ apiRouter.Handle("/organizations/{orgID}/scalesets", http.HandlerFunc(han.CreateOrgScaleSetHandler)).Methods("POST", "OPTIONS")
+
+ // List org scale sets
+ apiRouter.Handle("/organizations/{orgID}/scalesets/", http.HandlerFunc(han.ListOrgScaleSetsHandler)).Methods("GET", "OPTIONS")
+ apiRouter.Handle("/organizations/{orgID}/scalesets", http.HandlerFunc(han.ListOrgScaleSetsHandler)).Methods("GET", "OPTIONS")
+
// Org instances list
apiRouter.Handle("/organizations/{orgID}/instances/", http.HandlerFunc(han.ListOrgInstancesHandler)).Methods("GET", "OPTIONS")
apiRouter.Handle("/organizations/{orgID}/instances", http.HandlerFunc(han.ListOrgInstancesHandler)).Methods("GET", "OPTIONS")
@@ -240,6 +377,17 @@ func NewAPIRouter(han *controllers.APIController, logWriter io.Writer, authMiddl
apiRouter.Handle("/organizations/", http.HandlerFunc(han.CreateOrgHandler)).Methods("POST", "OPTIONS")
apiRouter.Handle("/organizations", http.HandlerFunc(han.CreateOrgHandler)).Methods("POST", "OPTIONS")
+ if manageWebhooks {
+ // Install Webhook
+ apiRouter.Handle("/organizations/{orgID}/webhook/", http.HandlerFunc(han.InstallOrgWebhookHandler)).Methods("POST", "OPTIONS")
+ apiRouter.Handle("/organizations/{orgID}/webhook", http.HandlerFunc(han.InstallOrgWebhookHandler)).Methods("POST", "OPTIONS")
+ // Uninstall Webhook
+ apiRouter.Handle("/organizations/{orgID}/webhook/", http.HandlerFunc(han.UninstallOrgWebhookHandler)).Methods("DELETE", "OPTIONS")
+ apiRouter.Handle("/organizations/{orgID}/webhook", http.HandlerFunc(han.UninstallOrgWebhookHandler)).Methods("DELETE", "OPTIONS")
+ // Get webhook info
+ apiRouter.Handle("/organizations/{orgID}/webhook/", http.HandlerFunc(han.GetOrgWebhookInfoHandler)).Methods("GET", "OPTIONS")
+ apiRouter.Handle("/organizations/{orgID}/webhook", http.HandlerFunc(han.GetOrgWebhookInfoHandler)).Methods("GET", "OPTIONS")
+ }
/////////////////////////////
// Enterprises and pools //
/////////////////////////////
@@ -259,6 +407,14 @@ func NewAPIRouter(han *controllers.APIController, logWriter io.Writer, authMiddl
apiRouter.Handle("/enterprises/{enterpriseID}/pools/", http.HandlerFunc(han.CreateEnterprisePoolHandler)).Methods("POST", "OPTIONS")
apiRouter.Handle("/enterprises/{enterpriseID}/pools", http.HandlerFunc(han.CreateEnterprisePoolHandler)).Methods("POST", "OPTIONS")
+ // Create enterprise scale sets
+ apiRouter.Handle("/enterprises/{enterpriseID}/scalesets/", http.HandlerFunc(han.CreateEnterpriseScaleSetHandler)).Methods("POST", "OPTIONS")
+ apiRouter.Handle("/enterprises/{enterpriseID}/scalesets", http.HandlerFunc(han.CreateEnterpriseScaleSetHandler)).Methods("POST", "OPTIONS")
+
+ // List enterprise scale sets
+ apiRouter.Handle("/enterprises/{enterpriseID}/scalesets/", http.HandlerFunc(han.ListEnterpriseScaleSetsHandler)).Methods("GET", "OPTIONS")
+ apiRouter.Handle("/enterprises/{enterpriseID}/scalesets", http.HandlerFunc(han.ListEnterpriseScaleSetsHandler)).Methods("GET", "OPTIONS")
+
// Enterprise instances list
apiRouter.Handle("/enterprises/{enterpriseID}/instances/", http.HandlerFunc(han.ListEnterpriseInstancesHandler)).Methods("GET", "OPTIONS")
apiRouter.Handle("/enterprises/{enterpriseID}/instances", http.HandlerFunc(han.ListEnterpriseInstancesHandler)).Methods("GET", "OPTIONS")
@@ -279,13 +435,103 @@ func NewAPIRouter(han *controllers.APIController, logWriter io.Writer, authMiddl
apiRouter.Handle("/enterprises/", http.HandlerFunc(han.CreateEnterpriseHandler)).Methods("POST", "OPTIONS")
apiRouter.Handle("/enterprises", http.HandlerFunc(han.CreateEnterpriseHandler)).Methods("POST", "OPTIONS")
- // Credentials and providers
- apiRouter.Handle("/credentials/", http.HandlerFunc(han.ListCredentials)).Methods("GET", "OPTIONS")
- apiRouter.Handle("/credentials", http.HandlerFunc(han.ListCredentials)).Methods("GET", "OPTIONS")
+ // Providers
apiRouter.Handle("/providers/", http.HandlerFunc(han.ListProviders)).Methods("GET", "OPTIONS")
apiRouter.Handle("/providers", http.HandlerFunc(han.ListProviders)).Methods("GET", "OPTIONS")
- // Websocket log writer
- apiRouter.Handle("/{ws:ws\\/?}", http.HandlerFunc(han.WSHandler)).Methods("GET")
+ //////////////////////
+ // Github Endpoints //
+ //////////////////////
+ // Create Github Endpoint
+ apiRouter.Handle("/github/endpoints/", http.HandlerFunc(han.CreateGithubEndpoint)).Methods("POST", "OPTIONS")
+ apiRouter.Handle("/github/endpoints", http.HandlerFunc(han.CreateGithubEndpoint)).Methods("POST", "OPTIONS")
+ // List Github Endpoints
+ apiRouter.Handle("/github/endpoints/", http.HandlerFunc(han.ListGithubEndpoints)).Methods("GET", "OPTIONS")
+ apiRouter.Handle("/github/endpoints", http.HandlerFunc(han.ListGithubEndpoints)).Methods("GET", "OPTIONS")
+ // Get Github Endpoint
+ apiRouter.Handle("/github/endpoints/{name}/", http.HandlerFunc(han.GetGithubEndpoint)).Methods("GET", "OPTIONS")
+ apiRouter.Handle("/github/endpoints/{name}", http.HandlerFunc(han.GetGithubEndpoint)).Methods("GET", "OPTIONS")
+ // Delete Github Endpoint
+ apiRouter.Handle("/github/endpoints/{name}/", http.HandlerFunc(han.DeleteGithubEndpoint)).Methods("DELETE", "OPTIONS")
+ apiRouter.Handle("/github/endpoints/{name}", http.HandlerFunc(han.DeleteGithubEndpoint)).Methods("DELETE", "OPTIONS")
+ // Update Github Endpoint
+ apiRouter.Handle("/github/endpoints/{name}/", http.HandlerFunc(han.UpdateGithubEndpoint)).Methods("PUT", "OPTIONS")
+ apiRouter.Handle("/github/endpoints/{name}", http.HandlerFunc(han.UpdateGithubEndpoint)).Methods("PUT", "OPTIONS")
+
+ ////////////////////////
+ // Github credentials //
+ ////////////////////////
+ // Legacy credentials path
+ apiRouter.Handle("/credentials/", http.HandlerFunc(han.ListCredentials)).Methods("GET", "OPTIONS")
+ apiRouter.Handle("/credentials", http.HandlerFunc(han.ListCredentials)).Methods("GET", "OPTIONS")
+ // List Github Credentials
+ apiRouter.Handle("/github/credentials/", http.HandlerFunc(han.ListCredentials)).Methods("GET", "OPTIONS")
+ apiRouter.Handle("/github/credentials", http.HandlerFunc(han.ListCredentials)).Methods("GET", "OPTIONS")
+ // Create Github Credentials
+ apiRouter.Handle("/github/credentials/", http.HandlerFunc(han.CreateGithubCredential)).Methods("POST", "OPTIONS")
+ apiRouter.Handle("/github/credentials", http.HandlerFunc(han.CreateGithubCredential)).Methods("POST", "OPTIONS")
+ // Get Github Credential
+ apiRouter.Handle("/github/credentials/{id}/", http.HandlerFunc(han.GetGithubCredential)).Methods("GET", "OPTIONS")
+ apiRouter.Handle("/github/credentials/{id}", http.HandlerFunc(han.GetGithubCredential)).Methods("GET", "OPTIONS")
+ // Delete Github Credential
+ apiRouter.Handle("/github/credentials/{id}/", http.HandlerFunc(han.DeleteGithubCredential)).Methods("DELETE", "OPTIONS")
+ apiRouter.Handle("/github/credentials/{id}", http.HandlerFunc(han.DeleteGithubCredential)).Methods("DELETE", "OPTIONS")
+ // Update Github Credential
+ apiRouter.Handle("/github/credentials/{id}/", http.HandlerFunc(han.UpdateGithubCredential)).Methods("PUT", "OPTIONS")
+ apiRouter.Handle("/github/credentials/{id}", http.HandlerFunc(han.UpdateGithubCredential)).Methods("PUT", "OPTIONS")
+
+ //////////////////////
+ // Gitea Endpoints  //
+ //////////////////////
+ // Create Gitea Endpoint
+ apiRouter.Handle("/gitea/endpoints/", http.HandlerFunc(han.CreateGiteaEndpoint)).Methods("POST", "OPTIONS")
+ apiRouter.Handle("/gitea/endpoints", http.HandlerFunc(han.CreateGiteaEndpoint)).Methods("POST", "OPTIONS")
+ // List Gitea Endpoints
+ apiRouter.Handle("/gitea/endpoints/", http.HandlerFunc(han.ListGiteaEndpoints)).Methods("GET", "OPTIONS")
+ apiRouter.Handle("/gitea/endpoints", http.HandlerFunc(han.ListGiteaEndpoints)).Methods("GET", "OPTIONS")
+ // Get Gitea Endpoint
+ apiRouter.Handle("/gitea/endpoints/{name}/", http.HandlerFunc(han.GetGiteaEndpoint)).Methods("GET", "OPTIONS")
+ apiRouter.Handle("/gitea/endpoints/{name}", http.HandlerFunc(han.GetGiteaEndpoint)).Methods("GET", "OPTIONS")
+ // Delete Gitea Endpoint
+ apiRouter.Handle("/gitea/endpoints/{name}/", http.HandlerFunc(han.DeleteGiteaEndpoint)).Methods("DELETE", "OPTIONS")
+ apiRouter.Handle("/gitea/endpoints/{name}", http.HandlerFunc(han.DeleteGiteaEndpoint)).Methods("DELETE", "OPTIONS")
+ // Update Gitea Endpoint
+ apiRouter.Handle("/gitea/endpoints/{name}/", http.HandlerFunc(han.UpdateGiteaEndpoint)).Methods("PUT", "OPTIONS")
+ apiRouter.Handle("/gitea/endpoints/{name}", http.HandlerFunc(han.UpdateGiteaEndpoint)).Methods("PUT", "OPTIONS")
+
+ ////////////////////////
+ // Gitea credentials  //
+ ////////////////////////
+ // List Gitea Credentials
+ apiRouter.Handle("/gitea/credentials/", http.HandlerFunc(han.ListGiteaCredentials)).Methods("GET", "OPTIONS")
+ apiRouter.Handle("/gitea/credentials", http.HandlerFunc(han.ListGiteaCredentials)).Methods("GET", "OPTIONS")
+ // Create Gitea Credentials
+ apiRouter.Handle("/gitea/credentials/", http.HandlerFunc(han.CreateGiteaCredential)).Methods("POST", "OPTIONS")
+ apiRouter.Handle("/gitea/credentials", http.HandlerFunc(han.CreateGiteaCredential)).Methods("POST", "OPTIONS")
+ // Get Gitea Credential
+ apiRouter.Handle("/gitea/credentials/{id}/", http.HandlerFunc(han.GetGiteaCredential)).Methods("GET", "OPTIONS")
+ apiRouter.Handle("/gitea/credentials/{id}", http.HandlerFunc(han.GetGiteaCredential)).Methods("GET", "OPTIONS")
+ // Delete Gitea Credential
+ apiRouter.Handle("/gitea/credentials/{id}/", http.HandlerFunc(han.DeleteGiteaCredential)).Methods("DELETE", "OPTIONS")
+ apiRouter.Handle("/gitea/credentials/{id}", http.HandlerFunc(han.DeleteGiteaCredential)).Methods("DELETE", "OPTIONS")
+ // Update Gitea Credential
+ apiRouter.Handle("/gitea/credentials/{id}/", http.HandlerFunc(han.UpdateGiteaCredential)).Methods("PUT", "OPTIONS")
+ apiRouter.Handle("/gitea/credentials/{id}", http.HandlerFunc(han.UpdateGiteaCredential)).Methods("PUT", "OPTIONS")
+
+ /////////////////////////
+ // Websocket endpoints //
+ /////////////////////////
+ // Legacy log websocket path
+ apiRouter.Handle("/ws/", http.HandlerFunc(han.WSHandler)).Methods("GET")
+ apiRouter.Handle("/ws", http.HandlerFunc(han.WSHandler)).Methods("GET")
+ // Log websocket endpoint
+ apiRouter.Handle("/ws/logs/", http.HandlerFunc(han.WSHandler)).Methods("GET")
+ apiRouter.Handle("/ws/logs", http.HandlerFunc(han.WSHandler)).Methods("GET")
+ // DB watcher websocket endpoint
+ apiRouter.Handle("/ws/events/", http.HandlerFunc(han.EventsHandler)).Methods("GET")
+ apiRouter.Handle("/ws/events", http.HandlerFunc(han.EventsHandler)).Methods("GET")
+
+ // NotFound handler - this should be last
+ apiRouter.PathPrefix("/").HandlerFunc(han.NotFoundHandler).Methods("GET", "POST", "PUT", "DELETE", "OPTIONS")
return router
}
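
Each new route above is registered twice, with and without a trailing slash, because gorilla/mux matches paths literally unless StrictSlash is enabled, and the catch-all NotFound handler has to come last or it would shadow anything registered after it. A minimal sketch of the same pattern, with hypothetical handlers rather than GARM's actual controller:

package main

import (
	"fmt"
	"net/http"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()
	list := func(w http.ResponseWriter, _ *http.Request) { fmt.Fprintln(w, "scale sets") }
	// Register both forms so clients may use either "/scalesets" or "/scalesets/".
	r.HandleFunc("/scalesets/", list).Methods("GET")
	r.HandleFunc("/scalesets", list).Methods("GET")
	// The catch-all must be registered last; PathPrefix("/") matches every path.
	r.PathPrefix("/").HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		http.Error(w, "not found", http.StatusNotFound)
	}).Methods("GET", "POST", "PUT", "DELETE", "OPTIONS")
	_ = http.ListenAndServe(":8080", r)
}
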
diff --git a/apiserver/swagger-models.yaml b/apiserver/swagger-models.yaml
index 417f178a..74eaac84 100644
--- a/apiserver/swagger-models.yaml
+++ b/apiserver/swagger-models.yaml
@@ -8,6 +8,27 @@ definitions:
import:
package: github.com/cloudbase/garm/params
alias: garm_params
+ HookInfo:
+ type: object
+ x-go-type:
+ type: HookInfo
+ import:
+ package: github.com/cloudbase/garm/params
+ alias: garm_params
+ ControllerInfo:
+ type: object
+ x-go-type:
+ type: ControllerInfo
+ import:
+ package: github.com/cloudbase/garm/params
+ alias: garm_params
+ InstallWebhookParams:
+ type: object
+ x-go-type:
+ type: InstallWebhookParams
+ import:
+ package: github.com/cloudbase/garm/params
+ alias: garm_params
NewUserParams:
type: object
x-go-type:
@@ -53,11 +74,11 @@ definitions:
package: github.com/cloudbase/garm/params
alias: garm_params
items:
- $ref: '#/definitions/GithubCredentials'
- GithubCredentials:
+ $ref: '#/definitions/ForgeCredentials'
+ ForgeCredentials:
type: object
x-go-type:
- type: GithubCredentials
+ type: ForgeCredentials
import:
package: github.com/cloudbase/garm/params
alias: garm_params
@@ -109,6 +130,22 @@ definitions:
import:
package: github.com/cloudbase/garm/params
alias: garm_params
+ ScaleSets:
+ type: array
+ x-go-type:
+ type: ScaleSets
+ import:
+ package: github.com/cloudbase/garm/params
+ alias: garm_params
+ items:
+ $ref: '#/definitions/ScaleSet'
+ ScaleSet:
+ type: object
+ x-go-type:
+ type: ScaleSet
+ import:
+ package: github.com/cloudbase/garm/params
+ alias: garm_params
Repositories:
type: array
x-go-type:
@@ -192,6 +229,13 @@ definitions:
import:
package: github.com/cloudbase/garm/params
alias: garm_params
+ CreateScaleSetParams:
+ type: object
+ x-go-type:
+ type: CreateScaleSetParams
+ import:
+ package: github.com/cloudbase/garm/params
+ alias: garm_params
UpdatePoolParams:
type: object
x-go-type:
@@ -199,6 +243,13 @@ definitions:
import:
package: github.com/cloudbase/garm/params
alias: garm_params
+ UpdateScaleSetParams:
+ type: object
+ x-go-type:
+ type: UpdateScaleSetParams
+ import:
+ package: github.com/cloudbase/garm/params
+ alias: garm_params
APIErrorResponse:
type: object
x-go-type:
@@ -206,3 +257,89 @@ definitions:
import:
package: github.com/cloudbase/garm/apiserver/params
alias: apiserver_params
+ CreateInstanceParams:
+ type: object
+ x-go-type:
+ type: CreateInstanceParams
+ import:
+ package: github.com/cloudbase/garm/params
+ alias: garm_params
+ UpdateGithubEndpointParams:
+ type: object
+ x-go-type:
+ type: UpdateGithubEndpointParams
+ import:
+ package: github.com/cloudbase/garm/params
+ alias: garm_params
+ UpdateGiteaEndpointParams:
+ type: object
+ x-go-type:
+ type: UpdateGiteaEndpointParams
+ import:
+ package: github.com/cloudbase/garm/params
+ alias: garm_params
+ ForgeEndpoint:
+ type: object
+ x-go-type:
+ type: ForgeEndpoint
+ import:
+ package: github.com/cloudbase/garm/params
+ alias: garm_params
+ ForgeEndpoints:
+ type: array
+ x-go-type:
+ type: ForgeEndpoints
+ import:
+ package: github.com/cloudbase/garm/params
+ alias: garm_params
+ items:
+ $ref: '#/definitions/ForgeEndpoint'
+ CreateGithubEndpointParams:
+ type: object
+ x-go-type:
+ type: CreateGithubEndpointParams
+ import:
+ package: github.com/cloudbase/garm/params
+ alias: garm_params
+ CreateGiteaEndpointParams:
+ type: object
+ x-go-type:
+ type: CreateGiteaEndpointParams
+ import:
+ package: github.com/cloudbase/garm/params
+ alias: garm_params
+ CreateGithubCredentialsParams:
+ type: object
+ x-go-type:
+ type: CreateGithubCredentialsParams
+ import:
+ package: github.com/cloudbase/garm/params
+ alias: garm_params
+ CreateGiteaCredentialsParams:
+ type: object
+ x-go-type:
+ type: CreateGiteaCredentialsParams
+ import:
+ package: github.com/cloudbase/garm/params
+ alias: garm_params
+ UpdateGithubCredentialsParams:
+ type: object
+ x-go-type:
+ type: UpdateGithubCredentialsParams
+ import:
+ package: github.com/cloudbase/garm/params
+ alias: garm_params
+ UpdateGiteaCredentialsParams:
+ type: object
+ x-go-type:
+ type: UpdateGiteaCredentialsParams
+ import:
+ package: github.com/cloudbase/garm/params
+ alias: garm_params
+ UpdateControllerParams:
+ type: object
+ x-go-type:
+ type: UpdateControllerParams
+ import:
+ package: github.com/cloudbase/garm/params
+ alias: garm_params
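
The x-go-type stanzas tell go-swagger not to generate fresh model structs but to point the spec's definitions at the existing types in github.com/cloudbase/garm/params. Roughly, the generated models reduce to thin references like the sketch below (illustrative only; the actual generated file differs in detail):

// Sketch of the effect of x-go-type: the generated model resolves to the
// params type rather than a newly generated struct definition.
package models

import (
	garm_params "github.com/cloudbase/garm/params"
)

// "ScaleSet" in the swagger spec maps straight onto params.ScaleSet.
type ScaleSet = garm_params.ScaleSet

// Array definitions map onto the corresponding slice type.
type ScaleSets = garm_params.ScaleSets
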
diff --git a/apiserver/swagger.yaml b/apiserver/swagger.yaml
index 0da7dddf..bf02a2d7 100644
--- a/apiserver/swagger.yaml
+++ b/apiserver/swagger.yaml
@@ -9,6 +9,13 @@ definitions:
alias: apiserver_params
package: github.com/cloudbase/garm/apiserver/params
type: APIErrorResponse
+ ControllerInfo:
+ type: object
+ x-go-type:
+ import:
+ alias: garm_params
+ package: github.com/cloudbase/garm/params
+ type: ControllerInfo
CreateEnterpriseParams:
type: object
x-go-type:
@@ -16,6 +23,41 @@ definitions:
alias: garm_params
package: github.com/cloudbase/garm/params
type: CreateEnterpriseParams
+ CreateGiteaCredentialsParams:
+ type: object
+ x-go-type:
+ import:
+ alias: garm_params
+ package: github.com/cloudbase/garm/params
+ type: CreateGiteaCredentialsParams
+ CreateGiteaEndpointParams:
+ type: object
+ x-go-type:
+ import:
+ alias: garm_params
+ package: github.com/cloudbase/garm/params
+ type: CreateGiteaEndpointParams
+ CreateGithubCredentialsParams:
+ type: object
+ x-go-type:
+ import:
+ alias: garm_params
+ package: github.com/cloudbase/garm/params
+ type: CreateGithubCredentialsParams
+ CreateGithubEndpointParams:
+ type: object
+ x-go-type:
+ import:
+ alias: garm_params
+ package: github.com/cloudbase/garm/params
+ type: CreateGithubEndpointParams
+ CreateInstanceParams:
+ type: object
+ x-go-type:
+ import:
+ alias: garm_params
+ package: github.com/cloudbase/garm/params
+ type: CreateInstanceParams
CreateOrgParams:
type: object
x-go-type:
@@ -37,9 +79,16 @@ definitions:
alias: garm_params
package: github.com/cloudbase/garm/params
type: CreateRepoParams
+ CreateScaleSetParams:
+ type: object
+ x-go-type:
+ import:
+ alias: garm_params
+ package: github.com/cloudbase/garm/params
+ type: CreateScaleSetParams
Credentials:
items:
- $ref: '#/definitions/GithubCredentials'
+ $ref: '#/definitions/ForgeCredentials'
type: array
x-go-type:
import:
@@ -62,13 +111,43 @@ definitions:
alias: garm_params
package: github.com/cloudbase/garm/params
type: Enterprises
- GithubCredentials:
+ ForgeCredentials:
type: object
x-go-type:
import:
alias: garm_params
package: github.com/cloudbase/garm/params
- type: GithubCredentials
+ type: ForgeCredentials
+ ForgeEndpoint:
+ type: object
+ x-go-type:
+ import:
+ alias: garm_params
+ package: github.com/cloudbase/garm/params
+ type: ForgeEndpoint
+ ForgeEndpoints:
+ items:
+ $ref: '#/definitions/ForgeEndpoint'
+ type: array
+ x-go-type:
+ import:
+ alias: garm_params
+ package: github.com/cloudbase/garm/params
+ type: ForgeEndpoints
+ HookInfo:
+ type: object
+ x-go-type:
+ import:
+ alias: garm_params
+ package: github.com/cloudbase/garm/params
+ type: HookInfo
+ InstallWebhookParams:
+ type: object
+ x-go-type:
+ import:
+ alias: garm_params
+ package: github.com/cloudbase/garm/params
+ type: InstallWebhookParams
Instance:
type: object
x-go-type:
@@ -186,6 +265,29 @@ definitions:
alias: garm_params
package: github.com/cloudbase/garm/params
type: Repository
+ ScaleSet:
+ type: object
+ x-go-type:
+ import:
+ alias: garm_params
+ package: github.com/cloudbase/garm/params
+ type: ScaleSet
+ ScaleSets:
+ items:
+ $ref: '#/definitions/ScaleSet'
+ type: array
+ x-go-type:
+ import:
+ alias: garm_params
+ package: github.com/cloudbase/garm/params
+ type: ScaleSets
+ UpdateControllerParams:
+ type: object
+ x-go-type:
+ import:
+ alias: garm_params
+ package: github.com/cloudbase/garm/params
+ type: UpdateControllerParams
UpdateEntityParams:
type: object
x-go-type:
@@ -193,6 +295,34 @@ definitions:
alias: garm_params
package: github.com/cloudbase/garm/params
type: UpdateEntityParams
+ UpdateGiteaCredentialsParams:
+ type: object
+ x-go-type:
+ import:
+ alias: garm_params
+ package: github.com/cloudbase/garm/params
+ type: UpdateGiteaCredentialsParams
+ UpdateGiteaEndpointParams:
+ type: object
+ x-go-type:
+ import:
+ alias: garm_params
+ package: github.com/cloudbase/garm/params
+ type: UpdateGiteaEndpointParams
+ UpdateGithubCredentialsParams:
+ type: object
+ x-go-type:
+ import:
+ alias: garm_params
+ package: github.com/cloudbase/garm/params
+ type: UpdateGithubCredentialsParams
+ UpdateGithubEndpointParams:
+ type: object
+ x-go-type:
+ import:
+ alias: garm_params
+ package: github.com/cloudbase/garm/params
+ type: UpdateGithubEndpointParams
UpdatePoolParams:
type: object
x-go-type:
@@ -200,6 +330,13 @@ definitions:
alias: garm_params
package: github.com/cloudbase/garm/params
type: UpdatePoolParams
+ UpdateScaleSetParams:
+ type: object
+ x-go-type:
+ import:
+ alias: garm_params
+ package: github.com/cloudbase/garm/params
+ type: UpdateScaleSetParams
User:
type: object
x-go-type:
@@ -239,24 +376,57 @@ paths:
summary: Logs in a user and returns a JWT token.
tags:
- login
- /credentials:
- get:
- operationId: ListCredentials
+ /controller:
+ put:
+ operationId: UpdateController
+ parameters:
+ - description: Parameters used when updating the controller.
+ in: body
+ name: Body
+ required: true
+ schema:
+ $ref: '#/definitions/UpdateControllerParams'
+ description: Parameters used when updating the controller.
+ type: object
responses:
"200":
- description: Credentials
+ description: ControllerInfo
schema:
- $ref: '#/definitions/Credentials'
+ $ref: '#/definitions/ControllerInfo'
"400":
description: APIErrorResponse
schema:
$ref: '#/definitions/APIErrorResponse'
- summary: List all credentials.
+ summary: Update controller.
tags:
- - credentials
+ - controller
+ /controller-info:
+ get:
+ operationId: ControllerInfo
+ responses:
+ "200":
+ description: ControllerInfo
+ schema:
+ $ref: '#/definitions/ControllerInfo'
+ "409":
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Get controller info.
+ tags:
+ - controllerInfo
/enterprises:
get:
operationId: ListEnterprises
+ parameters:
+ - description: Exact enterprise name to filter by
+ in: query
+ name: name
+ type: string
+ - description: Exact endpoint name to filter by
+ in: query
+ name: endpoint
+ type: string
responses:
"200":
description: Enterprises
@@ -513,6 +683,57 @@ paths:
tags:
- enterprises
- pools
+ /enterprises/{enterpriseID}/scalesets:
+ get:
+ operationId: ListEnterpriseScaleSets
+ parameters:
+ - description: Enterprise ID.
+ in: path
+ name: enterpriseID
+ required: true
+ type: string
+ responses:
+ "200":
+ description: ScaleSets
+ schema:
+ $ref: '#/definitions/ScaleSets'
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: List enterprise scale sets.
+ tags:
+ - enterprises
+ - scalesets
+ post:
+ operationId: CreateEnterpriseScaleSet
+ parameters:
+ - description: Enterprise ID.
+ in: path
+ name: enterpriseID
+ required: true
+ type: string
+ - description: Parameters used when creating the enterprise scale set.
+ in: body
+ name: Body
+ required: true
+ schema:
+ $ref: '#/definitions/CreateScaleSetParams'
+ description: Parameters used when creating the enterprise scale set.
+ type: object
+ responses:
+ "200":
+ description: ScaleSet
+ schema:
+ $ref: '#/definitions/ScaleSet'
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Create enterprise scale set with the parameters given.
+ tags:
+ - enterprises
+ - scalesets
/first-run:
post:
operationId: FirstRun
@@ -537,6 +758,418 @@ paths:
summary: Initialize the first run of the controller.
tags:
- first-run
+ /gitea/credentials:
+ get:
+ operationId: ListGiteaCredentials
+ responses:
+ "200":
+ description: Credentials
+ schema:
+ $ref: '#/definitions/Credentials'
+ "400":
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: List all Gitea credentials.
+ tags:
+ - credentials
+ post:
+ operationId: CreateGiteaCredentials
+ parameters:
+ - description: Parameters used when creating a Gitea credential.
+ in: body
+ name: Body
+ required: true
+ schema:
+ $ref: '#/definitions/CreateGiteaCredentialsParams'
+ description: Parameters used when creating a Gitea credential.
+ type: object
+ responses:
+ "200":
+ description: ForgeCredentials
+ schema:
+ $ref: '#/definitions/ForgeCredentials'
+ "400":
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Create a Gitea credential.
+ tags:
+ - credentials
+ /gitea/credentials/{id}:
+ delete:
+ operationId: DeleteGiteaCredentials
+ parameters:
+ - description: ID of the Gitea credential.
+ in: path
+ name: id
+ required: true
+ type: integer
+ responses:
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Delete a Gitea credential.
+ tags:
+ - credentials
+ get:
+ operationId: GetGiteaCredentials
+ parameters:
+ - description: ID of the Gitea credential.
+ in: path
+ name: id
+ required: true
+ type: integer
+ responses:
+ "200":
+ description: ForgeCredentials
+ schema:
+ $ref: '#/definitions/ForgeCredentials'
+ "400":
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Get a Gitea credential.
+ tags:
+ - credentials
+ put:
+ operationId: UpdateGiteaCredentials
+ parameters:
+ - description: ID of the Gitea credential.
+ in: path
+ name: id
+ required: true
+ type: integer
+ - description: Parameters used when updating a Gitea credential.
+ in: body
+ name: Body
+ required: true
+ schema:
+ $ref: '#/definitions/UpdateGiteaCredentialsParams'
+ description: Parameters used when updating a Gitea credential.
+ type: object
+ responses:
+ "200":
+ description: ForgeCredentials
+ schema:
+ $ref: '#/definitions/ForgeCredentials'
+ "400":
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Update a Gitea credential.
+ tags:
+ - credentials
+ /gitea/endpoints:
+ get:
+ operationId: ListGiteaEndpoints
+ responses:
+ "200":
+ description: ForgeEndpoints
+ schema:
+ $ref: '#/definitions/ForgeEndpoints'
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: List all Gitea Endpoints.
+ tags:
+ - endpoints
+ post:
+ operationId: CreateGiteaEndpoint
+ parameters:
+ - description: Parameters used when creating a Gitea endpoint.
+ in: body
+ name: Body
+ required: true
+ schema:
+ $ref: '#/definitions/CreateGiteaEndpointParams'
+ description: Parameters used when creating a Gitea endpoint.
+ type: object
+ responses:
+ "200":
+ description: ForgeEndpoint
+ schema:
+ $ref: '#/definitions/ForgeEndpoint'
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Create a Gitea Endpoint.
+ tags:
+ - endpoints
+ /gitea/endpoints/{name}:
+ delete:
+ operationId: DeleteGiteaEndpoint
+ parameters:
+ - description: The name of the Gitea endpoint.
+ in: path
+ name: name
+ required: true
+ type: string
+ responses:
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Delete a Gitea Endpoint.
+ tags:
+ - endpoints
+ get:
+ operationId: GetGiteaEndpoint
+ parameters:
+ - description: The name of the Gitea endpoint.
+ in: path
+ name: name
+ required: true
+ type: string
+ responses:
+ "200":
+ description: ForgeEndpoint
+ schema:
+ $ref: '#/definitions/ForgeEndpoint'
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Get a Gitea Endpoint.
+ tags:
+ - endpoints
+ put:
+ operationId: UpdateGiteaEndpoint
+ parameters:
+ - description: The name of the Gitea endpoint.
+ in: path
+ name: name
+ required: true
+ type: string
+ - description: Parameters used when updating a Gitea endpoint.
+ in: body
+ name: Body
+ required: true
+ schema:
+ $ref: '#/definitions/UpdateGiteaEndpointParams'
+ description: Parameters used when updating a Gitea endpoint.
+ type: object
+ responses:
+ "200":
+ description: ForgeEndpoint
+ schema:
+ $ref: '#/definitions/ForgeEndpoint'
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Update a Gitea Endpoint.
+ tags:
+ - endpoints
+ /github/credentials:
+ get:
+ operationId: ListCredentials
+ responses:
+ "200":
+ description: Credentials
+ schema:
+ $ref: '#/definitions/Credentials'
+ "400":
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: List all credentials.
+ tags:
+ - credentials
+ post:
+ operationId: CreateCredentials
+ parameters:
+ - description: Parameters used when creating a GitHub credential.
+ in: body
+ name: Body
+ required: true
+ schema:
+ $ref: '#/definitions/CreateGithubCredentialsParams'
+ description: Parameters used when creating a GitHub credential.
+ type: object
+ responses:
+ "200":
+ description: ForgeCredentials
+ schema:
+ $ref: '#/definitions/ForgeCredentials'
+ "400":
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Create a GitHub credential.
+ tags:
+ - credentials
+ /github/credentials/{id}:
+ delete:
+ operationId: DeleteCredentials
+ parameters:
+ - description: ID of the GitHub credential.
+ in: path
+ name: id
+ required: true
+ type: integer
+ responses:
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Delete a GitHub credential.
+ tags:
+ - credentials
+ get:
+ operationId: GetCredentials
+ parameters:
+ - description: ID of the GitHub credential.
+ in: path
+ name: id
+ required: true
+ type: integer
+ responses:
+ "200":
+ description: ForgeCredentials
+ schema:
+ $ref: '#/definitions/ForgeCredentials'
+ "400":
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Get a GitHub credential.
+ tags:
+ - credentials
+ put:
+ operationId: UpdateCredentials
+ parameters:
+ - description: ID of the GitHub credential.
+ in: path
+ name: id
+ required: true
+ type: integer
+ - description: Parameters used when updating a GitHub credential.
+ in: body
+ name: Body
+ required: true
+ schema:
+ $ref: '#/definitions/UpdateGithubCredentialsParams'
+ description: Parameters used when updating a GitHub credential.
+ type: object
+ responses:
+ "200":
+ description: ForgeCredentials
+ schema:
+ $ref: '#/definitions/ForgeCredentials'
+ "400":
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Update a GitHub credential.
+ tags:
+ - credentials
+ /github/endpoints:
+ get:
+ operationId: ListGithubEndpoints
+ responses:
+ "200":
+ description: ForgeEndpoints
+ schema:
+ $ref: '#/definitions/ForgeEndpoints'
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: List all GitHub Endpoints.
+ tags:
+ - endpoints
+ post:
+ operationId: CreateGithubEndpoint
+ parameters:
+ - description: Parameters used when creating a GitHub endpoint.
+ in: body
+ name: Body
+ required: true
+ schema:
+ $ref: '#/definitions/CreateGithubEndpointParams'
+ description: Parameters used when creating a GitHub endpoint.
+ type: object
+ responses:
+ "200":
+ description: ForgeEndpoint
+ schema:
+ $ref: '#/definitions/ForgeEndpoint'
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Create a GitHub Endpoint.
+ tags:
+ - endpoints
+ /github/endpoints/{name}:
+ delete:
+ operationId: DeleteGithubEndpoint
+ parameters:
+ - description: The name of the GitHub endpoint.
+ in: path
+ name: name
+ required: true
+ type: string
+ responses:
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Delete a GitHub Endpoint.
+ tags:
+ - endpoints
+ get:
+ operationId: GetGithubEndpoint
+ parameters:
+ - description: The name of the GitHub endpoint.
+ in: path
+ name: name
+ required: true
+ type: string
+ responses:
+ "200":
+ description: ForgeEndpoint
+ schema:
+ $ref: '#/definitions/ForgeEndpoint'
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Get a GitHub Endpoint.
+ tags:
+ - endpoints
+ put:
+ operationId: UpdateGithubEndpoint
+ parameters:
+ - description: The name of the GitHub endpoint.
+ in: path
+ name: name
+ required: true
+ type: string
+ - description: Parameters used when updating a GitHub endpoint.
+ in: body
+ name: Body
+ required: true
+ schema:
+ $ref: '#/definitions/UpdateGithubEndpointParams'
+ description: Parameters used when updating a GitHub endpoint.
+ type: object
+ responses:
+ "200":
+ description: ForgeEndpoint
+ schema:
+ $ref: '#/definitions/ForgeEndpoint'
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Update a GitHub Endpoint.
+ tags:
+ - endpoints
/instances:
get:
operationId: ListInstances
@@ -561,6 +1194,14 @@ paths:
name: instanceName
required: true
type: string
+ - description: If true, GARM will ignore any provider error when removing the runner and will continue to remove the runner from GitHub and the GARM database.
+ in: query
+ name: forceRemove
+ type: boolean
+ - description: If true, GARM will ignore unauthorized errors returned by GitHub when removing a runner. This is useful if you want to clean up runners after your credentials have expired.
+ in: query
+ name: bypassGHUnauthorized
+ type: boolean
responses:
default:
description: APIErrorResponse
@@ -622,6 +1263,15 @@ paths:
/organizations:
get:
operationId: ListOrgs
+ parameters:
+ - description: Exact organization name to filter by
+ in: query
+ name: name
+ type: string
+ - description: Exact endpoint name to filter by
+ in: query
+ name: endpoint
+ type: string
responses:
"200":
description: Organizations
@@ -666,6 +1316,10 @@ paths:
name: orgID
required: true
type: string
+ - description: If true and a webhook is installed for this organization, it will not be removed.
+ in: query
+ name: keepWebhook
+ type: boolean
responses:
default:
description: APIErrorResponse
@@ -878,6 +1532,127 @@ paths:
tags:
- organizations
- pools
+ /organizations/{orgID}/scalesets:
+ get:
+ operationId: ListOrgScaleSets
+ parameters:
+ - description: Organization ID.
+ in: path
+ name: orgID
+ required: true
+ type: string
+ responses:
+ "200":
+ description: ScaleSets
+ schema:
+ $ref: '#/definitions/ScaleSets'
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: List organization scale sets.
+ tags:
+ - organizations
+ - scalesets
+ post:
+ operationId: CreateOrgScaleSet
+ parameters:
+ - description: Organization ID.
+ in: path
+ name: orgID
+ required: true
+ type: string
+ - description: Parameters used when creating the organization scale set.
+ in: body
+ name: Body
+ required: true
+ schema:
+ $ref: '#/definitions/CreateScaleSetParams'
+ description: Parameters used when creating the organization scale set.
+ type: object
+ responses:
+ "200":
+ description: ScaleSet
+ schema:
+ $ref: '#/definitions/ScaleSet'
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Create organization scale set with the parameters given.
+ tags:
+ - organizations
+ - scalesets
+ /organizations/{orgID}/webhook:
+ delete:
+ operationId: UninstallOrgWebhook
+ parameters:
+ - description: Organization ID.
+ in: path
+ name: orgID
+ required: true
+ type: string
+ responses:
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Uninstall organization webhook.
+ tags:
+ - organizations
+ - hooks
+ get:
+ operationId: GetOrgWebhookInfo
+ parameters:
+ - description: Organization ID.
+ in: path
+ name: orgID
+ required: true
+ type: string
+ responses:
+ "200":
+ description: HookInfo
+ schema:
+ $ref: '#/definitions/HookInfo'
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Get information about the GARM installed webhook on an organization.
+ tags:
+ - organizations
+ - hooks
+ post:
+ description: |-
+ Install the GARM webhook for an organization. The secret configured on the organization will
+ be used to validate the requests.
+ operationId: InstallOrgWebhook
+ parameters:
+ - description: Organization ID.
+ in: path
+ name: orgID
+ required: true
+ type: string
+ - description: Parameters used when creating the organization webhook.
+ in: body
+ name: Body
+ required: true
+ schema:
+ $ref: '#/definitions/InstallWebhookParams'
+ description: Parameters used when creating the organization webhook.
+ type: object
+ responses:
+ "200":
+ description: HookInfo
+ schema:
+ $ref: '#/definitions/HookInfo'
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ tags:
+ - organizations
+ - hooks
/pools:
get:
operationId: ListPools
@@ -997,6 +1772,19 @@ paths:
/repositories:
get:
operationId: ListRepos
+ parameters:
+ - description: Exact owner name to filter by
+ in: query
+ name: owner
+ type: string
+ - description: Exact repository name to filter by
+ in: query
+ name: name
+ type: string
+ - description: Exact endpoint name to filter by
+ in: query
+ name: endpoint
+ type: string
responses:
"200":
description: Repositories
@@ -1041,6 +1829,10 @@ paths:
name: repoID
required: true
type: string
+ - description: If true and a webhook is installed for this repository, it will not be removed.
+ in: query
+ name: keepWebhook
+ type: boolean
responses:
default:
description: APIErrorResponse
@@ -1253,6 +2045,228 @@ paths:
tags:
- repositories
- pools
+ /repositories/{repoID}/scalesets:
+ get:
+ operationId: ListRepoScaleSets
+ parameters:
+ - description: Repository ID.
+ in: path
+ name: repoID
+ required: true
+ type: string
+ responses:
+ "200":
+ description: ScaleSets
+ schema:
+ $ref: '#/definitions/ScaleSets'
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: List repository scale sets.
+ tags:
+ - repositories
+ - scalesets
+ post:
+ operationId: CreateRepoScaleSet
+ parameters:
+ - description: Repository ID.
+ in: path
+ name: repoID
+ required: true
+ type: string
+ - description: Parameters used when creating the repository scale set.
+ in: body
+ name: Body
+ required: true
+ schema:
+ $ref: '#/definitions/CreateScaleSetParams'
+ description: Parameters used when creating the repository scale set.
+ type: object
+ responses:
+ "200":
+ description: ScaleSet
+ schema:
+ $ref: '#/definitions/ScaleSet'
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Create repository scale set with the parameters given.
+ tags:
+ - repositories
+ - scalesets
+ /repositories/{repoID}/webhook:
+ delete:
+ operationId: UninstallRepoWebhook
+ parameters:
+ - description: Repository ID.
+ in: path
+ name: repoID
+ required: true
+ type: string
+ responses:
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Uninstall repository webhook.
+ tags:
+ - repositories
+ - hooks
+ get:
+ operationId: GetRepoWebhookInfo
+ parameters:
+ - description: Repository ID.
+ in: path
+ name: repoID
+ required: true
+ type: string
+ responses:
+ "200":
+ description: HookInfo
+ schema:
+ $ref: '#/definitions/HookInfo'
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Get information about the GARM installed webhook on a repository.
+ tags:
+ - repositories
+ - hooks
+ post:
+ description: |-
+ Install the GARM webhook for a repository. The secret configured on the repository will
+ be used to validate the requests.
+ operationId: InstallRepoWebhook
+ parameters:
+ - description: Repository ID.
+ in: path
+ name: repoID
+ required: true
+ type: string
+ - description: Parameters used when creating the repository webhook.
+ in: body
+ name: Body
+ required: true
+ schema:
+ $ref: '#/definitions/InstallWebhookParams'
+ description: Parameters used when creating the repository webhook.
+ type: object
+ responses:
+ "200":
+ description: HookInfo
+ schema:
+ $ref: '#/definitions/HookInfo'
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ tags:
+ - repositories
+ - hooks
+ /scalesets:
+ get:
+ operationId: ListScalesets
+ responses:
+ "200":
+ description: ScaleSets
+ schema:
+ $ref: '#/definitions/ScaleSets'
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: List all scalesets.
+ tags:
+ - scalesets
+ /scalesets/{scalesetID}:
+ delete:
+ operationId: DeleteScaleSet
+ parameters:
+ - description: ID of the scale set to delete.
+ in: path
+ name: scalesetID
+ required: true
+ type: string
+ responses:
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Delete scale set by ID.
+ tags:
+ - scalesets
+ get:
+ operationId: GetScaleSet
+ parameters:
+ - description: ID of the scale set to fetch.
+ in: path
+ name: scalesetID
+ required: true
+ type: string
+ responses:
+ "200":
+ description: ScaleSet
+ schema:
+ $ref: '#/definitions/ScaleSet'
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Get scale set by ID.
+ tags:
+ - scalesets
+ put:
+ operationId: UpdateScaleSet
+ parameters:
+ - description: ID of the scale set to update.
+ in: path
+ name: scalesetID
+ required: true
+ type: string
+ - description: Parameters to update the scale set with.
+ in: body
+ name: Body
+ required: true
+ schema:
+ $ref: '#/definitions/UpdateScaleSetParams'
+ description: Parameters to update the scale set with.
+ type: object
+ responses:
+ "200":
+ description: ScaleSet
+ schema:
+ $ref: '#/definitions/ScaleSet'
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Update scale set by ID.
+ tags:
+ - scalesets
+ /scalesets/{scalesetID}/instances:
+ get:
+ operationId: ListScaleSetInstances
+ parameters:
+ - description: Runner scale set ID.
+ in: path
+ name: scalesetID
+ required: true
+ type: string
+ responses:
+ "200":
+ description: Instances
+ schema:
+ $ref: '#/definitions/Instances'
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: List runner instances in a scale set.
+ tags:
+ - instances
produces:
- application/json
security:
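
Putting the spec changes together, a client can exercise one of the new paths with a plain HTTP call. A minimal sketch, assuming GARM listens on localhost:9997, the API router is mounted under /api/v1 as in the default setup, and a valid JWT is at hand (all three are assumptions, not part of this diff):

package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Hypothetical values; substitute your controller URL and token.
	const baseURL = "http://localhost:9997/api/v1"
	const token = "REPLACE_WITH_JWT"

	req, err := http.NewRequest("GET", baseURL+"/scalesets", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "Bearer "+token)

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(body))
}
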
diff --git a/auth/admin_required.go b/auth/admin_required.go
new file mode 100644
index 00000000..b3ca3624
--- /dev/null
+++ b/auth/admin_required.go
@@ -0,0 +1,27 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+package auth
+
+import "net/http"
+
+func AdminRequiredMiddleware(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+ if !IsAdmin(ctx) {
+ http.Error(w, "Unauthorized", http.StatusUnauthorized)
+ return
+ }
+ next.ServeHTTP(w, r)
+ })
+}
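
AdminRequiredMiddleware only inspects flags already present on the request context, so it composes with any chain where an earlier middleware (normally the JWT one) has populated the context. A quick test-style sketch using httptest, not production wiring:

package auth_test

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	"github.com/cloudbase/garm/auth"
)

func ExampleAdminRequiredMiddleware() {
	secret := auth.AdminRequiredMiddleware(http.HandlerFunc(
		func(w http.ResponseWriter, _ *http.Request) {
			fmt.Fprint(w, "admin only")
		}))

	// Without the admin flag on the context, the middleware rejects the call.
	req := httptest.NewRequest("GET", "/secret", nil)
	rec := httptest.NewRecorder()
	secret.ServeHTTP(rec, req)
	fmt.Println(rec.Code) // 401

	// With the flag set (normally done by the JWT middleware) it passes through.
	ctx := auth.SetAdmin(req.Context(), true)
	rec = httptest.NewRecorder()
	secret.ServeHTTP(rec, req.WithContext(ctx))
	fmt.Println(rec.Code) // 200
}
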
diff --git a/auth/auth.go b/auth/auth.go
index d912bee6..c5fa1ebd 100644
--- a/auth/auth.go
+++ b/auth/auth.go
@@ -16,18 +16,19 @@ package auth
import (
"context"
+ "errors"
+ "fmt"
"time"
+ jwt "github.com/golang-jwt/jwt/v5"
+ "github.com/nbutton23/zxcvbn-go"
+ "golang.org/x/crypto/bcrypt"
+
runnerErrors "github.com/cloudbase/garm-provider-common/errors"
"github.com/cloudbase/garm-provider-common/util"
"github.com/cloudbase/garm/config"
"github.com/cloudbase/garm/database/common"
"github.com/cloudbase/garm/params"
-
- "github.com/golang-jwt/jwt"
- "github.com/nbutton23/zxcvbn-go"
- "github.com/pkg/errors"
- "golang.org/x/crypto/bcrypt"
)
func NewAuthenticator(cfg config.JWTAuth, store common.Store) *Authenticator {
@@ -49,24 +50,30 @@ func (a *Authenticator) IsInitialized() bool {
func (a *Authenticator) GetJWTToken(ctx context.Context) (string, error) {
tokenID, err := util.GetRandomString(16)
if err != nil {
- return "", errors.Wrap(err, "generating random string")
+ return "", fmt.Errorf("error generating random string: %w", err)
}
- expireToken := time.Now().Add(a.cfg.TimeToLive.Duration()).Unix()
+ expireToken := time.Now().Add(a.cfg.TimeToLive.Duration())
+ expires := &jwt.NumericDate{
+ Time: expireToken,
+ }
+ generation := PasswordGeneration(ctx)
claims := JWTClaims{
- StandardClaims: jwt.StandardClaims{
- ExpiresAt: expireToken,
+ RegisteredClaims: jwt.RegisteredClaims{
+ ExpiresAt: expires,
+ // nolint:golangci-lint,godox
// TODO: make this configurable
Issuer: "garm",
},
- UserID: UserID(ctx),
- TokenID: tokenID,
- IsAdmin: IsAdmin(ctx),
- FullName: FullName(ctx),
+ UserID: UserID(ctx),
+ TokenID: tokenID,
+ IsAdmin: IsAdmin(ctx),
+ FullName: FullName(ctx),
+ Generation: generation,
}
token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)
tokenString, err := token.SignedString([]byte(a.cfg.Secret))
if err != nil {
- return "", errors.Wrap(err, "fetching token string")
+ return "", fmt.Errorf("error fetching token string: %w", err)
}
return tokenString, nil
@@ -75,22 +82,26 @@ func (a *Authenticator) GetJWTToken(ctx context.Context) (string, error) {
// GetJWTMetricsToken returns a JWT token that can be used to read metrics.
// This token is not tied to a user; no user is stored in the DB.
func (a *Authenticator) GetJWTMetricsToken(ctx context.Context) (string, error) {
-
if !IsAdmin(ctx) {
return "", runnerErrors.ErrUnauthorized
}
tokenID, err := util.GetRandomString(16)
if err != nil {
- return "", errors.Wrap(err, "generating random string")
+ return "", fmt.Errorf("error generating random string: %w", err)
}
+ // nolint:golangci-lint,godox
// TODO: currently this is the same TTL as the normal Token
// maybe we should make this configurable
// it's usually pretty nasty if the monitoring fails because the token expired
- expireToken := time.Now().Add(a.cfg.TimeToLive.Duration()).Unix()
+ expireToken := time.Now().Add(a.cfg.TimeToLive.Duration())
+ expires := &jwt.NumericDate{
+ Time: expireToken,
+ }
claims := JWTClaims{
- StandardClaims: jwt.StandardClaims{
- ExpiresAt: expireToken,
+ RegisteredClaims: jwt.RegisteredClaims{
+ ExpiresAt: expires,
+ // nolint:golangci-lint,godox
// TODO: make this configurable
Issuer: "garm",
},
@@ -101,7 +112,7 @@ func (a *Authenticator) GetJWTMetricsToken(ctx context.Context) (string, error)
token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)
tokenString, err := token.SignedString([]byte(a.cfg.Secret))
if err != nil {
- return "", errors.Wrap(err, "fetching token string")
+ return "", fmt.Errorf("error fetching token string: %w", err)
}
return tokenString, nil
@@ -111,7 +122,7 @@ func (a *Authenticator) InitController(ctx context.Context, param params.NewUser
_, err := a.store.ControllerInfo()
if err != nil {
if !errors.Is(err, runnerErrors.ErrNotFound) {
- return params.User{}, errors.Wrap(err, "initializing controller")
+ return params.User{}, fmt.Errorf("error initializing controller: %w", err)
}
}
if a.store.HasAdminUser(ctx) {
@@ -141,7 +152,7 @@ func (a *Authenticator) InitController(ctx context.Context, param params.NewUser
hashed, err := util.PaswsordToBcrypt(param.Password)
if err != nil {
- return params.User{}, errors.Wrap(err, "creating user")
+ return params.User{}, fmt.Errorf("error creating user: %w", err)
}
param.Password = hashed
@@ -159,7 +170,7 @@ func (a *Authenticator) AuthenticateUser(ctx context.Context, info params.Passwo
if errors.Is(err, runnerErrors.ErrNotFound) {
return ctx, runnerErrors.ErrUnauthorized
}
- return ctx, errors.Wrap(err, "authenticating")
+ return ctx, fmt.Errorf("error authenticating: %w", err)
}
if !user.Enabled {
@@ -174,5 +185,5 @@ func (a *Authenticator) AuthenticateUser(ctx context.Context, info params.Passwo
return ctx, runnerErrors.ErrUnauthorized
}
- return PopulateContext(ctx, user), nil
+ return PopulateContext(ctx, user, nil), nil
}
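
The migration from jwt.StandardClaims to jwt/v5's RegisteredClaims changes ExpiresAt from a raw unix int64 to a *jwt.NumericDate, which is why the diff wraps the expiry time instead of calling Unix(). A self-contained round-trip sketch of the same signing and parsing flow, using a toy secret and hypothetical claim values:

package main

import (
	"fmt"
	"time"

	jwt "github.com/golang-jwt/jwt/v5"
)

type toyClaims struct {
	UserID     string `json:"user"`
	Generation uint   `json:"generation"`
	jwt.RegisteredClaims
}

func main() {
	secret := []byte("not-a-real-secret")

	claims := toyClaims{
		UserID:     "some-user-id",
		Generation: 1,
		RegisteredClaims: jwt.RegisteredClaims{
			// NumericDate wraps a time.Time instead of a raw unix timestamp.
			ExpiresAt: jwt.NewNumericDate(time.Now().Add(time.Hour)),
			Issuer:    "garm",
		},
	}
	signed, err := jwt.NewWithClaims(jwt.SigningMethodHS256, claims).SignedString(secret)
	if err != nil {
		panic(err)
	}

	parsed := &toyClaims{}
	tok, err := jwt.ParseWithClaims(signed, parsed, func(t *jwt.Token) (interface{}, error) {
		if _, ok := t.Method.(*jwt.SigningMethodHMAC); !ok {
			return nil, fmt.Errorf("invalid signing method")
		}
		return secret, nil
	})
	if err != nil || !tok.Valid {
		panic(err)
	}
	fmt.Println(parsed.UserID, parsed.ExpiresAt.Time.UTC())
}
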
diff --git a/auth/context.go b/auth/context.go
index 27845288..1b648bb6 100644
--- a/auth/context.go
+++ b/auth/context.go
@@ -16,7 +16,9 @@ package auth
import (
"context"
+ "time"
+ runnerErrors "github.com/cloudbase/garm-provider-common/errors"
"github.com/cloudbase/garm/params"
)
@@ -27,9 +29,11 @@ const (
fullNameKey contextFlags = "full_name"
readMetricsKey contextFlags = "read_metrics"
// UserIDFlag is the User ID flag we set in the context
- UserIDFlag contextFlags = "user_id"
- isEnabledFlag contextFlags = "is_enabled"
- jwtTokenFlag contextFlags = "jwt_token"
+ UserIDFlag contextFlags = "user_id"
+ isEnabledFlag contextFlags = "is_enabled"
+ jwtTokenFlag contextFlags = "jwt_token"
+ authExpiresFlag contextFlags = "auth_expires"
+ passwordGenerationFlag contextFlags = "password_generation"
instanceIDKey contextFlags = "id"
instanceNameKey contextFlags = "name"
@@ -38,8 +42,23 @@ const (
instanceEntityKey contextFlags = "entity"
instanceRunnerStatus contextFlags = "status"
instanceTokenFetched contextFlags = "tokenFetched"
+ instanceHasJITConfig contextFlags = "hasJITConfig"
+ instanceParams contextFlags = "instanceParams"
+ instanceForgeTypeKey contextFlags = "forge_type"
)
+func SetInstanceForgeType(ctx context.Context, val string) context.Context {
+ return context.WithValue(ctx, instanceForgeTypeKey, params.EndpointType(val))
+}
+
+func InstanceForgeType(ctx context.Context) params.EndpointType {
+ elem := ctx.Value(instanceForgeTypeKey)
+ if elem == nil {
+ return ""
+ }
+ // The value is stored as params.EndpointType above; a checked assertion
+ // avoids a panic if an unexpected type ever ends up under this key.
+ forgeType, ok := elem.(params.EndpointType)
+ if !ok {
+ return ""
+ }
+ return forgeType
+}
+
func SetInstanceID(ctx context.Context, id string) context.Context {
return context.WithValue(ctx, instanceIDKey, id)
}
@@ -64,6 +83,35 @@ func InstanceTokenFetched(ctx context.Context) bool {
return elem.(bool)
}
+func SetInstanceHasJITConfig(ctx context.Context, cfg map[string]string) context.Context {
+ return context.WithValue(ctx, instanceHasJITConfig, len(cfg) > 0)
+}
+
+func InstanceHasJITConfig(ctx context.Context) bool {
+ elem := ctx.Value(instanceHasJITConfig)
+ if elem == nil {
+ return false
+ }
+ return elem.(bool)
+}
+
+func SetInstanceParams(ctx context.Context, instance params.Instance) context.Context {
+ return context.WithValue(ctx, instanceParams, instance)
+}
+
+func InstanceParams(ctx context.Context) (params.Instance, error) {
+ elem := ctx.Value(instanceParams)
+ if elem == nil {
+ return params.Instance{}, runnerErrors.ErrNotFound
+ }
+
+ instanceParams, ok := elem.(params.Instance)
+ if !ok {
+ return params.Instance{}, runnerErrors.ErrNotFound
+ }
+ return instanceParams, nil
+}
+
func SetInstanceRunnerStatus(ctx context.Context, val params.RunnerStatus) context.Context {
return context.WithValue(ctx, instanceRunnerStatus, val)
}
@@ -124,25 +172,57 @@ func InstanceEntity(ctx context.Context) string {
return elem.(string)
}
-func PopulateInstanceContext(ctx context.Context, instance params.Instance) context.Context {
+func PopulateInstanceContext(ctx context.Context, instance params.Instance, claims *InstanceJWTClaims) context.Context {
ctx = SetInstanceID(ctx, instance.ID)
ctx = SetInstanceName(ctx, instance.Name)
ctx = SetInstancePoolID(ctx, instance.PoolID)
ctx = SetInstanceRunnerStatus(ctx, instance.RunnerStatus)
ctx = SetInstanceTokenFetched(ctx, instance.TokenFetched)
+ ctx = SetInstanceHasJITConfig(ctx, instance.JitConfiguration)
+ ctx = SetInstanceParams(ctx, instance)
+ ctx = SetInstanceForgeType(ctx, claims.ForgeType)
return ctx
}
// PopulateContext sets the appropriate fields in the context, based on
// the user object
-func PopulateContext(ctx context.Context, user params.User) context.Context {
+func PopulateContext(ctx context.Context, user params.User, authExpires *time.Time) context.Context {
ctx = SetUserID(ctx, user.ID)
ctx = SetAdmin(ctx, user.IsAdmin)
ctx = SetIsEnabled(ctx, user.Enabled)
ctx = SetFullName(ctx, user.FullName)
+ ctx = SetExpires(ctx, authExpires)
+ ctx = SetPasswordGeneration(ctx, user.Generation)
return ctx
}
+func SetExpires(ctx context.Context, expires *time.Time) context.Context {
+ if expires == nil {
+ return ctx
+ }
+ return context.WithValue(ctx, authExpiresFlag, expires)
+}
+
+func Expires(ctx context.Context) *time.Time {
+ elem := ctx.Value(authExpiresFlag)
+ if elem == nil {
+ return nil
+ }
+ return elem.(*time.Time)
+}
+
+func SetPasswordGeneration(ctx context.Context, val uint) context.Context {
+ return context.WithValue(ctx, passwordGenerationFlag, val)
+}
+
+func PasswordGeneration(ctx context.Context) uint {
+ elem := ctx.Value(passwordGenerationFlag)
+ if elem == nil {
+ return 0
+ }
+ return elem.(uint)
+}
+
// SetFullName sets the user full name in the context
func SetFullName(ctx context.Context, fullName string) context.Context {
return context.WithValue(ctx, fullNameKey, fullName)
@@ -204,8 +284,10 @@ func UserID(ctx context.Context) string {
// GetAdminContext will return an admin context. This can be used internally
// when fetching users.
-func GetAdminContext() context.Context {
- ctx := context.Background()
+func GetAdminContext(ctx context.Context) context.Context {
+ if ctx == nil {
+ ctx = context.Background()
+ }
ctx = SetUserID(ctx, "")
ctx = SetAdmin(ctx, true)
ctx = SetIsEnabled(ctx, true)
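
PopulateContext now also records the token expiry and the password generation; the JWT middleware later compares the generation against the user record so that tokens issued before a password reset stop working. A short round-trip sketch (field values are hypothetical):

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/cloudbase/garm/auth"
	"github.com/cloudbase/garm/params"
)

func main() {
	user := params.User{
		ID:         "some-id",
		FullName:   "Example Admin",
		Enabled:    true,
		IsAdmin:    true,
		Generation: 2,
	}
	expires := time.Now().Add(time.Hour)

	ctx := auth.PopulateContext(context.Background(), user, &expires)

	fmt.Println(auth.IsAdmin(ctx))                   // true
	fmt.Println(auth.PasswordGeneration(ctx))        // 2
	fmt.Println(auth.Expires(ctx).After(time.Now())) // true
}
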
diff --git a/auth/init_required.go b/auth/init_required.go
index 6265649e..3ef31d70 100644
--- a/auth/init_required.go
+++ b/auth/init_required.go
@@ -16,7 +16,7 @@ package auth
import (
"encoding/json"
- "log"
+ "log/slog"
"net/http"
"github.com/cloudbase/garm/apiserver/params"
@@ -37,16 +37,44 @@ type initRequired struct {
// Middleware implements the middleware interface
func (i *initRequired) Middleware(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- ctrlInfo, err := i.store.ControllerInfo()
- if err != nil || ctrlInfo.ControllerID.String() == "" {
+ ctx := r.Context()
+
+ if !i.store.HasAdminUser(ctx) {
w.Header().Add("Content-Type", "application/json")
w.WriteHeader(http.StatusConflict)
if err := json.NewEncoder(w).Encode(params.InitializationRequired); err != nil {
- log.Printf("failed to encode response: %s", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
return
}
- ctx := r.Context()
+
+ next.ServeHTTP(w, r.WithContext(ctx))
+ })
+}
+
+func NewUrlsRequiredMiddleware(store common.Store) (Middleware, error) {
+ return &urlsRequired{
+ store: store,
+ }, nil
+}
+
+type urlsRequired struct {
+ store common.Store
+}
+
+func (u *urlsRequired) Middleware(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+ ctrlInfo, err := u.store.ControllerInfo()
+ if err != nil || ctrlInfo.MetadataURL == "" || ctrlInfo.CallbackURL == "" {
+ w.Header().Add("Content-Type", "application/json")
+ w.WriteHeader(http.StatusConflict)
+ if err := json.NewEncoder(w).Encode(params.URLsRequired); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+ return
+ }
+
next.ServeHTTP(w, r.WithContext(ctx))
})
}
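
Both initRequired and the new urlsRequired middleware satisfy the same Middleware interface, so they slot into the router chain; the split lets a freshly initialized controller pass the admin-user check while still signaling 409 until the metadata and callback URLs are configured. A rough wiring sketch — NewInitRequiredMiddleware is assumed to exist as the counterpart constructor of NewUrlsRequiredMiddleware, since it is not shown in this diff:

package main

import (
	"net/http"

	"github.com/gorilla/mux"

	"github.com/cloudbase/garm/auth"
	dbCommon "github.com/cloudbase/garm/database/common"
)

func wireRouter(store dbCommon.Store, authMW auth.Middleware) (http.Handler, error) {
	initMW, err := auth.NewInitRequiredMiddleware(store) // assumed constructor
	if err != nil {
		return nil, err
	}
	urlsMW, err := auth.NewUrlsRequiredMiddleware(store)
	if err != nil {
		return nil, err
	}

	r := mux.NewRouter()
	// Order matters: refuse everything until an admin user exists, then until
	// the controller URLs are set, then enforce JWT auth.
	r.Use(initMW.Middleware, urlsMW.Middleware, authMW.Middleware)
	return r, nil
}
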
diff --git a/auth/instance_middleware.go b/auth/instance_middleware.go
index 01e2e6d0..6d1d66e4 100644
--- a/auth/instance_middleware.go
+++ b/auth/instance_middleware.go
@@ -17,18 +17,20 @@ package auth
import (
"context"
"fmt"
+ "log/slog"
+ "math"
"net/http"
"strings"
"time"
+ jwt "github.com/golang-jwt/jwt/v5"
+
runnerErrors "github.com/cloudbase/garm-provider-common/errors"
+ commonParams "github.com/cloudbase/garm-provider-common/params"
"github.com/cloudbase/garm/config"
dbCommon "github.com/cloudbase/garm/database/common"
"github.com/cloudbase/garm/params"
"github.com/cloudbase/garm/runner/common"
-
- "github.com/golang-jwt/jwt"
- "github.com/pkg/errors"
)
// InstanceJWTClaims holds JWT claims
@@ -37,32 +39,58 @@ type InstanceJWTClaims struct {
Name string `json:"name"`
PoolID string `json:"provider_id"`
// Scope is either repository or organization
- Scope params.PoolType `json:"scope"`
+ Scope params.ForgeEntityType `json:"scope"`
// Entity is the repo or org name
- Entity string `json:"entity"`
- jwt.StandardClaims
+ Entity string `json:"entity"`
+ CreateAttempt int `json:"create_attempt"`
+ ForgeType string `json:"forge_type"`
+ jwt.RegisteredClaims
}
-func NewInstanceJWTToken(instance params.Instance, secret, entity string, poolType params.PoolType, ttlMinutes uint) (string, error) {
+func NewInstanceTokenGetter(jwtSecret string) (InstanceTokenGetter, error) {
+ if jwtSecret == "" {
+ return nil, fmt.Errorf("jwt secret is required")
+ }
+ return &instanceToken{
+ jwtSecret: jwtSecret,
+ }, nil
+}
+
+type instanceToken struct {
+ jwtSecret string
+}
+
+func (i *instanceToken) NewInstanceJWTToken(instance params.Instance, entity params.ForgeEntity, entityType params.ForgeEntityType, ttlMinutes uint) (string, error) {
// Token expiration is equal to the bootstrap timeout set on the pool plus the polling
// interval GARM uses to check for timed-out runners. Runners that have not sent their info
// by the end of this interval have most likely failed and will be reaped by GARM anyway.
- expireToken := time.Now().Add(time.Duration(ttlMinutes)*time.Minute + common.PoolReapTimeoutInterval).Unix()
+ var ttl int
+ if ttlMinutes > math.MaxInt {
+ ttl = math.MaxInt
+ } else {
+ ttl = int(ttlMinutes)
+ }
+ expireToken := time.Now().Add(time.Duration(ttl)*time.Minute + common.PoolReapTimeoutInterval)
+ expires := &jwt.NumericDate{
+ Time: expireToken,
+ }
claims := InstanceJWTClaims{
- StandardClaims: jwt.StandardClaims{
- ExpiresAt: expireToken,
+ RegisteredClaims: jwt.RegisteredClaims{
+ ExpiresAt: expires,
Issuer: "garm",
},
- ID: instance.ID,
- Name: instance.Name,
- PoolID: instance.PoolID,
- Scope: poolType,
- Entity: entity,
+ ID: instance.ID,
+ Name: instance.Name,
+ PoolID: instance.PoolID,
+ Scope: entityType,
+ Entity: entity.String(),
+ ForgeType: string(entity.Credentials.ForgeType),
+ CreateAttempt: instance.CreateAttempt,
}
token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)
- tokenString, err := token.SignedString([]byte(secret))
+ tokenString, err := token.SignedString([]byte(i.jwtSecret))
if err != nil {
- return "", errors.Wrap(err, "signing token")
+ return "", fmt.Errorf("error signing token: %w", err)
}
return tokenString, nil
@@ -92,29 +120,30 @@ func (amw *instanceMiddleware) claimsToContext(ctx context.Context, claims *Inst
return nil, runnerErrors.ErrUnauthorized
}
- instanceInfo, err := amw.store.GetInstanceByName(ctx, claims.Name)
+ instanceInfo, err := amw.store.GetInstance(ctx, claims.Name)
if err != nil {
return ctx, runnerErrors.ErrUnauthorized
}
- ctx = PopulateInstanceContext(ctx, instanceInfo)
+ ctx = PopulateInstanceContext(ctx, instanceInfo, claims)
return ctx, nil
}
// Middleware implements the middleware interface
func (amw *instanceMiddleware) Middleware(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ // nolint:golangci-lint,godox
// TODO: Log error details when authentication fails
ctx := r.Context()
authorizationHeader := r.Header.Get("authorization")
if authorizationHeader == "" {
- invalidAuthResponse(w)
+ invalidAuthResponse(ctx, w)
return
}
bearerToken := strings.Split(authorizationHeader, " ")
if len(bearerToken) != 2 {
- invalidAuthResponse(w)
+ invalidAuthResponse(ctx, w)
return
}
@@ -125,32 +154,61 @@ func (amw *instanceMiddleware) Middleware(next http.Handler) http.Handler {
}
return []byte(amw.cfg.Secret), nil
})
-
if err != nil {
- invalidAuthResponse(w)
+ invalidAuthResponse(ctx, w)
return
}
if !token.Valid {
- invalidAuthResponse(w)
+ invalidAuthResponse(ctx, w)
return
}
ctx, err = amw.claimsToContext(ctx, claims)
if err != nil {
- invalidAuthResponse(w)
+ invalidAuthResponse(ctx, w)
return
}
if InstanceID(ctx) == "" {
- invalidAuthResponse(w)
+ invalidAuthResponse(ctx, w)
return
}
runnerStatus := InstanceRunnerStatus(ctx)
if runnerStatus != params.RunnerInstalling && runnerStatus != params.RunnerPending {
// Instances that have finished installing can no longer authenticate to the API
- invalidAuthResponse(w)
+ invalidAuthResponse(ctx, w)
+ return
+ }
+
+ instanceParams, err := InstanceParams(ctx)
+ if err != nil {
+ slog.InfoContext(
+ ctx, "could not find instance params",
+ "runner_name", InstanceName(ctx))
+ invalidAuthResponse(ctx, w)
+ return
+ }
+
+ // Token was generated for a previous attempt at creating this instance.
+ if claims.CreateAttempt != instanceParams.CreateAttempt {
+ slog.InfoContext(
+ ctx, "invalid token create attempt",
+ "runner_name", InstanceName(ctx),
+ "token_create_attempt", claims.CreateAttempt,
+ "instance_create_attempt", instanceParams.CreateAttempt)
+ invalidAuthResponse(ctx, w)
+ return
+ }
+
+ // Only allow instances that are in the creating or running state to authenticate.
+ if instanceParams.Status != commonParams.InstanceCreating && instanceParams.Status != commonParams.InstanceRunning {
+ slog.InfoContext(
+ ctx, "invalid instance status",
+ "runner_name", InstanceName(ctx),
+ "status", instanceParams.Status)
+ invalidAuthResponse(ctx, w)
return
}
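
The token getter bakes the current CreateAttempt and the forge type into the instance claims, which is what lets the middleware above reject tokens left over from an earlier creation attempt. A sketch of issuing a token; the instance and entity values are hypothetical and would normally come from the store, and the entity-type value is assumed from the "repository or organization" scope comment:

package main

import (
	"fmt"

	"github.com/cloudbase/garm/auth"
	"github.com/cloudbase/garm/params"
)

func issueToken(instance params.Instance, entity params.ForgeEntity) (string, error) {
	tg, err := auth.NewInstanceTokenGetter("not-a-real-secret")
	if err != nil {
		return "", err
	}
	// 20 stands in for the pool bootstrap timeout in minutes; the getter adds
	// the reap interval on top, as the comment above describes. The scope
	// value "repository" is an assumption based on the claims comment.
	return tg.NewInstanceJWTToken(instance, entity, params.ForgeEntityType("repository"), 20)
}

func main() {
	fmt.Println("see issueToken; a real Instance/ForgeEntity comes from the store")
}
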
diff --git a/auth/interfaces.go b/auth/interfaces.go
index fa5ca43c..ab68dbd7 100644
--- a/auth/interfaces.go
+++ b/auth/interfaces.go
@@ -14,9 +14,17 @@
package auth
-import "net/http"
+import (
+ "net/http"
+
+ "github.com/cloudbase/garm/params"
+)
// Middleware defines an authentication middleware
type Middleware interface {
Middleware(next http.Handler) http.Handler
}
+
+type InstanceTokenGetter interface {
+	NewInstanceJWTToken(instance params.Instance, entity params.ForgeEntity, entityType params.ForgeEntityType, ttlMinutes uint) (string, error)
+}
diff --git a/auth/jwt.go b/auth/jwt.go
index 0b6ca057..52fce0c9 100644
--- a/auth/jwt.go
+++ b/auth/jwt.go
@@ -18,16 +18,17 @@ import (
"context"
"encoding/json"
"fmt"
- "log"
+ "log/slog"
"net/http"
"strings"
+ "time"
+
+ jwt "github.com/golang-jwt/jwt/v5"
runnerErrors "github.com/cloudbase/garm-provider-common/errors"
apiParams "github.com/cloudbase/garm/apiserver/params"
"github.com/cloudbase/garm/config"
dbCommon "github.com/cloudbase/garm/database/common"
-
- "github.com/golang-jwt/jwt"
)
// JWTClaims holds JWT claims
@@ -37,7 +38,8 @@ type JWTClaims struct {
FullName string `json:"full_name"`
IsAdmin bool `json:"is_admin"`
ReadMetrics bool `json:"read_metrics"`
- jwt.StandardClaims
+ Generation uint `json:"generation"`
+ jwt.RegisteredClaims
}
// jwtMiddleware is the authentication middleware
@@ -69,63 +71,85 @@ func (amw *jwtMiddleware) claimsToContext(ctx context.Context, claims *JWTClaims
return ctx, runnerErrors.ErrUnauthorized
}
- ctx = PopulateContext(ctx, userInfo)
+ var expiresAt *time.Time
+ if claims.ExpiresAt != nil {
+ expires := claims.ExpiresAt.Time.UTC()
+ expiresAt = &expires
+ }
+
+ if userInfo.Generation != claims.Generation {
+ // Password was reset since token was issued. Invalidate.
+ return ctx, runnerErrors.ErrUnauthorized
+ }
+
+ ctx = PopulateContext(ctx, userInfo, expiresAt)
return ctx, nil
}
-func invalidAuthResponse(w http.ResponseWriter) {
+func invalidAuthResponse(ctx context.Context, w http.ResponseWriter) {
w.Header().Add("Content-Type", "application/json")
w.WriteHeader(http.StatusUnauthorized)
if err := json.NewEncoder(w).Encode(
apiParams.APIErrorResponse{
Error: "Authentication failed",
}); err != nil {
- log.Printf("failed to encode response: %s", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
}
+func (amw *jwtMiddleware) getTokenFromRequest(r *http.Request) (string, error) {
+ authorizationHeader := r.Header.Get("authorization")
+ if authorizationHeader == "" {
+ cookie, err := r.Cookie("garm_token")
+ if err != nil {
+ return "", fmt.Errorf("failed to get cookie: %w", err)
+ }
+ return cookie.Value, nil
+ }
+
+ bearerToken := strings.Split(authorizationHeader, " ")
+ if len(bearerToken) != 2 {
+ return "", fmt.Errorf("invalid auth header")
+ }
+ return bearerToken[1], nil
+}
+
// Middleware implements the middleware interface
func (amw *jwtMiddleware) Middleware(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ // nolint:golangci-lint,godox
// TODO: Log error details when authentication fails
ctx := r.Context()
- authorizationHeader := r.Header.Get("authorization")
- if authorizationHeader == "" {
- invalidAuthResponse(w)
+ authToken, err := amw.getTokenFromRequest(r)
+ if err != nil {
+ slog.ErrorContext(ctx, "failed to get auth token", "error", err)
+ invalidAuthResponse(ctx, w)
return
}
-
- bearerToken := strings.Split(authorizationHeader, " ")
- if len(bearerToken) != 2 {
- invalidAuthResponse(w)
- return
- }
-
claims := &JWTClaims{}
- token, err := jwt.ParseWithClaims(bearerToken[1], claims, func(token *jwt.Token) (interface{}, error) {
+ token, err := jwt.ParseWithClaims(authToken, claims, func(token *jwt.Token) (interface{}, error) {
if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {
return nil, fmt.Errorf("invalid signing method")
}
return []byte(amw.cfg.Secret), nil
})
-
if err != nil {
- invalidAuthResponse(w)
+ invalidAuthResponse(ctx, w)
return
}
if !token.Valid {
- invalidAuthResponse(w)
+ invalidAuthResponse(ctx, w)
return
}
ctx, err = amw.claimsToContext(ctx, claims)
if err != nil {
- invalidAuthResponse(w)
+ invalidAuthResponse(ctx, w)
return
}
if !IsEnabled(ctx) {
- invalidAuthResponse(w)
+ invalidAuthResponse(ctx, w)
return
}
diff --git a/auth/metrics.go b/auth/metrics.go
index 11c25072..5ea688e2 100644
--- a/auth/metrics.go
+++ b/auth/metrics.go
@@ -1,3 +1,16 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
package auth
import (
@@ -6,9 +19,9 @@ import (
"net/http"
"strings"
- "github.com/cloudbase/garm/config"
+ jwt "github.com/golang-jwt/jwt/v5"
- "github.com/golang-jwt/jwt"
+ "github.com/cloudbase/garm/config"
)
type MetricsMiddleware struct {
@@ -23,17 +36,16 @@ func NewMetricsMiddleware(cfg config.JWTAuth) (*MetricsMiddleware, error) {
func (m *MetricsMiddleware) Middleware(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-
ctx := r.Context()
authorizationHeader := r.Header.Get("authorization")
if authorizationHeader == "" {
- invalidAuthResponse(w)
+ invalidAuthResponse(ctx, w)
return
}
bearerToken := strings.Split(authorizationHeader, " ")
if len(bearerToken) != 2 {
- invalidAuthResponse(w)
+ invalidAuthResponse(ctx, w)
return
}
@@ -44,20 +56,19 @@ func (m *MetricsMiddleware) Middleware(next http.Handler) http.Handler {
}
return []byte(m.cfg.Secret), nil
})
-
if err != nil {
- invalidAuthResponse(w)
+ invalidAuthResponse(ctx, w)
return
}
if !token.Valid {
- invalidAuthResponse(w)
+ invalidAuthResponse(ctx, w)
return
}
// we fully trust the claims
if !claims.ReadMetrics {
- invalidAuthResponse(w)
+ invalidAuthResponse(ctx, w)
return
}
diff --git a/build-webapp.sh b/build-webapp.sh
new file mode 100755
index 00000000..01b13c04
--- /dev/null
+++ b/build-webapp.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+
+set -e
+
+echo "Building GARM SPA (SvelteKit)..."
+
+# Navigate to webapp directory
+cd webapp
+
+# Install dependencies
+npm install
+
+# Build the SPA
+echo "Building SPA..."
+npm run build
+echo "SPA built successfully!"
diff --git a/cache/cache_test.go b/cache/cache_test.go
new file mode 100644
index 00000000..7a8ebed3
--- /dev/null
+++ b/cache/cache_test.go
@@ -0,0 +1,1040 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+package cache
+
+import (
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/suite"
+
+ runnerErrors "github.com/cloudbase/garm-provider-common/errors"
+ commonParams "github.com/cloudbase/garm-provider-common/params"
+ garmTesting "github.com/cloudbase/garm/internal/testing"
+ "github.com/cloudbase/garm/params"
+)
+
+type CacheTestSuite struct {
+ suite.Suite
+ entity params.ForgeEntity
+}
+
+func (c *CacheTestSuite) SetupTest() {
+ c.entity = params.ForgeEntity{
+ ID: "1234",
+ EntityType: params.ForgeEntityTypeOrganization,
+ Name: "test",
+ Owner: "test",
+ Credentials: params.ForgeCredentials{
+ ID: 1,
+ Name: "test",
+ ForgeType: params.GithubEndpointType,
+ },
+ }
+}
+
+func (c *CacheTestSuite) TearDownTest() {
+ // Clean up the cache after each test
+ githubToolsCache.mux.Lock()
+ defer githubToolsCache.mux.Unlock()
+ githubToolsCache.entities = make(map[string]GithubEntityTools)
+ giteaCredentialsCache.cache = make(map[uint]params.ForgeCredentials)
+ credentialsCache.cache = make(map[uint]params.ForgeCredentials)
+ instanceCache.cache = make(map[string]params.Instance)
+ entityCache = &EntityCache{
+ entities: make(map[string]EntityItem),
+ }
+}
+
+func (c *CacheTestSuite) TestCacheIsInitialized() {
+ c.Require().NotNil(githubToolsCache)
+ c.Require().NotNil(credentialsCache)
+ c.Require().NotNil(instanceCache)
+ c.Require().NotNil(entityCache)
+}
+
+func (c *CacheTestSuite) TestSetToolsCacheWorks() {
+ tools := []commonParams.RunnerApplicationDownload{
+ {
+ DownloadURL: garmTesting.Ptr("https://example.com"),
+ },
+ }
+ c.Require().NotNil(githubToolsCache)
+ c.Require().Len(githubToolsCache.entities, 0)
+ SetGithubToolsCache(c.entity, tools)
+ c.Require().Len(githubToolsCache.entities, 1)
+ cachedTools, err := GetGithubToolsCache(c.entity.ID)
+ c.Require().NoError(err)
+ c.Require().Len(cachedTools, 1)
+ c.Require().Equal(tools[0].GetDownloadURL(), cachedTools[0].GetDownloadURL())
+}
+
+func (c *CacheTestSuite) TestSetToolsCacheWithError() {
+ tools := []commonParams.RunnerApplicationDownload{
+ {
+ DownloadURL: garmTesting.Ptr("https://example.com"),
+ },
+ }
+ c.Require().NotNil(githubToolsCache)
+ c.Require().Len(githubToolsCache.entities, 0)
+ SetGithubToolsCache(c.entity, tools)
+ entity := githubToolsCache.entities[c.entity.ID]
+
+ c.Require().Equal(int64(entity.expiresAt.Sub(entity.updatedAt).Minutes()), int64(60))
+ c.Require().Len(githubToolsCache.entities, 1)
+ SetGithubToolsCacheError(c.entity, runnerErrors.ErrNotFound)
+
+ cachedTools, err := GetGithubToolsCache(c.entity.ID)
+ c.Require().Error(err)
+ c.Require().Nil(cachedTools)
+}
+
+func (c *CacheTestSuite) TestSetErrorOnNonExistingCacheEntity() {
+ entity := params.ForgeEntity{
+ ID: "non-existing-entity",
+ }
+ c.Require().NotNil(githubToolsCache)
+ c.Require().Len(githubToolsCache.entities, 0)
+ SetGithubToolsCacheError(entity, runnerErrors.ErrNotFound)
+
+ storedEntity, err := GetGithubToolsCache(entity.ID)
+ c.Require().Error(err)
+ c.Require().Nil(storedEntity)
+}
+
+func (c *CacheTestSuite) TestTimedOutToolsCache() {
+ tools := []commonParams.RunnerApplicationDownload{
+ {
+ DownloadURL: garmTesting.Ptr("https://example.com"),
+ },
+ }
+
+ c.Require().NotNil(githubToolsCache)
+ c.Require().Len(githubToolsCache.entities, 0)
+ SetGithubToolsCache(c.entity, tools)
+ entity := githubToolsCache.entities[c.entity.ID]
+
+ c.Require().Equal(int64(entity.expiresAt.Sub(entity.updatedAt).Minutes()), int64(60))
+ c.Require().Len(githubToolsCache.entities, 1)
+ entity = githubToolsCache.entities[c.entity.ID]
+ entity.updatedAt = entity.updatedAt.Add(-3 * time.Hour)
+ entity.expiresAt = entity.updatedAt.Add(-2 * time.Hour)
+ githubToolsCache.entities[c.entity.ID] = entity
+
+ cachedTools, err := GetGithubToolsCache(c.entity.ID)
+ c.Require().Error(err)
+ c.Require().Nil(cachedTools)
+}
+
+func (c *CacheTestSuite) TestGetInexistentCache() {
+ c.Require().NotNil(githubToolsCache)
+ c.Require().Len(githubToolsCache.entities, 0)
+ cachedTools, err := GetGithubToolsCache(c.entity.ID)
+ c.Require().Error(err)
+ c.Require().Nil(cachedTools)
+}
+
+func (c *CacheTestSuite) TestSetGithubCredentials() {
+ credentials := params.ForgeCredentials{
+ ID: 1,
+ }
+ SetGithubCredentials(credentials)
+ cachedCreds, ok := GetGithubCredentials(1)
+ c.Require().True(ok)
+ c.Require().Equal(credentials.ID, cachedCreds.ID)
+}
+
+func (c *CacheTestSuite) TestGetGithubCredentials() {
+ credentials := params.ForgeCredentials{
+ ID: 1,
+ }
+ SetGithubCredentials(credentials)
+ cachedCreds, ok := GetGithubCredentials(1)
+ c.Require().True(ok)
+ c.Require().Equal(credentials.ID, cachedCreds.ID)
+
+ nonExisting, ok := GetGithubCredentials(2)
+ c.Require().False(ok)
+ c.Require().Equal(params.ForgeCredentials{}, nonExisting)
+}
+
+func (c *CacheTestSuite) TestDeleteGithubCredentials() {
+ credentials := params.ForgeCredentials{
+ ID: 1,
+ }
+ SetGithubCredentials(credentials)
+ cachedCreds, ok := GetGithubCredentials(1)
+ c.Require().True(ok)
+ c.Require().Equal(credentials.ID, cachedCreds.ID)
+
+ DeleteGithubCredentials(1)
+ cachedCreds, ok = GetGithubCredentials(1)
+ c.Require().False(ok)
+ c.Require().Equal(params.ForgeCredentials{}, cachedCreds)
+}
+
+func (c *CacheTestSuite) TestGetAllGithubCredentials() {
+ credentials1 := params.ForgeCredentials{
+ ID: 1,
+ }
+ credentials2 := params.ForgeCredentials{
+ ID: 2,
+ }
+ SetGithubCredentials(credentials1)
+ SetGithubCredentials(credentials2)
+
+ cachedCreds := GetAllGithubCredentials()
+ c.Require().Len(cachedCreds, 2)
+ c.Require().Contains(cachedCreds, credentials1)
+ c.Require().Contains(cachedCreds, credentials2)
+}
+
+func (c *CacheTestSuite) TestSetInstanceCache() {
+ instance := params.Instance{
+ Name: "test-instance",
+ }
+ SetInstanceCache(instance)
+ cachedInstance, ok := GetInstanceCache("test-instance")
+ c.Require().True(ok)
+ c.Require().Equal(instance.Name, cachedInstance.Name)
+}
+
+func (c *CacheTestSuite) TestGetInstanceCache() {
+ instance := params.Instance{
+ Name: "test-instance",
+ }
+ SetInstanceCache(instance)
+ cachedInstance, ok := GetInstanceCache("test-instance")
+ c.Require().True(ok)
+ c.Require().Equal(instance.Name, cachedInstance.Name)
+
+ nonExisting, ok := GetInstanceCache("non-existing")
+ c.Require().False(ok)
+ c.Require().Equal(params.Instance{}, nonExisting)
+}
+
+func (c *CacheTestSuite) TestDeleteInstanceCache() {
+ instance := params.Instance{
+ Name: "test-instance",
+ }
+ SetInstanceCache(instance)
+ cachedInstance, ok := GetInstanceCache("test-instance")
+ c.Require().True(ok)
+ c.Require().Equal(instance.Name, cachedInstance.Name)
+
+ DeleteInstanceCache("test-instance")
+ cachedInstance, ok = GetInstanceCache("test-instance")
+ c.Require().False(ok)
+ c.Require().Equal(params.Instance{}, cachedInstance)
+}
+
+func (c *CacheTestSuite) TestGetAllInstances() {
+ instance1 := params.Instance{
+ Name: "test-instance-1",
+ }
+ instance2 := params.Instance{
+ Name: "test-instance-2",
+ }
+ SetInstanceCache(instance1)
+ SetInstanceCache(instance2)
+
+ cachedInstances := GetAllInstancesCache()
+ c.Require().Len(cachedInstances, 2)
+ c.Require().Contains(cachedInstances, instance1)
+ c.Require().Contains(cachedInstances, instance2)
+}
+
+func (c *CacheTestSuite) TestGetInstancesForPool() {
+ instance1 := params.Instance{
+ Name: "test-instance-1",
+ PoolID: "pool-1",
+ }
+ instance2 := params.Instance{
+ Name: "test-instance-2",
+ PoolID: "pool-1",
+ }
+ instance3 := params.Instance{
+ Name: "test-instance-3",
+ PoolID: "pool-2",
+ }
+ SetInstanceCache(instance1)
+ SetInstanceCache(instance2)
+ SetInstanceCache(instance3)
+
+ cachedInstances := GetInstancesForPool("pool-1")
+ c.Require().Len(cachedInstances, 2)
+ c.Require().Contains(cachedInstances, instance1)
+ c.Require().Contains(cachedInstances, instance2)
+
+ cachedInstances = GetInstancesForPool("pool-2")
+ c.Require().Len(cachedInstances, 1)
+ c.Require().Contains(cachedInstances, instance3)
+}
+
+func (c *CacheTestSuite) TestGetInstancesForScaleSet() {
+ instance1 := params.Instance{
+ Name: "test-instance-1",
+ ScaleSetID: 1,
+ }
+ instance2 := params.Instance{
+ Name: "test-instance-2",
+ ScaleSetID: 1,
+ }
+ instance3 := params.Instance{
+ Name: "test-instance-3",
+ ScaleSetID: 2,
+ }
+ SetInstanceCache(instance1)
+ SetInstanceCache(instance2)
+ SetInstanceCache(instance3)
+
+ cachedInstances := GetInstancesForScaleSet(1)
+ c.Require().Len(cachedInstances, 2)
+ c.Require().Contains(cachedInstances, instance1)
+ c.Require().Contains(cachedInstances, instance2)
+
+ cachedInstances = GetInstancesForScaleSet(2)
+ c.Require().Len(cachedInstances, 1)
+ c.Require().Contains(cachedInstances, instance3)
+}
+
+func (c *CacheTestSuite) TestSetGetEntityCache() {
+ entity := params.ForgeEntity{
+ ID: "test-entity",
+ EntityType: params.ForgeEntityTypeOrganization,
+ Name: "test",
+ Owner: "test",
+ }
+ SetEntity(entity)
+ cachedEntity, ok := GetEntity("test-entity")
+ c.Require().True(ok)
+ c.Require().Equal(entity.ID, cachedEntity.ID)
+
+ pool := params.Pool{
+ ID: "pool-1",
+ }
+ SetEntityPool(entity.ID, pool)
+ cachedEntityPools := GetEntityPools("test-entity")
+ c.Require().Equal(1, len(cachedEntityPools))
+
+ entity.Credentials.Description = "test description"
+ SetEntity(entity)
+ cachedEntity, ok = GetEntity("test-entity")
+ c.Require().True(ok)
+ c.Require().Equal(entity.ID, cachedEntity.ID)
+ c.Require().Equal(entity.Credentials.Description, cachedEntity.Credentials.Description)
+
+ // Make sure we don't clobber pools after updating the entity
+ cachedEntityPools = GetEntityPools("test-entity")
+ c.Require().Equal(1, len(cachedEntityPools))
+}
+
+func (c *CacheTestSuite) TestReplaceEntityPools() {
+ entity := params.ForgeEntity{
+ ID: "test-entity",
+ EntityType: params.ForgeEntityTypeOrganization,
+ Name: "test",
+ Owner: "test",
+ Credentials: params.ForgeCredentials{
+ ID: 1,
+ ForgeType: params.GithubEndpointType,
+ },
+ }
+ pool1 := params.Pool{
+ ID: "pool-1",
+ }
+ pool2 := params.Pool{
+ ID: "pool-2",
+ }
+
+ credentials := params.ForgeCredentials{
+ ID: 1,
+ Name: "test",
+ ForgeType: params.GithubEndpointType,
+ }
+ SetGithubCredentials(credentials)
+
+ SetEntity(entity)
+ ReplaceEntityPools(entity.ID, []params.Pool{pool1, pool2})
+ cachedEntity, ok := GetEntity(entity.ID)
+ c.Require().True(ok)
+ c.Require().Equal(entity.ID, cachedEntity.ID)
+ c.Require().Equal("test", cachedEntity.Credentials.Name)
+
+ pools := GetEntityPools(entity.ID)
+ c.Require().Len(pools, 2)
+ c.Require().Contains(pools, pool1)
+ c.Require().Contains(pools, pool2)
+}
+
+func (c *CacheTestSuite) TestReplaceEntityScaleSets() {
+ entity := params.ForgeEntity{
+ ID: "test-entity",
+ EntityType: params.ForgeEntityTypeOrganization,
+ Name: "test",
+ Owner: "test",
+ }
+ scaleSet1 := params.ScaleSet{
+ ID: 1,
+ }
+ scaleSet2 := params.ScaleSet{
+ ID: 2,
+ }
+
+ SetEntity(entity)
+ ReplaceEntityScaleSets(entity.ID, []params.ScaleSet{scaleSet1, scaleSet2})
+ cachedEntity, ok := GetEntity(entity.ID)
+ c.Require().True(ok)
+ c.Require().Equal(entity.ID, cachedEntity.ID)
+
+ scaleSets := GetEntityScaleSets(entity.ID)
+ c.Require().Len(scaleSets, 2)
+ c.Require().Contains(scaleSets, scaleSet1)
+ c.Require().Contains(scaleSets, scaleSet2)
+}
+
+func (c *CacheTestSuite) TestDeleteEntity() {
+ entity := params.ForgeEntity{
+ ID: "test-entity",
+ EntityType: params.ForgeEntityTypeOrganization,
+ Name: "test",
+ Owner: "test",
+ }
+ SetEntity(entity)
+ cachedEntity, ok := GetEntity(entity.ID)
+ c.Require().True(ok)
+ c.Require().Equal(entity.ID, cachedEntity.ID)
+
+ DeleteEntity(entity.ID)
+ cachedEntity, ok = GetEntity(entity.ID)
+ c.Require().False(ok)
+ c.Require().Equal(params.ForgeEntity{}, cachedEntity)
+}
+
+func (c *CacheTestSuite) TestSetEntityPool() {
+ entity := params.ForgeEntity{
+ ID: "test-entity",
+ EntityType: params.ForgeEntityTypeOrganization,
+ Name: "test",
+ Owner: "test",
+ }
+ pool := params.Pool{
+ ID: "pool-1",
+ }
+
+ SetEntity(entity)
+
+ SetEntityPool(entity.ID, pool)
+ cachedEntity, ok := GetEntity(entity.ID)
+ c.Require().True(ok)
+ c.Require().Equal(entity.ID, cachedEntity.ID)
+ pools := GetEntityPools(entity.ID)
+ c.Require().Len(pools, 1)
+ c.Require().Contains(pools, pool)
+ c.Require().False(pools[0].Enabled)
+
+ pool.Enabled = true
+ SetEntityPool(entity.ID, pool)
+ cachedEntity, ok = GetEntity(entity.ID)
+ c.Require().True(ok)
+ c.Require().Equal(entity.ID, cachedEntity.ID)
+ pools = GetEntityPools(entity.ID)
+ c.Require().Len(pools, 1)
+ c.Require().Contains(pools, pool)
+ c.Require().True(pools[0].Enabled)
+}
+
+func (c *CacheTestSuite) TestSetEntityScaleSet() {
+ entity := params.ForgeEntity{
+ ID: "test-entity",
+ EntityType: params.ForgeEntityTypeOrganization,
+ Name: "test",
+ Owner: "test",
+ }
+ scaleSet := params.ScaleSet{
+ ID: 1,
+ }
+
+ SetEntity(entity)
+ SetEntityScaleSet(entity.ID, scaleSet)
+
+ cachedEntity, ok := GetEntity(entity.ID)
+ c.Require().True(ok)
+ c.Require().Equal(entity.ID, cachedEntity.ID)
+ scaleSets := GetEntityScaleSets(entity.ID)
+ c.Require().Len(scaleSets, 1)
+ c.Require().Contains(scaleSets, scaleSet)
+ c.Require().False(scaleSets[0].Enabled)
+
+ scaleSet.Enabled = true
+ SetEntityScaleSet(entity.ID, scaleSet)
+ scaleSets = GetEntityScaleSets(entity.ID)
+ c.Require().Len(scaleSets, 1)
+ c.Require().Contains(scaleSets, scaleSet)
+ c.Require().True(scaleSets[0].Enabled)
+}
+
+func (c *CacheTestSuite) TestDeleteEntityPool() {
+ entity := params.ForgeEntity{
+ ID: "test-entity",
+ EntityType: params.ForgeEntityTypeOrganization,
+ Name: "test",
+ Owner: "test",
+ }
+ pool := params.Pool{
+ ID: "pool-1",
+ }
+
+ SetEntity(entity)
+ SetEntityPool(entity.ID, pool)
+ cachedEntity, ok := GetEntity(entity.ID)
+ c.Require().True(ok)
+ c.Require().Equal(entity.ID, cachedEntity.ID)
+
+ DeleteEntityPool(entity.ID, pool.ID)
+ pools := GetEntityPools(entity.ID)
+ c.Require().Len(pools, 0)
+ c.Require().NotContains(pools, pool)
+}
+
+func (c *CacheTestSuite) TestDeleteEntityScaleSet() {
+ entity := params.ForgeEntity{
+ ID: "test-entity",
+ EntityType: params.ForgeEntityTypeOrganization,
+ Name: "test",
+ Owner: "test",
+ }
+ scaleSet := params.ScaleSet{
+ ID: 1,
+ }
+
+ SetEntity(entity)
+ SetEntityScaleSet(entity.ID, scaleSet)
+ cachedEntity, ok := GetEntity(entity.ID)
+ c.Require().True(ok)
+ c.Require().Equal(entity.ID, cachedEntity.ID)
+
+ DeleteEntityScaleSet(entity.ID, scaleSet.ID)
+ scaleSets := GetEntityScaleSets(entity.ID)
+ c.Require().Len(scaleSets, 0)
+ c.Require().NotContains(scaleSets, scaleSet)
+}
+
+func (c *CacheTestSuite) TestFindPoolsMatchingAllTags() {
+ entity := params.ForgeEntity{
+ ID: "test-entity",
+ EntityType: params.ForgeEntityTypeOrganization,
+ Name: "test",
+ Owner: "test",
+ }
+ pool1 := params.Pool{
+ ID: "pool-1",
+ Tags: []params.Tag{
+ {
+ Name: "tag1",
+ },
+ {
+ Name: "tag2",
+ },
+ },
+ }
+ pool2 := params.Pool{
+ ID: "pool-2",
+ Tags: []params.Tag{
+ {
+ Name: "tag1",
+ },
+ },
+ }
+ pool3 := params.Pool{
+ ID: "pool-3",
+ Tags: []params.Tag{
+ {
+ Name: "tag3",
+ },
+ },
+ }
+
+ SetEntity(entity)
+ SetEntityPool(entity.ID, pool1)
+ SetEntityPool(entity.ID, pool2)
+ SetEntityPool(entity.ID, pool3)
+
+ cachedEntity, ok := GetEntity(entity.ID)
+ c.Require().True(ok)
+ c.Require().Equal(entity.ID, cachedEntity.ID)
+ pools := FindPoolsMatchingAllTags(entity.ID, []string{"tag1", "tag2"})
+ c.Require().Len(pools, 1)
+ c.Require().Contains(pools, pool1)
+ pools = FindPoolsMatchingAllTags(entity.ID, []string{"tag1"})
+ c.Require().Len(pools, 2)
+ c.Require().Contains(pools, pool1)
+ c.Require().Contains(pools, pool2)
+ pools = FindPoolsMatchingAllTags(entity.ID, []string{"tag3"})
+ c.Require().Len(pools, 1)
+ c.Require().Contains(pools, pool3)
+ pools = FindPoolsMatchingAllTags(entity.ID, []string{"tag4"})
+ c.Require().Len(pools, 0)
+}
+
+func (c *CacheTestSuite) TestGetEntityPools() {
+ entity := params.ForgeEntity{
+ ID: "test-entity",
+ EntityType: params.ForgeEntityTypeOrganization,
+ Name: "test",
+ Owner: "test",
+ }
+ pool1 := params.Pool{
+ ID: "pool-1",
+ Tags: []params.Tag{
+ {
+ Name: "tag1",
+ },
+ {
+ Name: "tag2",
+ },
+ },
+ }
+ pool2 := params.Pool{
+ ID: "pool-2",
+ Tags: []params.Tag{
+ {
+ Name: "tag1",
+ },
+ {
+ Name: "tag3",
+ },
+ },
+ }
+
+ SetEntity(entity)
+ SetEntityPool(entity.ID, pool1)
+ SetEntityPool(entity.ID, pool2)
+ cachedEntity, ok := GetEntity(entity.ID)
+ c.Require().True(ok)
+ c.Require().Equal(entity.ID, cachedEntity.ID)
+ pools := GetEntityPools(entity.ID)
+ c.Require().Len(pools, 2)
+ c.Require().Contains(pools, pool1)
+ c.Require().Contains(pools, pool2)
+}
+
+func (c *CacheTestSuite) TestGetEntityScaleSet() {
+ entity := params.ForgeEntity{
+ ID: "test-entity",
+ EntityType: params.ForgeEntityTypeOrganization,
+ Name: "test",
+ Owner: "test",
+ }
+ scaleSet := params.ScaleSet{
+ ID: 1,
+ }
+
+ SetEntity(entity)
+ SetEntityScaleSet(entity.ID, scaleSet)
+
+ cachedEntity, ok := GetEntity(entity.ID)
+ c.Require().True(ok)
+ c.Require().Equal(entity.ID, cachedEntity.ID)
+ scaleSets, ok := GetEntityScaleSet(entity.ID, scaleSet.ID)
+ c.Require().True(ok)
+ c.Require().Equal(scaleSet.ID, scaleSets.ID)
+}
+
+func (c *CacheTestSuite) TestGetEntityPool() {
+ entity := params.ForgeEntity{
+ ID: "test-entity",
+ EntityType: params.ForgeEntityTypeOrganization,
+ Name: "test",
+ Owner: "test",
+ }
+
+ pool := params.Pool{
+ ID: "pool-1",
+ Tags: []params.Tag{
+ {
+ Name: "tag1",
+ },
+ {
+ Name: "tag2",
+ },
+ },
+ }
+
+ SetEntity(entity)
+ SetEntityPool(entity.ID, pool)
+ cachedEntity, ok := GetEntity(entity.ID)
+ c.Require().True(ok)
+ c.Require().Equal(entity.ID, cachedEntity.ID)
+ poolFromCache, ok := GetEntityPool(entity.ID, pool.ID)
+ c.Require().True(ok)
+ c.Require().Equal(pool.ID, poolFromCache.ID)
+}
+
+func (c *CacheTestSuite) TestSetGiteaCredentials() {
+ credentials := params.ForgeCredentials{
+ ID: 1,
+ Description: "test description",
+ }
+ SetGiteaCredentials(credentials)
+ cachedCreds, ok := GetGiteaCredentials(1)
+ c.Require().True(ok)
+ c.Require().Equal(credentials.ID, cachedCreds.ID)
+
+ cachedCreds.Description = "new description"
+ SetGiteaCredentials(cachedCreds)
+ cachedCreds, ok = GetGiteaCredentials(1)
+ c.Require().True(ok)
+ c.Require().Equal(credentials.ID, cachedCreds.ID)
+ c.Require().Equal("new description", cachedCreds.Description)
+}
+
+func (c *CacheTestSuite) TestGetAllGiteaCredentials() {
+ credentials1 := params.ForgeCredentials{
+ ID: 1,
+ }
+ credentials2 := params.ForgeCredentials{
+ ID: 2,
+ }
+ SetGiteaCredentials(credentials1)
+ SetGiteaCredentials(credentials2)
+
+ cachedCreds := GetAllGiteaCredentials()
+ c.Require().Len(cachedCreds, 2)
+ c.Require().Contains(cachedCreds, credentials1)
+ c.Require().Contains(cachedCreds, credentials2)
+}
+
+func (c *CacheTestSuite) TestDeleteGiteaCredentials() {
+ credentials := params.ForgeCredentials{
+ ID: 1,
+ }
+ SetGiteaCredentials(credentials)
+ cachedCreds, ok := GetGiteaCredentials(1)
+ c.Require().True(ok)
+ c.Require().Equal(credentials.ID, cachedCreds.ID)
+
+ DeleteGiteaCredentials(1)
+ cachedCreds, ok = GetGiteaCredentials(1)
+ c.Require().False(ok)
+ c.Require().Equal(params.ForgeCredentials{}, cachedCreds)
+}
+
+func (c *CacheTestSuite) TestDeleteGiteaCredentialsNotFound() {
+ credentials := params.ForgeCredentials{
+ ID: 1,
+ }
+ SetGiteaCredentials(credentials)
+ cachedCreds, ok := GetGiteaCredentials(1)
+ c.Require().True(ok)
+ c.Require().Equal(credentials.ID, cachedCreds.ID)
+
+ DeleteGiteaCredentials(2)
+ cachedCreds, ok = GetGiteaCredentials(1)
+ c.Require().True(ok)
+ c.Require().Equal(credentials.ID, cachedCreds.ID)
+}
+
+func (c *CacheTestSuite) TestUpdateCredentialsInAffectedEntities() {
+ credentials := params.ForgeCredentials{
+ ID: 1,
+ Description: "test description",
+ }
+ entity1 := params.ForgeEntity{
+ ID: "test-entity-1",
+ EntityType: params.ForgeEntityTypeOrganization,
+ Name: "test",
+ Owner: "test",
+ Credentials: credentials,
+ }
+
+ entity2 := params.ForgeEntity{
+ ID: "test-entity-2",
+ EntityType: params.ForgeEntityTypeOrganization,
+ Name: "test",
+ Owner: "test",
+ Credentials: credentials,
+ }
+
+ SetEntity(entity1)
+ SetEntity(entity2)
+
+ cachedEntity1, ok := GetEntity(entity1.ID)
+ c.Require().True(ok)
+ c.Require().Equal(entity1.ID, cachedEntity1.ID)
+ cachedEntity2, ok := GetEntity(entity2.ID)
+ c.Require().True(ok)
+ c.Require().Equal(entity2.ID, cachedEntity2.ID)
+
+ c.Require().Equal(credentials.ID, cachedEntity1.Credentials.ID)
+ c.Require().Equal(credentials.ID, cachedEntity2.Credentials.ID)
+ c.Require().Equal(credentials.Description, cachedEntity1.Credentials.Description)
+ c.Require().Equal(credentials.Description, cachedEntity2.Credentials.Description)
+
+ credentials.Description = "new description"
+ SetGiteaCredentials(credentials)
+
+ cachedEntity1, ok = GetEntity(entity1.ID)
+ c.Require().True(ok)
+ c.Require().Equal(entity1.ID, cachedEntity1.ID)
+ cachedEntity2, ok = GetEntity(entity2.ID)
+ c.Require().True(ok)
+ c.Require().Equal(entity2.ID, cachedEntity2.ID)
+
+ c.Require().Equal(credentials.ID, cachedEntity1.Credentials.ID)
+ c.Require().Equal(credentials.ID, cachedEntity2.Credentials.ID)
+ c.Require().Equal(credentials.Description, cachedEntity1.Credentials.Description)
+ c.Require().Equal(credentials.Description, cachedEntity2.Credentials.Description)
+}
+
+func (c *CacheTestSuite) TestSetGiteaEntity() {
+ credentials := params.ForgeCredentials{
+ ID: 1,
+ Description: "test description",
+ ForgeType: params.GiteaEndpointType,
+ }
+ entity := params.ForgeEntity{
+ ID: "test-entity",
+ EntityType: params.ForgeEntityTypeOrganization,
+ Name: "test",
+ Owner: "test",
+ Credentials: credentials,
+ }
+
+ SetGiteaCredentials(credentials)
+ SetEntity(entity)
+
+ cachedEntity, ok := GetEntity(entity.ID)
+ c.Require().True(ok)
+ c.Require().Equal(entity.ID, cachedEntity.ID)
+ c.Require().Equal(credentials.ID, cachedEntity.Credentials.ID)
+ c.Require().Equal(credentials.Description, cachedEntity.Credentials.Description)
+ c.Require().Equal(credentials.ForgeType, cachedEntity.Credentials.ForgeType)
+}
+
+func (c *CacheTestSuite) TestGetEntitiesUsingCredentials() {
+ credentials := params.ForgeCredentials{
+ ID: 1,
+ Description: "test description",
+ Name: "test",
+ ForgeType: params.GithubEndpointType,
+ }
+
+ credentials2 := params.ForgeCredentials{
+ ID: 2,
+ Description: "test description2",
+ Name: "test",
+ ForgeType: params.GiteaEndpointType,
+ }
+
+ entity1 := params.ForgeEntity{
+ ID: "test-entity-1",
+ EntityType: params.ForgeEntityTypeOrganization,
+ Name: "test",
+ Owner: "test",
+ Credentials: credentials,
+ }
+
+ entity2 := params.ForgeEntity{
+ ID: "test-entity-2",
+ EntityType: params.ForgeEntityTypeOrganization,
+ Name: "test",
+ Owner: "test",
+ Credentials: credentials,
+ }
+ entity3 := params.ForgeEntity{
+ ID: "test-entity-3",
+ EntityType: params.ForgeEntityTypeOrganization,
+ Name: "test",
+ Owner: "test",
+ Credentials: credentials2,
+ }
+
+ SetEntity(entity1)
+ SetEntity(entity2)
+ SetEntity(entity3)
+
+ cachedEntities := GetEntitiesUsingCredentials(credentials)
+ c.Require().Len(cachedEntities, 2)
+ c.Require().Contains(cachedEntities, entity1)
+ c.Require().Contains(cachedEntities, entity2)
+
+ cachedEntities = GetEntitiesUsingCredentials(credentials2)
+ c.Require().Len(cachedEntities, 1)
+ c.Require().Contains(cachedEntities, entity3)
+}
+
+func (c *CacheTestSuite) TestGetAllEntities() {
+ credentials := params.ForgeCredentials{
+ ID: 1,
+ Description: "test description",
+ Name: "test",
+ ForgeType: params.GithubEndpointType,
+ }
+
+ credentials2 := params.ForgeCredentials{
+ ID: 2,
+ Description: "test description2",
+ Name: "test",
+ ForgeType: params.GiteaEndpointType,
+ }
+
+ entity1 := params.ForgeEntity{
+ ID: "test-entity-1",
+ EntityType: params.ForgeEntityTypeOrganization,
+ Name: "test",
+ Owner: "test",
+ Credentials: credentials,
+ CreatedAt: time.Now(),
+ }
+
+ entity2 := params.ForgeEntity{
+ ID: "test-entity-2",
+ EntityType: params.ForgeEntityTypeOrganization,
+ Name: "test",
+ Owner: "test",
+ Credentials: credentials,
+ CreatedAt: time.Now().Add(1 * time.Second),
+ }
+
+ entity3 := params.ForgeEntity{
+ ID: "test-entity-3",
+ EntityType: params.ForgeEntityTypeOrganization,
+ Name: "test",
+ Owner: "test",
+ Credentials: credentials2,
+ CreatedAt: time.Now().Add(2 * time.Second),
+ }
+
+ SetEntity(entity1)
+ SetEntity(entity2)
+ SetEntity(entity3)
+
+ // Sorted by creation date
+ cachedEntities := GetAllEntities()
+ c.Require().Len(cachedEntities, 3)
+ c.Require().Equal(cachedEntities[0], entity1)
+ c.Require().Equal(cachedEntities[1], entity2)
+ c.Require().Equal(cachedEntities[2], entity3)
+}
+
+func (c *CacheTestSuite) TestGetAllPools() {
+ entity := params.ForgeEntity{
+ ID: "test-entity",
+ EntityType: params.ForgeEntityTypeOrganization,
+ Name: "test",
+ Owner: "test",
+ }
+ pool1 := params.Pool{
+ ID: "pool-1",
+ CreatedAt: time.Now(),
+ Tags: []params.Tag{
+ {
+ Name: "tag1",
+ },
+ {
+ Name: "tag2",
+ },
+ },
+ }
+
+ pool2 := params.Pool{
+ ID: "pool-2",
+ CreatedAt: time.Now().Add(1 * time.Second),
+ Tags: []params.Tag{
+ {
+ Name: "tag1",
+ },
+ {
+ Name: "tag3",
+ },
+ },
+ }
+
+ SetEntity(entity)
+ SetEntityPool(entity.ID, pool1)
+ SetEntityPool(entity.ID, pool2)
+ cachedEntity, ok := GetEntity(entity.ID)
+ c.Require().True(ok)
+ c.Require().Equal(entity.ID, cachedEntity.ID)
+ pools := GetAllPools()
+ c.Require().Len(pools, 2)
+ c.Require().Equal(pools[0].ID, pool1.ID)
+ c.Require().Equal(pools[1].ID, pool2.ID)
+}
+
+func (c *CacheTestSuite) TestGetAllScaleSets() {
+ entity := params.ForgeEntity{
+ ID: "test-entity",
+ EntityType: params.ForgeEntityTypeOrganization,
+ Name: "test",
+ Owner: "test",
+ }
+ scaleSet1 := params.ScaleSet{
+ ID: 1,
+ }
+ scaleSet2 := params.ScaleSet{
+ ID: 2,
+ }
+
+ SetEntity(entity)
+ SetEntityScaleSet(entity.ID, scaleSet1)
+ SetEntityScaleSet(entity.ID, scaleSet2)
+ cachedEntity, ok := GetEntity(entity.ID)
+ c.Require().True(ok)
+ c.Require().Equal(entity.ID, cachedEntity.ID)
+ scaleSets := GetAllScaleSets()
+ c.Require().Len(scaleSets, 2)
+ c.Require().Equal(scaleSets[0].ID, scaleSet1.ID)
+ c.Require().Equal(scaleSets[1].ID, scaleSet2.ID)
+}
+
+func (c *CacheTestSuite) TestGetAllGithubCredentialsAsMap() {
+ credentials1 := params.ForgeCredentials{
+ ID: 1,
+ }
+ credentials2 := params.ForgeCredentials{
+ ID: 2,
+ }
+ SetGithubCredentials(credentials1)
+ SetGithubCredentials(credentials2)
+
+ cachedCreds := GetAllGithubCredentialsAsMap()
+ c.Require().Len(cachedCreds, 2)
+ c.Require().Contains(cachedCreds, credentials1.ID)
+ c.Require().Contains(cachedCreds, credentials2.ID)
+}
+
+func (c *CacheTestSuite) TestGetAllGiteaCredentialsAsMap() {
+ credentials1 := params.ForgeCredentials{
+ ID: 1,
+ CreatedAt: time.Now(),
+ }
+ credentials2 := params.ForgeCredentials{
+ ID: 2,
+ CreatedAt: time.Now().Add(1 * time.Second),
+ }
+ SetGiteaCredentials(credentials1)
+ SetGiteaCredentials(credentials2)
+
+ cachedCreds := GetAllGiteaCredentialsAsMap()
+ c.Require().Len(cachedCreds, 2)
+ c.Require().Contains(cachedCreds, credentials1.ID)
+ c.Require().Contains(cachedCreds, credentials2.ID)
+}
+
+func TestCacheTestSuite(t *testing.T) {
+ t.Parallel()
+ suite.Run(t, new(CacheTestSuite))
+}
diff --git a/cache/credentials_cache.go b/cache/credentials_cache.go
new file mode 100644
index 00000000..3cb5c71d
--- /dev/null
+++ b/cache/credentials_cache.go
@@ -0,0 +1,148 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+package cache
+
+import (
+ "sync"
+
+ "github.com/cloudbase/garm/params"
+)
+
+var (
+ credentialsCache *CredentialCache
+ giteaCredentialsCache *CredentialCache
+)
+
+func init() {
+ ghCredentialsCache := &CredentialCache{
+ cache: make(map[uint]params.ForgeCredentials),
+ }
+ gtCredentialsCache := &CredentialCache{
+ cache: make(map[uint]params.ForgeCredentials),
+ }
+
+ credentialsCache = ghCredentialsCache
+ giteaCredentialsCache = gtCredentialsCache
+}
+
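+// CredentialCache is a thread-safe, in-memory store of forge credentials,
+// keyed by credential ID. Separate instances back the GitHub and Gitea caches.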
+type CredentialCache struct {
+ mux sync.Mutex
+
+ cache map[uint]params.ForgeCredentials
+}
+
+func (g *CredentialCache) SetCredentialsRateLimit(credsID uint, rateLimit params.GithubRateLimit) {
+ g.mux.Lock()
+ defer g.mux.Unlock()
+
+ if creds, ok := g.cache[credsID]; ok {
+ creds.RateLimit = &rateLimit
+ g.cache[credsID] = creds
+ }
+}
+
+func (g *CredentialCache) SetCredentials(credentials params.ForgeCredentials) {
+ g.mux.Lock()
+ defer g.mux.Unlock()
+
+ g.cache[credentials.ID] = credentials
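+ // Propagate the updated credentials to any cached entities that reference them.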
+ UpdateCredentialsInAffectedEntities(credentials)
+}
+
+func (g *CredentialCache) GetCredentials(id uint) (params.ForgeCredentials, bool) {
+ g.mux.Lock()
+ defer g.mux.Unlock()
+
+ if creds, ok := g.cache[id]; ok {
+ return creds, true
+ }
+ return params.ForgeCredentials{}, false
+}
+
+func (g *CredentialCache) DeleteCredentials(id uint) {
+ g.mux.Lock()
+ defer g.mux.Unlock()
+
+ delete(g.cache, id)
+}
+
+func (g *CredentialCache) GetAllCredentials() []params.ForgeCredentials {
+ g.mux.Lock()
+ defer g.mux.Unlock()
+
+ creds := make([]params.ForgeCredentials, 0, len(g.cache))
+ for _, cred := range g.cache {
+ creds = append(creds, cred)
+ }
+
+ // Sort the credentials by ID
+ sortByID(creds)
+ return creds
+}
+
+func (g *CredentialCache) GetAllCredentialsAsMap() map[uint]params.ForgeCredentials {
+ g.mux.Lock()
+ defer g.mux.Unlock()
+
+ creds := make(map[uint]params.ForgeCredentials, len(g.cache))
+ for id, cred := range g.cache {
+ creds[id] = cred
+ }
+
+ return creds
+}
+
+func SetGithubCredentials(credentials params.ForgeCredentials) {
+ credentialsCache.SetCredentials(credentials)
+}
+
+func GetGithubCredentials(id uint) (params.ForgeCredentials, bool) {
+ return credentialsCache.GetCredentials(id)
+}
+
+func DeleteGithubCredentials(id uint) {
+ credentialsCache.DeleteCredentials(id)
+}
+
+func GetAllGithubCredentials() []params.ForgeCredentials {
+ return credentialsCache.GetAllCredentials()
+}
+
+func SetCredentialsRateLimit(credsID uint, rateLimit params.GithubRateLimit) {
+ credentialsCache.SetCredentialsRateLimit(credsID, rateLimit)
+}
+
+func GetAllGithubCredentialsAsMap() map[uint]params.ForgeCredentials {
+ return credentialsCache.GetAllCredentialsAsMap()
+}
+
+func SetGiteaCredentials(credentials params.ForgeCredentials) {
+ giteaCredentialsCache.SetCredentials(credentials)
+}
+
+func GetGiteaCredentials(id uint) (params.ForgeCredentials, bool) {
+ return giteaCredentialsCache.GetCredentials(id)
+}
+
+func DeleteGiteaCredentials(id uint) {
+ giteaCredentialsCache.DeleteCredentials(id)
+}
+
+func GetAllGiteaCredentials() []params.ForgeCredentials {
+ return giteaCredentialsCache.GetAllCredentials()
+}
+
+func GetAllGiteaCredentialsAsMap() map[uint]params.ForgeCredentials {
+ return giteaCredentialsCache.GetAllCredentialsAsMap()
+}
diff --git a/cache/entity_cache.go b/cache/entity_cache.go
new file mode 100644
index 00000000..c676332f
--- /dev/null
+++ b/cache/entity_cache.go
@@ -0,0 +1,435 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+package cache
+
+import (
+ "sync"
+ "time"
+
+ "github.com/cloudbase/garm/params"
+)
+
+var entityCache *EntityCache
+
+func init() {
+ ghEntityCache := &EntityCache{
+ entities: make(map[string]EntityItem),
+ }
+ entityCache = ghEntityCache
+}
+
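+// RunnerGroupEntry records a runner group ID along with the time it was
+// cached, so stale entries can be expired.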
+type RunnerGroupEntry struct {
+ RunnerGroupID int64
+ time time.Time
+}
+
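+// EntityItem groups a forge entity with the pools, scale sets and runner
+// groups that belong to it.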
+type EntityItem struct {
+ Entity params.ForgeEntity
+ Pools map[string]params.Pool
+ ScaleSets map[uint]params.ScaleSet
+ RunnerGroups map[string]RunnerGroupEntry
+}
+
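+// EntityCache is a thread-safe cache of forge entities and their associated
+// pools, scale sets and runner groups.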
+type EntityCache struct {
+ mux sync.Mutex
+ // entity IDs are UUID4s. It is highly unlikely they will collide (🤞).
+ entities map[string]EntityItem
+}
+
+func (e *EntityCache) UpdateCredentialsInAffectedEntities(creds params.ForgeCredentials) {
+ e.mux.Lock()
+ defer e.mux.Unlock()
+
+ for entityID, cache := range e.entities {
+ if cache.Entity.Credentials.GetID() == creds.GetID() {
+ cache.Entity.Credentials = creds
+ e.entities[entityID] = cache
+ }
+ }
+}
+
+func (e *EntityCache) GetEntity(entityID string) (params.ForgeEntity, bool) {
+ e.mux.Lock()
+ defer e.mux.Unlock()
+
+ if cache, ok := e.entities[entityID]; ok {
+ var creds params.ForgeCredentials
+ var ok bool
+ switch cache.Entity.Credentials.ForgeType {
+ case params.GithubEndpointType:
+ creds, ok = GetGithubCredentials(cache.Entity.Credentials.ID)
+ case params.GiteaEndpointType:
+ creds, ok = GetGiteaCredentials(cache.Entity.Credentials.ID)
+ }
+ if ok {
+ cache.Entity.Credentials = creds
+ }
+ return cache.Entity, true
+ }
+ return params.ForgeEntity{}, false
+}
+
+func (e *EntityCache) SetEntity(entity params.ForgeEntity) {
+ e.mux.Lock()
+ defer e.mux.Unlock()
+
+ cache, ok := e.entities[entity.ID]
+ if !ok {
+ e.entities[entity.ID] = EntityItem{
+ Entity: entity,
+ Pools: make(map[string]params.Pool),
+ ScaleSets: make(map[uint]params.ScaleSet),
+ RunnerGroups: make(map[string]RunnerGroupEntry),
+ }
+ return
+ }
+ cache.Entity = entity
+ e.entities[entity.ID] = cache
+}
+
+func (e *EntityCache) ReplaceEntityPools(entityID string, pools []params.Pool) {
+ e.mux.Lock()
+ defer e.mux.Unlock()
+
+ cache, ok := e.entities[entityID]
+ if !ok {
+ return
+ }
+
+ poolsByID := map[string]params.Pool{}
+ for _, pool := range pools {
+ poolsByID[pool.ID] = pool
+ }
+ cache.Pools = poolsByID
+ e.entities[entityID] = cache
+}
+
+func (e *EntityCache) ReplaceEntityScaleSets(entityID string, scaleSets []params.ScaleSet) {
+ e.mux.Lock()
+ defer e.mux.Unlock()
+
+ cache, ok := e.entities[entityID]
+ if !ok {
+ return
+ }
+
+ scaleSetsByID := map[uint]params.ScaleSet{}
+ for _, scaleSet := range scaleSets {
+ scaleSetsByID[scaleSet.ID] = scaleSet
+ }
+ cache.ScaleSets = scaleSetsByID
+ e.entities[entityID] = cache
+}
+
+func (e *EntityCache) DeleteEntity(entityID string) {
+ e.mux.Lock()
+ defer e.mux.Unlock()
+ delete(e.entities, entityID)
+}
+
+func (e *EntityCache) SetEntityPool(entityID string, pool params.Pool) {
+ e.mux.Lock()
+ defer e.mux.Unlock()
+
+ if cache, ok := e.entities[entityID]; ok {
+ cache.Pools[pool.ID] = pool
+ e.entities[entityID] = cache
+ }
+}
+
+func (e *EntityCache) SetEntityScaleSet(entityID string, scaleSet params.ScaleSet) {
+ e.mux.Lock()
+ defer e.mux.Unlock()
+
+ if cache, ok := e.entities[entityID]; ok {
+ cache.ScaleSets[scaleSet.ID] = scaleSet
+ e.entities[entityID] = cache
+ }
+}
+
+func (e *EntityCache) DeleteEntityPool(entityID string, poolID string) {
+ e.mux.Lock()
+ defer e.mux.Unlock()
+
+ if cache, ok := e.entities[entityID]; ok {
+ delete(cache.Pools, poolID)
+ e.entities[entityID] = cache
+ }
+}
+
+func (e *EntityCache) DeleteEntityScaleSet(entityID string, scaleSetID uint) {
+ e.mux.Lock()
+ defer e.mux.Unlock()
+
+ if cache, ok := e.entities[entityID]; ok {
+ delete(cache.ScaleSets, scaleSetID)
+ e.entities[entityID] = cache
+ }
+}
+
+func (e *EntityCache) GetEntityPool(entityID string, poolID string) (params.Pool, bool) {
+ e.mux.Lock()
+ defer e.mux.Unlock()
+
+ if cache, ok := e.entities[entityID]; ok {
+ if pool, ok := cache.Pools[poolID]; ok {
+ return pool, true
+ }
+ }
+ return params.Pool{}, false
+}
+
+func (e *EntityCache) GetEntityScaleSet(entityID string, scaleSetID uint) (params.ScaleSet, bool) {
+ e.mux.Lock()
+ defer e.mux.Unlock()
+
+ if cache, ok := e.entities[entityID]; ok {
+ if scaleSet, ok := cache.ScaleSets[scaleSetID]; ok {
+ return scaleSet, true
+ }
+ }
+ return params.ScaleSet{}, false
+}
+
+func (e *EntityCache) FindPoolsMatchingAllTags(entityID string, tags []string) []params.Pool {
+ e.mux.Lock()
+ defer e.mux.Unlock()
+
+ if cache, ok := e.entities[entityID]; ok {
+ var pools []params.Pool
+ for _, pool := range cache.Pools {
+ if pool.HasRequiredLabels(tags) {
+ pools = append(pools, pool)
+ }
+ }
+ // Sort the pools by creation date.
+ sortByCreationDate(pools)
+ return pools
+ }
+ return nil
+}
+
+func (e *EntityCache) GetEntityPools(entityID string) []params.Pool {
+ e.mux.Lock()
+ defer e.mux.Unlock()
+
+ if cache, ok := e.entities[entityID]; ok {
+ var pools []params.Pool
+ for _, pool := range cache.Pools {
+ pools = append(pools, pool)
+ }
+ // Sort the pools by creation date.
+ sortByCreationDate(pools)
+ return pools
+ }
+ return nil
+}
+
+func (e *EntityCache) GetEntityScaleSets(entityID string) []params.ScaleSet {
+ e.mux.Lock()
+ defer e.mux.Unlock()
+
+ if cache, ok := e.entities[entityID]; ok {
+ var scaleSets []params.ScaleSet
+ for _, scaleSet := range cache.ScaleSets {
+ scaleSets = append(scaleSets, scaleSet)
+ }
+ // Sort the scale sets by ID.
+ sortByID(scaleSets)
+ return scaleSets
+ }
+ return nil
+}
+
+func (e *EntityCache) GetEntitiesUsingCredentials(creds params.ForgeCredentials) []params.ForgeEntity {
+ e.mux.Lock()
+ defer e.mux.Unlock()
+
+ var entities []params.ForgeEntity
+ for _, cache := range e.entities {
+ if cache.Entity.Credentials.ForgeType != creds.ForgeType {
+ continue
+ }
+
+ if cache.Entity.Credentials.GetID() == creds.GetID() {
+ entities = append(entities, cache.Entity)
+ }
+ }
+ sortByCreationDate(entities)
+ return entities
+}
+
+func (e *EntityCache) GetAllEntities() []params.ForgeEntity {
+ e.mux.Lock()
+ defer e.mux.Unlock()
+
+ var entities []params.ForgeEntity
+ for _, cache := range e.entities {
+ // Get the credentials from the credentials cache.
+ var creds params.ForgeCredentials
+ var ok bool
+ switch cache.Entity.Credentials.ForgeType {
+ case params.GithubEndpointType:
+ creds, ok = GetGithubCredentials(cache.Entity.Credentials.ID)
+ case params.GiteaEndpointType:
+ creds, ok = GetGiteaCredentials(cache.Entity.Credentials.ID)
+ }
+ if ok {
+ cache.Entity.Credentials = creds
+ }
+ entities = append(entities, cache.Entity)
+ }
+ sortByCreationDate(entities)
+ return entities
+}
+
+func (e *EntityCache) GetAllPools() []params.Pool {
+ e.mux.Lock()
+ defer e.mux.Unlock()
+
+ var pools []params.Pool
+ for _, cache := range e.entities {
+ for _, pool := range cache.Pools {
+ pools = append(pools, pool)
+ }
+ }
+ sortByCreationDate(pools)
+ return pools
+}
+
+func (e *EntityCache) GetAllScaleSets() []params.ScaleSet {
+ e.mux.Lock()
+ defer e.mux.Unlock()
+
+ var scaleSets []params.ScaleSet
+ for _, cache := range e.entities {
+ for _, scaleSet := range cache.ScaleSets {
+ scaleSets = append(scaleSets, scaleSet)
+ }
+ }
+ sortByID(scaleSets)
+ return scaleSets
+}
+
+func (e *EntityCache) SetEntityRunnerGroup(entityID, runnerGroupName string, runnerGroupID int64) {
+ e.mux.Lock()
+ defer e.mux.Unlock()
+
+ if _, ok := e.entities[entityID]; ok {
+ e.entities[entityID].RunnerGroups[runnerGroupName] = RunnerGroupEntry{
+ RunnerGroupID: runnerGroupID,
+ time: time.Now().UTC(),
+ }
+ }
+}
+
+func (e *EntityCache) GetEntityRunnerGroup(entityID, runnerGroupName string) (int64, bool) {
+ e.mux.Lock()
+ defer e.mux.Unlock()
+
+ if _, ok := e.entities[entityID]; ok {
+ if runnerGroup, ok := e.entities[entityID].RunnerGroups[runnerGroupName]; ok {
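+ // Cached runner group IDs are valid for one hour; stale entries are evicted on read.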
+ if time.Now().UTC().After(runnerGroup.time.Add(1 * time.Hour)) {
+ delete(e.entities[entityID].RunnerGroups, runnerGroupName)
+ return 0, false
+ }
+ return runnerGroup.RunnerGroupID, true
+ }
+ }
+ return 0, false
+}
+
+func SetEntityRunnerGroup(entityID, runnerGroupName string, runnerGroupID int64) {
+ entityCache.SetEntityRunnerGroup(entityID, runnerGroupName, runnerGroupID)
+}
+
+func GetEntityRunnerGroup(entityID, runnerGroupName string) (int64, bool) {
+ return entityCache.GetEntityRunnerGroup(entityID, runnerGroupName)
+}
+
+func GetEntity(entityID string) (params.ForgeEntity, bool) {
+ return entityCache.GetEntity(entityID)
+}
+
+func SetEntity(entity params.ForgeEntity) {
+ entityCache.SetEntity(entity)
+}
+
+func ReplaceEntityPools(entityID string, pools []params.Pool) {
+ entityCache.ReplaceEntityPools(entityID, pools)
+}
+
+func ReplaceEntityScaleSets(entityID string, scaleSets []params.ScaleSet) {
+ entityCache.ReplaceEntityScaleSets(entityID, scaleSets)
+}
+
+func DeleteEntity(entityID string) {
+ entityCache.DeleteEntity(entityID)
+}
+
+func SetEntityPool(entityID string, pool params.Pool) {
+ entityCache.SetEntityPool(entityID, pool)
+}
+
+func SetEntityScaleSet(entityID string, scaleSet params.ScaleSet) {
+ entityCache.SetEntityScaleSet(entityID, scaleSet)
+}
+
+func DeleteEntityPool(entityID string, poolID string) {
+ entityCache.DeleteEntityPool(entityID, poolID)
+}
+
+func DeleteEntityScaleSet(entityID string, scaleSetID uint) {
+ entityCache.DeleteEntityScaleSet(entityID, scaleSetID)
+}
+
+func GetEntityPool(entityID string, poolID string) (params.Pool, bool) {
+ return entityCache.GetEntityPool(entityID, poolID)
+}
+
+func GetEntityScaleSet(entityID string, scaleSetID uint) (params.ScaleSet, bool) {
+ return entityCache.GetEntityScaleSet(entityID, scaleSetID)
+}
+
+func FindPoolsMatchingAllTags(entityID string, tags []string) []params.Pool {
+ return entityCache.FindPoolsMatchingAllTags(entityID, tags)
+}
+
+func GetEntityPools(entityID string) []params.Pool {
+ return entityCache.GetEntityPools(entityID)
+}
+
+func GetEntityScaleSets(entityID string) []params.ScaleSet {
+ return entityCache.GetEntityScaleSets(entityID)
+}
+
+func UpdateCredentialsInAffectedEntities(creds params.ForgeCredentials) {
+ entityCache.UpdateCredentialsInAffectedEntities(creds)
+}
+
+func GetEntitiesUsingCredentials(creds params.ForgeCredentials) []params.ForgeEntity {
+ return entityCache.GetEntitiesUsingCredentials(creds)
+}
+
+func GetAllEntities() []params.ForgeEntity {
+ return entityCache.GetAllEntities()
+}
+
+func GetAllPools() []params.Pool {
+ return entityCache.GetAllPools()
+}
+
+func GetAllScaleSets() []params.ScaleSet {
+ return entityCache.GetAllScaleSets()
+}
diff --git a/cache/github_client.go b/cache/github_client.go
new file mode 100644
index 00000000..179a9718
--- /dev/null
+++ b/cache/github_client.go
@@ -0,0 +1,60 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+package cache
+
+import (
+ "sync"
+
+ "github.com/cloudbase/garm/runner/common"
+)
+
+var ghClientCache *GithubClientCache
+
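+// GithubClientCache holds one GithubClient per entity ID, allowing clients to
+// be reused across operations.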
+type GithubClientCache struct {
+ mux sync.Mutex
+
+ cache map[string]common.GithubClient
+}
+
+func init() {
+ clientCache := &GithubClientCache{
+ cache: make(map[string]common.GithubClient),
+ }
+ ghClientCache = clientCache
+}
+
+func (g *GithubClientCache) SetClient(entityID string, client common.GithubClient) {
+ g.mux.Lock()
+ defer g.mux.Unlock()
+
+ g.cache[entityID] = client
+}
+
+func (g *GithubClientCache) GetClient(entityID string) (common.GithubClient, bool) {
+ g.mux.Lock()
+ defer g.mux.Unlock()
+
+ if client, ok := g.cache[entityID]; ok {
+ return client, true
+ }
+ return nil, false
+}
+
+func SetGithubClient(entityID string, client common.GithubClient) {
+ ghClientCache.SetClient(entityID, client)
+}
+
+func GetGithubClient(entityID string) (common.GithubClient, bool) {
+ return ghClientCache.GetClient(entityID)
+}
diff --git a/cache/instance_cache.go b/cache/instance_cache.go
new file mode 100644
index 00000000..ae2c1cec
--- /dev/null
+++ b/cache/instance_cache.go
@@ -0,0 +1,143 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+package cache
+
+import (
+ "sync"
+
+ "github.com/cloudbase/garm/params"
+)
+
+var instanceCache *InstanceCache
+
+func init() {
+ cache := &InstanceCache{
+ cache: make(map[string]params.Instance),
+ }
+ instanceCache = cache
+}
+
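+// InstanceCache is a thread-safe cache of runner instances, keyed by instance name.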
+type InstanceCache struct {
+ mux sync.Mutex
+
+ cache map[string]params.Instance
+}
+
+func (i *InstanceCache) SetInstance(instance params.Instance) {
+ i.mux.Lock()
+ defer i.mux.Unlock()
+
+ i.cache[instance.Name] = instance
+}
+
+func (i *InstanceCache) GetInstance(name string) (params.Instance, bool) {
+ i.mux.Lock()
+ defer i.mux.Unlock()
+
+ if instance, ok := i.cache[name]; ok {
+ return instance, true
+ }
+ return params.Instance{}, false
+}
+
+func (i *InstanceCache) DeleteInstance(name string) {
+ i.mux.Lock()
+ defer i.mux.Unlock()
+
+ delete(i.cache, name)
+}
+
+func (i *InstanceCache) GetAllInstances() []params.Instance {
+ i.mux.Lock()
+ defer i.mux.Unlock()
+
+ instances := make([]params.Instance, 0, len(i.cache))
+ for _, instance := range i.cache {
+ instances = append(instances, instance)
+ }
+ sortByCreationDate(instances)
+ return instances
+}
+
+func (i *InstanceCache) GetInstancesForPool(poolID string) []params.Instance {
+ i.mux.Lock()
+ defer i.mux.Unlock()
+
+ var filteredInstances []params.Instance
+ for _, instance := range i.cache {
+ if instance.PoolID == poolID {
+ filteredInstances = append(filteredInstances, instance)
+ }
+ }
+ sortByCreationDate(filteredInstances)
+ return filteredInstances
+}
+
+func (i *InstanceCache) GetInstancesForScaleSet(scaleSetID uint) []params.Instance {
+ i.mux.Lock()
+ defer i.mux.Unlock()
+
+ var filteredInstances []params.Instance
+ for _, instance := range i.cache {
+ if instance.ScaleSetID == scaleSetID {
+ filteredInstances = append(filteredInstances, instance)
+ }
+ }
+ sortByCreationDate(filteredInstances)
+ return filteredInstances
+}
+
+func (i *InstanceCache) GetEntityInstances(entityID string) []params.Instance {
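+ // No lock is held here; the method composes the locked helpers
+ // GetEntityPools and GetAllInstances instead of nesting locks.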
+ pools := GetEntityPools(entityID)
+ poolsAsMap := map[string]bool{}
+ for _, pool := range pools {
+ poolsAsMap[pool.ID] = true
+ }
+
+ ret := []params.Instance{}
+ for _, val := range i.GetAllInstances() {
+ if _, ok := poolsAsMap[val.PoolID]; ok {
+ ret = append(ret, val)
+ }
+ }
+ return ret
+}
+
+func SetInstanceCache(instance params.Instance) {
+ instanceCache.SetInstance(instance)
+}
+
+func GetInstanceCache(name string) (params.Instance, bool) {
+ return instanceCache.GetInstance(name)
+}
+
+func DeleteInstanceCache(name string) {
+ instanceCache.DeleteInstance(name)
+}
+
+func GetAllInstancesCache() []params.Instance {
+ return instanceCache.GetAllInstances()
+}
+
+func GetInstancesForPool(poolID string) []params.Instance {
+ return instanceCache.GetInstancesForPool(poolID)
+}
+
+func GetInstancesForScaleSet(scaleSetID uint) []params.Instance {
+ return instanceCache.GetInstancesForScaleSet(scaleSetID)
+}
+
+func GetEntityInstances(entityID string) []params.Instance {
+ return instanceCache.GetEntityInstances(entityID)
+}
diff --git a/cache/tools_cache.go b/cache/tools_cache.go
new file mode 100644
index 00000000..30e83a0e
--- /dev/null
+++ b/cache/tools_cache.go
@@ -0,0 +1,116 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+package cache
+
+import (
+ "fmt"
+ "sync"
+ "time"
+
+ commonParams "github.com/cloudbase/garm-provider-common/params"
+ "github.com/cloudbase/garm/params"
+)
+
+var githubToolsCache *GithubToolsCache
+
+func init() {
+ ghToolsCache := &GithubToolsCache{
+ entities: make(map[string]GithubEntityTools),
+ }
+ githubToolsCache = ghToolsCache
+}
+
+type GithubEntityTools struct {
+ updatedAt time.Time
+ expiresAt time.Time
+ err error
+ entity params.ForgeEntity
+ tools []commonParams.RunnerApplicationDownload
+}
+
+type GithubToolsCache struct {
+ mux sync.Mutex
+ // entity IDs are UUID4s. It is highly unlikely they will collide (🤞).
+ entities map[string]GithubEntityTools
+}
+
+func (g *GithubToolsCache) Get(entityID string) ([]commonParams.RunnerApplicationDownload, error) {
+ g.mux.Lock()
+ defer g.mux.Unlock()
+
+ if cache, ok := g.entities[entityID]; ok {
+ if cache.entity.Credentials.ForgeType == params.GithubEndpointType {
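+ // Consider the entry stale 5 minutes before its recorded expiry, leaving room for a refresh.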
+ if time.Now().UTC().After(cache.expiresAt.Add(-5 * time.Minute)) {
+ // Stale cache, remove it.
+ delete(g.entities, entityID)
+ return nil, fmt.Errorf("cache expired for entity %s", entityID)
+ }
+ }
+ if cache.err != nil {
+ return nil, cache.err
+ }
+ return cache.tools, nil
+ }
+ return nil, fmt.Errorf("no cache found for entity %s", entityID)
+}
+
+func (g *GithubToolsCache) Set(entity params.ForgeEntity, tools []commonParams.RunnerApplicationDownload) {
+ g.mux.Lock()
+ defer g.mux.Unlock()
+
+ forgeTools := GithubEntityTools{
+ updatedAt: time.Now(),
+ entity: entity,
+ tools: tools,
+ err: nil,
+ }
+
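+ // Only GitHub tool entries get a TTL; entries for other forge types do not expire.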
+ if entity.Credentials.ForgeType == params.GithubEndpointType {
+ forgeTools.expiresAt = time.Now().Add(1 * time.Hour)
+ }
+
+ g.entities[entity.ID] = forgeTools
+}
+
+func (g *GithubToolsCache) SetToolsError(entity params.ForgeEntity, err error) {
+ g.mux.Lock()
+ defer g.mux.Unlock()
+
+ // If the entity is not in the cache, add it with the error.
+ cache, ok := g.entities[entity.ID]
+ if !ok {
+ g.entities[entity.ID] = GithubEntityTools{
+ updatedAt: time.Now(),
+ entity: entity,
+ err: err,
+ }
+ return
+ }
+
+ // Update the error for the existing entity.
+ cache.err = err
+ g.entities[entity.ID] = cache
+}
+
+func SetGithubToolsCache(entity params.ForgeEntity, tools []commonParams.RunnerApplicationDownload) {
+ githubToolsCache.Set(entity, tools)
+}
+
+func GetGithubToolsCache(entityID string) ([]commonParams.RunnerApplicationDownload, error) {
+ return githubToolsCache.Get(entityID)
+}
+
+func SetGithubToolsCacheError(entity params.ForgeEntity, err error) {
+ githubToolsCache.SetToolsError(entity, err)
+}
diff --git a/cache/util.go b/cache/util.go
new file mode 100644
index 00000000..5fd234a9
--- /dev/null
+++ b/cache/util.go
@@ -0,0 +1,32 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+package cache
+
+import (
+ "sort"
+
+ "github.com/cloudbase/garm/params"
+)
+
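+// sortByID sorts the slice in ascending order of the ID returned by GetID.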
+func sortByID[T params.IDGetter](s []T) {
+ sort.Slice(s, func(i, j int) bool {
+ return s[i].GetID() < s[j].GetID()
+ })
+}
+
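+// sortByCreationDate sorts the slice in ascending order of creation time.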
+func sortByCreationDate[T params.CreationDateGetter](s []T) {
+ sort.Slice(s, func(i, j int) bool {
+ return s[i].GetCreatedAt().Before(s[j].GetCreatedAt())
+ })
+}
diff --git a/client/controller/controller_client.go b/client/controller/controller_client.go
new file mode 100644
index 00000000..cf6cde1a
--- /dev/null
+++ b/client/controller/controller_client.go
@@ -0,0 +1,106 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package controller
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "fmt"
+
+ "github.com/go-openapi/runtime"
+ httptransport "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// New creates a new controller API client.
+func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService {
+ return &Client{transport: transport, formats: formats}
+}
+
+// NewClientWithBasicAuth creates a new controller API client with basic auth credentials.
+// It takes the following parameters:
+// - host: http host (github.com).
+// - basePath: any base path for the API client ("/v1", "/v3").
+// - scheme: http scheme ("http", "https").
+// - user: user for basic authentication header.
+// - password: password for basic authentication header.
+func NewClientWithBasicAuth(host, basePath, scheme, user, password string) ClientService {
+ transport := httptransport.New(host, basePath, []string{scheme})
+ transport.DefaultAuthentication = httptransport.BasicAuth(user, password)
+ return &Client{transport: transport, formats: strfmt.Default}
+}
+
+// NewClientWithBearerToken creates a new controller API client with a bearer token for authentication.
+// It takes the following parameters:
+// - host: http host (github.com).
+// - basePath: any base path for the API client ("/v1", "/v3").
+// - scheme: http scheme ("http", "https").
+// - bearerToken: bearer token for Bearer authentication header.
+func NewClientWithBearerToken(host, basePath, scheme, bearerToken string) ClientService {
+ transport := httptransport.New(host, basePath, []string{scheme})
+ transport.DefaultAuthentication = httptransport.BearerToken(bearerToken)
+ return &Client{transport: transport, formats: strfmt.Default}
+}
+
+/*
+Client for controller API
+*/
+type Client struct {
+ transport runtime.ClientTransport
+ formats strfmt.Registry
+}
+
+// ClientOption may be used to customize the behavior of Client methods.
+type ClientOption func(*runtime.ClientOperation)
+
+// ClientService is the interface for Client methods
+type ClientService interface {
+ UpdateController(params *UpdateControllerParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*UpdateControllerOK, error)
+
+ SetTransport(transport runtime.ClientTransport)
+}
+
+/*
+UpdateController updates the controller
+*/
+func (a *Client) UpdateController(params *UpdateControllerParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*UpdateControllerOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewUpdateControllerParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "UpdateController",
+ Method: "PUT",
+ PathPattern: "/controller",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &UpdateControllerReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*UpdateControllerOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+ msg := fmt.Sprintf("unexpected success response for UpdateController: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+ panic(msg)
+}
+
+// SetTransport changes the transport on the client
+func (a *Client) SetTransport(transport runtime.ClientTransport) {
+ a.transport = transport
+}
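+
+// Illustrative usage (an editor's sketch appended to generated code): build a
+// client with a bearer token and update the controller. The host, base path
+// and token values are assumptions.
+//
+//	cli := controller.NewClientWithBearerToken("garm.example.com", "/api/v1", "https", token)
+//	resp, err := cli.UpdateController(
+//		controller.NewUpdateControllerParams().WithBody(garm_params.UpdateControllerParams{}),
+//		nil, // with a nil authInfo the transport's DefaultAuthentication is used
+//	)
+//	if err == nil {
+//		_ = resp.GetPayload() // garm_params.ControllerInfo
+//	}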
diff --git a/client/controller/update_controller_parameters.go b/client/controller/update_controller_parameters.go
new file mode 100644
index 00000000..a0705d60
--- /dev/null
+++ b/client/controller/update_controller_parameters.go
@@ -0,0 +1,151 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package controller
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// NewUpdateControllerParams creates a new UpdateControllerParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewUpdateControllerParams() *UpdateControllerParams {
+ return &UpdateControllerParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewUpdateControllerParamsWithTimeout creates a new UpdateControllerParams object
+// with the ability to set a timeout on a request.
+func NewUpdateControllerParamsWithTimeout(timeout time.Duration) *UpdateControllerParams {
+ return &UpdateControllerParams{
+ timeout: timeout,
+ }
+}
+
+// NewUpdateControllerParamsWithContext creates a new UpdateControllerParams object
+// with the ability to set a context for a request.
+func NewUpdateControllerParamsWithContext(ctx context.Context) *UpdateControllerParams {
+ return &UpdateControllerParams{
+ Context: ctx,
+ }
+}
+
+// NewUpdateControllerParamsWithHTTPClient creates a new UpdateControllerParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewUpdateControllerParamsWithHTTPClient(client *http.Client) *UpdateControllerParams {
+ return &UpdateControllerParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+UpdateControllerParams contains all the parameters to send to the API endpoint
+
+ for the update controller operation.
+
+ Typically these are written to a http.Request.
+*/
+type UpdateControllerParams struct {
+
+ /* Body.
+
+ Parameters used when updating the controller.
+ */
+ Body garm_params.UpdateControllerParams
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the update controller params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *UpdateControllerParams) WithDefaults() *UpdateControllerParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the update controller params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *UpdateControllerParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the update controller params
+func (o *UpdateControllerParams) WithTimeout(timeout time.Duration) *UpdateControllerParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the update controller params
+func (o *UpdateControllerParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the update controller params
+func (o *UpdateControllerParams) WithContext(ctx context.Context) *UpdateControllerParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the update controller params
+func (o *UpdateControllerParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the update controller params
+func (o *UpdateControllerParams) WithHTTPClient(client *http.Client) *UpdateControllerParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the update controller params
+func (o *UpdateControllerParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithBody adds the body to the update controller params
+func (o *UpdateControllerParams) WithBody(body garm_params.UpdateControllerParams) *UpdateControllerParams {
+ o.SetBody(body)
+ return o
+}
+
+// SetBody adds the body to the update controller params
+func (o *UpdateControllerParams) SetBody(body garm_params.UpdateControllerParams) {
+ o.Body = body
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *UpdateControllerParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+ if err := r.SetBodyParam(o.Body); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
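+
+// Illustrative usage (an editor's sketch appended to generated code): the
+// With* setters return the receiver, so body, timeout and context chain
+// fluently; ctx is an assumed context.Context value.
+//
+//	p := NewUpdateControllerParams().
+//		WithBody(garm_params.UpdateControllerParams{}).
+//		WithTimeout(30 * time.Second).
+//		WithContext(ctx)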
diff --git a/client/controller/update_controller_responses.go b/client/controller/update_controller_responses.go
new file mode 100644
index 00000000..f555a78e
--- /dev/null
+++ b/client/controller/update_controller_responses.go
@@ -0,0 +1,179 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package controller
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// UpdateControllerReader is a Reader for the UpdateController structure.
+type UpdateControllerReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *UpdateControllerReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewUpdateControllerOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ case 400:
+ result := NewUpdateControllerBadRequest()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ default:
+ return nil, runtime.NewAPIError("[PUT /controller] UpdateController", response, response.Code())
+ }
+}
+
+// NewUpdateControllerOK creates a UpdateControllerOK with default headers values
+func NewUpdateControllerOK() *UpdateControllerOK {
+ return &UpdateControllerOK{}
+}
+
+/*
+UpdateControllerOK describes a response with status code 200, with default header values.
+
+ControllerInfo
+*/
+type UpdateControllerOK struct {
+ Payload garm_params.ControllerInfo
+}
+
+// IsSuccess returns true when this update controller o k response has a 2xx status code
+func (o *UpdateControllerOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this update controller o k response has a 3xx status code
+func (o *UpdateControllerOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this update controller o k response has a 4xx status code
+func (o *UpdateControllerOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this update controller o k response has a 5xx status code
+func (o *UpdateControllerOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this update controller o k response has a status code equal to that given
+func (o *UpdateControllerOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the update controller o k response
+func (o *UpdateControllerOK) Code() int {
+ return 200
+}
+
+func (o *UpdateControllerOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /controller][%d] updateControllerOK %s", 200, payload)
+}
+
+func (o *UpdateControllerOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /controller][%d] updateControllerOK %s", 200, payload)
+}
+
+func (o *UpdateControllerOK) GetPayload() garm_params.ControllerInfo {
+ return o.Payload
+}
+
+func (o *UpdateControllerOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewUpdateControllerBadRequest creates a UpdateControllerBadRequest with default headers values
+func NewUpdateControllerBadRequest() *UpdateControllerBadRequest {
+ return &UpdateControllerBadRequest{}
+}
+
+/*
+UpdateControllerBadRequest describes a response with status code 400, with default header values.
+
+APIErrorResponse
+*/
+type UpdateControllerBadRequest struct {
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this update controller bad request response has a 2xx status code
+func (o *UpdateControllerBadRequest) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this update controller bad request response has a 3xx status code
+func (o *UpdateControllerBadRequest) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this update controller bad request response has a 4xx status code
+func (o *UpdateControllerBadRequest) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this update controller bad request response has a 5xx status code
+func (o *UpdateControllerBadRequest) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this update controller bad request response has a status code equal to that given
+func (o *UpdateControllerBadRequest) IsCode(code int) bool {
+ return code == 400
+}
+
+// Code gets the status code for the update controller bad request response
+func (o *UpdateControllerBadRequest) Code() int {
+ return 400
+}
+
+func (o *UpdateControllerBadRequest) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /controller][%d] updateControllerBadRequest %s", 400, payload)
+}
+
+func (o *UpdateControllerBadRequest) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /controller][%d] updateControllerBadRequest %s", 400, payload)
+}
+
+func (o *UpdateControllerBadRequest) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *UpdateControllerBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
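+
+// Illustrative usage (an editor's sketch appended to generated code): non-2xx
+// responses are returned as the error value, so the 400 payload can be
+// recovered with a standard-library errors.As assertion.
+//
+//	if _, err := cli.UpdateController(p, nil); err != nil {
+//		var badRequest *UpdateControllerBadRequest
+//		if errors.As(err, &badRequest) {
+//			_ = badRequest.GetPayload() // apiserver_params.APIErrorResponse
+//		}
+//	}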
diff --git a/client/controller_info/controller_info_client.go b/client/controller_info/controller_info_client.go
new file mode 100644
index 00000000..bccd4e06
--- /dev/null
+++ b/client/controller_info/controller_info_client.go
@@ -0,0 +1,106 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package controller_info
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "fmt"
+
+ "github.com/go-openapi/runtime"
+ httptransport "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// New creates a new controller info API client.
+func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService {
+ return &Client{transport: transport, formats: formats}
+}
+
+// NewClientWithBasicAuth creates a new controller info API client with basic auth credentials.
+// It takes the following parameters:
+// - host: http host (github.com).
+// - basePath: any base path for the API client ("/v1", "/v3").
+// - scheme: http scheme ("http", "https").
+// - user: user for basic authentication header.
+// - password: password for basic authentication header.
+func NewClientWithBasicAuth(host, basePath, scheme, user, password string) ClientService {
+ transport := httptransport.New(host, basePath, []string{scheme})
+ transport.DefaultAuthentication = httptransport.BasicAuth(user, password)
+ return &Client{transport: transport, formats: strfmt.Default}
+}
+
+// NewClientWithBearerToken creates a new controller info API client with a bearer token for authentication.
+// It takes the following parameters:
+// - host: http host (github.com).
+// - basePath: any base path for the API client ("/v1", "/v3").
+// - scheme: http scheme ("http", "https").
+// - bearerToken: bearer token for Bearer authentication header.
+func NewClientWithBearerToken(host, basePath, scheme, bearerToken string) ClientService {
+ transport := httptransport.New(host, basePath, []string{scheme})
+ transport.DefaultAuthentication = httptransport.BearerToken(bearerToken)
+ return &Client{transport: transport, formats: strfmt.Default}
+}
+
+/*
+Client for controller info API
+*/
+type Client struct {
+ transport runtime.ClientTransport
+ formats strfmt.Registry
+}
+
+// ClientOption may be used to customize the behavior of Client methods.
+type ClientOption func(*runtime.ClientOperation)
+
+// ClientService is the interface for Client methods
+type ClientService interface {
+ ControllerInfo(params *ControllerInfoParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ControllerInfoOK, error)
+
+ SetTransport(transport runtime.ClientTransport)
+}
+
+/*
+ControllerInfo gets the controller info
+*/
+func (a *Client) ControllerInfo(params *ControllerInfoParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ControllerInfoOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewControllerInfoParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "ControllerInfo",
+ Method: "GET",
+ PathPattern: "/controller-info",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &ControllerInfoReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*ControllerInfoOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+ msg := fmt.Sprintf("unexpected success response for ControllerInfo: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+ panic(msg)
+}
+
+// SetTransport changes the transport on the client
+func (a *Client) SetTransport(transport runtime.ClientTransport) {
+ a.transport = transport
+}
diff --git a/client/controller_info/controller_info_parameters.go b/client/controller_info/controller_info_parameters.go
new file mode 100644
index 00000000..f4d33ef6
--- /dev/null
+++ b/client/controller_info/controller_info_parameters.go
@@ -0,0 +1,128 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package controller_info
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewControllerInfoParams creates a new ControllerInfoParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewControllerInfoParams() *ControllerInfoParams {
+ return &ControllerInfoParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewControllerInfoParamsWithTimeout creates a new ControllerInfoParams object
+// with the ability to set a timeout on a request.
+func NewControllerInfoParamsWithTimeout(timeout time.Duration) *ControllerInfoParams {
+ return &ControllerInfoParams{
+ timeout: timeout,
+ }
+}
+
+// NewControllerInfoParamsWithContext creates a new ControllerInfoParams object
+// with the ability to set a context for a request.
+func NewControllerInfoParamsWithContext(ctx context.Context) *ControllerInfoParams {
+ return &ControllerInfoParams{
+ Context: ctx,
+ }
+}
+
+// NewControllerInfoParamsWithHTTPClient creates a new ControllerInfoParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewControllerInfoParamsWithHTTPClient(client *http.Client) *ControllerInfoParams {
+ return &ControllerInfoParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+ControllerInfoParams contains all the parameters to send to the API endpoint
+
+ for the controller info operation.
+
+ Typically these are written to a http.Request.
+*/
+type ControllerInfoParams struct {
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the controller info params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *ControllerInfoParams) WithDefaults() *ControllerInfoParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the controller info params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *ControllerInfoParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the controller info params
+func (o *ControllerInfoParams) WithTimeout(timeout time.Duration) *ControllerInfoParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the controller info params
+func (o *ControllerInfoParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the controller info params
+func (o *ControllerInfoParams) WithContext(ctx context.Context) *ControllerInfoParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the controller info params
+func (o *ControllerInfoParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the controller info params
+func (o *ControllerInfoParams) WithHTTPClient(client *http.Client) *ControllerInfoParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the controller info params
+func (o *ControllerInfoParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *ControllerInfoParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/controller_info/controller_info_responses.go b/client/controller_info/controller_info_responses.go
new file mode 100644
index 00000000..06ec1b7f
--- /dev/null
+++ b/client/controller_info/controller_info_responses.go
@@ -0,0 +1,179 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package controller_info
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// ControllerInfoReader is a Reader for the ControllerInfo structure.
+type ControllerInfoReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *ControllerInfoReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewControllerInfoOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ case 409:
+ result := NewControllerInfoConflict()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ default:
+ return nil, runtime.NewAPIError("[GET /controller-info] ControllerInfo", response, response.Code())
+ }
+}
+
+// NewControllerInfoOK creates a ControllerInfoOK with default headers values
+func NewControllerInfoOK() *ControllerInfoOK {
+ return &ControllerInfoOK{}
+}
+
+/*
+ControllerInfoOK describes a response with status code 200, with default header values.
+
+ControllerInfo
+*/
+type ControllerInfoOK struct {
+ Payload garm_params.ControllerInfo
+}
+
+// IsSuccess returns true when this controller info o k response has a 2xx status code
+func (o *ControllerInfoOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this controller info o k response has a 3xx status code
+func (o *ControllerInfoOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this controller info o k response has a 4xx status code
+func (o *ControllerInfoOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this controller info o k response has a 5xx status code
+func (o *ControllerInfoOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this controller info o k response has a status code equal to that given
+func (o *ControllerInfoOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the controller info o k response
+func (o *ControllerInfoOK) Code() int {
+ return 200
+}
+
+func (o *ControllerInfoOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /controller-info][%d] controllerInfoOK %s", 200, payload)
+}
+
+func (o *ControllerInfoOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /controller-info][%d] controllerInfoOK %s", 200, payload)
+}
+
+func (o *ControllerInfoOK) GetPayload() garm_params.ControllerInfo {
+ return o.Payload
+}
+
+func (o *ControllerInfoOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewControllerInfoConflict creates a ControllerInfoConflict with default headers values
+func NewControllerInfoConflict() *ControllerInfoConflict {
+ return &ControllerInfoConflict{}
+}
+
+/*
+ControllerInfoConflict describes a response with status code 409, with default header values.
+
+APIErrorResponse
+*/
+type ControllerInfoConflict struct {
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this controller info conflict response has a 2xx status code
+func (o *ControllerInfoConflict) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this controller info conflict response has a 3xx status code
+func (o *ControllerInfoConflict) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this controller info conflict response has a 4xx status code
+func (o *ControllerInfoConflict) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this controller info conflict response has a 5xx status code
+func (o *ControllerInfoConflict) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this controller info conflict response has a status code equal to that given
+func (o *ControllerInfoConflict) IsCode(code int) bool {
+ return code == 409
+}
+
+// Code gets the status code for the controller info conflict response
+func (o *ControllerInfoConflict) Code() int {
+ return 409
+}
+
+func (o *ControllerInfoConflict) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /controller-info][%d] controllerInfoConflict %s", 409, payload)
+}
+
+func (o *ControllerInfoConflict) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /controller-info][%d] controllerInfoConflict %s", 409, payload)
+}
+
+func (o *ControllerInfoConflict) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *ControllerInfoConflict) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/credentials/create_credentials_parameters.go b/client/credentials/create_credentials_parameters.go
new file mode 100644
index 00000000..4288808f
--- /dev/null
+++ b/client/credentials/create_credentials_parameters.go
@@ -0,0 +1,151 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package credentials
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// NewCreateCredentialsParams creates a new CreateCredentialsParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewCreateCredentialsParams() *CreateCredentialsParams {
+ return &CreateCredentialsParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewCreateCredentialsParamsWithTimeout creates a new CreateCredentialsParams object
+// with the ability to set a timeout on a request.
+func NewCreateCredentialsParamsWithTimeout(timeout time.Duration) *CreateCredentialsParams {
+ return &CreateCredentialsParams{
+ timeout: timeout,
+ }
+}
+
+// NewCreateCredentialsParamsWithContext creates a new CreateCredentialsParams object
+// with the ability to set a context for a request.
+func NewCreateCredentialsParamsWithContext(ctx context.Context) *CreateCredentialsParams {
+ return &CreateCredentialsParams{
+ Context: ctx,
+ }
+}
+
+// NewCreateCredentialsParamsWithHTTPClient creates a new CreateCredentialsParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewCreateCredentialsParamsWithHTTPClient(client *http.Client) *CreateCredentialsParams {
+ return &CreateCredentialsParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+CreateCredentialsParams contains all the parameters to send to the API endpoint
+
+ for the create credentials operation.
+
+ Typically these are written to a http.Request.
+*/
+type CreateCredentialsParams struct {
+
+ /* Body.
+
+ Parameters used when creating a GitHub credential.
+ */
+ Body garm_params.CreateGithubCredentialsParams
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the create credentials params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *CreateCredentialsParams) WithDefaults() *CreateCredentialsParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the create credentials params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *CreateCredentialsParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the create credentials params
+func (o *CreateCredentialsParams) WithTimeout(timeout time.Duration) *CreateCredentialsParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the create credentials params
+func (o *CreateCredentialsParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the create credentials params
+func (o *CreateCredentialsParams) WithContext(ctx context.Context) *CreateCredentialsParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the create credentials params
+func (o *CreateCredentialsParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the create credentials params
+func (o *CreateCredentialsParams) WithHTTPClient(client *http.Client) *CreateCredentialsParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the create credentials params
+func (o *CreateCredentialsParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithBody adds the body to the create credentials params
+func (o *CreateCredentialsParams) WithBody(body garm_params.CreateGithubCredentialsParams) *CreateCredentialsParams {
+ o.SetBody(body)
+ return o
+}
+
+// SetBody adds the body to the create credentials params
+func (o *CreateCredentialsParams) SetBody(body garm_params.CreateGithubCredentialsParams) {
+ o.Body = body
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *CreateCredentialsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+ if err := r.SetBodyParam(o.Body); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/credentials/create_credentials_responses.go b/client/credentials/create_credentials_responses.go
new file mode 100644
index 00000000..a0037edf
--- /dev/null
+++ b/client/credentials/create_credentials_responses.go
@@ -0,0 +1,179 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package credentials
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// CreateCredentialsReader is a Reader for the CreateCredentials structure.
+type CreateCredentialsReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *CreateCredentialsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewCreateCredentialsOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ case 400:
+ result := NewCreateCredentialsBadRequest()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ default:
+ return nil, runtime.NewAPIError("[POST /github/credentials] CreateCredentials", response, response.Code())
+ }
+}
+
+// NewCreateCredentialsOK creates a CreateCredentialsOK with default headers values
+func NewCreateCredentialsOK() *CreateCredentialsOK {
+ return &CreateCredentialsOK{}
+}
+
+/*
+CreateCredentialsOK describes a response with status code 200, with default header values.
+
+ForgeCredentials
+*/
+type CreateCredentialsOK struct {
+ Payload garm_params.ForgeCredentials
+}
+
+// IsSuccess returns true when this create credentials o k response has a 2xx status code
+func (o *CreateCredentialsOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this create credentials o k response has a 3xx status code
+func (o *CreateCredentialsOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this create credentials o k response has a 4xx status code
+func (o *CreateCredentialsOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this create credentials o k response has a 5xx status code
+func (o *CreateCredentialsOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this create credentials o k response has a status code equal to that given
+func (o *CreateCredentialsOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the create credentials o k response
+func (o *CreateCredentialsOK) Code() int {
+ return 200
+}
+
+func (o *CreateCredentialsOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /github/credentials][%d] createCredentialsOK %s", 200, payload)
+}
+
+func (o *CreateCredentialsOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /github/credentials][%d] createCredentialsOK %s", 200, payload)
+}
+
+func (o *CreateCredentialsOK) GetPayload() garm_params.ForgeCredentials {
+ return o.Payload
+}
+
+func (o *CreateCredentialsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewCreateCredentialsBadRequest creates a CreateCredentialsBadRequest with default headers values
+func NewCreateCredentialsBadRequest() *CreateCredentialsBadRequest {
+ return &CreateCredentialsBadRequest{}
+}
+
+/*
+CreateCredentialsBadRequest describes a response with status code 400, with default header values.
+
+APIErrorResponse
+*/
+type CreateCredentialsBadRequest struct {
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this create credentials bad request response has a 2xx status code
+func (o *CreateCredentialsBadRequest) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this create credentials bad request response has a 3xx status code
+func (o *CreateCredentialsBadRequest) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this create credentials bad request response has a 4xx status code
+func (o *CreateCredentialsBadRequest) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this create credentials bad request response has a 5xx status code
+func (o *CreateCredentialsBadRequest) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this create credentials bad request response has a status code equal to that given
+func (o *CreateCredentialsBadRequest) IsCode(code int) bool {
+ return code == 400
+}
+
+// Code gets the status code for the create credentials bad request response
+func (o *CreateCredentialsBadRequest) Code() int {
+ return 400
+}
+
+func (o *CreateCredentialsBadRequest) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /github/credentials][%d] createCredentialsBadRequest %s", 400, payload)
+}
+
+func (o *CreateCredentialsBadRequest) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /github/credentials][%d] createCredentialsBadRequest %s", 400, payload)
+}
+
+func (o *CreateCredentialsBadRequest) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *CreateCredentialsBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/credentials/create_gitea_credentials_parameters.go b/client/credentials/create_gitea_credentials_parameters.go
new file mode 100644
index 00000000..6e255bfa
--- /dev/null
+++ b/client/credentials/create_gitea_credentials_parameters.go
@@ -0,0 +1,151 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package credentials
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// NewCreateGiteaCredentialsParams creates a new CreateGiteaCredentialsParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewCreateGiteaCredentialsParams() *CreateGiteaCredentialsParams {
+ return &CreateGiteaCredentialsParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewCreateGiteaCredentialsParamsWithTimeout creates a new CreateGiteaCredentialsParams object
+// with the ability to set a timeout on a request.
+func NewCreateGiteaCredentialsParamsWithTimeout(timeout time.Duration) *CreateGiteaCredentialsParams {
+ return &CreateGiteaCredentialsParams{
+ timeout: timeout,
+ }
+}
+
+// NewCreateGiteaCredentialsParamsWithContext creates a new CreateGiteaCredentialsParams object
+// with the ability to set a context for a request.
+func NewCreateGiteaCredentialsParamsWithContext(ctx context.Context) *CreateGiteaCredentialsParams {
+ return &CreateGiteaCredentialsParams{
+ Context: ctx,
+ }
+}
+
+// NewCreateGiteaCredentialsParamsWithHTTPClient creates a new CreateGiteaCredentialsParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewCreateGiteaCredentialsParamsWithHTTPClient(client *http.Client) *CreateGiteaCredentialsParams {
+ return &CreateGiteaCredentialsParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+CreateGiteaCredentialsParams contains all the parameters to send to the API endpoint
+
+ for the create gitea credentials operation.
+
+ Typically these are written to a http.Request.
+*/
+type CreateGiteaCredentialsParams struct {
+
+ /* Body.
+
+ Parameters used when creating a Gitea credential.
+ */
+ Body garm_params.CreateGiteaCredentialsParams
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the create gitea credentials params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *CreateGiteaCredentialsParams) WithDefaults() *CreateGiteaCredentialsParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the create gitea credentials params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *CreateGiteaCredentialsParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the create gitea credentials params
+func (o *CreateGiteaCredentialsParams) WithTimeout(timeout time.Duration) *CreateGiteaCredentialsParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the create gitea credentials params
+func (o *CreateGiteaCredentialsParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the create gitea credentials params
+func (o *CreateGiteaCredentialsParams) WithContext(ctx context.Context) *CreateGiteaCredentialsParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the create gitea credentials params
+func (o *CreateGiteaCredentialsParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the create gitea credentials params
+func (o *CreateGiteaCredentialsParams) WithHTTPClient(client *http.Client) *CreateGiteaCredentialsParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the create gitea credentials params
+func (o *CreateGiteaCredentialsParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithBody adds the body to the create gitea credentials params
+func (o *CreateGiteaCredentialsParams) WithBody(body garm_params.CreateGiteaCredentialsParams) *CreateGiteaCredentialsParams {
+ o.SetBody(body)
+ return o
+}
+
+// SetBody adds the body to the create gitea credentials params
+func (o *CreateGiteaCredentialsParams) SetBody(body garm_params.CreateGiteaCredentialsParams) {
+ o.Body = body
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *CreateGiteaCredentialsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+ if err := r.SetBodyParam(o.Body); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/credentials/create_gitea_credentials_responses.go b/client/credentials/create_gitea_credentials_responses.go
new file mode 100644
index 00000000..2389cb04
--- /dev/null
+++ b/client/credentials/create_gitea_credentials_responses.go
@@ -0,0 +1,179 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package credentials
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// CreateGiteaCredentialsReader is a Reader for the CreateGiteaCredentials structure.
+type CreateGiteaCredentialsReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *CreateGiteaCredentialsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewCreateGiteaCredentialsOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ case 400:
+ result := NewCreateGiteaCredentialsBadRequest()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ default:
+ return nil, runtime.NewAPIError("[POST /gitea/credentials] CreateGiteaCredentials", response, response.Code())
+ }
+}
+
+// NewCreateGiteaCredentialsOK creates a CreateGiteaCredentialsOK with default headers values
+func NewCreateGiteaCredentialsOK() *CreateGiteaCredentialsOK {
+ return &CreateGiteaCredentialsOK{}
+}
+
+/*
+CreateGiteaCredentialsOK describes a response with status code 200, with default header values.
+
+ForgeCredentials
+*/
+type CreateGiteaCredentialsOK struct {
+ Payload garm_params.ForgeCredentials
+}
+
+// IsSuccess returns true when this create gitea credentials o k response has a 2xx status code
+func (o *CreateGiteaCredentialsOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this create gitea credentials o k response has a 3xx status code
+func (o *CreateGiteaCredentialsOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this create gitea credentials o k response has a 4xx status code
+func (o *CreateGiteaCredentialsOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this create gitea credentials o k response has a 5xx status code
+func (o *CreateGiteaCredentialsOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this create gitea credentials o k response has a status code equal to that given
+func (o *CreateGiteaCredentialsOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the create gitea credentials o k response
+func (o *CreateGiteaCredentialsOK) Code() int {
+ return 200
+}
+
+func (o *CreateGiteaCredentialsOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /gitea/credentials][%d] createGiteaCredentialsOK %s", 200, payload)
+}
+
+func (o *CreateGiteaCredentialsOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /gitea/credentials][%d] createGiteaCredentialsOK %s", 200, payload)
+}
+
+func (o *CreateGiteaCredentialsOK) GetPayload() garm_params.ForgeCredentials {
+ return o.Payload
+}
+
+func (o *CreateGiteaCredentialsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewCreateGiteaCredentialsBadRequest creates a CreateGiteaCredentialsBadRequest with default headers values
+func NewCreateGiteaCredentialsBadRequest() *CreateGiteaCredentialsBadRequest {
+ return &CreateGiteaCredentialsBadRequest{}
+}
+
+/*
+CreateGiteaCredentialsBadRequest describes a response with status code 400, with default header values.
+
+APIErrorResponse
+*/
+type CreateGiteaCredentialsBadRequest struct {
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this create gitea credentials bad request response has a 2xx status code
+func (o *CreateGiteaCredentialsBadRequest) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this create gitea credentials bad request response has a 3xx status code
+func (o *CreateGiteaCredentialsBadRequest) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this create gitea credentials bad request response has a 4xx status code
+func (o *CreateGiteaCredentialsBadRequest) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this create gitea credentials bad request response has a 5xx status code
+func (o *CreateGiteaCredentialsBadRequest) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this create gitea credentials bad request response has a status code equal to that given
+func (o *CreateGiteaCredentialsBadRequest) IsCode(code int) bool {
+ return code == 400
+}
+
+// Code gets the status code for the create gitea credentials bad request response
+func (o *CreateGiteaCredentialsBadRequest) Code() int {
+ return 400
+}
+
+func (o *CreateGiteaCredentialsBadRequest) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /gitea/credentials][%d] createGiteaCredentialsBadRequest %s", 400, payload)
+}
+
+func (o *CreateGiteaCredentialsBadRequest) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /gitea/credentials][%d] createGiteaCredentialsBadRequest %s", 400, payload)
+}
+
+func (o *CreateGiteaCredentialsBadRequest) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *CreateGiteaCredentialsBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/credentials/credentials_client.go b/client/credentials/credentials_client.go
index 226b702d..3dfe1abd 100644
--- a/client/credentials/credentials_client.go
+++ b/client/credentials/credentials_client.go
@@ -9,6 +9,7 @@ import (
"fmt"
"github.com/go-openapi/runtime"
+ httptransport "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
)
@@ -17,6 +18,31 @@ func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientServi
return &Client{transport: transport, formats: formats}
}
+// NewClientWithBasicAuth creates a new credentials API client with basic auth credentials.
+// It takes the following parameters:
+// - host: http host (github.com).
+// - basePath: any base path for the API client ("/v1", "/v3").
+// - scheme: http scheme ("http", "https").
+// - user: user for basic authentication header.
+// - password: password for basic authentication header.
+func NewClientWithBasicAuth(host, basePath, scheme, user, password string) ClientService {
+ transport := httptransport.New(host, basePath, []string{scheme})
+ transport.DefaultAuthentication = httptransport.BasicAuth(user, password)
+ return &Client{transport: transport, formats: strfmt.Default}
+}
+
+// NewClientWithBearerToken creates a new credentials API client with a bearer token for authentication.
+// It takes the following parameters:
+// - host: http host (github.com).
+// - basePath: any base path for the API client ("/v1", "/v3").
+// - scheme: http scheme ("http", "https").
+// - bearerToken: bearer token for Bearer authentication header.
+func NewClientWithBearerToken(host, basePath, scheme, bearerToken string) ClientService {
+ transport := httptransport.New(host, basePath, []string{scheme})
+ transport.DefaultAuthentication = httptransport.BearerToken(bearerToken)
+ return &Client{transport: transport, formats: strfmt.Default}
+}
+
/*
Client for credentials API
*/
@@ -25,16 +51,254 @@ type Client struct {
formats strfmt.Registry
}
-// ClientOption is the option for Client methods
+// ClientOption may be used to customize the behavior of Client methods.
type ClientOption func(*runtime.ClientOperation)
// ClientService is the interface for Client methods
type ClientService interface {
+ CreateCredentials(params *CreateCredentialsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*CreateCredentialsOK, error)
+
+ CreateGiteaCredentials(params *CreateGiteaCredentialsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*CreateGiteaCredentialsOK, error)
+
+ DeleteCredentials(params *DeleteCredentialsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) error
+
+ DeleteGiteaCredentials(params *DeleteGiteaCredentialsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) error
+
+ GetCredentials(params *GetCredentialsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*GetCredentialsOK, error)
+
+ GetGiteaCredentials(params *GetGiteaCredentialsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*GetGiteaCredentialsOK, error)
+
ListCredentials(params *ListCredentialsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ListCredentialsOK, error)
+ ListGiteaCredentials(params *ListGiteaCredentialsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ListGiteaCredentialsOK, error)
+
+ UpdateCredentials(params *UpdateCredentialsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*UpdateCredentialsOK, error)
+
+ UpdateGiteaCredentials(params *UpdateGiteaCredentialsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*UpdateGiteaCredentialsOK, error)
+
SetTransport(transport runtime.ClientTransport)
}
+/*
+CreateCredentials creates a GitHub credential
+*/
+func (a *Client) CreateCredentials(params *CreateCredentialsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*CreateCredentialsOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewCreateCredentialsParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "CreateCredentials",
+ Method: "POST",
+ PathPattern: "/github/credentials",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &CreateCredentialsReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*CreateCredentialsOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+ msg := fmt.Sprintf("unexpected success response for CreateCredentials: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+ panic(msg)
+}
+
+/*
+CreateGiteaCredentials creates a Gitea credential
+*/
+func (a *Client) CreateGiteaCredentials(params *CreateGiteaCredentialsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*CreateGiteaCredentialsOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewCreateGiteaCredentialsParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "CreateGiteaCredentials",
+ Method: "POST",
+ PathPattern: "/gitea/credentials",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &CreateGiteaCredentialsReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*CreateGiteaCredentialsOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+ msg := fmt.Sprintf("unexpected success response for CreateGiteaCredentials: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+ panic(msg)
+}
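+
+// Illustrative call (credCli and authInfo as sketched above; the request
+// body is populated through the generated params type, whose fields are
+// defined in the corresponding parameters file and omitted here):
+//
+//	resp, err := credCli.CreateGiteaCredentials(NewCreateGiteaCredentialsParams(), authInfo)
+//	if err == nil {
+//		fmt.Printf("created: %+v\n", resp)
+//	}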
+
+/*
+DeleteCredentials deletes a GitHub credential
+*/
+func (a *Client) DeleteCredentials(params *DeleteCredentialsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) error {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewDeleteCredentialsParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "DeleteCredentials",
+ Method: "DELETE",
+ PathPattern: "/github/credentials/{id}",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &DeleteCredentialsReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ _, err := a.transport.Submit(op)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+/*
+DeleteGiteaCredentials deletes a Gitea credential
+*/
+func (a *Client) DeleteGiteaCredentials(params *DeleteGiteaCredentialsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) error {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewDeleteGiteaCredentialsParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "DeleteGiteaCredentials",
+ Method: "DELETE",
+ PathPattern: "/gitea/credentials/{id}",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &DeleteGiteaCredentialsReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ _, err := a.transport.Submit(op)
+ if err != nil {
+ return err
+ }
+ return nil
+}
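+
+// Note that the delete operations return only an error: the server sends
+// an empty body on success, so there is no typed OK result. A minimal
+// sketch (credCli and authInfo assumed, the ID is made up):
+//
+//	params := NewDeleteGiteaCredentialsParams().WithID(5)
+//	if err := credCli.DeleteGiteaCredentials(params, authInfo); err != nil {
+//		log.Printf("delete failed: %v", err)
+//	}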
+
+/*
+GetCredentials gets a GitHub credential
+*/
+func (a *Client) GetCredentials(params *GetCredentialsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*GetCredentialsOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewGetCredentialsParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "GetCredentials",
+ Method: "GET",
+ PathPattern: "/github/credentials/{id}",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &GetCredentialsReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*GetCredentialsOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+ msg := fmt.Sprintf("unexpected success response for GetCredentials: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+ panic(msg)
+}
+
+/*
+GetGiteaCredentials gets a Gitea credential
+*/
+func (a *Client) GetGiteaCredentials(params *GetGiteaCredentialsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*GetGiteaCredentialsOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewGetGiteaCredentialsParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "GetGiteaCredentials",
+ Method: "GET",
+ PathPattern: "/gitea/credentials/{id}",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &GetGiteaCredentialsReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*GetGiteaCredentialsOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+ msg := fmt.Sprintf("unexpected success response for GetGiteaCredentials: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+ panic(msg)
+}
+
/*
ListCredentials lists all credentials
*/
@@ -46,7 +310,7 @@ func (a *Client) ListCredentials(params *ListCredentialsParams, authInfo runtime
op := &runtime.ClientOperation{
ID: "ListCredentials",
Method: "GET",
- PathPattern: "/credentials",
+ PathPattern: "/github/credentials",
ProducesMediaTypes: []string{"application/json"},
ConsumesMediaTypes: []string{"application/json"},
Schemes: []string{"http"},
@@ -74,6 +338,123 @@ func (a *Client) ListCredentials(params *ListCredentialsParams, authInfo runtime
panic(msg)
}
+/*
+ListGiteaCredentials lists all Gitea credentials
+*/
+func (a *Client) ListGiteaCredentials(params *ListGiteaCredentialsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ListGiteaCredentialsOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewListGiteaCredentialsParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "ListGiteaCredentials",
+ Method: "GET",
+ PathPattern: "/gitea/credentials",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &ListGiteaCredentialsReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*ListGiteaCredentialsOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+ msg := fmt.Sprintf("unexpected success response for ListGiteaCredentials: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+ panic(msg)
+}
+
+/*
+UpdateCredentials updates a GitHub credential
+*/
+func (a *Client) UpdateCredentials(params *UpdateCredentialsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*UpdateCredentialsOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewUpdateCredentialsParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "UpdateCredentials",
+ Method: "PUT",
+ PathPattern: "/github/credentials/{id}",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &UpdateCredentialsReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*UpdateCredentialsOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+ msg := fmt.Sprintf("unexpected success response for UpdateCredentials: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+ panic(msg)
+}
+
+/*
+UpdateGiteaCredentials updates a Gitea credential
+*/
+func (a *Client) UpdateGiteaCredentials(params *UpdateGiteaCredentialsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*UpdateGiteaCredentialsOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewUpdateGiteaCredentialsParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "UpdateGiteaCredentials",
+ Method: "PUT",
+ PathPattern: "/gitea/credentials/{id}",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &UpdateGiteaCredentialsReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*UpdateGiteaCredentialsOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+ msg := fmt.Sprintf("unexpected success response for UpdateGiteaCredentials: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+ panic(msg)
+}
+
// SetTransport changes the transport on the client
func (a *Client) SetTransport(transport runtime.ClientTransport) {
a.transport = transport
diff --git a/client/credentials/delete_credentials_parameters.go b/client/credentials/delete_credentials_parameters.go
new file mode 100644
index 00000000..f36f8725
--- /dev/null
+++ b/client/credentials/delete_credentials_parameters.go
@@ -0,0 +1,152 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package credentials
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+)
+
+// NewDeleteCredentialsParams creates a new DeleteCredentialsParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameters, use SetDefaults or WithDefaults.
+func NewDeleteCredentialsParams() *DeleteCredentialsParams {
+ return &DeleteCredentialsParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewDeleteCredentialsParamsWithTimeout creates a new DeleteCredentialsParams object
+// with the ability to set a timeout on a request.
+func NewDeleteCredentialsParamsWithTimeout(timeout time.Duration) *DeleteCredentialsParams {
+ return &DeleteCredentialsParams{
+ timeout: timeout,
+ }
+}
+
+// NewDeleteCredentialsParamsWithContext creates a new DeleteCredentialsParams object
+// with the ability to set a context for a request.
+func NewDeleteCredentialsParamsWithContext(ctx context.Context) *DeleteCredentialsParams {
+ return &DeleteCredentialsParams{
+ Context: ctx,
+ }
+}
+
+// NewDeleteCredentialsParamsWithHTTPClient creates a new DeleteCredentialsParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewDeleteCredentialsParamsWithHTTPClient(client *http.Client) *DeleteCredentialsParams {
+ return &DeleteCredentialsParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+DeleteCredentialsParams contains all the parameters to send to the API endpoint
+
+ for the delete credentials operation.
+
+ Typically these are written to a http.Request.
+*/
+type DeleteCredentialsParams struct {
+
+ /* ID.
+
+ ID of the GitHub credential.
+ */
+ ID int64
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the delete credentials params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *DeleteCredentialsParams) WithDefaults() *DeleteCredentialsParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the delete credentials params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *DeleteCredentialsParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the delete credentials params
+func (o *DeleteCredentialsParams) WithTimeout(timeout time.Duration) *DeleteCredentialsParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the delete credentials params
+func (o *DeleteCredentialsParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the delete credentials params
+func (o *DeleteCredentialsParams) WithContext(ctx context.Context) *DeleteCredentialsParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the delete credentials params
+func (o *DeleteCredentialsParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the delete credentials params
+func (o *DeleteCredentialsParams) WithHTTPClient(client *http.Client) *DeleteCredentialsParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the delete credentials params
+func (o *DeleteCredentialsParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithID adds the id to the delete credentials params
+func (o *DeleteCredentialsParams) WithID(id int64) *DeleteCredentialsParams {
+ o.SetID(id)
+ return o
+}
+
+// SetID adds the id to the delete credentials params
+func (o *DeleteCredentialsParams) SetID(id int64) {
+ o.ID = id
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *DeleteCredentialsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ // path param id
+ if err := r.SetPathParam("id", swag.FormatInt64(o.ID)); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
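+
+// Illustrative use of the fluent setters above (values are made up):
+//
+//	params := NewDeleteCredentialsParams().
+//		WithTimeout(30 * time.Second).
+//		WithContext(context.Background()).
+//		WithID(42)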
diff --git a/client/credentials/delete_credentials_responses.go b/client/credentials/delete_credentials_responses.go
new file mode 100644
index 00000000..32d045e7
--- /dev/null
+++ b/client/credentials/delete_credentials_responses.go
@@ -0,0 +1,106 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package credentials
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+)
+
+// DeleteCredentialsReader is a Reader for the DeleteCredentials structure.
+type DeleteCredentialsReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *DeleteCredentialsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ result := NewDeleteCredentialsDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+}
+
+// NewDeleteCredentialsDefault creates a DeleteCredentialsDefault with default headers values
+func NewDeleteCredentialsDefault(code int) *DeleteCredentialsDefault {
+ return &DeleteCredentialsDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+DeleteCredentialsDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type DeleteCredentialsDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this delete credentials default response has a 2xx status code
+func (o *DeleteCredentialsDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this delete credentials default response has a 3xx status code
+func (o *DeleteCredentialsDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this delete credentials default response has a 4xx status code
+func (o *DeleteCredentialsDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this delete credentials default response has a 5xx status code
+func (o *DeleteCredentialsDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this delete credentials default response has a status code equal to that given
+func (o *DeleteCredentialsDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the delete credentials default response
+func (o *DeleteCredentialsDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *DeleteCredentialsDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[DELETE /github/credentials/{id}][%d] DeleteCredentials default %s", o._statusCode, payload)
+}
+
+func (o *DeleteCredentialsDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[DELETE /github/credentials/{id}][%d] DeleteCredentials default %s", o._statusCode, payload)
+}
+
+func (o *DeleteCredentialsDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *DeleteCredentialsDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
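+
+// This reader funnels every status code into the Default type: a 2xx is
+// surfaced as a success, anything else is returned as the
+// *DeleteCredentialsDefault itself, which satisfies the error interface
+// through Error(). An illustrative caller-side check (credCli and
+// authInfo assumed):
+//
+//	if err := credCli.DeleteCredentials(params, authInfo); err != nil {
+//		if apiErr, ok := err.(*DeleteCredentialsDefault); ok {
+//			fmt.Printf("API error payload: %+v\n", apiErr.Payload)
+//		}
+//	}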
diff --git a/client/credentials/delete_gitea_credentials_parameters.go b/client/credentials/delete_gitea_credentials_parameters.go
new file mode 100644
index 00000000..598ac477
--- /dev/null
+++ b/client/credentials/delete_gitea_credentials_parameters.go
@@ -0,0 +1,152 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package credentials
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+)
+
+// NewDeleteGiteaCredentialsParams creates a new DeleteGiteaCredentialsParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameters, use SetDefaults or WithDefaults.
+func NewDeleteGiteaCredentialsParams() *DeleteGiteaCredentialsParams {
+ return &DeleteGiteaCredentialsParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewDeleteGiteaCredentialsParamsWithTimeout creates a new DeleteGiteaCredentialsParams object
+// with the ability to set a timeout on a request.
+func NewDeleteGiteaCredentialsParamsWithTimeout(timeout time.Duration) *DeleteGiteaCredentialsParams {
+ return &DeleteGiteaCredentialsParams{
+ timeout: timeout,
+ }
+}
+
+// NewDeleteGiteaCredentialsParamsWithContext creates a new DeleteGiteaCredentialsParams object
+// with the ability to set a context for a request.
+func NewDeleteGiteaCredentialsParamsWithContext(ctx context.Context) *DeleteGiteaCredentialsParams {
+ return &DeleteGiteaCredentialsParams{
+ Context: ctx,
+ }
+}
+
+// NewDeleteGiteaCredentialsParamsWithHTTPClient creates a new DeleteGiteaCredentialsParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewDeleteGiteaCredentialsParamsWithHTTPClient(client *http.Client) *DeleteGiteaCredentialsParams {
+ return &DeleteGiteaCredentialsParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+DeleteGiteaCredentialsParams contains all the parameters to send to the API endpoint
+
+ for the delete gitea credentials operation.
+
+ Typically these are written to a http.Request.
+*/
+type DeleteGiteaCredentialsParams struct {
+
+ /* ID.
+
+ ID of the Gitea credential.
+ */
+ ID int64
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the delete gitea credentials params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *DeleteGiteaCredentialsParams) WithDefaults() *DeleteGiteaCredentialsParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the delete gitea credentials params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *DeleteGiteaCredentialsParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the delete gitea credentials params
+func (o *DeleteGiteaCredentialsParams) WithTimeout(timeout time.Duration) *DeleteGiteaCredentialsParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the delete gitea credentials params
+func (o *DeleteGiteaCredentialsParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the delete gitea credentials params
+func (o *DeleteGiteaCredentialsParams) WithContext(ctx context.Context) *DeleteGiteaCredentialsParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the delete gitea credentials params
+func (o *DeleteGiteaCredentialsParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the delete gitea credentials params
+func (o *DeleteGiteaCredentialsParams) WithHTTPClient(client *http.Client) *DeleteGiteaCredentialsParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the delete gitea credentials params
+func (o *DeleteGiteaCredentialsParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithID adds the id to the delete gitea credentials params
+func (o *DeleteGiteaCredentialsParams) WithID(id int64) *DeleteGiteaCredentialsParams {
+ o.SetID(id)
+ return o
+}
+
+// SetID adds the id to the delete gitea credentials params
+func (o *DeleteGiteaCredentialsParams) SetID(id int64) {
+ o.ID = id
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *DeleteGiteaCredentialsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ // path param id
+ if err := r.SetPathParam("id", swag.FormatInt64(o.ID)); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/credentials/delete_gitea_credentials_responses.go b/client/credentials/delete_gitea_credentials_responses.go
new file mode 100644
index 00000000..d1df7b0b
--- /dev/null
+++ b/client/credentials/delete_gitea_credentials_responses.go
@@ -0,0 +1,106 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package credentials
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+)
+
+// DeleteGiteaCredentialsReader is a Reader for the DeleteGiteaCredentials structure.
+type DeleteGiteaCredentialsReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *DeleteGiteaCredentialsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ result := NewDeleteGiteaCredentialsDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+}
+
+// NewDeleteGiteaCredentialsDefault creates a DeleteGiteaCredentialsDefault with default headers values
+func NewDeleteGiteaCredentialsDefault(code int) *DeleteGiteaCredentialsDefault {
+ return &DeleteGiteaCredentialsDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+DeleteGiteaCredentialsDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type DeleteGiteaCredentialsDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this delete gitea credentials default response has a 2xx status code
+func (o *DeleteGiteaCredentialsDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this delete gitea credentials default response has a 3xx status code
+func (o *DeleteGiteaCredentialsDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this delete gitea credentials default response has a 4xx status code
+func (o *DeleteGiteaCredentialsDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this delete gitea credentials default response has a 5xx status code
+func (o *DeleteGiteaCredentialsDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this delete gitea credentials default response has a status code equal to that given
+func (o *DeleteGiteaCredentialsDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the delete gitea credentials default response
+func (o *DeleteGiteaCredentialsDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *DeleteGiteaCredentialsDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[DELETE /gitea/credentials/{id}][%d] DeleteGiteaCredentials default %s", o._statusCode, payload)
+}
+
+func (o *DeleteGiteaCredentialsDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[DELETE /gitea/credentials/{id}][%d] DeleteGiteaCredentials default %s", o._statusCode, payload)
+}
+
+func (o *DeleteGiteaCredentialsDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *DeleteGiteaCredentialsDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/credentials/get_credentials_parameters.go b/client/credentials/get_credentials_parameters.go
new file mode 100644
index 00000000..ff8305e8
--- /dev/null
+++ b/client/credentials/get_credentials_parameters.go
@@ -0,0 +1,152 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package credentials
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+)
+
+// NewGetCredentialsParams creates a new GetCredentialsParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameters, use SetDefaults or WithDefaults.
+func NewGetCredentialsParams() *GetCredentialsParams {
+ return &GetCredentialsParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewGetCredentialsParamsWithTimeout creates a new GetCredentialsParams object
+// with the ability to set a timeout on a request.
+func NewGetCredentialsParamsWithTimeout(timeout time.Duration) *GetCredentialsParams {
+ return &GetCredentialsParams{
+ timeout: timeout,
+ }
+}
+
+// NewGetCredentialsParamsWithContext creates a new GetCredentialsParams object
+// with the ability to set a context for a request.
+func NewGetCredentialsParamsWithContext(ctx context.Context) *GetCredentialsParams {
+ return &GetCredentialsParams{
+ Context: ctx,
+ }
+}
+
+// NewGetCredentialsParamsWithHTTPClient creates a new GetCredentialsParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewGetCredentialsParamsWithHTTPClient(client *http.Client) *GetCredentialsParams {
+ return &GetCredentialsParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+GetCredentialsParams contains all the parameters to send to the API endpoint
+
+ for the get credentials operation.
+
+ Typically these are written to a http.Request.
+*/
+type GetCredentialsParams struct {
+
+ /* ID.
+
+ ID of the GitHub credential.
+ */
+ ID int64
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the get credentials params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetCredentialsParams) WithDefaults() *GetCredentialsParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the get credentials params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetCredentialsParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the get credentials params
+func (o *GetCredentialsParams) WithTimeout(timeout time.Duration) *GetCredentialsParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the get credentials params
+func (o *GetCredentialsParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the get credentials params
+func (o *GetCredentialsParams) WithContext(ctx context.Context) *GetCredentialsParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the get credentials params
+func (o *GetCredentialsParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the get credentials params
+func (o *GetCredentialsParams) WithHTTPClient(client *http.Client) *GetCredentialsParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the get credentials params
+func (o *GetCredentialsParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithID adds the id to the get credentials params
+func (o *GetCredentialsParams) WithID(id int64) *GetCredentialsParams {
+ o.SetID(id)
+ return o
+}
+
+// SetID adds the id to the get credentials params
+func (o *GetCredentialsParams) SetID(id int64) {
+ o.ID = id
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *GetCredentialsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ // path param id
+ if err := r.SetPathParam("id", swag.FormatInt64(o.ID)); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/credentials/get_credentials_responses.go b/client/credentials/get_credentials_responses.go
new file mode 100644
index 00000000..4538c16e
--- /dev/null
+++ b/client/credentials/get_credentials_responses.go
@@ -0,0 +1,179 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package credentials
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// GetCredentialsReader is a Reader for the GetCredentials structure.
+type GetCredentialsReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *GetCredentialsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewGetCredentialsOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ case 400:
+ result := NewGetCredentialsBadRequest()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ default:
+ return nil, runtime.NewAPIError("[GET /github/credentials/{id}] GetCredentials", response, response.Code())
+ }
+}
+
+// NewGetCredentialsOK creates a GetCredentialsOK with default headers values
+func NewGetCredentialsOK() *GetCredentialsOK {
+ return &GetCredentialsOK{}
+}
+
+/*
+GetCredentialsOK describes a response with status code 200, with default header values.
+
+ForgeCredentials
+*/
+type GetCredentialsOK struct {
+ Payload garm_params.ForgeCredentials
+}
+
+// IsSuccess returns true when this get credentials o k response has a 2xx status code
+func (o *GetCredentialsOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this get credentials o k response has a 3xx status code
+func (o *GetCredentialsOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get credentials o k response has a 4xx status code
+func (o *GetCredentialsOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this get credentials o k response has a 5xx status code
+func (o *GetCredentialsOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get credentials o k response has a status code equal to that given
+func (o *GetCredentialsOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the get credentials o k response
+func (o *GetCredentialsOK) Code() int {
+ return 200
+}
+
+func (o *GetCredentialsOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /github/credentials/{id}][%d] getCredentialsOK %s", 200, payload)
+}
+
+func (o *GetCredentialsOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /github/credentials/{id}][%d] getCredentialsOK %s", 200, payload)
+}
+
+func (o *GetCredentialsOK) GetPayload() garm_params.ForgeCredentials {
+ return o.Payload
+}
+
+func (o *GetCredentialsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewGetCredentialsBadRequest creates a GetCredentialsBadRequest with default headers values
+func NewGetCredentialsBadRequest() *GetCredentialsBadRequest {
+ return &GetCredentialsBadRequest{}
+}
+
+/*
+GetCredentialsBadRequest describes a response with status code 400, with default header values.
+
+APIErrorResponse
+*/
+type GetCredentialsBadRequest struct {
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this get credentials bad request response has a 2xx status code
+func (o *GetCredentialsBadRequest) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this get credentials bad request response has a 3xx status code
+func (o *GetCredentialsBadRequest) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get credentials bad request response has a 4xx status code
+func (o *GetCredentialsBadRequest) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this get credentials bad request response has a 5xx status code
+func (o *GetCredentialsBadRequest) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get credentials bad request response has a status code equal to that given
+func (o *GetCredentialsBadRequest) IsCode(code int) bool {
+ return code == 400
+}
+
+// Code gets the status code for the get credentials bad request response
+func (o *GetCredentialsBadRequest) Code() int {
+ return 400
+}
+
+func (o *GetCredentialsBadRequest) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /github/credentials/{id}][%d] getCredentialsBadRequest %s", 400, payload)
+}
+
+func (o *GetCredentialsBadRequest) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /github/credentials/{id}][%d] getCredentialsBadRequest %s", 400, payload)
+}
+
+func (o *GetCredentialsBadRequest) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *GetCredentialsBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
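+
+// Sketch of handling the three outcomes this reader can produce (credCli
+// and authInfo assumed): 200 yields *GetCredentialsOK, 400 surfaces as a
+// *GetCredentialsBadRequest error, and any other status code becomes a
+// generic *runtime.APIError.
+//
+//	resp, err := credCli.GetCredentials(NewGetCredentialsParams().WithID(1), authInfo)
+//	switch e := err.(type) {
+//	case nil:
+//		fmt.Printf("credential: %+v\n", resp.GetPayload())
+//	case *GetCredentialsBadRequest:
+//		fmt.Printf("bad request: %+v\n", e.GetPayload())
+//	default:
+//		fmt.Println("unexpected error:", err)
+//	}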
diff --git a/client/credentials/get_gitea_credentials_parameters.go b/client/credentials/get_gitea_credentials_parameters.go
new file mode 100644
index 00000000..a844c326
--- /dev/null
+++ b/client/credentials/get_gitea_credentials_parameters.go
@@ -0,0 +1,152 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package credentials
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+)
+
+// NewGetGiteaCredentialsParams creates a new GetGiteaCredentialsParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameters, use SetDefaults or WithDefaults.
+func NewGetGiteaCredentialsParams() *GetGiteaCredentialsParams {
+ return &GetGiteaCredentialsParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewGetGiteaCredentialsParamsWithTimeout creates a new GetGiteaCredentialsParams object
+// with the ability to set a timeout on a request.
+func NewGetGiteaCredentialsParamsWithTimeout(timeout time.Duration) *GetGiteaCredentialsParams {
+ return &GetGiteaCredentialsParams{
+ timeout: timeout,
+ }
+}
+
+// NewGetGiteaCredentialsParamsWithContext creates a new GetGiteaCredentialsParams object
+// with the ability to set a context for a request.
+func NewGetGiteaCredentialsParamsWithContext(ctx context.Context) *GetGiteaCredentialsParams {
+ return &GetGiteaCredentialsParams{
+ Context: ctx,
+ }
+}
+
+// NewGetGiteaCredentialsParamsWithHTTPClient creates a new GetGiteaCredentialsParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewGetGiteaCredentialsParamsWithHTTPClient(client *http.Client) *GetGiteaCredentialsParams {
+ return &GetGiteaCredentialsParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+GetGiteaCredentialsParams contains all the parameters to send to the API endpoint
+
+ for the get gitea credentials operation.
+
+ Typically these are written to a http.Request.
+*/
+type GetGiteaCredentialsParams struct {
+
+ /* ID.
+
+ ID of the Gitea credential.
+ */
+ ID int64
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the get gitea credentials params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetGiteaCredentialsParams) WithDefaults() *GetGiteaCredentialsParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the get gitea credentials params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetGiteaCredentialsParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the get gitea credentials params
+func (o *GetGiteaCredentialsParams) WithTimeout(timeout time.Duration) *GetGiteaCredentialsParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the get gitea credentials params
+func (o *GetGiteaCredentialsParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the get gitea credentials params
+func (o *GetGiteaCredentialsParams) WithContext(ctx context.Context) *GetGiteaCredentialsParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the get gitea credentials params
+func (o *GetGiteaCredentialsParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the get gitea credentials params
+func (o *GetGiteaCredentialsParams) WithHTTPClient(client *http.Client) *GetGiteaCredentialsParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the get gitea credentials params
+func (o *GetGiteaCredentialsParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithID adds the id to the get gitea credentials params
+func (o *GetGiteaCredentialsParams) WithID(id int64) *GetGiteaCredentialsParams {
+ o.SetID(id)
+ return o
+}
+
+// SetID adds the id to the get gitea credentials params
+func (o *GetGiteaCredentialsParams) SetID(id int64) {
+ o.ID = id
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *GetGiteaCredentialsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ // path param id
+ if err := r.SetPathParam("id", swag.FormatInt64(o.ID)); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/credentials/get_gitea_credentials_responses.go b/client/credentials/get_gitea_credentials_responses.go
new file mode 100644
index 00000000..ba116d63
--- /dev/null
+++ b/client/credentials/get_gitea_credentials_responses.go
@@ -0,0 +1,179 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package credentials
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// GetGiteaCredentialsReader is a Reader for the GetGiteaCredentials structure.
+type GetGiteaCredentialsReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *GetGiteaCredentialsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewGetGiteaCredentialsOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ case 400:
+ result := NewGetGiteaCredentialsBadRequest()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ default:
+ return nil, runtime.NewAPIError("[GET /gitea/credentials/{id}] GetGiteaCredentials", response, response.Code())
+ }
+}
+
+// NewGetGiteaCredentialsOK creates a GetGiteaCredentialsOK with default headers values
+func NewGetGiteaCredentialsOK() *GetGiteaCredentialsOK {
+ return &GetGiteaCredentialsOK{}
+}
+
+/*
+GetGiteaCredentialsOK describes a response with status code 200, with default header values.
+
+ForgeCredentials
+*/
+type GetGiteaCredentialsOK struct {
+ Payload garm_params.ForgeCredentials
+}
+
+// IsSuccess returns true when this get gitea credentials o k response has a 2xx status code
+func (o *GetGiteaCredentialsOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this get gitea credentials o k response has a 3xx status code
+func (o *GetGiteaCredentialsOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get gitea credentials o k response has a 4xx status code
+func (o *GetGiteaCredentialsOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this get gitea credentials o k response has a 5xx status code
+func (o *GetGiteaCredentialsOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get gitea credentials o k response has a status code equal to that given
+func (o *GetGiteaCredentialsOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the get gitea credentials o k response
+func (o *GetGiteaCredentialsOK) Code() int {
+ return 200
+}
+
+func (o *GetGiteaCredentialsOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /gitea/credentials/{id}][%d] getGiteaCredentialsOK %s", 200, payload)
+}
+
+func (o *GetGiteaCredentialsOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /gitea/credentials/{id}][%d] getGiteaCredentialsOK %s", 200, payload)
+}
+
+func (o *GetGiteaCredentialsOK) GetPayload() garm_params.ForgeCredentials {
+ return o.Payload
+}
+
+func (o *GetGiteaCredentialsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewGetGiteaCredentialsBadRequest creates a GetGiteaCredentialsBadRequest with default headers values
+func NewGetGiteaCredentialsBadRequest() *GetGiteaCredentialsBadRequest {
+ return &GetGiteaCredentialsBadRequest{}
+}
+
+/*
+GetGiteaCredentialsBadRequest describes a response with status code 400, with default header values.
+
+APIErrorResponse
+*/
+type GetGiteaCredentialsBadRequest struct {
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this get gitea credentials bad request response has a 2xx status code
+func (o *GetGiteaCredentialsBadRequest) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this get gitea credentials bad request response has a 3xx status code
+func (o *GetGiteaCredentialsBadRequest) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get gitea credentials bad request response has a 4xx status code
+func (o *GetGiteaCredentialsBadRequest) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this get gitea credentials bad request response has a 5xx status code
+func (o *GetGiteaCredentialsBadRequest) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get gitea credentials bad request response has a status code equal to that given
+func (o *GetGiteaCredentialsBadRequest) IsCode(code int) bool {
+ return code == 400
+}
+
+// Code gets the status code for the get gitea credentials bad request response
+func (o *GetGiteaCredentialsBadRequest) Code() int {
+ return 400
+}
+
+func (o *GetGiteaCredentialsBadRequest) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /gitea/credentials/{id}][%d] getGiteaCredentialsBadRequest %s", 400, payload)
+}
+
+func (o *GetGiteaCredentialsBadRequest) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /gitea/credentials/{id}][%d] getGiteaCredentialsBadRequest %s", 400, payload)
+}
+
+func (o *GetGiteaCredentialsBadRequest) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *GetGiteaCredentialsBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/credentials/list_credentials_responses.go b/client/credentials/list_credentials_responses.go
index 2d4c2f9a..46163dc9 100644
--- a/client/credentials/list_credentials_responses.go
+++ b/client/credentials/list_credentials_responses.go
@@ -6,6 +6,7 @@ package credentials
// Editing this file might prove futile when you re-run the swagger generate command
import (
+ "encoding/json"
"fmt"
"io"
@@ -37,7 +38,7 @@ func (o *ListCredentialsReader) ReadResponse(response runtime.ClientResponse, co
}
return nil, result
default:
- return nil, runtime.NewAPIError("[GET /credentials] ListCredentials", response, response.Code())
+ return nil, runtime.NewAPIError("[GET /github/credentials] ListCredentials", response, response.Code())
}
}
@@ -86,11 +87,13 @@ func (o *ListCredentialsOK) Code() int {
}
func (o *ListCredentialsOK) Error() string {
- return fmt.Sprintf("[GET /credentials][%d] listCredentialsOK %+v", 200, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /github/credentials][%d] listCredentialsOK %s", 200, payload)
}
func (o *ListCredentialsOK) String() string {
- return fmt.Sprintf("[GET /credentials][%d] listCredentialsOK %+v", 200, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /github/credentials][%d] listCredentialsOK %s", 200, payload)
}
func (o *ListCredentialsOK) GetPayload() garm_params.Credentials {
@@ -152,11 +155,13 @@ func (o *ListCredentialsBadRequest) Code() int {
}
func (o *ListCredentialsBadRequest) Error() string {
- return fmt.Sprintf("[GET /credentials][%d] listCredentialsBadRequest %+v", 400, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /github/credentials][%d] listCredentialsBadRequest %s", 400, payload)
}
func (o *ListCredentialsBadRequest) String() string {
- return fmt.Sprintf("[GET /credentials][%d] listCredentialsBadRequest %+v", 400, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /github/credentials][%d] listCredentialsBadRequest %s", 400, payload)
}
func (o *ListCredentialsBadRequest) GetPayload() apiserver_params.APIErrorResponse {
diff --git a/client/credentials/list_gitea_credentials_parameters.go b/client/credentials/list_gitea_credentials_parameters.go
new file mode 100644
index 00000000..5e321a88
--- /dev/null
+++ b/client/credentials/list_gitea_credentials_parameters.go
@@ -0,0 +1,128 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package credentials
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewListGiteaCredentialsParams creates a new ListGiteaCredentialsParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameters, use SetDefaults or WithDefaults.
+func NewListGiteaCredentialsParams() *ListGiteaCredentialsParams {
+ return &ListGiteaCredentialsParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewListGiteaCredentialsParamsWithTimeout creates a new ListGiteaCredentialsParams object
+// with the ability to set a timeout on a request.
+func NewListGiteaCredentialsParamsWithTimeout(timeout time.Duration) *ListGiteaCredentialsParams {
+ return &ListGiteaCredentialsParams{
+ timeout: timeout,
+ }
+}
+
+// NewListGiteaCredentialsParamsWithContext creates a new ListGiteaCredentialsParams object
+// with the ability to set a context for a request.
+func NewListGiteaCredentialsParamsWithContext(ctx context.Context) *ListGiteaCredentialsParams {
+ return &ListGiteaCredentialsParams{
+ Context: ctx,
+ }
+}
+
+// NewListGiteaCredentialsParamsWithHTTPClient creates a new ListGiteaCredentialsParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewListGiteaCredentialsParamsWithHTTPClient(client *http.Client) *ListGiteaCredentialsParams {
+ return &ListGiteaCredentialsParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+ListGiteaCredentialsParams contains all the parameters to send to the API endpoint
+
+ for the list gitea credentials operation.
+
+ Typically these are written to a http.Request.
+*/
+type ListGiteaCredentialsParams struct {
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the list gitea credentials params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *ListGiteaCredentialsParams) WithDefaults() *ListGiteaCredentialsParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the list gitea credentials params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *ListGiteaCredentialsParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the list gitea credentials params
+func (o *ListGiteaCredentialsParams) WithTimeout(timeout time.Duration) *ListGiteaCredentialsParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the list gitea credentials params
+func (o *ListGiteaCredentialsParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the list gitea credentials params
+func (o *ListGiteaCredentialsParams) WithContext(ctx context.Context) *ListGiteaCredentialsParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the list gitea credentials params
+func (o *ListGiteaCredentialsParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the list gitea credentials params
+func (o *ListGiteaCredentialsParams) WithHTTPClient(client *http.Client) *ListGiteaCredentialsParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the list gitea credentials params
+func (o *ListGiteaCredentialsParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *ListGiteaCredentialsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
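+
+// The list operation has no path or query parameters, so this params
+// object only carries transport settings. Illustrative construction with
+// a caller-supplied deadline:
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+//	defer cancel()
+//	params := NewListGiteaCredentialsParamsWithContext(ctx)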
diff --git a/client/credentials/list_gitea_credentials_responses.go b/client/credentials/list_gitea_credentials_responses.go
new file mode 100644
index 00000000..f27864be
--- /dev/null
+++ b/client/credentials/list_gitea_credentials_responses.go
@@ -0,0 +1,179 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package credentials
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// ListGiteaCredentialsReader is a Reader for the ListGiteaCredentials structure.
+type ListGiteaCredentialsReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *ListGiteaCredentialsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewListGiteaCredentialsOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ case 400:
+ result := NewListGiteaCredentialsBadRequest()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ default:
+ return nil, runtime.NewAPIError("[GET /gitea/credentials] ListGiteaCredentials", response, response.Code())
+ }
+}
+
+// NewListGiteaCredentialsOK creates a ListGiteaCredentialsOK with default headers values
+func NewListGiteaCredentialsOK() *ListGiteaCredentialsOK {
+ return &ListGiteaCredentialsOK{}
+}
+
+/*
+ListGiteaCredentialsOK describes a response with status code 200, with default header values.
+
+Credentials
+*/
+type ListGiteaCredentialsOK struct {
+ Payload garm_params.Credentials
+}
+
+// IsSuccess returns true when this list gitea credentials o k response has a 2xx status code
+func (o *ListGiteaCredentialsOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this list gitea credentials o k response has a 3xx status code
+func (o *ListGiteaCredentialsOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this list gitea credentials o k response has a 4xx status code
+func (o *ListGiteaCredentialsOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this list gitea credentials o k response has a 5xx status code
+func (o *ListGiteaCredentialsOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this list gitea credentials o k response has a status code equal to that given
+func (o *ListGiteaCredentialsOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the list gitea credentials o k response
+func (o *ListGiteaCredentialsOK) Code() int {
+ return 200
+}
+
+func (o *ListGiteaCredentialsOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /gitea/credentials][%d] listGiteaCredentialsOK %s", 200, payload)
+}
+
+func (o *ListGiteaCredentialsOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /gitea/credentials][%d] listGiteaCredentialsOK %s", 200, payload)
+}
+
+func (o *ListGiteaCredentialsOK) GetPayload() garm_params.Credentials {
+ return o.Payload
+}
+
+func (o *ListGiteaCredentialsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewListGiteaCredentialsBadRequest creates a ListGiteaCredentialsBadRequest with default headers values
+func NewListGiteaCredentialsBadRequest() *ListGiteaCredentialsBadRequest {
+ return &ListGiteaCredentialsBadRequest{}
+}
+
+/*
+ListGiteaCredentialsBadRequest describes a response with status code 400, with default header values.
+
+APIErrorResponse
+*/
+type ListGiteaCredentialsBadRequest struct {
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this list gitea credentials bad request response has a 2xx status code
+func (o *ListGiteaCredentialsBadRequest) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this list gitea credentials bad request response has a 3xx status code
+func (o *ListGiteaCredentialsBadRequest) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this list gitea credentials bad request response has a 4xx status code
+func (o *ListGiteaCredentialsBadRequest) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this list gitea credentials bad request response has a 5xx status code
+func (o *ListGiteaCredentialsBadRequest) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this list gitea credentials bad request response has a status code equal to that given
+func (o *ListGiteaCredentialsBadRequest) IsCode(code int) bool {
+ return code == 400
+}
+
+// Code gets the status code for the list gitea credentials bad request response
+func (o *ListGiteaCredentialsBadRequest) Code() int {
+ return 400
+}
+
+func (o *ListGiteaCredentialsBadRequest) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /gitea/credentials][%d] listGiteaCredentialsBadRequest %s", 400, payload)
+}
+
+func (o *ListGiteaCredentialsBadRequest) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /gitea/credentials][%d] listGiteaCredentialsBadRequest %s", 400, payload)
+}
+
+func (o *ListGiteaCredentialsBadRequest) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *ListGiteaCredentialsBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
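
Editor's note: the reader above maps a 200 onto the OK wrapper, returns the 400 wrapper itself as the error, and folds every other status into a generic *runtime.APIError. Since ListGiteaCredentialsBadRequest implements error, the structured payload can be recovered with errors.As. A sketch under the same assumed facade and service method as before:

package main

import (
	"errors"
	"log"

	"github.com/go-openapi/runtime"

	"github.com/cloudbase/garm/client"
	"github.com/cloudbase/garm/client/credentials"
)

// handleListError recovers the typed 400 wrapper from the returned error.
func handleListError(apiCli *client.GarmAPI, authInfo runtime.ClientAuthInfoWriter) error {
	_, err := apiCli.Credentials.ListGiteaCredentials(
		credentials.NewListGiteaCredentialsParams(), authInfo)
	if err != nil {
		var badReq *credentials.ListGiteaCredentialsBadRequest
		if errors.As(err, &badReq) {
			// Structured APIErrorResponse decoded from the 400 body.
			log.Printf("bad request: %+v", badReq.GetPayload())
		}
		return err
	}
	return nil
}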
diff --git a/client/credentials/update_credentials_parameters.go b/client/credentials/update_credentials_parameters.go
new file mode 100644
index 00000000..bba26c95
--- /dev/null
+++ b/client/credentials/update_credentials_parameters.go
@@ -0,0 +1,174 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package credentials
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// NewUpdateCredentialsParams creates a new UpdateCredentialsParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewUpdateCredentialsParams() *UpdateCredentialsParams {
+ return &UpdateCredentialsParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewUpdateCredentialsParamsWithTimeout creates a new UpdateCredentialsParams object
+// with the ability to set a timeout on a request.
+func NewUpdateCredentialsParamsWithTimeout(timeout time.Duration) *UpdateCredentialsParams {
+ return &UpdateCredentialsParams{
+ timeout: timeout,
+ }
+}
+
+// NewUpdateCredentialsParamsWithContext creates a new UpdateCredentialsParams object
+// with the ability to set a context for a request.
+func NewUpdateCredentialsParamsWithContext(ctx context.Context) *UpdateCredentialsParams {
+ return &UpdateCredentialsParams{
+ Context: ctx,
+ }
+}
+
+// NewUpdateCredentialsParamsWithHTTPClient creates a new UpdateCredentialsParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewUpdateCredentialsParamsWithHTTPClient(client *http.Client) *UpdateCredentialsParams {
+ return &UpdateCredentialsParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+UpdateCredentialsParams contains all the parameters to send to the API endpoint
+
+ for the update credentials operation.
+
+ Typically these are written to a http.Request.
+*/
+type UpdateCredentialsParams struct {
+
+ /* Body.
+
+ Parameters used when updating a GitHub credential.
+ */
+ Body garm_params.UpdateGithubCredentialsParams
+
+ /* ID.
+
+ ID of the GitHub credential.
+ */
+ ID int64
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the update credentials params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *UpdateCredentialsParams) WithDefaults() *UpdateCredentialsParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the update credentials params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *UpdateCredentialsParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the update credentials params
+func (o *UpdateCredentialsParams) WithTimeout(timeout time.Duration) *UpdateCredentialsParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the update credentials params
+func (o *UpdateCredentialsParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the update credentials params
+func (o *UpdateCredentialsParams) WithContext(ctx context.Context) *UpdateCredentialsParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the update credentials params
+func (o *UpdateCredentialsParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the update credentials params
+func (o *UpdateCredentialsParams) WithHTTPClient(client *http.Client) *UpdateCredentialsParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the update credentials params
+func (o *UpdateCredentialsParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithBody adds the body to the update credentials params
+func (o *UpdateCredentialsParams) WithBody(body garm_params.UpdateGithubCredentialsParams) *UpdateCredentialsParams {
+ o.SetBody(body)
+ return o
+}
+
+// SetBody adds the body to the update credentials params
+func (o *UpdateCredentialsParams) SetBody(body garm_params.UpdateGithubCredentialsParams) {
+ o.Body = body
+}
+
+// WithID adds the id to the update credentials params
+func (o *UpdateCredentialsParams) WithID(id int64) *UpdateCredentialsParams {
+ o.SetID(id)
+ return o
+}
+
+// SetID adds the id to the update credentials params
+func (o *UpdateCredentialsParams) SetID(id int64) {
+ o.ID = id
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *UpdateCredentialsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+ if err := r.SetBodyParam(o.Body); err != nil {
+ return err
+ }
+
+ // path param id
+ if err := r.SetPathParam("id", swag.FormatInt64(o.ID)); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
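
Editor's note: compared with the list params above, the update params carry both a JSON body and an {id} path parameter; WriteToRequest sets the body first, then formats the int64 ID into the path with swag.FormatInt64. A hedged sketch — the Credentials.UpdateCredentials service method and the caller-supplied update payload are assumptions, not shown in this hunk:

package main

import (
	"github.com/go-openapi/runtime"

	"github.com/cloudbase/garm/client"
	"github.com/cloudbase/garm/client/credentials"
	garm_params "github.com/cloudbase/garm/params"
)

// updateGithubCreds pairs the JSON body with the {id} path parameter.
func updateGithubCreds(apiCli *client.GarmAPI, authInfo runtime.ClientAuthInfoWriter, id int64, upd garm_params.UpdateGithubCredentialsParams) (garm_params.ForgeCredentials, error) {
	params := credentials.NewUpdateCredentialsParams().
		WithID(id).   // formatted into the {id} path segment
		WithBody(upd) // serialized as the JSON request body
	resp, err := apiCli.Credentials.UpdateCredentials(params, authInfo)
	if err != nil {
		return garm_params.ForgeCredentials{}, err
	}
	return resp.GetPayload(), nil
}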
diff --git a/client/credentials/update_credentials_responses.go b/client/credentials/update_credentials_responses.go
new file mode 100644
index 00000000..6a9f37f8
--- /dev/null
+++ b/client/credentials/update_credentials_responses.go
@@ -0,0 +1,179 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package credentials
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// UpdateCredentialsReader is a Reader for the UpdateCredentials structure.
+type UpdateCredentialsReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *UpdateCredentialsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewUpdateCredentialsOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ case 400:
+ result := NewUpdateCredentialsBadRequest()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ default:
+ return nil, runtime.NewAPIError("[PUT /github/credentials/{id}] UpdateCredentials", response, response.Code())
+ }
+}
+
+// NewUpdateCredentialsOK creates a UpdateCredentialsOK with default headers values
+func NewUpdateCredentialsOK() *UpdateCredentialsOK {
+ return &UpdateCredentialsOK{}
+}
+
+/*
+UpdateCredentialsOK describes a response with status code 200, with default header values.
+
+ForgeCredentials
+*/
+type UpdateCredentialsOK struct {
+ Payload garm_params.ForgeCredentials
+}
+
+// IsSuccess returns true when this update credentials o k response has a 2xx status code
+func (o *UpdateCredentialsOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this update credentials o k response has a 3xx status code
+func (o *UpdateCredentialsOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this update credentials o k response has a 4xx status code
+func (o *UpdateCredentialsOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this update credentials o k response has a 5xx status code
+func (o *UpdateCredentialsOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this update credentials o k response has a status code equal to that given
+func (o *UpdateCredentialsOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the update credentials o k response
+func (o *UpdateCredentialsOK) Code() int {
+ return 200
+}
+
+func (o *UpdateCredentialsOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /github/credentials/{id}][%d] updateCredentialsOK %s", 200, payload)
+}
+
+func (o *UpdateCredentialsOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /github/credentials/{id}][%d] updateCredentialsOK %s", 200, payload)
+}
+
+func (o *UpdateCredentialsOK) GetPayload() garm_params.ForgeCredentials {
+ return o.Payload
+}
+
+func (o *UpdateCredentialsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewUpdateCredentialsBadRequest creates a UpdateCredentialsBadRequest with default headers values
+func NewUpdateCredentialsBadRequest() *UpdateCredentialsBadRequest {
+ return &UpdateCredentialsBadRequest{}
+}
+
+/*
+UpdateCredentialsBadRequest describes a response with status code 400, with default header values.
+
+APIErrorResponse
+*/
+type UpdateCredentialsBadRequest struct {
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this update credentials bad request response has a 2xx status code
+func (o *UpdateCredentialsBadRequest) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this update credentials bad request response has a 3xx status code
+func (o *UpdateCredentialsBadRequest) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this update credentials bad request response has a 4xx status code
+func (o *UpdateCredentialsBadRequest) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this update credentials bad request response has a 5xx status code
+func (o *UpdateCredentialsBadRequest) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this update credentials bad request response has a status code equal to that given
+func (o *UpdateCredentialsBadRequest) IsCode(code int) bool {
+ return code == 400
+}
+
+// Code gets the status code for the update credentials bad request response
+func (o *UpdateCredentialsBadRequest) Code() int {
+ return 400
+}
+
+func (o *UpdateCredentialsBadRequest) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /github/credentials/{id}][%d] updateCredentialsBadRequest %s", 400, payload)
+}
+
+func (o *UpdateCredentialsBadRequest) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /github/credentials/{id}][%d] updateCredentialsBadRequest %s", 400, payload)
+}
+
+func (o *UpdateCredentialsBadRequest) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *UpdateCredentialsBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/credentials/update_gitea_credentials_parameters.go b/client/credentials/update_gitea_credentials_parameters.go
new file mode 100644
index 00000000..1907a0f2
--- /dev/null
+++ b/client/credentials/update_gitea_credentials_parameters.go
@@ -0,0 +1,174 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package credentials
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// NewUpdateGiteaCredentialsParams creates a new UpdateGiteaCredentialsParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewUpdateGiteaCredentialsParams() *UpdateGiteaCredentialsParams {
+ return &UpdateGiteaCredentialsParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewUpdateGiteaCredentialsParamsWithTimeout creates a new UpdateGiteaCredentialsParams object
+// with the ability to set a timeout on a request.
+func NewUpdateGiteaCredentialsParamsWithTimeout(timeout time.Duration) *UpdateGiteaCredentialsParams {
+ return &UpdateGiteaCredentialsParams{
+ timeout: timeout,
+ }
+}
+
+// NewUpdateGiteaCredentialsParamsWithContext creates a new UpdateGiteaCredentialsParams object
+// with the ability to set a context for a request.
+func NewUpdateGiteaCredentialsParamsWithContext(ctx context.Context) *UpdateGiteaCredentialsParams {
+ return &UpdateGiteaCredentialsParams{
+ Context: ctx,
+ }
+}
+
+// NewUpdateGiteaCredentialsParamsWithHTTPClient creates a new UpdateGiteaCredentialsParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewUpdateGiteaCredentialsParamsWithHTTPClient(client *http.Client) *UpdateGiteaCredentialsParams {
+ return &UpdateGiteaCredentialsParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+UpdateGiteaCredentialsParams contains all the parameters to send to the API endpoint
+
+ for the update gitea credentials operation.
+
+ Typically these are written to a http.Request.
+*/
+type UpdateGiteaCredentialsParams struct {
+
+ /* Body.
+
+ Parameters used when updating a Gitea credential.
+ */
+ Body garm_params.UpdateGiteaCredentialsParams
+
+ /* ID.
+
+ ID of the Gitea credential.
+ */
+ ID int64
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the update gitea credentials params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *UpdateGiteaCredentialsParams) WithDefaults() *UpdateGiteaCredentialsParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the update gitea credentials params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *UpdateGiteaCredentialsParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the update gitea credentials params
+func (o *UpdateGiteaCredentialsParams) WithTimeout(timeout time.Duration) *UpdateGiteaCredentialsParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the update gitea credentials params
+func (o *UpdateGiteaCredentialsParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the update gitea credentials params
+func (o *UpdateGiteaCredentialsParams) WithContext(ctx context.Context) *UpdateGiteaCredentialsParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the update gitea credentials params
+func (o *UpdateGiteaCredentialsParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the update gitea credentials params
+func (o *UpdateGiteaCredentialsParams) WithHTTPClient(client *http.Client) *UpdateGiteaCredentialsParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the update gitea credentials params
+func (o *UpdateGiteaCredentialsParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithBody adds the body to the update gitea credentials params
+func (o *UpdateGiteaCredentialsParams) WithBody(body garm_params.UpdateGiteaCredentialsParams) *UpdateGiteaCredentialsParams {
+ o.SetBody(body)
+ return o
+}
+
+// SetBody adds the body to the update gitea credentials params
+func (o *UpdateGiteaCredentialsParams) SetBody(body garm_params.UpdateGiteaCredentialsParams) {
+ o.Body = body
+}
+
+// WithID adds the id to the update gitea credentials params
+func (o *UpdateGiteaCredentialsParams) WithID(id int64) *UpdateGiteaCredentialsParams {
+ o.SetID(id)
+ return o
+}
+
+// SetID adds the id to the update gitea credentials params
+func (o *UpdateGiteaCredentialsParams) SetID(id int64) {
+ o.ID = id
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *UpdateGiteaCredentialsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+ if err := r.SetBodyParam(o.Body); err != nil {
+ return err
+ }
+
+ // path param id
+ if err := r.SetPathParam("id", swag.FormatInt64(o.ID)); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/credentials/update_gitea_credentials_responses.go b/client/credentials/update_gitea_credentials_responses.go
new file mode 100644
index 00000000..edbb54d8
--- /dev/null
+++ b/client/credentials/update_gitea_credentials_responses.go
@@ -0,0 +1,179 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package credentials
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// UpdateGiteaCredentialsReader is a Reader for the UpdateGiteaCredentials structure.
+type UpdateGiteaCredentialsReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *UpdateGiteaCredentialsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewUpdateGiteaCredentialsOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ case 400:
+ result := NewUpdateGiteaCredentialsBadRequest()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ default:
+ return nil, runtime.NewAPIError("[PUT /gitea/credentials/{id}] UpdateGiteaCredentials", response, response.Code())
+ }
+}
+
+// NewUpdateGiteaCredentialsOK creates a UpdateGiteaCredentialsOK with default headers values
+func NewUpdateGiteaCredentialsOK() *UpdateGiteaCredentialsOK {
+ return &UpdateGiteaCredentialsOK{}
+}
+
+/*
+UpdateGiteaCredentialsOK describes a response with status code 200, with default header values.
+
+ForgeCredentials
+*/
+type UpdateGiteaCredentialsOK struct {
+ Payload garm_params.ForgeCredentials
+}
+
+// IsSuccess returns true when this update gitea credentials o k response has a 2xx status code
+func (o *UpdateGiteaCredentialsOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this update gitea credentials o k response has a 3xx status code
+func (o *UpdateGiteaCredentialsOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this update gitea credentials o k response has a 4xx status code
+func (o *UpdateGiteaCredentialsOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this update gitea credentials o k response has a 5xx status code
+func (o *UpdateGiteaCredentialsOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this update gitea credentials o k response has a status code equal to that given
+func (o *UpdateGiteaCredentialsOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the update gitea credentials o k response
+func (o *UpdateGiteaCredentialsOK) Code() int {
+ return 200
+}
+
+func (o *UpdateGiteaCredentialsOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /gitea/credentials/{id}][%d] updateGiteaCredentialsOK %s", 200, payload)
+}
+
+func (o *UpdateGiteaCredentialsOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /gitea/credentials/{id}][%d] updateGiteaCredentialsOK %s", 200, payload)
+}
+
+func (o *UpdateGiteaCredentialsOK) GetPayload() garm_params.ForgeCredentials {
+ return o.Payload
+}
+
+func (o *UpdateGiteaCredentialsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewUpdateGiteaCredentialsBadRequest creates a UpdateGiteaCredentialsBadRequest with default headers values
+func NewUpdateGiteaCredentialsBadRequest() *UpdateGiteaCredentialsBadRequest {
+ return &UpdateGiteaCredentialsBadRequest{}
+}
+
+/*
+UpdateGiteaCredentialsBadRequest describes a response with status code 400, with default header values.
+
+APIErrorResponse
+*/
+type UpdateGiteaCredentialsBadRequest struct {
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this update gitea credentials bad request response has a 2xx status code
+func (o *UpdateGiteaCredentialsBadRequest) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this update gitea credentials bad request response has a 3xx status code
+func (o *UpdateGiteaCredentialsBadRequest) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this update gitea credentials bad request response has a 4xx status code
+func (o *UpdateGiteaCredentialsBadRequest) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this update gitea credentials bad request response has a 5xx status code
+func (o *UpdateGiteaCredentialsBadRequest) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this update gitea credentials bad request response has a status code equal to that given
+func (o *UpdateGiteaCredentialsBadRequest) IsCode(code int) bool {
+ return code == 400
+}
+
+// Code gets the status code for the update gitea credentials bad request response
+func (o *UpdateGiteaCredentialsBadRequest) Code() int {
+ return 400
+}
+
+func (o *UpdateGiteaCredentialsBadRequest) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /gitea/credentials/{id}][%d] updateGiteaCredentialsBadRequest %s", 400, payload)
+}
+
+func (o *UpdateGiteaCredentialsBadRequest) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /gitea/credentials/{id}][%d] updateGiteaCredentialsBadRequest %s", 400, payload)
+}
+
+func (o *UpdateGiteaCredentialsBadRequest) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *UpdateGiteaCredentialsBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/endpoints/create_gitea_endpoint_parameters.go b/client/endpoints/create_gitea_endpoint_parameters.go
new file mode 100644
index 00000000..11dfa73f
--- /dev/null
+++ b/client/endpoints/create_gitea_endpoint_parameters.go
@@ -0,0 +1,151 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package endpoints
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// NewCreateGiteaEndpointParams creates a new CreateGiteaEndpointParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewCreateGiteaEndpointParams() *CreateGiteaEndpointParams {
+ return &CreateGiteaEndpointParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewCreateGiteaEndpointParamsWithTimeout creates a new CreateGiteaEndpointParams object
+// with the ability to set a timeout on a request.
+func NewCreateGiteaEndpointParamsWithTimeout(timeout time.Duration) *CreateGiteaEndpointParams {
+ return &CreateGiteaEndpointParams{
+ timeout: timeout,
+ }
+}
+
+// NewCreateGiteaEndpointParamsWithContext creates a new CreateGiteaEndpointParams object
+// with the ability to set a context for a request.
+func NewCreateGiteaEndpointParamsWithContext(ctx context.Context) *CreateGiteaEndpointParams {
+ return &CreateGiteaEndpointParams{
+ Context: ctx,
+ }
+}
+
+// NewCreateGiteaEndpointParamsWithHTTPClient creates a new CreateGiteaEndpointParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewCreateGiteaEndpointParamsWithHTTPClient(client *http.Client) *CreateGiteaEndpointParams {
+ return &CreateGiteaEndpointParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+CreateGiteaEndpointParams contains all the parameters to send to the API endpoint
+
+ for the create gitea endpoint operation.
+
+ Typically these are written to a http.Request.
+*/
+type CreateGiteaEndpointParams struct {
+
+ /* Body.
+
+ Parameters used when creating a Gitea endpoint.
+ */
+ Body garm_params.CreateGiteaEndpointParams
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the create gitea endpoint params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *CreateGiteaEndpointParams) WithDefaults() *CreateGiteaEndpointParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the create gitea endpoint params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *CreateGiteaEndpointParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the create gitea endpoint params
+func (o *CreateGiteaEndpointParams) WithTimeout(timeout time.Duration) *CreateGiteaEndpointParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the create gitea endpoint params
+func (o *CreateGiteaEndpointParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the create gitea endpoint params
+func (o *CreateGiteaEndpointParams) WithContext(ctx context.Context) *CreateGiteaEndpointParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the create gitea endpoint params
+func (o *CreateGiteaEndpointParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the create gitea endpoint params
+func (o *CreateGiteaEndpointParams) WithHTTPClient(client *http.Client) *CreateGiteaEndpointParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the create gitea endpoint params
+func (o *CreateGiteaEndpointParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithBody adds the body to the create gitea endpoint params
+func (o *CreateGiteaEndpointParams) WithBody(body garm_params.CreateGiteaEndpointParams) *CreateGiteaEndpointParams {
+ o.SetBody(body)
+ return o
+}
+
+// SetBody adds the body to the create gitea endpoint params
+func (o *CreateGiteaEndpointParams) SetBody(body garm_params.CreateGiteaEndpointParams) {
+ o.Body = body
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *CreateGiteaEndpointParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+ if err := r.SetBodyParam(o.Body); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/endpoints/create_gitea_endpoint_responses.go b/client/endpoints/create_gitea_endpoint_responses.go
new file mode 100644
index 00000000..6e99a973
--- /dev/null
+++ b/client/endpoints/create_gitea_endpoint_responses.go
@@ -0,0 +1,184 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package endpoints
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// CreateGiteaEndpointReader is a Reader for the CreateGiteaEndpoint structure.
+type CreateGiteaEndpointReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *CreateGiteaEndpointReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewCreateGiteaEndpointOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ result := NewCreateGiteaEndpointDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+ }
+}
+
+// NewCreateGiteaEndpointOK creates a CreateGiteaEndpointOK with default headers values
+func NewCreateGiteaEndpointOK() *CreateGiteaEndpointOK {
+ return &CreateGiteaEndpointOK{}
+}
+
+/*
+CreateGiteaEndpointOK describes a response with status code 200, with default header values.
+
+ForgeEndpoint
+*/
+type CreateGiteaEndpointOK struct {
+ Payload garm_params.ForgeEndpoint
+}
+
+// IsSuccess returns true when this create gitea endpoint o k response has a 2xx status code
+func (o *CreateGiteaEndpointOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this create gitea endpoint o k response has a 3xx status code
+func (o *CreateGiteaEndpointOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this create gitea endpoint o k response has a 4xx status code
+func (o *CreateGiteaEndpointOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this create gitea endpoint o k response has a 5xx status code
+func (o *CreateGiteaEndpointOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this create gitea endpoint o k response has a status code equal to that given
+func (o *CreateGiteaEndpointOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the create gitea endpoint o k response
+func (o *CreateGiteaEndpointOK) Code() int {
+ return 200
+}
+
+func (o *CreateGiteaEndpointOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /gitea/endpoints][%d] createGiteaEndpointOK %s", 200, payload)
+}
+
+func (o *CreateGiteaEndpointOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /gitea/endpoints][%d] createGiteaEndpointOK %s", 200, payload)
+}
+
+func (o *CreateGiteaEndpointOK) GetPayload() garm_params.ForgeEndpoint {
+ return o.Payload
+}
+
+func (o *CreateGiteaEndpointOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewCreateGiteaEndpointDefault creates a CreateGiteaEndpointDefault with default headers values
+func NewCreateGiteaEndpointDefault(code int) *CreateGiteaEndpointDefault {
+ return &CreateGiteaEndpointDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+CreateGiteaEndpointDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type CreateGiteaEndpointDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this create gitea endpoint default response has a 2xx status code
+func (o *CreateGiteaEndpointDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this create gitea endpoint default response has a 3xx status code
+func (o *CreateGiteaEndpointDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this create gitea endpoint default response has a 4xx status code
+func (o *CreateGiteaEndpointDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this create gitea endpoint default response has a 5xx status code
+func (o *CreateGiteaEndpointDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this create gitea endpoint default response has a status code equal to that given
+func (o *CreateGiteaEndpointDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the create gitea endpoint default response
+func (o *CreateGiteaEndpointDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *CreateGiteaEndpointDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /gitea/endpoints][%d] CreateGiteaEndpoint default %s", o._statusCode, payload)
+}
+
+func (o *CreateGiteaEndpointDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /gitea/endpoints][%d] CreateGiteaEndpoint default %s", o._statusCode, payload)
+}
+
+func (o *CreateGiteaEndpointDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *CreateGiteaEndpointDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
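
Editor's note: this reader differs from the credentials readers earlier in the diff — rather than enumerating a 400 case, it funnels every non-200 status into a catch-all Default wrapper whose Is* helpers classify by status-code family (the GitHub variant below is identical in shape). One way a caller might branch on that, sketched under the assumption that an Endpoints.CreateGiteaEndpoint service method is generated alongside these files:

package main

import (
	"errors"
	"fmt"

	"github.com/go-openapi/runtime"

	"github.com/cloudbase/garm/client"
	"github.com/cloudbase/garm/client/endpoints"
	garm_params "github.com/cloudbase/garm/params"
)

// createGiteaEndpoint classifies failures via the Default wrapper's helpers.
func createGiteaEndpoint(apiCli *client.GarmAPI, authInfo runtime.ClientAuthInfoWriter, p garm_params.CreateGiteaEndpointParams) (garm_params.ForgeEndpoint, error) {
	params := endpoints.NewCreateGiteaEndpointParams().WithBody(p)
	resp, err := apiCli.Endpoints.CreateGiteaEndpoint(params, authInfo)
	if err != nil {
		var apiErr *endpoints.CreateGiteaEndpointDefault
		if errors.As(err, &apiErr) {
			switch {
			case apiErr.IsClientError():
				// 4xx: the request itself needs fixing.
				return garm_params.ForgeEndpoint{}, fmt.Errorf("rejected (%d): %w", apiErr.Code(), err)
			case apiErr.IsServerError():
				// 5xx: a retry may help.
				return garm_params.ForgeEndpoint{}, fmt.Errorf("server error (%d): %w", apiErr.Code(), err)
			}
		}
		return garm_params.ForgeEndpoint{}, err
	}
	return resp.GetPayload(), nil
}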
diff --git a/client/endpoints/create_github_endpoint_parameters.go b/client/endpoints/create_github_endpoint_parameters.go
new file mode 100644
index 00000000..030fa167
--- /dev/null
+++ b/client/endpoints/create_github_endpoint_parameters.go
@@ -0,0 +1,151 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package endpoints
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// NewCreateGithubEndpointParams creates a new CreateGithubEndpointParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewCreateGithubEndpointParams() *CreateGithubEndpointParams {
+ return &CreateGithubEndpointParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewCreateGithubEndpointParamsWithTimeout creates a new CreateGithubEndpointParams object
+// with the ability to set a timeout on a request.
+func NewCreateGithubEndpointParamsWithTimeout(timeout time.Duration) *CreateGithubEndpointParams {
+ return &CreateGithubEndpointParams{
+ timeout: timeout,
+ }
+}
+
+// NewCreateGithubEndpointParamsWithContext creates a new CreateGithubEndpointParams object
+// with the ability to set a context for a request.
+func NewCreateGithubEndpointParamsWithContext(ctx context.Context) *CreateGithubEndpointParams {
+ return &CreateGithubEndpointParams{
+ Context: ctx,
+ }
+}
+
+// NewCreateGithubEndpointParamsWithHTTPClient creates a new CreateGithubEndpointParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewCreateGithubEndpointParamsWithHTTPClient(client *http.Client) *CreateGithubEndpointParams {
+ return &CreateGithubEndpointParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+CreateGithubEndpointParams contains all the parameters to send to the API endpoint
+
+ for the create github endpoint operation.
+
+ Typically these are written to a http.Request.
+*/
+type CreateGithubEndpointParams struct {
+
+ /* Body.
+
+ Parameters used when creating a GitHub endpoint.
+ */
+ Body garm_params.CreateGithubEndpointParams
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the create github endpoint params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *CreateGithubEndpointParams) WithDefaults() *CreateGithubEndpointParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the create github endpoint params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *CreateGithubEndpointParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the create github endpoint params
+func (o *CreateGithubEndpointParams) WithTimeout(timeout time.Duration) *CreateGithubEndpointParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the create github endpoint params
+func (o *CreateGithubEndpointParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the create github endpoint params
+func (o *CreateGithubEndpointParams) WithContext(ctx context.Context) *CreateGithubEndpointParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the create github endpoint params
+func (o *CreateGithubEndpointParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the create github endpoint params
+func (o *CreateGithubEndpointParams) WithHTTPClient(client *http.Client) *CreateGithubEndpointParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the create github endpoint params
+func (o *CreateGithubEndpointParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithBody adds the body to the create github endpoint params
+func (o *CreateGithubEndpointParams) WithBody(body garm_params.CreateGithubEndpointParams) *CreateGithubEndpointParams {
+ o.SetBody(body)
+ return o
+}
+
+// SetBody adds the body to the create github endpoint params
+func (o *CreateGithubEndpointParams) SetBody(body garm_params.CreateGithubEndpointParams) {
+ o.Body = body
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *CreateGithubEndpointParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+ if err := r.SetBodyParam(o.Body); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/endpoints/create_github_endpoint_responses.go b/client/endpoints/create_github_endpoint_responses.go
new file mode 100644
index 00000000..60961f3a
--- /dev/null
+++ b/client/endpoints/create_github_endpoint_responses.go
@@ -0,0 +1,184 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package endpoints
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// CreateGithubEndpointReader is a Reader for the CreateGithubEndpoint structure.
+type CreateGithubEndpointReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *CreateGithubEndpointReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewCreateGithubEndpointOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ result := NewCreateGithubEndpointDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+ }
+}
+
+// NewCreateGithubEndpointOK creates a CreateGithubEndpointOK with default headers values
+func NewCreateGithubEndpointOK() *CreateGithubEndpointOK {
+ return &CreateGithubEndpointOK{}
+}
+
+/*
+CreateGithubEndpointOK describes a response with status code 200, with default header values.
+
+ForgeEndpoint
+*/
+type CreateGithubEndpointOK struct {
+ Payload garm_params.ForgeEndpoint
+}
+
+// IsSuccess returns true when this create github endpoint o k response has a 2xx status code
+func (o *CreateGithubEndpointOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this create github endpoint o k response has a 3xx status code
+func (o *CreateGithubEndpointOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this create github endpoint o k response has a 4xx status code
+func (o *CreateGithubEndpointOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this create github endpoint o k response has a 5xx status code
+func (o *CreateGithubEndpointOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this create github endpoint o k response has a status code equal to that given
+func (o *CreateGithubEndpointOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the create github endpoint o k response
+func (o *CreateGithubEndpointOK) Code() int {
+ return 200
+}
+
+func (o *CreateGithubEndpointOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /github/endpoints][%d] createGithubEndpointOK %s", 200, payload)
+}
+
+func (o *CreateGithubEndpointOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /github/endpoints][%d] createGithubEndpointOK %s", 200, payload)
+}
+
+func (o *CreateGithubEndpointOK) GetPayload() garm_params.ForgeEndpoint {
+ return o.Payload
+}
+
+func (o *CreateGithubEndpointOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewCreateGithubEndpointDefault creates a CreateGithubEndpointDefault with default headers values
+func NewCreateGithubEndpointDefault(code int) *CreateGithubEndpointDefault {
+ return &CreateGithubEndpointDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+CreateGithubEndpointDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type CreateGithubEndpointDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this create github endpoint default response has a 2xx status code
+func (o *CreateGithubEndpointDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this create github endpoint default response has a 3xx status code
+func (o *CreateGithubEndpointDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this create github endpoint default response has a 4xx status code
+func (o *CreateGithubEndpointDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this create github endpoint default response has a 5xx status code
+func (o *CreateGithubEndpointDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this create github endpoint default response has a status code equal to that given
+func (o *CreateGithubEndpointDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the create github endpoint default response
+func (o *CreateGithubEndpointDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *CreateGithubEndpointDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /github/endpoints][%d] CreateGithubEndpoint default %s", o._statusCode, payload)
+}
+
+func (o *CreateGithubEndpointDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /github/endpoints][%d] CreateGithubEndpoint default %s", o._statusCode, payload)
+}
+
+func (o *CreateGithubEndpointDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *CreateGithubEndpointDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/endpoints/delete_gitea_endpoint_parameters.go b/client/endpoints/delete_gitea_endpoint_parameters.go
new file mode 100644
index 00000000..f7ea5a5d
--- /dev/null
+++ b/client/endpoints/delete_gitea_endpoint_parameters.go
@@ -0,0 +1,151 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package endpoints
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewDeleteGiteaEndpointParams creates a new DeleteGiteaEndpointParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewDeleteGiteaEndpointParams() *DeleteGiteaEndpointParams {
+ return &DeleteGiteaEndpointParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewDeleteGiteaEndpointParamsWithTimeout creates a new DeleteGiteaEndpointParams object
+// with the ability to set a timeout on a request.
+func NewDeleteGiteaEndpointParamsWithTimeout(timeout time.Duration) *DeleteGiteaEndpointParams {
+ return &DeleteGiteaEndpointParams{
+ timeout: timeout,
+ }
+}
+
+// NewDeleteGiteaEndpointParamsWithContext creates a new DeleteGiteaEndpointParams object
+// with the ability to set a context for a request.
+func NewDeleteGiteaEndpointParamsWithContext(ctx context.Context) *DeleteGiteaEndpointParams {
+ return &DeleteGiteaEndpointParams{
+ Context: ctx,
+ }
+}
+
+// NewDeleteGiteaEndpointParamsWithHTTPClient creates a new DeleteGiteaEndpointParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewDeleteGiteaEndpointParamsWithHTTPClient(client *http.Client) *DeleteGiteaEndpointParams {
+ return &DeleteGiteaEndpointParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+DeleteGiteaEndpointParams contains all the parameters to send to the API endpoint
+
+ for the delete gitea endpoint operation.
+
+ Typically these are written to a http.Request.
+*/
+type DeleteGiteaEndpointParams struct {
+
+ /* Name.
+
+ The name of the Gitea endpoint.
+ */
+ Name string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the delete gitea endpoint params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *DeleteGiteaEndpointParams) WithDefaults() *DeleteGiteaEndpointParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the delete gitea endpoint params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *DeleteGiteaEndpointParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the delete gitea endpoint params
+func (o *DeleteGiteaEndpointParams) WithTimeout(timeout time.Duration) *DeleteGiteaEndpointParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the delete gitea endpoint params
+func (o *DeleteGiteaEndpointParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the delete gitea endpoint params
+func (o *DeleteGiteaEndpointParams) WithContext(ctx context.Context) *DeleteGiteaEndpointParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the delete gitea endpoint params
+func (o *DeleteGiteaEndpointParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the delete gitea endpoint params
+func (o *DeleteGiteaEndpointParams) WithHTTPClient(client *http.Client) *DeleteGiteaEndpointParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the delete gitea endpoint params
+func (o *DeleteGiteaEndpointParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithName adds the name to the delete gitea endpoint params
+func (o *DeleteGiteaEndpointParams) WithName(name string) *DeleteGiteaEndpointParams {
+ o.SetName(name)
+ return o
+}
+
+// SetName adds the name to the delete gitea endpoint params
+func (o *DeleteGiteaEndpointParams) SetName(name string) {
+ o.Name = name
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *DeleteGiteaEndpointParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ // path param name
+ if err := r.SetPathParam("name", o.Name); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/endpoints/delete_gitea_endpoint_responses.go b/client/endpoints/delete_gitea_endpoint_responses.go
new file mode 100644
index 00000000..787d6585
--- /dev/null
+++ b/client/endpoints/delete_gitea_endpoint_responses.go
@@ -0,0 +1,106 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package endpoints
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+)
+
+// DeleteGiteaEndpointReader is a Reader for the DeleteGiteaEndpoint structure.
+type DeleteGiteaEndpointReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *DeleteGiteaEndpointReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ result := NewDeleteGiteaEndpointDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+}
+
+// NewDeleteGiteaEndpointDefault creates a DeleteGiteaEndpointDefault with default headers values
+func NewDeleteGiteaEndpointDefault(code int) *DeleteGiteaEndpointDefault {
+ return &DeleteGiteaEndpointDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+DeleteGiteaEndpointDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type DeleteGiteaEndpointDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this delete gitea endpoint default response has a 2xx status code
+func (o *DeleteGiteaEndpointDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this delete gitea endpoint default response has a 3xx status code
+func (o *DeleteGiteaEndpointDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this delete gitea endpoint default response has a 4xx status code
+func (o *DeleteGiteaEndpointDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this delete gitea endpoint default response has a 5xx status code
+func (o *DeleteGiteaEndpointDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this delete gitea endpoint default response has a status code equal to that given
+func (o *DeleteGiteaEndpointDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the delete gitea endpoint default response
+func (o *DeleteGiteaEndpointDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *DeleteGiteaEndpointDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[DELETE /gitea/endpoints/{name}][%d] DeleteGiteaEndpoint default %s", o._statusCode, payload)
+}
+
+func (o *DeleteGiteaEndpointDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[DELETE /gitea/endpoints/{name}][%d] DeleteGiteaEndpoint default %s", o._statusCode, payload)
+}
+
+func (o *DeleteGiteaEndpointDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *DeleteGiteaEndpointDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
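
Because the delete operation declares only a default response, any non-2xx answer is decoded into *DeleteGiteaEndpointDefault, which implements error. A hedged sketch of unpacking it follows; deleteEndpoint is an illustrative helper, not part of the generated client.

package garmexamples

import (
	"errors"
	"fmt"

	"github.com/go-openapi/runtime"

	"github.com/cloudbase/garm/client/endpoints"
)

// deleteEndpoint turns the generated default error response into a plain
// error carrying the HTTP status and the decoded APIErrorResponse body.
func deleteEndpoint(cli endpoints.ClientService, authInfo runtime.ClientAuthInfoWriter, name string) error {
	params := endpoints.NewDeleteGiteaEndpointParams().WithName(name)
	err := cli.DeleteGiteaEndpoint(params, authInfo)
	if err == nil {
		return nil
	}
	var apiErr *endpoints.DeleteGiteaEndpointDefault
	if errors.As(err, &apiErr) {
		// Code() is the HTTP status; GetPayload() is the decoded error body.
		return fmt.Errorf("GARM API error (HTTP %d): %+v", apiErr.Code(), apiErr.GetPayload())
	}
	return err
}
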
diff --git a/client/endpoints/delete_github_endpoint_parameters.go b/client/endpoints/delete_github_endpoint_parameters.go
new file mode 100644
index 00000000..a02d4107
--- /dev/null
+++ b/client/endpoints/delete_github_endpoint_parameters.go
@@ -0,0 +1,151 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package endpoints
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewDeleteGithubEndpointParams creates a new DeleteGithubEndpointParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewDeleteGithubEndpointParams() *DeleteGithubEndpointParams {
+ return &DeleteGithubEndpointParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewDeleteGithubEndpointParamsWithTimeout creates a new DeleteGithubEndpointParams object
+// with the ability to set a timeout on a request.
+func NewDeleteGithubEndpointParamsWithTimeout(timeout time.Duration) *DeleteGithubEndpointParams {
+ return &DeleteGithubEndpointParams{
+ timeout: timeout,
+ }
+}
+
+// NewDeleteGithubEndpointParamsWithContext creates a new DeleteGithubEndpointParams object
+// with the ability to set a context for a request.
+func NewDeleteGithubEndpointParamsWithContext(ctx context.Context) *DeleteGithubEndpointParams {
+ return &DeleteGithubEndpointParams{
+ Context: ctx,
+ }
+}
+
+// NewDeleteGithubEndpointParamsWithHTTPClient creates a new DeleteGithubEndpointParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewDeleteGithubEndpointParamsWithHTTPClient(client *http.Client) *DeleteGithubEndpointParams {
+ return &DeleteGithubEndpointParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+DeleteGithubEndpointParams contains all the parameters to send to the API endpoint
+
+ for the delete github endpoint operation.
+
+ Typically these are written to a http.Request.
+*/
+type DeleteGithubEndpointParams struct {
+
+ /* Name.
+
+ The name of the GitHub endpoint.
+ */
+ Name string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the delete github endpoint params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *DeleteGithubEndpointParams) WithDefaults() *DeleteGithubEndpointParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the delete github endpoint params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *DeleteGithubEndpointParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the delete github endpoint params
+func (o *DeleteGithubEndpointParams) WithTimeout(timeout time.Duration) *DeleteGithubEndpointParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the delete github endpoint params
+func (o *DeleteGithubEndpointParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the delete github endpoint params
+func (o *DeleteGithubEndpointParams) WithContext(ctx context.Context) *DeleteGithubEndpointParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the delete github endpoint params
+func (o *DeleteGithubEndpointParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the delete github endpoint params
+func (o *DeleteGithubEndpointParams) WithHTTPClient(client *http.Client) *DeleteGithubEndpointParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the delete github endpoint params
+func (o *DeleteGithubEndpointParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithName adds the name to the delete github endpoint params
+func (o *DeleteGithubEndpointParams) WithName(name string) *DeleteGithubEndpointParams {
+ o.SetName(name)
+ return o
+}
+
+// SetName adds the name to the delete github endpoint params
+func (o *DeleteGithubEndpointParams) SetName(name string) {
+ o.Name = name
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *DeleteGithubEndpointParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ // path param name
+ if err := r.SetPathParam("name", o.Name); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/endpoints/delete_github_endpoint_responses.go b/client/endpoints/delete_github_endpoint_responses.go
new file mode 100644
index 00000000..21b3f880
--- /dev/null
+++ b/client/endpoints/delete_github_endpoint_responses.go
@@ -0,0 +1,106 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package endpoints
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+)
+
+// DeleteGithubEndpointReader is a Reader for the DeleteGithubEndpoint structure.
+type DeleteGithubEndpointReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the receiver o.
+func (o *DeleteGithubEndpointReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ result := NewDeleteGithubEndpointDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+}
+
+// NewDeleteGithubEndpointDefault creates a DeleteGithubEndpointDefault with default headers values
+func NewDeleteGithubEndpointDefault(code int) *DeleteGithubEndpointDefault {
+ return &DeleteGithubEndpointDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+DeleteGithubEndpointDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type DeleteGithubEndpointDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this delete github endpoint default response has a 2xx status code
+func (o *DeleteGithubEndpointDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this delete github endpoint default response has a 3xx status code
+func (o *DeleteGithubEndpointDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this delete github endpoint default response has a 4xx status code
+func (o *DeleteGithubEndpointDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this delete github endpoint default response has a 5xx status code
+func (o *DeleteGithubEndpointDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this delete github endpoint default response has a status code equal to that given
+func (o *DeleteGithubEndpointDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the delete github endpoint default response
+func (o *DeleteGithubEndpointDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *DeleteGithubEndpointDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[DELETE /github/endpoints/{name}][%d] DeleteGithubEndpoint default %s", o._statusCode, payload)
+}
+
+func (o *DeleteGithubEndpointDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[DELETE /github/endpoints/{name}][%d] DeleteGithubEndpoint default %s", o._statusCode, payload)
+}
+
+func (o *DeleteGithubEndpointDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *DeleteGithubEndpointDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/endpoints/endpoints_client.go b/client/endpoints/endpoints_client.go
new file mode 100644
index 00000000..74019577
--- /dev/null
+++ b/client/endpoints/endpoints_client.go
@@ -0,0 +1,451 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package endpoints
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "github.com/go-openapi/runtime"
+ httptransport "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// New creates a new endpoints API client.
+func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService {
+ return &Client{transport: transport, formats: formats}
+}
+
+// NewClientWithBasicAuth creates a new endpoints API client with basic auth credentials.
+// It takes the following parameters:
+// - host: http host (github.com).
+// - basePath: any base path for the API client ("/v1", "/v3").
+// - scheme: http scheme ("http", "https").
+// - user: user for basic authentication header.
+// - password: password for basic authentication header.
+func NewClientWithBasicAuth(host, basePath, scheme, user, password string) ClientService {
+ transport := httptransport.New(host, basePath, []string{scheme})
+ transport.DefaultAuthentication = httptransport.BasicAuth(user, password)
+ return &Client{transport: transport, formats: strfmt.Default}
+}
+
+// NewClientWithBearerToken creates a new endpoints API client with a bearer token for authentication.
+// It takes the following parameters:
+// - host: http host (github.com).
+// - basePath: any base path for the API client ("/v1", "/v3").
+// - scheme: http scheme ("http", "https").
+// - bearerToken: bearer token for Bearer authentication header.
+func NewClientWithBearerToken(host, basePath, scheme, bearerToken string) ClientService {
+ transport := httptransport.New(host, basePath, []string{scheme})
+ transport.DefaultAuthentication = httptransport.BearerToken(bearerToken)
+ return &Client{transport: transport, formats: strfmt.Default}
+}
+
+/*
+Client for endpoints API
+*/
+type Client struct {
+ transport runtime.ClientTransport
+ formats strfmt.Registry
+}
+
+// ClientOption may be used to customize the behavior of Client methods.
+type ClientOption func(*runtime.ClientOperation)
+
+// ClientService is the interface for Client methods
+type ClientService interface {
+ CreateGiteaEndpoint(params *CreateGiteaEndpointParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*CreateGiteaEndpointOK, error)
+
+ CreateGithubEndpoint(params *CreateGithubEndpointParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*CreateGithubEndpointOK, error)
+
+ DeleteGiteaEndpoint(params *DeleteGiteaEndpointParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) error
+
+ DeleteGithubEndpoint(params *DeleteGithubEndpointParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) error
+
+ GetGiteaEndpoint(params *GetGiteaEndpointParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*GetGiteaEndpointOK, error)
+
+ GetGithubEndpoint(params *GetGithubEndpointParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*GetGithubEndpointOK, error)
+
+ ListGiteaEndpoints(params *ListGiteaEndpointsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ListGiteaEndpointsOK, error)
+
+ ListGithubEndpoints(params *ListGithubEndpointsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ListGithubEndpointsOK, error)
+
+ UpdateGiteaEndpoint(params *UpdateGiteaEndpointParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*UpdateGiteaEndpointOK, error)
+
+ UpdateGithubEndpoint(params *UpdateGithubEndpointParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*UpdateGithubEndpointOK, error)
+
+ SetTransport(transport runtime.ClientTransport)
+}
+
+/*
+CreateGiteaEndpoint creates a Gitea endpoint
+*/
+func (a *Client) CreateGiteaEndpoint(params *CreateGiteaEndpointParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*CreateGiteaEndpointOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewCreateGiteaEndpointParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "CreateGiteaEndpoint",
+ Method: "POST",
+ PathPattern: "/gitea/endpoints",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &CreateGiteaEndpointReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*CreateGiteaEndpointOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ unexpectedSuccess := result.(*CreateGiteaEndpointDefault)
+ return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+CreateGithubEndpoint creates a GitHub endpoint
+*/
+func (a *Client) CreateGithubEndpoint(params *CreateGithubEndpointParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*CreateGithubEndpointOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewCreateGithubEndpointParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "CreateGithubEndpoint",
+ Method: "POST",
+ PathPattern: "/github/endpoints",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &CreateGithubEndpointReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*CreateGithubEndpointOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ unexpectedSuccess := result.(*CreateGithubEndpointDefault)
+ return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+DeleteGiteaEndpoint deletes a Gitea endpoint
+*/
+func (a *Client) DeleteGiteaEndpoint(params *DeleteGiteaEndpointParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) error {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewDeleteGiteaEndpointParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "DeleteGiteaEndpoint",
+ Method: "DELETE",
+ PathPattern: "/gitea/endpoints/{name}",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &DeleteGiteaEndpointReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ _, err := a.transport.Submit(op)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+/*
+DeleteGithubEndpoint deletes a GitHub endpoint
+*/
+func (a *Client) DeleteGithubEndpoint(params *DeleteGithubEndpointParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) error {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewDeleteGithubEndpointParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "DeleteGithubEndpoint",
+ Method: "DELETE",
+ PathPattern: "/github/endpoints/{name}",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &DeleteGithubEndpointReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ _, err := a.transport.Submit(op)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+/*
+GetGiteaEndpoint gets a Gitea endpoint
+*/
+func (a *Client) GetGiteaEndpoint(params *GetGiteaEndpointParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*GetGiteaEndpointOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewGetGiteaEndpointParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "GetGiteaEndpoint",
+ Method: "GET",
+ PathPattern: "/gitea/endpoints/{name}",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &GetGiteaEndpointReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*GetGiteaEndpointOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ unexpectedSuccess := result.(*GetGiteaEndpointDefault)
+ return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+GetGithubEndpoint gets a GitHub endpoint
+*/
+func (a *Client) GetGithubEndpoint(params *GetGithubEndpointParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*GetGithubEndpointOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewGetGithubEndpointParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "GetGithubEndpoint",
+ Method: "GET",
+ PathPattern: "/github/endpoints/{name}",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &GetGithubEndpointReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*GetGithubEndpointOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ unexpectedSuccess := result.(*GetGithubEndpointDefault)
+ return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+ListGiteaEndpoints lists all Gitea endpoints
+*/
+func (a *Client) ListGiteaEndpoints(params *ListGiteaEndpointsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ListGiteaEndpointsOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewListGiteaEndpointsParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "ListGiteaEndpoints",
+ Method: "GET",
+ PathPattern: "/gitea/endpoints",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &ListGiteaEndpointsReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*ListGiteaEndpointsOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ unexpectedSuccess := result.(*ListGiteaEndpointsDefault)
+ return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+ListGithubEndpoints lists all GitHub endpoints
+*/
+func (a *Client) ListGithubEndpoints(params *ListGithubEndpointsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ListGithubEndpointsOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewListGithubEndpointsParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "ListGithubEndpoints",
+ Method: "GET",
+ PathPattern: "/github/endpoints",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &ListGithubEndpointsReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*ListGithubEndpointsOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ unexpectedSuccess := result.(*ListGithubEndpointsDefault)
+ return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+UpdateGiteaEndpoint updates a Gitea endpoint
+*/
+func (a *Client) UpdateGiteaEndpoint(params *UpdateGiteaEndpointParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*UpdateGiteaEndpointOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewUpdateGiteaEndpointParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "UpdateGiteaEndpoint",
+ Method: "PUT",
+ PathPattern: "/gitea/endpoints/{name}",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &UpdateGiteaEndpointReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*UpdateGiteaEndpointOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ unexpectedSuccess := result.(*UpdateGiteaEndpointDefault)
+ return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+UpdateGithubEndpoint updates a GitHub endpoint
+*/
+func (a *Client) UpdateGithubEndpoint(params *UpdateGithubEndpointParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*UpdateGithubEndpointOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewUpdateGithubEndpointParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "UpdateGithubEndpoint",
+ Method: "PUT",
+ PathPattern: "/github/endpoints/{name}",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &UpdateGithubEndpointReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*UpdateGithubEndpointOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ unexpectedSuccess := result.(*UpdateGithubEndpointDefault)
+ return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+// SetTransport changes the transport on the client
+func (a *Client) SetTransport(transport runtime.ClientTransport) {
+ a.transport = transport
+}
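
A sketch of the convenience constructor and the ClientOption hook defined above. NewClientWithBearerToken installs DefaultAuthentication on the transport, so the per-call authInfo can be left nil and the go-openapi runtime falls back to the default (runtime behaviour, not something this diff adds). Host, base path and token are placeholders, and ranging over Payload assumes params.ForgeEndpoints is a slice of endpoint structs.

package garmexamples

import (
	"fmt"
	"log"

	"github.com/go-openapi/runtime"

	"github.com/cloudbase/garm/client/endpoints"
)

func listGithubEndpoints() {
	cli := endpoints.NewClientWithBearerToken("garm.example.com", "/api/v1", "http", "<jwt-token>")

	resp, err := cli.ListGithubEndpoints(
		endpoints.NewListGithubEndpointsParams(),
		nil, // nil authInfo: the transport's default bearer auth applies
		func(op *runtime.ClientOperation) {
			// A ClientOption can adjust the one operation, e.g. force https.
			op.Schemes = []string{"https"}
		},
	)
	if err != nil {
		log.Printf("listing GitHub endpoints: %v", err)
		return
	}
	for _, ep := range resp.Payload {
		fmt.Printf("%+v\n", ep)
	}
}
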
diff --git a/client/endpoints/get_gitea_endpoint_parameters.go b/client/endpoints/get_gitea_endpoint_parameters.go
new file mode 100644
index 00000000..0d7f883b
--- /dev/null
+++ b/client/endpoints/get_gitea_endpoint_parameters.go
@@ -0,0 +1,151 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package endpoints
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewGetGiteaEndpointParams creates a new GetGiteaEndpointParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewGetGiteaEndpointParams() *GetGiteaEndpointParams {
+ return &GetGiteaEndpointParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewGetGiteaEndpointParamsWithTimeout creates a new GetGiteaEndpointParams object
+// with the ability to set a timeout on a request.
+func NewGetGiteaEndpointParamsWithTimeout(timeout time.Duration) *GetGiteaEndpointParams {
+ return &GetGiteaEndpointParams{
+ timeout: timeout,
+ }
+}
+
+// NewGetGiteaEndpointParamsWithContext creates a new GetGiteaEndpointParams object
+// with the ability to set a context for a request.
+func NewGetGiteaEndpointParamsWithContext(ctx context.Context) *GetGiteaEndpointParams {
+ return &GetGiteaEndpointParams{
+ Context: ctx,
+ }
+}
+
+// NewGetGiteaEndpointParamsWithHTTPClient creates a new GetGiteaEndpointParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewGetGiteaEndpointParamsWithHTTPClient(client *http.Client) *GetGiteaEndpointParams {
+ return &GetGiteaEndpointParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+GetGiteaEndpointParams contains all the parameters to send to the API endpoint
+
+ for the get gitea endpoint operation.
+
+ Typically these are written to a http.Request.
+*/
+type GetGiteaEndpointParams struct {
+
+ /* Name.
+
+ The name of the Gitea endpoint.
+ */
+ Name string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the get gitea endpoint params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetGiteaEndpointParams) WithDefaults() *GetGiteaEndpointParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the get gitea endpoint params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetGiteaEndpointParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the get gitea endpoint params
+func (o *GetGiteaEndpointParams) WithTimeout(timeout time.Duration) *GetGiteaEndpointParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the get gitea endpoint params
+func (o *GetGiteaEndpointParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the get gitea endpoint params
+func (o *GetGiteaEndpointParams) WithContext(ctx context.Context) *GetGiteaEndpointParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the get gitea endpoint params
+func (o *GetGiteaEndpointParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the get gitea endpoint params
+func (o *GetGiteaEndpointParams) WithHTTPClient(client *http.Client) *GetGiteaEndpointParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the get gitea endpoint params
+func (o *GetGiteaEndpointParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithName adds the name to the get gitea endpoint params
+func (o *GetGiteaEndpointParams) WithName(name string) *GetGiteaEndpointParams {
+ o.SetName(name)
+ return o
+}
+
+// SetName adds the name to the get gitea endpoint params
+func (o *GetGiteaEndpointParams) SetName(name string) {
+ o.Name = name
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *GetGiteaEndpointParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ // path param name
+ if err := r.SetPathParam("name", o.Name); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/endpoints/get_gitea_endpoint_responses.go b/client/endpoints/get_gitea_endpoint_responses.go
new file mode 100644
index 00000000..e4bacd03
--- /dev/null
+++ b/client/endpoints/get_gitea_endpoint_responses.go
@@ -0,0 +1,184 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package endpoints
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// GetGiteaEndpointReader is a Reader for the GetGiteaEndpoint structure.
+type GetGiteaEndpointReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the receiver o.
+func (o *GetGiteaEndpointReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewGetGiteaEndpointOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ result := NewGetGiteaEndpointDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+ }
+}
+
+// NewGetGiteaEndpointOK creates a GetGiteaEndpointOK with default headers values
+func NewGetGiteaEndpointOK() *GetGiteaEndpointOK {
+ return &GetGiteaEndpointOK{}
+}
+
+/*
+GetGiteaEndpointOK describes a response with status code 200, with default header values.
+
+ForgeEndpoint
+*/
+type GetGiteaEndpointOK struct {
+ Payload garm_params.ForgeEndpoint
+}
+
+// IsSuccess returns true when this get gitea endpoint OK response has a 2xx status code
+func (o *GetGiteaEndpointOK) IsSuccess() bool {
+	return true
+}
+
+// IsRedirect returns true when this get gitea endpoint OK response has a 3xx status code
+func (o *GetGiteaEndpointOK) IsRedirect() bool {
+	return false
+}
+
+// IsClientError returns true when this get gitea endpoint OK response has a 4xx status code
+func (o *GetGiteaEndpointOK) IsClientError() bool {
+	return false
+}
+
+// IsServerError returns true when this get gitea endpoint OK response has a 5xx status code
+func (o *GetGiteaEndpointOK) IsServerError() bool {
+	return false
+}
+
+// IsCode returns true when this get gitea endpoint OK response has a status code equal to that given
+func (o *GetGiteaEndpointOK) IsCode(code int) bool {
+	return code == 200
+}
+
+// Code gets the status code for the get gitea endpoint OK response
+func (o *GetGiteaEndpointOK) Code() int {
+	return 200
+}
+
+func (o *GetGiteaEndpointOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /gitea/endpoints/{name}][%d] getGiteaEndpointOK %s", 200, payload)
+}
+
+func (o *GetGiteaEndpointOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /gitea/endpoints/{name}][%d] getGiteaEndpointOK %s", 200, payload)
+}
+
+func (o *GetGiteaEndpointOK) GetPayload() garm_params.ForgeEndpoint {
+ return o.Payload
+}
+
+func (o *GetGiteaEndpointOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewGetGiteaEndpointDefault creates a GetGiteaEndpointDefault with default headers values
+func NewGetGiteaEndpointDefault(code int) *GetGiteaEndpointDefault {
+ return &GetGiteaEndpointDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+GetGiteaEndpointDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type GetGiteaEndpointDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this get gitea endpoint default response has a 2xx status code
+func (o *GetGiteaEndpointDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this get gitea endpoint default response has a 3xx status code
+func (o *GetGiteaEndpointDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this get gitea endpoint default response has a 4xx status code
+func (o *GetGiteaEndpointDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this get gitea endpoint default response has a 5xx status code
+func (o *GetGiteaEndpointDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this get gitea endpoint default response has a status code equal to that given
+func (o *GetGiteaEndpointDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the get gitea endpoint default response
+func (o *GetGiteaEndpointDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *GetGiteaEndpointDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /gitea/endpoints/{name}][%d] GetGiteaEndpoint default %s", o._statusCode, payload)
+}
+
+func (o *GetGiteaEndpointDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /gitea/endpoints/{name}][%d] GetGiteaEndpoint default %s", o._statusCode, payload)
+}
+
+func (o *GetGiteaEndpointDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *GetGiteaEndpointDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
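
From the caller's side, the reader above yields *GetGiteaEndpointOK on HTTP 200 and surfaces any other status as a *GetGiteaEndpointDefault error. A hedged sketch follows; showEndpoint is illustrative only.

package garmexamples

import (
	"errors"
	"log"

	"github.com/go-openapi/runtime"

	"github.com/cloudbase/garm/client/endpoints"
)

func showEndpoint(cli endpoints.ClientService, authInfo runtime.ClientAuthInfoWriter, name string) {
	params := endpoints.NewGetGiteaEndpointParams().WithName(name)
	ok, err := cli.GetGiteaEndpoint(params, authInfo)
	if err != nil {
		var apiErr *endpoints.GetGiteaEndpointDefault
		if errors.As(err, &apiErr) && apiErr.IsClientError() {
			// 4xx, e.g. an unknown endpoint name.
			log.Printf("client error %d: %+v", apiErr.Code(), apiErr.GetPayload())
			return
		}
		log.Printf("request failed: %v", err)
		return
	}
	// GetPayload returns the decoded ForgeEndpoint.
	log.Printf("endpoint: %+v", ok.GetPayload())
}
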
diff --git a/client/endpoints/get_github_endpoint_parameters.go b/client/endpoints/get_github_endpoint_parameters.go
new file mode 100644
index 00000000..7bd9ca00
--- /dev/null
+++ b/client/endpoints/get_github_endpoint_parameters.go
@@ -0,0 +1,151 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package endpoints
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewGetGithubEndpointParams creates a new GetGithubEndpointParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewGetGithubEndpointParams() *GetGithubEndpointParams {
+ return &GetGithubEndpointParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewGetGithubEndpointParamsWithTimeout creates a new GetGithubEndpointParams object
+// with the ability to set a timeout on a request.
+func NewGetGithubEndpointParamsWithTimeout(timeout time.Duration) *GetGithubEndpointParams {
+ return &GetGithubEndpointParams{
+ timeout: timeout,
+ }
+}
+
+// NewGetGithubEndpointParamsWithContext creates a new GetGithubEndpointParams object
+// with the ability to set a context for a request.
+func NewGetGithubEndpointParamsWithContext(ctx context.Context) *GetGithubEndpointParams {
+ return &GetGithubEndpointParams{
+ Context: ctx,
+ }
+}
+
+// NewGetGithubEndpointParamsWithHTTPClient creates a new GetGithubEndpointParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewGetGithubEndpointParamsWithHTTPClient(client *http.Client) *GetGithubEndpointParams {
+ return &GetGithubEndpointParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+GetGithubEndpointParams contains all the parameters to send to the API endpoint
+
+ for the get github endpoint operation.
+
+ Typically these are written to a http.Request.
+*/
+type GetGithubEndpointParams struct {
+
+ /* Name.
+
+ The name of the GitHub endpoint.
+ */
+ Name string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the get github endpoint params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetGithubEndpointParams) WithDefaults() *GetGithubEndpointParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the get github endpoint params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetGithubEndpointParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the get github endpoint params
+func (o *GetGithubEndpointParams) WithTimeout(timeout time.Duration) *GetGithubEndpointParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the get github endpoint params
+func (o *GetGithubEndpointParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the get github endpoint params
+func (o *GetGithubEndpointParams) WithContext(ctx context.Context) *GetGithubEndpointParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the get github endpoint params
+func (o *GetGithubEndpointParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the get github endpoint params
+func (o *GetGithubEndpointParams) WithHTTPClient(client *http.Client) *GetGithubEndpointParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the get github endpoint params
+func (o *GetGithubEndpointParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithName adds the name to the get github endpoint params
+func (o *GetGithubEndpointParams) WithName(name string) *GetGithubEndpointParams {
+ o.SetName(name)
+ return o
+}
+
+// SetName adds the name to the get github endpoint params
+func (o *GetGithubEndpointParams) SetName(name string) {
+ o.Name = name
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *GetGithubEndpointParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ // path param name
+ if err := r.SetPathParam("name", o.Name); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/endpoints/get_github_endpoint_responses.go b/client/endpoints/get_github_endpoint_responses.go
new file mode 100644
index 00000000..e2b97a60
--- /dev/null
+++ b/client/endpoints/get_github_endpoint_responses.go
@@ -0,0 +1,184 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package endpoints
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// GetGithubEndpointReader is a Reader for the GetGithubEndpoint structure.
+type GetGithubEndpointReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the receiver o.
+func (o *GetGithubEndpointReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewGetGithubEndpointOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ result := NewGetGithubEndpointDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+ }
+}
+
+// NewGetGithubEndpointOK creates a GetGithubEndpointOK with default headers values
+func NewGetGithubEndpointOK() *GetGithubEndpointOK {
+ return &GetGithubEndpointOK{}
+}
+
+/*
+GetGithubEndpointOK describes a response with status code 200, with default header values.
+
+ForgeEndpoint
+*/
+type GetGithubEndpointOK struct {
+ Payload garm_params.ForgeEndpoint
+}
+
+// IsSuccess returns true when this get github endpoint OK response has a 2xx status code
+func (o *GetGithubEndpointOK) IsSuccess() bool {
+	return true
+}
+
+// IsRedirect returns true when this get github endpoint OK response has a 3xx status code
+func (o *GetGithubEndpointOK) IsRedirect() bool {
+	return false
+}
+
+// IsClientError returns true when this get github endpoint OK response has a 4xx status code
+func (o *GetGithubEndpointOK) IsClientError() bool {
+	return false
+}
+
+// IsServerError returns true when this get github endpoint OK response has a 5xx status code
+func (o *GetGithubEndpointOK) IsServerError() bool {
+	return false
+}
+
+// IsCode returns true when this get github endpoint OK response has a status code equal to that given
+func (o *GetGithubEndpointOK) IsCode(code int) bool {
+	return code == 200
+}
+
+// Code gets the status code for the get github endpoint OK response
+func (o *GetGithubEndpointOK) Code() int {
+	return 200
+}
+
+func (o *GetGithubEndpointOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /github/endpoints/{name}][%d] getGithubEndpointOK %s", 200, payload)
+}
+
+func (o *GetGithubEndpointOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /github/endpoints/{name}][%d] getGithubEndpointOK %s", 200, payload)
+}
+
+func (o *GetGithubEndpointOK) GetPayload() garm_params.ForgeEndpoint {
+ return o.Payload
+}
+
+func (o *GetGithubEndpointOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewGetGithubEndpointDefault creates a GetGithubEndpointDefault with default headers values
+func NewGetGithubEndpointDefault(code int) *GetGithubEndpointDefault {
+ return &GetGithubEndpointDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+GetGithubEndpointDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type GetGithubEndpointDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this get github endpoint default response has a 2xx status code
+func (o *GetGithubEndpointDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this get github endpoint default response has a 3xx status code
+func (o *GetGithubEndpointDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this get github endpoint default response has a 4xx status code
+func (o *GetGithubEndpointDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this get github endpoint default response has a 5xx status code
+func (o *GetGithubEndpointDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this get github endpoint default response has a status code equal to that given
+func (o *GetGithubEndpointDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the get github endpoint default response
+func (o *GetGithubEndpointDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *GetGithubEndpointDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /github/endpoints/{name}][%d] GetGithubEndpoint default %s", o._statusCode, payload)
+}
+
+func (o *GetGithubEndpointDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /github/endpoints/{name}][%d] GetGithubEndpoint default %s", o._statusCode, payload)
+}
+
+func (o *GetGithubEndpointDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *GetGithubEndpointDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/endpoints/list_gitea_endpoints_parameters.go b/client/endpoints/list_gitea_endpoints_parameters.go
new file mode 100644
index 00000000..93ec6ae6
--- /dev/null
+++ b/client/endpoints/list_gitea_endpoints_parameters.go
@@ -0,0 +1,128 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package endpoints
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewListGiteaEndpointsParams creates a new ListGiteaEndpointsParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewListGiteaEndpointsParams() *ListGiteaEndpointsParams {
+ return &ListGiteaEndpointsParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewListGiteaEndpointsParamsWithTimeout creates a new ListGiteaEndpointsParams object
+// with the ability to set a timeout on a request.
+func NewListGiteaEndpointsParamsWithTimeout(timeout time.Duration) *ListGiteaEndpointsParams {
+ return &ListGiteaEndpointsParams{
+ timeout: timeout,
+ }
+}
+
+// NewListGiteaEndpointsParamsWithContext creates a new ListGiteaEndpointsParams object
+// with the ability to set a context for a request.
+func NewListGiteaEndpointsParamsWithContext(ctx context.Context) *ListGiteaEndpointsParams {
+ return &ListGiteaEndpointsParams{
+ Context: ctx,
+ }
+}
+
+// NewListGiteaEndpointsParamsWithHTTPClient creates a new ListGiteaEndpointsParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewListGiteaEndpointsParamsWithHTTPClient(client *http.Client) *ListGiteaEndpointsParams {
+ return &ListGiteaEndpointsParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+ListGiteaEndpointsParams contains all the parameters to send to the API endpoint
+
+ for the list gitea endpoints operation.
+
+ Typically these are written to a http.Request.
+*/
+type ListGiteaEndpointsParams struct {
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the list gitea endpoints params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *ListGiteaEndpointsParams) WithDefaults() *ListGiteaEndpointsParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the list gitea endpoints params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *ListGiteaEndpointsParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the list gitea endpoints params
+func (o *ListGiteaEndpointsParams) WithTimeout(timeout time.Duration) *ListGiteaEndpointsParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the list gitea endpoints params
+func (o *ListGiteaEndpointsParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the list gitea endpoints params
+func (o *ListGiteaEndpointsParams) WithContext(ctx context.Context) *ListGiteaEndpointsParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the list gitea endpoints params
+func (o *ListGiteaEndpointsParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the list gitea endpoints params
+func (o *ListGiteaEndpointsParams) WithHTTPClient(client *http.Client) *ListGiteaEndpointsParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the list gitea endpoints params
+func (o *ListGiteaEndpointsParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *ListGiteaEndpointsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
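
The list params above carry no path or query fields, only transport plumbing, so a context deadline is the natural way to bound the call. A sketch under the same assumptions as the earlier examples; len() over the payload assumes params.ForgeEndpoints is a slice type.

package garmexamples

import (
	"context"
	"log"
	"time"

	"github.com/go-openapi/runtime"

	"github.com/cloudbase/garm/client/endpoints"
)

func listGiteaEndpoints(cli endpoints.ClientService, authInfo runtime.ClientAuthInfoWriter) {
	// The context deadline bounds the whole HTTP exchange.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	params := endpoints.NewListGiteaEndpointsParamsWithContext(ctx)
	resp, err := cli.ListGiteaEndpoints(params, authInfo)
	if err != nil {
		log.Printf("listing gitea endpoints: %v", err)
		return
	}
	log.Printf("found %d gitea endpoints", len(resp.Payload))
}
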
diff --git a/client/endpoints/list_gitea_endpoints_responses.go b/client/endpoints/list_gitea_endpoints_responses.go
new file mode 100644
index 00000000..0fdd90ec
--- /dev/null
+++ b/client/endpoints/list_gitea_endpoints_responses.go
@@ -0,0 +1,184 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package endpoints
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// ListGiteaEndpointsReader is a Reader for the ListGiteaEndpoints structure.
+type ListGiteaEndpointsReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the receiver o.
+func (o *ListGiteaEndpointsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewListGiteaEndpointsOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ result := NewListGiteaEndpointsDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+ }
+}
+
+// NewListGiteaEndpointsOK creates a ListGiteaEndpointsOK with default headers values
+func NewListGiteaEndpointsOK() *ListGiteaEndpointsOK {
+ return &ListGiteaEndpointsOK{}
+}
+
+/*
+ListGiteaEndpointsOK describes a response with status code 200, with default header values.
+
+ForgeEndpoints
+*/
+type ListGiteaEndpointsOK struct {
+ Payload garm_params.ForgeEndpoints
+}
+
+// IsSuccess returns true when this list gitea endpoints OK response has a 2xx status code
+func (o *ListGiteaEndpointsOK) IsSuccess() bool {
+	return true
+}
+
+// IsRedirect returns true when this list gitea endpoints OK response has a 3xx status code
+func (o *ListGiteaEndpointsOK) IsRedirect() bool {
+	return false
+}
+
+// IsClientError returns true when this list gitea endpoints OK response has a 4xx status code
+func (o *ListGiteaEndpointsOK) IsClientError() bool {
+	return false
+}
+
+// IsServerError returns true when this list gitea endpoints OK response has a 5xx status code
+func (o *ListGiteaEndpointsOK) IsServerError() bool {
+	return false
+}
+
+// IsCode returns true when this list gitea endpoints OK response has a status code equal to that given
+func (o *ListGiteaEndpointsOK) IsCode(code int) bool {
+	return code == 200
+}
+
+// Code gets the status code for the list gitea endpoints OK response
+func (o *ListGiteaEndpointsOK) Code() int {
+	return 200
+}
+
+func (o *ListGiteaEndpointsOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /gitea/endpoints][%d] listGiteaEndpointsOK %s", 200, payload)
+}
+
+func (o *ListGiteaEndpointsOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /gitea/endpoints][%d] listGiteaEndpointsOK %s", 200, payload)
+}
+
+func (o *ListGiteaEndpointsOK) GetPayload() garm_params.ForgeEndpoints {
+ return o.Payload
+}
+
+func (o *ListGiteaEndpointsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewListGiteaEndpointsDefault creates a ListGiteaEndpointsDefault with default headers values
+func NewListGiteaEndpointsDefault(code int) *ListGiteaEndpointsDefault {
+ return &ListGiteaEndpointsDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+ListGiteaEndpointsDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type ListGiteaEndpointsDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this list gitea endpoints default response has a 2xx status code
+func (o *ListGiteaEndpointsDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this list gitea endpoints default response has a 3xx status code
+func (o *ListGiteaEndpointsDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this list gitea endpoints default response has a 4xx status code
+func (o *ListGiteaEndpointsDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this list gitea endpoints default response has a 5xx status code
+func (o *ListGiteaEndpointsDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this list gitea endpoints default response has a status code equal to that given
+func (o *ListGiteaEndpointsDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the list gitea endpoints default response
+func (o *ListGiteaEndpointsDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *ListGiteaEndpointsDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /gitea/endpoints][%d] ListGiteaEndpoints default %s", o._statusCode, payload)
+}
+
+func (o *ListGiteaEndpointsDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /gitea/endpoints][%d] ListGiteaEndpoints default %s", o._statusCode, payload)
+}
+
+func (o *ListGiteaEndpointsDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *ListGiteaEndpointsDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/endpoints/list_github_endpoints_parameters.go b/client/endpoints/list_github_endpoints_parameters.go
new file mode 100644
index 00000000..c002cfe4
--- /dev/null
+++ b/client/endpoints/list_github_endpoints_parameters.go
@@ -0,0 +1,128 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package endpoints
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewListGithubEndpointsParams creates a new ListGithubEndpointsParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewListGithubEndpointsParams() *ListGithubEndpointsParams {
+ return &ListGithubEndpointsParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewListGithubEndpointsParamsWithTimeout creates a new ListGithubEndpointsParams object
+// with the ability to set a timeout on a request.
+func NewListGithubEndpointsParamsWithTimeout(timeout time.Duration) *ListGithubEndpointsParams {
+ return &ListGithubEndpointsParams{
+ timeout: timeout,
+ }
+}
+
+// NewListGithubEndpointsParamsWithContext creates a new ListGithubEndpointsParams object
+// with the ability to set a context for a request.
+func NewListGithubEndpointsParamsWithContext(ctx context.Context) *ListGithubEndpointsParams {
+ return &ListGithubEndpointsParams{
+ Context: ctx,
+ }
+}
+
+// NewListGithubEndpointsParamsWithHTTPClient creates a new ListGithubEndpointsParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewListGithubEndpointsParamsWithHTTPClient(client *http.Client) *ListGithubEndpointsParams {
+ return &ListGithubEndpointsParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+ListGithubEndpointsParams contains all the parameters to send to the API endpoint
+
+ for the list github endpoints operation.
+
+ Typically these are written to a http.Request.
+*/
+type ListGithubEndpointsParams struct {
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the list github endpoints params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *ListGithubEndpointsParams) WithDefaults() *ListGithubEndpointsParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the list github endpoints params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *ListGithubEndpointsParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the list github endpoints params
+func (o *ListGithubEndpointsParams) WithTimeout(timeout time.Duration) *ListGithubEndpointsParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the list github endpoints params
+func (o *ListGithubEndpointsParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the list github endpoints params
+func (o *ListGithubEndpointsParams) WithContext(ctx context.Context) *ListGithubEndpointsParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the list github endpoints params
+func (o *ListGithubEndpointsParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the list github endpoints params
+func (o *ListGithubEndpointsParams) WithHTTPClient(client *http.Client) *ListGithubEndpointsParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the list github endpoints params
+func (o *ListGithubEndpointsParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *ListGithubEndpointsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
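
The With*/Set* pairs above are the stock go-swagger fluent builders; since this GET takes no body or path parameters, context and timeout are the only request knobs. A small sketch:

```go
package main

import (
	"context"
	"time"

	"github.com/cloudbase/garm/client/endpoints"
)

// buildListParams chains the generated setters; each With* returns the
// receiver, so the calls compose into a single expression.
func buildListParams(ctx context.Context) *endpoints.ListGithubEndpointsParams {
	return endpoints.NewListGithubEndpointsParams().
		WithContext(ctx).
		WithTimeout(30 * time.Second)
}

func main() {
	_ = buildListParams(context.Background())
}
```
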
diff --git a/client/endpoints/list_github_endpoints_responses.go b/client/endpoints/list_github_endpoints_responses.go
new file mode 100644
index 00000000..33485f9b
--- /dev/null
+++ b/client/endpoints/list_github_endpoints_responses.go
@@ -0,0 +1,184 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package endpoints
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// ListGithubEndpointsReader is a Reader for the ListGithubEndpoints structure.
+type ListGithubEndpointsReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *ListGithubEndpointsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewListGithubEndpointsOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ result := NewListGithubEndpointsDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+ }
+}
+
+// NewListGithubEndpointsOK creates a ListGithubEndpointsOK with default headers values
+func NewListGithubEndpointsOK() *ListGithubEndpointsOK {
+ return &ListGithubEndpointsOK{}
+}
+
+/*
+ListGithubEndpointsOK describes a response with status code 200, with default header values.
+
+ForgeEndpoints
+*/
+type ListGithubEndpointsOK struct {
+ Payload garm_params.ForgeEndpoints
+}
+
+// IsSuccess returns true when this list github endpoints o k response has a 2xx status code
+func (o *ListGithubEndpointsOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this list github endpoints o k response has a 3xx status code
+func (o *ListGithubEndpointsOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this list github endpoints o k response has a 4xx status code
+func (o *ListGithubEndpointsOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this list github endpoints o k response has a 5xx status code
+func (o *ListGithubEndpointsOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this list github endpoints o k response has a status code equal to that given
+func (o *ListGithubEndpointsOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the list github endpoints o k response
+func (o *ListGithubEndpointsOK) Code() int {
+ return 200
+}
+
+func (o *ListGithubEndpointsOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /github/endpoints][%d] listGithubEndpointsOK %s", 200, payload)
+}
+
+func (o *ListGithubEndpointsOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /github/endpoints][%d] listGithubEndpointsOK %s", 200, payload)
+}
+
+func (o *ListGithubEndpointsOK) GetPayload() garm_params.ForgeEndpoints {
+ return o.Payload
+}
+
+func (o *ListGithubEndpointsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewListGithubEndpointsDefault creates a ListGithubEndpointsDefault with default headers values
+func NewListGithubEndpointsDefault(code int) *ListGithubEndpointsDefault {
+ return &ListGithubEndpointsDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+ListGithubEndpointsDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type ListGithubEndpointsDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this list github endpoints default response has a 2xx status code
+func (o *ListGithubEndpointsDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this list github endpoints default response has a 3xx status code
+func (o *ListGithubEndpointsDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this list github endpoints default response has a 4xx status code
+func (o *ListGithubEndpointsDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this list github endpoints default response has a 5xx status code
+func (o *ListGithubEndpointsDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this list github endpoints default response has a status code equal to that given
+func (o *ListGithubEndpointsDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the list github endpoints default response
+func (o *ListGithubEndpointsDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *ListGithubEndpointsDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /github/endpoints][%d] ListGithubEndpoints default %s", o._statusCode, payload)
+}
+
+func (o *ListGithubEndpointsDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /github/endpoints][%d] ListGithubEndpoints default %s", o._statusCode, payload)
+}
+
+func (o *ListGithubEndpointsDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *ListGithubEndpointsDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/endpoints/update_gitea_endpoint_parameters.go b/client/endpoints/update_gitea_endpoint_parameters.go
new file mode 100644
index 00000000..bfd18e2e
--- /dev/null
+++ b/client/endpoints/update_gitea_endpoint_parameters.go
@@ -0,0 +1,173 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package endpoints
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// NewUpdateGiteaEndpointParams creates a new UpdateGiteaEndpointParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewUpdateGiteaEndpointParams() *UpdateGiteaEndpointParams {
+ return &UpdateGiteaEndpointParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewUpdateGiteaEndpointParamsWithTimeout creates a new UpdateGiteaEndpointParams object
+// with the ability to set a timeout on a request.
+func NewUpdateGiteaEndpointParamsWithTimeout(timeout time.Duration) *UpdateGiteaEndpointParams {
+ return &UpdateGiteaEndpointParams{
+ timeout: timeout,
+ }
+}
+
+// NewUpdateGiteaEndpointParamsWithContext creates a new UpdateGiteaEndpointParams object
+// with the ability to set a context for a request.
+func NewUpdateGiteaEndpointParamsWithContext(ctx context.Context) *UpdateGiteaEndpointParams {
+ return &UpdateGiteaEndpointParams{
+ Context: ctx,
+ }
+}
+
+// NewUpdateGiteaEndpointParamsWithHTTPClient creates a new UpdateGiteaEndpointParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewUpdateGiteaEndpointParamsWithHTTPClient(client *http.Client) *UpdateGiteaEndpointParams {
+ return &UpdateGiteaEndpointParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+UpdateGiteaEndpointParams contains all the parameters to send to the API endpoint
+
+ for the update gitea endpoint operation.
+
+ Typically these are written to a http.Request.
+*/
+type UpdateGiteaEndpointParams struct {
+
+ /* Body.
+
+ Parameters used when updating a Gitea endpoint.
+ */
+ Body garm_params.UpdateGiteaEndpointParams
+
+ /* Name.
+
+ The name of the Gitea endpoint.
+ */
+ Name string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the update gitea endpoint params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *UpdateGiteaEndpointParams) WithDefaults() *UpdateGiteaEndpointParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the update gitea endpoint params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *UpdateGiteaEndpointParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the update gitea endpoint params
+func (o *UpdateGiteaEndpointParams) WithTimeout(timeout time.Duration) *UpdateGiteaEndpointParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the update gitea endpoint params
+func (o *UpdateGiteaEndpointParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the update gitea endpoint params
+func (o *UpdateGiteaEndpointParams) WithContext(ctx context.Context) *UpdateGiteaEndpointParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the update gitea endpoint params
+func (o *UpdateGiteaEndpointParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the update gitea endpoint params
+func (o *UpdateGiteaEndpointParams) WithHTTPClient(client *http.Client) *UpdateGiteaEndpointParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the update gitea endpoint params
+func (o *UpdateGiteaEndpointParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithBody adds the body to the update gitea endpoint params
+func (o *UpdateGiteaEndpointParams) WithBody(body garm_params.UpdateGiteaEndpointParams) *UpdateGiteaEndpointParams {
+ o.SetBody(body)
+ return o
+}
+
+// SetBody adds the body to the update gitea endpoint params
+func (o *UpdateGiteaEndpointParams) SetBody(body garm_params.UpdateGiteaEndpointParams) {
+ o.Body = body
+}
+
+// WithName adds the name to the update gitea endpoint params
+func (o *UpdateGiteaEndpointParams) WithName(name string) *UpdateGiteaEndpointParams {
+ o.SetName(name)
+ return o
+}
+
+// SetName adds the name to the update gitea endpoint params
+func (o *UpdateGiteaEndpointParams) SetName(name string) {
+ o.Name = name
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *UpdateGiteaEndpointParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+ if err := r.SetBodyParam(o.Body); err != nil {
+ return err
+ }
+
+ // path param name
+ if err := r.SetPathParam("name", o.Name); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
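
For the PUT operation, both the {name} path segment and the JSON body ride on the same params object. A sketch under one stated assumption: the garm_params.UpdateGiteaEndpointParams body type is taken here to expose a *string Description field, which is illustrative only; consult the params package for the real field set.

```go
package main

import (
	"github.com/cloudbase/garm/client/endpoints"
	garmParams "github.com/cloudbase/garm/params"
)

func main() {
	desc := "internal gitea"
	// Assumption: Description is a hypothetical stand-in for whatever
	// fields the update body actually carries.
	params := endpoints.NewUpdateGiteaEndpointParams().
		WithName("gitea-01"). // serialized into the {name} path parameter
		WithBody(garmParams.UpdateGiteaEndpointParams{
			Description: &desc,
		})
	_ = params
}
```
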
diff --git a/client/endpoints/update_gitea_endpoint_responses.go b/client/endpoints/update_gitea_endpoint_responses.go
new file mode 100644
index 00000000..052f45fa
--- /dev/null
+++ b/client/endpoints/update_gitea_endpoint_responses.go
@@ -0,0 +1,184 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package endpoints
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// UpdateGiteaEndpointReader is a Reader for the UpdateGiteaEndpoint structure.
+type UpdateGiteaEndpointReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *UpdateGiteaEndpointReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewUpdateGiteaEndpointOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ result := NewUpdateGiteaEndpointDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+ }
+}
+
+// NewUpdateGiteaEndpointOK creates a UpdateGiteaEndpointOK with default headers values
+func NewUpdateGiteaEndpointOK() *UpdateGiteaEndpointOK {
+ return &UpdateGiteaEndpointOK{}
+}
+
+/*
+UpdateGiteaEndpointOK describes a response with status code 200, with default header values.
+
+ForgeEndpoint
+*/
+type UpdateGiteaEndpointOK struct {
+ Payload garm_params.ForgeEndpoint
+}
+
+// IsSuccess returns true when this update gitea endpoint o k response has a 2xx status code
+func (o *UpdateGiteaEndpointOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this update gitea endpoint o k response has a 3xx status code
+func (o *UpdateGiteaEndpointOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this update gitea endpoint o k response has a 4xx status code
+func (o *UpdateGiteaEndpointOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this update gitea endpoint o k response has a 5xx status code
+func (o *UpdateGiteaEndpointOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this update gitea endpoint o k response has a status code equal to that given
+func (o *UpdateGiteaEndpointOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the update gitea endpoint o k response
+func (o *UpdateGiteaEndpointOK) Code() int {
+ return 200
+}
+
+func (o *UpdateGiteaEndpointOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /gitea/endpoints/{name}][%d] updateGiteaEndpointOK %s", 200, payload)
+}
+
+func (o *UpdateGiteaEndpointOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /gitea/endpoints/{name}][%d] updateGiteaEndpointOK %s", 200, payload)
+}
+
+func (o *UpdateGiteaEndpointOK) GetPayload() garm_params.ForgeEndpoint {
+ return o.Payload
+}
+
+func (o *UpdateGiteaEndpointOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewUpdateGiteaEndpointDefault creates a UpdateGiteaEndpointDefault with default headers values
+func NewUpdateGiteaEndpointDefault(code int) *UpdateGiteaEndpointDefault {
+ return &UpdateGiteaEndpointDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+UpdateGiteaEndpointDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type UpdateGiteaEndpointDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this update gitea endpoint default response has a 2xx status code
+func (o *UpdateGiteaEndpointDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this update gitea endpoint default response has a 3xx status code
+func (o *UpdateGiteaEndpointDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this update gitea endpoint default response has a 4xx status code
+func (o *UpdateGiteaEndpointDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this update gitea endpoint default response has a 5xx status code
+func (o *UpdateGiteaEndpointDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this update gitea endpoint default response has a status code equal to that given
+func (o *UpdateGiteaEndpointDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the update gitea endpoint default response
+func (o *UpdateGiteaEndpointDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *UpdateGiteaEndpointDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /gitea/endpoints/{name}][%d] UpdateGiteaEndpoint default %s", o._statusCode, payload)
+}
+
+func (o *UpdateGiteaEndpointDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /gitea/endpoints/{name}][%d] UpdateGiteaEndpoint default %s", o._statusCode, payload)
+}
+
+func (o *UpdateGiteaEndpointDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *UpdateGiteaEndpointDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/endpoints/update_github_endpoint_parameters.go b/client/endpoints/update_github_endpoint_parameters.go
new file mode 100644
index 00000000..35ee713a
--- /dev/null
+++ b/client/endpoints/update_github_endpoint_parameters.go
@@ -0,0 +1,173 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package endpoints
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// NewUpdateGithubEndpointParams creates a new UpdateGithubEndpointParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewUpdateGithubEndpointParams() *UpdateGithubEndpointParams {
+ return &UpdateGithubEndpointParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewUpdateGithubEndpointParamsWithTimeout creates a new UpdateGithubEndpointParams object
+// with the ability to set a timeout on a request.
+func NewUpdateGithubEndpointParamsWithTimeout(timeout time.Duration) *UpdateGithubEndpointParams {
+ return &UpdateGithubEndpointParams{
+ timeout: timeout,
+ }
+}
+
+// NewUpdateGithubEndpointParamsWithContext creates a new UpdateGithubEndpointParams object
+// with the ability to set a context for a request.
+func NewUpdateGithubEndpointParamsWithContext(ctx context.Context) *UpdateGithubEndpointParams {
+ return &UpdateGithubEndpointParams{
+ Context: ctx,
+ }
+}
+
+// NewUpdateGithubEndpointParamsWithHTTPClient creates a new UpdateGithubEndpointParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewUpdateGithubEndpointParamsWithHTTPClient(client *http.Client) *UpdateGithubEndpointParams {
+ return &UpdateGithubEndpointParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+UpdateGithubEndpointParams contains all the parameters to send to the API endpoint
+
+ for the update github endpoint operation.
+
+ Typically these are written to a http.Request.
+*/
+type UpdateGithubEndpointParams struct {
+
+ /* Body.
+
+ Parameters used when updating a GitHub endpoint.
+ */
+ Body garm_params.UpdateGithubEndpointParams
+
+ /* Name.
+
+ The name of the GitHub endpoint.
+ */
+ Name string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the update github endpoint params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *UpdateGithubEndpointParams) WithDefaults() *UpdateGithubEndpointParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the update github endpoint params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *UpdateGithubEndpointParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the update github endpoint params
+func (o *UpdateGithubEndpointParams) WithTimeout(timeout time.Duration) *UpdateGithubEndpointParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the update github endpoint params
+func (o *UpdateGithubEndpointParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the update github endpoint params
+func (o *UpdateGithubEndpointParams) WithContext(ctx context.Context) *UpdateGithubEndpointParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the update github endpoint params
+func (o *UpdateGithubEndpointParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the update github endpoint params
+func (o *UpdateGithubEndpointParams) WithHTTPClient(client *http.Client) *UpdateGithubEndpointParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the update github endpoint params
+func (o *UpdateGithubEndpointParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithBody adds the body to the update github endpoint params
+func (o *UpdateGithubEndpointParams) WithBody(body garm_params.UpdateGithubEndpointParams) *UpdateGithubEndpointParams {
+ o.SetBody(body)
+ return o
+}
+
+// SetBody adds the body to the update github endpoint params
+func (o *UpdateGithubEndpointParams) SetBody(body garm_params.UpdateGithubEndpointParams) {
+ o.Body = body
+}
+
+// WithName adds the name to the update github endpoint params
+func (o *UpdateGithubEndpointParams) WithName(name string) *UpdateGithubEndpointParams {
+ o.SetName(name)
+ return o
+}
+
+// SetName adds the name to the update github endpoint params
+func (o *UpdateGithubEndpointParams) SetName(name string) {
+ o.Name = name
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *UpdateGithubEndpointParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+ if err := r.SetBodyParam(o.Body); err != nil {
+ return err
+ }
+
+ // path param name
+ if err := r.SetPathParam("name", o.Name); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/endpoints/update_github_endpoint_responses.go b/client/endpoints/update_github_endpoint_responses.go
new file mode 100644
index 00000000..27cd4a71
--- /dev/null
+++ b/client/endpoints/update_github_endpoint_responses.go
@@ -0,0 +1,184 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package endpoints
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// UpdateGithubEndpointReader is a Reader for the UpdateGithubEndpoint structure.
+type UpdateGithubEndpointReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *UpdateGithubEndpointReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewUpdateGithubEndpointOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ result := NewUpdateGithubEndpointDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+ }
+}
+
+// NewUpdateGithubEndpointOK creates a UpdateGithubEndpointOK with default headers values
+func NewUpdateGithubEndpointOK() *UpdateGithubEndpointOK {
+ return &UpdateGithubEndpointOK{}
+}
+
+/*
+UpdateGithubEndpointOK describes a response with status code 200, with default header values.
+
+ForgeEndpoint
+*/
+type UpdateGithubEndpointOK struct {
+ Payload garm_params.ForgeEndpoint
+}
+
+// IsSuccess returns true when this update github endpoint o k response has a 2xx status code
+func (o *UpdateGithubEndpointOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this update github endpoint o k response has a 3xx status code
+func (o *UpdateGithubEndpointOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this update github endpoint o k response has a 4xx status code
+func (o *UpdateGithubEndpointOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this update github endpoint o k response has a 5xx status code
+func (o *UpdateGithubEndpointOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this update github endpoint o k response has a status code equal to that given
+func (o *UpdateGithubEndpointOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the update github endpoint o k response
+func (o *UpdateGithubEndpointOK) Code() int {
+ return 200
+}
+
+func (o *UpdateGithubEndpointOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /github/endpoints/{name}][%d] updateGithubEndpointOK %s", 200, payload)
+}
+
+func (o *UpdateGithubEndpointOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /github/endpoints/{name}][%d] updateGithubEndpointOK %s", 200, payload)
+}
+
+func (o *UpdateGithubEndpointOK) GetPayload() garm_params.ForgeEndpoint {
+ return o.Payload
+}
+
+func (o *UpdateGithubEndpointOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewUpdateGithubEndpointDefault creates a UpdateGithubEndpointDefault with default headers values
+func NewUpdateGithubEndpointDefault(code int) *UpdateGithubEndpointDefault {
+ return &UpdateGithubEndpointDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+UpdateGithubEndpointDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type UpdateGithubEndpointDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this update github endpoint default response has a 2xx status code
+func (o *UpdateGithubEndpointDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this update github endpoint default response has a 3xx status code
+func (o *UpdateGithubEndpointDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this update github endpoint default response has a 4xx status code
+func (o *UpdateGithubEndpointDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this update github endpoint default response has a 5xx status code
+func (o *UpdateGithubEndpointDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this update github endpoint default response has a status code equal to that given
+func (o *UpdateGithubEndpointDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the update github endpoint default response
+func (o *UpdateGithubEndpointDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *UpdateGithubEndpointDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /github/endpoints/{name}][%d] UpdateGithubEndpoint default %s", o._statusCode, payload)
+}
+
+func (o *UpdateGithubEndpointDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /github/endpoints/{name}][%d] UpdateGithubEndpoint default %s", o._statusCode, payload)
+}
+
+func (o *UpdateGithubEndpointDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *UpdateGithubEndpointDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/enterprises/create_enterprise_pool_responses.go b/client/enterprises/create_enterprise_pool_responses.go
index 55acec14..38331fbc 100644
--- a/client/enterprises/create_enterprise_pool_responses.go
+++ b/client/enterprises/create_enterprise_pool_responses.go
@@ -6,6 +6,7 @@ package enterprises
// Editing this file might prove futile when you re-run the swagger generate command
import (
+ "encoding/json"
"fmt"
"io"
@@ -87,11 +88,13 @@ func (o *CreateEnterprisePoolOK) Code() int {
}
func (o *CreateEnterprisePoolOK) Error() string {
- return fmt.Sprintf("[POST /enterprises/{enterpriseID}/pools][%d] createEnterprisePoolOK %+v", 200, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /enterprises/{enterpriseID}/pools][%d] createEnterprisePoolOK %s", 200, payload)
}
func (o *CreateEnterprisePoolOK) String() string {
- return fmt.Sprintf("[POST /enterprises/{enterpriseID}/pools][%d] createEnterprisePoolOK %+v", 200, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /enterprises/{enterpriseID}/pools][%d] createEnterprisePoolOK %s", 200, payload)
}
func (o *CreateEnterprisePoolOK) GetPayload() garm_params.Pool {
@@ -157,11 +160,13 @@ func (o *CreateEnterprisePoolDefault) Code() int {
}
func (o *CreateEnterprisePoolDefault) Error() string {
- return fmt.Sprintf("[POST /enterprises/{enterpriseID}/pools][%d] CreateEnterprisePool default %+v", o._statusCode, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /enterprises/{enterpriseID}/pools][%d] CreateEnterprisePool default %s", o._statusCode, payload)
}
func (o *CreateEnterprisePoolDefault) String() string {
- return fmt.Sprintf("[POST /enterprises/{enterpriseID}/pools][%d] CreateEnterprisePool default %+v", o._statusCode, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /enterprises/{enterpriseID}/pools][%d] CreateEnterprisePool default %s", o._statusCode, payload)
}
func (o *CreateEnterprisePoolDefault) GetPayload() apiserver_params.APIErrorResponse {
diff --git a/client/enterprises/create_enterprise_responses.go b/client/enterprises/create_enterprise_responses.go
index 04a8bdfa..6623ad22 100644
--- a/client/enterprises/create_enterprise_responses.go
+++ b/client/enterprises/create_enterprise_responses.go
@@ -6,6 +6,7 @@ package enterprises
// Editing this file might prove futile when you re-run the swagger generate command
import (
+ "encoding/json"
"fmt"
"io"
@@ -87,11 +88,13 @@ func (o *CreateEnterpriseOK) Code() int {
}
func (o *CreateEnterpriseOK) Error() string {
- return fmt.Sprintf("[POST /enterprises][%d] createEnterpriseOK %+v", 200, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /enterprises][%d] createEnterpriseOK %s", 200, payload)
}
func (o *CreateEnterpriseOK) String() string {
- return fmt.Sprintf("[POST /enterprises][%d] createEnterpriseOK %+v", 200, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /enterprises][%d] createEnterpriseOK %s", 200, payload)
}
func (o *CreateEnterpriseOK) GetPayload() garm_params.Enterprise {
@@ -157,11 +160,13 @@ func (o *CreateEnterpriseDefault) Code() int {
}
func (o *CreateEnterpriseDefault) Error() string {
- return fmt.Sprintf("[POST /enterprises][%d] CreateEnterprise default %+v", o._statusCode, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /enterprises][%d] CreateEnterprise default %s", o._statusCode, payload)
}
func (o *CreateEnterpriseDefault) String() string {
- return fmt.Sprintf("[POST /enterprises][%d] CreateEnterprise default %+v", o._statusCode, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /enterprises][%d] CreateEnterprise default %s", o._statusCode, payload)
}
func (o *CreateEnterpriseDefault) GetPayload() apiserver_params.APIErrorResponse {
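
The pattern repeated across these response files replaces Go's %+v struct dump with compact JSON in Error() and String(). A standalone illustration of the difference, using a stand-in payload type rather than garm's own:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Stand-in payload; garm's real types are larger but render the same way.
type pool struct {
	ID    string `json:"id"`
	Image string `json:"image"`
}

func main() {
	p := pool{ID: "8ec34c63", Image: "ubuntu:22.04"}

	// Old style: Go syntax dump, unquoted and awkward to parse from logs.
	fmt.Printf("createEnterprisePoolOK %+v\n", p)
	// createEnterprisePoolOK {ID:8ec34c63 Image:ubuntu:22.04}

	// New style: compact JSON, stable and machine-readable.
	payload, _ := json.Marshal(p)
	fmt.Printf("createEnterprisePoolOK %s\n", payload)
	// createEnterprisePoolOK {"id":"8ec34c63","image":"ubuntu:22.04"}
}
```
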
diff --git a/client/enterprises/create_enterprise_scale_set_parameters.go b/client/enterprises/create_enterprise_scale_set_parameters.go
new file mode 100644
index 00000000..76fe13ec
--- /dev/null
+++ b/client/enterprises/create_enterprise_scale_set_parameters.go
@@ -0,0 +1,173 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package enterprises
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// NewCreateEnterpriseScaleSetParams creates a new CreateEnterpriseScaleSetParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewCreateEnterpriseScaleSetParams() *CreateEnterpriseScaleSetParams {
+ return &CreateEnterpriseScaleSetParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewCreateEnterpriseScaleSetParamsWithTimeout creates a new CreateEnterpriseScaleSetParams object
+// with the ability to set a timeout on a request.
+func NewCreateEnterpriseScaleSetParamsWithTimeout(timeout time.Duration) *CreateEnterpriseScaleSetParams {
+ return &CreateEnterpriseScaleSetParams{
+ timeout: timeout,
+ }
+}
+
+// NewCreateEnterpriseScaleSetParamsWithContext creates a new CreateEnterpriseScaleSetParams object
+// with the ability to set a context for a request.
+func NewCreateEnterpriseScaleSetParamsWithContext(ctx context.Context) *CreateEnterpriseScaleSetParams {
+ return &CreateEnterpriseScaleSetParams{
+ Context: ctx,
+ }
+}
+
+// NewCreateEnterpriseScaleSetParamsWithHTTPClient creates a new CreateEnterpriseScaleSetParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewCreateEnterpriseScaleSetParamsWithHTTPClient(client *http.Client) *CreateEnterpriseScaleSetParams {
+ return &CreateEnterpriseScaleSetParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+CreateEnterpriseScaleSetParams contains all the parameters to send to the API endpoint
+
+ for the create enterprise scale set operation.
+
+ Typically these are written to a http.Request.
+*/
+type CreateEnterpriseScaleSetParams struct {
+
+ /* Body.
+
+ Parameters used when creating the enterprise scale set.
+ */
+ Body garm_params.CreateScaleSetParams
+
+ /* EnterpriseID.
+
+ Enterprise ID.
+ */
+ EnterpriseID string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the create enterprise scale set params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *CreateEnterpriseScaleSetParams) WithDefaults() *CreateEnterpriseScaleSetParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the create enterprise scale set params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *CreateEnterpriseScaleSetParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the create enterprise scale set params
+func (o *CreateEnterpriseScaleSetParams) WithTimeout(timeout time.Duration) *CreateEnterpriseScaleSetParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the create enterprise scale set params
+func (o *CreateEnterpriseScaleSetParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the create enterprise scale set params
+func (o *CreateEnterpriseScaleSetParams) WithContext(ctx context.Context) *CreateEnterpriseScaleSetParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the create enterprise scale set params
+func (o *CreateEnterpriseScaleSetParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the create enterprise scale set params
+func (o *CreateEnterpriseScaleSetParams) WithHTTPClient(client *http.Client) *CreateEnterpriseScaleSetParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the create enterprise scale set params
+func (o *CreateEnterpriseScaleSetParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithBody adds the body to the create enterprise scale set params
+func (o *CreateEnterpriseScaleSetParams) WithBody(body garm_params.CreateScaleSetParams) *CreateEnterpriseScaleSetParams {
+ o.SetBody(body)
+ return o
+}
+
+// SetBody adds the body to the create enterprise scale set params
+func (o *CreateEnterpriseScaleSetParams) SetBody(body garm_params.CreateScaleSetParams) {
+ o.Body = body
+}
+
+// WithEnterpriseID adds the enterpriseID to the create enterprise scale set params
+func (o *CreateEnterpriseScaleSetParams) WithEnterpriseID(enterpriseID string) *CreateEnterpriseScaleSetParams {
+ o.SetEnterpriseID(enterpriseID)
+ return o
+}
+
+// SetEnterpriseID adds the enterpriseId to the create enterprise scale set params
+func (o *CreateEnterpriseScaleSetParams) SetEnterpriseID(enterpriseID string) {
+ o.EnterpriseID = enterpriseID
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *CreateEnterpriseScaleSetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+ if err := r.SetBodyParam(o.Body); err != nil {
+ return err
+ }
+
+ // path param enterpriseID
+ if err := r.SetPathParam("enterpriseID", o.EnterpriseID); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/enterprises/create_enterprise_scale_set_responses.go b/client/enterprises/create_enterprise_scale_set_responses.go
new file mode 100644
index 00000000..46107fc3
--- /dev/null
+++ b/client/enterprises/create_enterprise_scale_set_responses.go
@@ -0,0 +1,184 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package enterprises
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// CreateEnterpriseScaleSetReader is a Reader for the CreateEnterpriseScaleSet structure.
+type CreateEnterpriseScaleSetReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *CreateEnterpriseScaleSetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewCreateEnterpriseScaleSetOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ result := NewCreateEnterpriseScaleSetDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+ }
+}
+
+// NewCreateEnterpriseScaleSetOK creates a CreateEnterpriseScaleSetOK with default headers values
+func NewCreateEnterpriseScaleSetOK() *CreateEnterpriseScaleSetOK {
+ return &CreateEnterpriseScaleSetOK{}
+}
+
+/*
+CreateEnterpriseScaleSetOK describes a response with status code 200, with default header values.
+
+ScaleSet
+*/
+type CreateEnterpriseScaleSetOK struct {
+ Payload garm_params.ScaleSet
+}
+
+// IsSuccess returns true when this create enterprise scale set o k response has a 2xx status code
+func (o *CreateEnterpriseScaleSetOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this create enterprise scale set o k response has a 3xx status code
+func (o *CreateEnterpriseScaleSetOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this create enterprise scale set o k response has a 4xx status code
+func (o *CreateEnterpriseScaleSetOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this create enterprise scale set o k response has a 5xx status code
+func (o *CreateEnterpriseScaleSetOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this create enterprise scale set o k response has a status code equal to that given
+func (o *CreateEnterpriseScaleSetOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the create enterprise scale set o k response
+func (o *CreateEnterpriseScaleSetOK) Code() int {
+ return 200
+}
+
+func (o *CreateEnterpriseScaleSetOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /enterprises/{enterpriseID}/scalesets][%d] createEnterpriseScaleSetOK %s", 200, payload)
+}
+
+func (o *CreateEnterpriseScaleSetOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /enterprises/{enterpriseID}/scalesets][%d] createEnterpriseScaleSetOK %s", 200, payload)
+}
+
+func (o *CreateEnterpriseScaleSetOK) GetPayload() garm_params.ScaleSet {
+ return o.Payload
+}
+
+func (o *CreateEnterpriseScaleSetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewCreateEnterpriseScaleSetDefault creates a CreateEnterpriseScaleSetDefault with default headers values
+func NewCreateEnterpriseScaleSetDefault(code int) *CreateEnterpriseScaleSetDefault {
+ return &CreateEnterpriseScaleSetDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+CreateEnterpriseScaleSetDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type CreateEnterpriseScaleSetDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this create enterprise scale set default response has a 2xx status code
+func (o *CreateEnterpriseScaleSetDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this create enterprise scale set default response has a 3xx status code
+func (o *CreateEnterpriseScaleSetDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this create enterprise scale set default response has a 4xx status code
+func (o *CreateEnterpriseScaleSetDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this create enterprise scale set default response has a 5xx status code
+func (o *CreateEnterpriseScaleSetDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this create enterprise scale set default response has a status code equal to that given
+func (o *CreateEnterpriseScaleSetDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the create enterprise scale set default response
+func (o *CreateEnterpriseScaleSetDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *CreateEnterpriseScaleSetDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /enterprises/{enterpriseID}/scalesets][%d] CreateEnterpriseScaleSet default %s", o._statusCode, payload)
+}
+
+func (o *CreateEnterpriseScaleSetDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /enterprises/{enterpriseID}/scalesets][%d] CreateEnterpriseScaleSet default %s", o._statusCode, payload)
+}
+
+func (o *CreateEnterpriseScaleSetDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *CreateEnterpriseScaleSetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
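
The Default response also doubles as a set of status-code predicates; everything in this sketch is exercised exactly as defined above:

```go
package main

import (
	"fmt"

	"github.com/cloudbase/garm/client/enterprises"
)

func main() {
	// Construct the Default response directly, as the reader does for any
	// non-200 status; the predicates simply bucket the stored code.
	res := enterprises.NewCreateEnterpriseScaleSetDefault(409)
	fmt.Println(res.IsClientError()) // true: 409/100 == 4
	fmt.Println(res.IsSuccess())     // false
	fmt.Println(res.IsCode(409))     // true
	fmt.Println(res.Code())          // 409
}
```
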
diff --git a/client/enterprises/delete_enterprise_pool_responses.go b/client/enterprises/delete_enterprise_pool_responses.go
index c0348754..88de90b8 100644
--- a/client/enterprises/delete_enterprise_pool_responses.go
+++ b/client/enterprises/delete_enterprise_pool_responses.go
@@ -6,6 +6,7 @@ package enterprises
// Editing this file might prove futile when you re-run the swagger generate command
import (
+ "encoding/json"
"fmt"
"io"
@@ -81,11 +82,13 @@ func (o *DeleteEnterprisePoolDefault) Code() int {
}
func (o *DeleteEnterprisePoolDefault) Error() string {
- return fmt.Sprintf("[DELETE /enterprises/{enterpriseID}/pools/{poolID}][%d] DeleteEnterprisePool default %+v", o._statusCode, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[DELETE /enterprises/{enterpriseID}/pools/{poolID}][%d] DeleteEnterprisePool default %s", o._statusCode, payload)
}
func (o *DeleteEnterprisePoolDefault) String() string {
- return fmt.Sprintf("[DELETE /enterprises/{enterpriseID}/pools/{poolID}][%d] DeleteEnterprisePool default %+v", o._statusCode, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[DELETE /enterprises/{enterpriseID}/pools/{poolID}][%d] DeleteEnterprisePool default %s", o._statusCode, payload)
}
func (o *DeleteEnterprisePoolDefault) GetPayload() apiserver_params.APIErrorResponse {
diff --git a/client/enterprises/delete_enterprise_responses.go b/client/enterprises/delete_enterprise_responses.go
index f846cdd0..097f8983 100644
--- a/client/enterprises/delete_enterprise_responses.go
+++ b/client/enterprises/delete_enterprise_responses.go
@@ -6,6 +6,7 @@ package enterprises
// Editing this file might prove futile when you re-run the swagger generate command
import (
+ "encoding/json"
"fmt"
"io"
@@ -81,11 +82,13 @@ func (o *DeleteEnterpriseDefault) Code() int {
}
func (o *DeleteEnterpriseDefault) Error() string {
- return fmt.Sprintf("[DELETE /enterprises/{enterpriseID}][%d] DeleteEnterprise default %+v", o._statusCode, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[DELETE /enterprises/{enterpriseID}][%d] DeleteEnterprise default %s", o._statusCode, payload)
}
func (o *DeleteEnterpriseDefault) String() string {
- return fmt.Sprintf("[DELETE /enterprises/{enterpriseID}][%d] DeleteEnterprise default %+v", o._statusCode, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[DELETE /enterprises/{enterpriseID}][%d] DeleteEnterprise default %s", o._statusCode, payload)
}
func (o *DeleteEnterpriseDefault) GetPayload() apiserver_params.APIErrorResponse {
diff --git a/client/enterprises/enterprises_client.go b/client/enterprises/enterprises_client.go
index a0029c06..0014ca96 100644
--- a/client/enterprises/enterprises_client.go
+++ b/client/enterprises/enterprises_client.go
@@ -7,6 +7,7 @@ package enterprises
import (
"github.com/go-openapi/runtime"
+ httptransport "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
)
@@ -15,6 +16,31 @@ func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientServi
return &Client{transport: transport, formats: formats}
}
+// NewClientWithBasicAuth creates a new enterprises API client with basic auth credentials.
+// It takes the following parameters:
+// - host: http host (github.com).
+// - basePath: any base path for the API client ("/v1", "/v3").
+// - scheme: http scheme ("http", "https").
+// - user: user for basic authentication header.
+// - password: password for basic authentication header.
+func NewClientWithBasicAuth(host, basePath, scheme, user, password string) ClientService {
+ transport := httptransport.New(host, basePath, []string{scheme})
+ transport.DefaultAuthentication = httptransport.BasicAuth(user, password)
+ return &Client{transport: transport, formats: strfmt.Default}
+}
+
+// NewClientWithBearerToken creates a new enterprises API client with a bearer token for authentication.
+// It takes the following parameters:
+// - host: http host (github.com).
+// - basePath: any base path for the API client ("/v1", "/v3").
+// - scheme: http scheme ("http", "https").
+// - bearerToken: bearer token for Bearer authentication header.
+func NewClientWithBearerToken(host, basePath, scheme, bearerToken string) ClientService {
+ transport := httptransport.New(host, basePath, []string{scheme})
+ transport.DefaultAuthentication = httptransport.BearerToken(bearerToken)
+ return &Client{transport: transport, formats: strfmt.Default}
+}
+
/*
Client for enterprises API
*/
@@ -23,7 +49,7 @@ type Client struct {
formats strfmt.Registry
}
-// ClientOption is the option for Client methods
+// ClientOption may be used to customize the behavior of Client methods.
type ClientOption func(*runtime.ClientOperation)
// ClientService is the interface for Client methods
@@ -32,6 +58,8 @@ type ClientService interface {
CreateEnterprisePool(params *CreateEnterprisePoolParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*CreateEnterprisePoolOK, error)
+ CreateEnterpriseScaleSet(params *CreateEnterpriseScaleSetParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*CreateEnterpriseScaleSetOK, error)
+
DeleteEnterprise(params *DeleteEnterpriseParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) error
DeleteEnterprisePool(params *DeleteEnterprisePoolParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) error
@@ -44,6 +72,8 @@ type ClientService interface {
ListEnterprisePools(params *ListEnterprisePoolsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ListEnterprisePoolsOK, error)
+ ListEnterpriseScaleSets(params *ListEnterpriseScaleSetsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ListEnterpriseScaleSetsOK, error)
+
ListEnterprises(params *ListEnterprisesParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ListEnterprisesOK, error)
UpdateEnterprise(params *UpdateEnterpriseParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*UpdateEnterpriseOK, error)
@@ -129,6 +159,44 @@ func (a *Client) CreateEnterprisePool(params *CreateEnterprisePoolParams, authIn
return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
}
+/*
+CreateEnterpriseScaleSet creates an enterprise scale set with the parameters given
+*/
+func (a *Client) CreateEnterpriseScaleSet(params *CreateEnterpriseScaleSetParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*CreateEnterpriseScaleSetOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewCreateEnterpriseScaleSetParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "CreateEnterpriseScaleSet",
+ Method: "POST",
+ PathPattern: "/enterprises/{enterpriseID}/scalesets",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &CreateEnterpriseScaleSetReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*CreateEnterpriseScaleSetOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ unexpectedSuccess := result.(*CreateEnterpriseScaleSetDefault)
+ return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
/*
DeleteEnterprise deletes enterprise by ID
*/
@@ -345,6 +413,44 @@ func (a *Client) ListEnterprisePools(params *ListEnterprisePoolsParams, authInfo
return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
}
+/*
+ListEnterpriseScaleSets lists enterprise scale sets
+*/
+func (a *Client) ListEnterpriseScaleSets(params *ListEnterpriseScaleSetsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ListEnterpriseScaleSetsOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewListEnterpriseScaleSetsParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "ListEnterpriseScaleSets",
+ Method: "GET",
+ PathPattern: "/enterprises/{enterpriseID}/scalesets",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &ListEnterpriseScaleSetsReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*ListEnterpriseScaleSetsOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ unexpectedSuccess := result.(*ListEnterpriseScaleSetsDefault)
+ return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
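// Review annotation (not part of the patch): sketch of calling the new
// ListEnterpriseScaleSets operation, reusing the imports from the sketch
// above; the enterprise ID is a placeholder.
func listEnterpriseScaleSets(cli enterprises.ClientService) error {
	params := enterprises.NewListEnterpriseScaleSetsParams().
		WithEnterpriseID("00000000-0000-0000-0000-000000000000")
	resp, err := cli.ListEnterpriseScaleSets(params, nil)
	if err != nil {
		return err
	}
	// Payload carries the decoded garm_params.ScaleSets body.
	fmt.Printf("scale sets: %+v\n", resp.Payload)
	return nil
}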
/*
ListEnterprises lists all enterprises
*/
diff --git a/client/enterprises/get_enterprise_pool_responses.go b/client/enterprises/get_enterprise_pool_responses.go
index da0f54ad..df23d774 100644
--- a/client/enterprises/get_enterprise_pool_responses.go
+++ b/client/enterprises/get_enterprise_pool_responses.go
@@ -6,6 +6,7 @@ package enterprises
// Editing this file might prove futile when you re-run the swagger generate command
import (
+ "encoding/json"
"fmt"
"io"
@@ -87,11 +88,13 @@ func (o *GetEnterprisePoolOK) Code() int {
}
func (o *GetEnterprisePoolOK) Error() string {
- return fmt.Sprintf("[GET /enterprises/{enterpriseID}/pools/{poolID}][%d] getEnterprisePoolOK %+v", 200, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /enterprises/{enterpriseID}/pools/{poolID}][%d] getEnterprisePoolOK %s", 200, payload)
}
func (o *GetEnterprisePoolOK) String() string {
- return fmt.Sprintf("[GET /enterprises/{enterpriseID}/pools/{poolID}][%d] getEnterprisePoolOK %+v", 200, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /enterprises/{enterpriseID}/pools/{poolID}][%d] getEnterprisePoolOK %s", 200, payload)
}
func (o *GetEnterprisePoolOK) GetPayload() garm_params.Pool {
@@ -157,11 +160,13 @@ func (o *GetEnterprisePoolDefault) Code() int {
}
func (o *GetEnterprisePoolDefault) Error() string {
- return fmt.Sprintf("[GET /enterprises/{enterpriseID}/pools/{poolID}][%d] GetEnterprisePool default %+v", o._statusCode, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /enterprises/{enterpriseID}/pools/{poolID}][%d] GetEnterprisePool default %s", o._statusCode, payload)
}
func (o *GetEnterprisePoolDefault) String() string {
- return fmt.Sprintf("[GET /enterprises/{enterpriseID}/pools/{poolID}][%d] GetEnterprisePool default %+v", o._statusCode, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /enterprises/{enterpriseID}/pools/{poolID}][%d] GetEnterprisePool default %s", o._statusCode, payload)
}
func (o *GetEnterprisePoolDefault) GetPayload() apiserver_params.APIErrorResponse {
diff --git a/client/enterprises/get_enterprise_responses.go b/client/enterprises/get_enterprise_responses.go
index 896393bd..b617c75f 100644
--- a/client/enterprises/get_enterprise_responses.go
+++ b/client/enterprises/get_enterprise_responses.go
@@ -6,6 +6,7 @@ package enterprises
// Editing this file might prove futile when you re-run the swagger generate command
import (
+ "encoding/json"
"fmt"
"io"
@@ -87,11 +88,13 @@ func (o *GetEnterpriseOK) Code() int {
}
func (o *GetEnterpriseOK) Error() string {
- return fmt.Sprintf("[GET /enterprises/{enterpriseID}][%d] getEnterpriseOK %+v", 200, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /enterprises/{enterpriseID}][%d] getEnterpriseOK %s", 200, payload)
}
func (o *GetEnterpriseOK) String() string {
- return fmt.Sprintf("[GET /enterprises/{enterpriseID}][%d] getEnterpriseOK %+v", 200, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /enterprises/{enterpriseID}][%d] getEnterpriseOK %s", 200, payload)
}
func (o *GetEnterpriseOK) GetPayload() garm_params.Enterprise {
@@ -157,11 +160,13 @@ func (o *GetEnterpriseDefault) Code() int {
}
func (o *GetEnterpriseDefault) Error() string {
- return fmt.Sprintf("[GET /enterprises/{enterpriseID}][%d] GetEnterprise default %+v", o._statusCode, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /enterprises/{enterpriseID}][%d] GetEnterprise default %s", o._statusCode, payload)
}
func (o *GetEnterpriseDefault) String() string {
- return fmt.Sprintf("[GET /enterprises/{enterpriseID}][%d] GetEnterprise default %+v", o._statusCode, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /enterprises/{enterpriseID}][%d] GetEnterprise default %s", o._statusCode, payload)
}
func (o *GetEnterpriseDefault) GetPayload() apiserver_params.APIErrorResponse {
diff --git a/client/enterprises/list_enterprise_instances_responses.go b/client/enterprises/list_enterprise_instances_responses.go
index bf28ccda..642e4ff2 100644
--- a/client/enterprises/list_enterprise_instances_responses.go
+++ b/client/enterprises/list_enterprise_instances_responses.go
@@ -6,6 +6,7 @@ package enterprises
// Editing this file might prove futile when you re-run the swagger generate command
import (
+ "encoding/json"
"fmt"
"io"
@@ -87,11 +88,13 @@ func (o *ListEnterpriseInstancesOK) Code() int {
}
func (o *ListEnterpriseInstancesOK) Error() string {
- return fmt.Sprintf("[GET /enterprises/{enterpriseID}/instances][%d] listEnterpriseInstancesOK %+v", 200, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /enterprises/{enterpriseID}/instances][%d] listEnterpriseInstancesOK %s", 200, payload)
}
func (o *ListEnterpriseInstancesOK) String() string {
- return fmt.Sprintf("[GET /enterprises/{enterpriseID}/instances][%d] listEnterpriseInstancesOK %+v", 200, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /enterprises/{enterpriseID}/instances][%d] listEnterpriseInstancesOK %s", 200, payload)
}
func (o *ListEnterpriseInstancesOK) GetPayload() garm_params.Instances {
@@ -157,11 +160,13 @@ func (o *ListEnterpriseInstancesDefault) Code() int {
}
func (o *ListEnterpriseInstancesDefault) Error() string {
- return fmt.Sprintf("[GET /enterprises/{enterpriseID}/instances][%d] ListEnterpriseInstances default %+v", o._statusCode, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /enterprises/{enterpriseID}/instances][%d] ListEnterpriseInstances default %s", o._statusCode, payload)
}
func (o *ListEnterpriseInstancesDefault) String() string {
- return fmt.Sprintf("[GET /enterprises/{enterpriseID}/instances][%d] ListEnterpriseInstances default %+v", o._statusCode, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /enterprises/{enterpriseID}/instances][%d] ListEnterpriseInstances default %s", o._statusCode, payload)
}
func (o *ListEnterpriseInstancesDefault) GetPayload() apiserver_params.APIErrorResponse {
diff --git a/client/enterprises/list_enterprise_pools_responses.go b/client/enterprises/list_enterprise_pools_responses.go
index 3c228155..29682eb3 100644
--- a/client/enterprises/list_enterprise_pools_responses.go
+++ b/client/enterprises/list_enterprise_pools_responses.go
@@ -6,6 +6,7 @@ package enterprises
// Editing this file might prove futile when you re-run the swagger generate command
import (
+ "encoding/json"
"fmt"
"io"
@@ -87,11 +88,13 @@ func (o *ListEnterprisePoolsOK) Code() int {
}
func (o *ListEnterprisePoolsOK) Error() string {
- return fmt.Sprintf("[GET /enterprises/{enterpriseID}/pools][%d] listEnterprisePoolsOK %+v", 200, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /enterprises/{enterpriseID}/pools][%d] listEnterprisePoolsOK %s", 200, payload)
}
func (o *ListEnterprisePoolsOK) String() string {
- return fmt.Sprintf("[GET /enterprises/{enterpriseID}/pools][%d] listEnterprisePoolsOK %+v", 200, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /enterprises/{enterpriseID}/pools][%d] listEnterprisePoolsOK %s", 200, payload)
}
func (o *ListEnterprisePoolsOK) GetPayload() garm_params.Pools {
@@ -157,11 +160,13 @@ func (o *ListEnterprisePoolsDefault) Code() int {
}
func (o *ListEnterprisePoolsDefault) Error() string {
- return fmt.Sprintf("[GET /enterprises/{enterpriseID}/pools][%d] ListEnterprisePools default %+v", o._statusCode, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /enterprises/{enterpriseID}/pools][%d] ListEnterprisePools default %s", o._statusCode, payload)
}
func (o *ListEnterprisePoolsDefault) String() string {
- return fmt.Sprintf("[GET /enterprises/{enterpriseID}/pools][%d] ListEnterprisePools default %+v", o._statusCode, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /enterprises/{enterpriseID}/pools][%d] ListEnterprisePools default %s", o._statusCode, payload)
}
func (o *ListEnterprisePoolsDefault) GetPayload() apiserver_params.APIErrorResponse {
diff --git a/client/enterprises/list_enterprise_scale_sets_parameters.go b/client/enterprises/list_enterprise_scale_sets_parameters.go
new file mode 100644
index 00000000..f835717c
--- /dev/null
+++ b/client/enterprises/list_enterprise_scale_sets_parameters.go
@@ -0,0 +1,151 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package enterprises
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewListEnterpriseScaleSetsParams creates a new ListEnterpriseScaleSetsParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewListEnterpriseScaleSetsParams() *ListEnterpriseScaleSetsParams {
+ return &ListEnterpriseScaleSetsParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewListEnterpriseScaleSetsParamsWithTimeout creates a new ListEnterpriseScaleSetsParams object
+// with the ability to set a timeout on a request.
+func NewListEnterpriseScaleSetsParamsWithTimeout(timeout time.Duration) *ListEnterpriseScaleSetsParams {
+ return &ListEnterpriseScaleSetsParams{
+ timeout: timeout,
+ }
+}
+
+// NewListEnterpriseScaleSetsParamsWithContext creates a new ListEnterpriseScaleSetsParams object
+// with the ability to set a context for a request.
+func NewListEnterpriseScaleSetsParamsWithContext(ctx context.Context) *ListEnterpriseScaleSetsParams {
+ return &ListEnterpriseScaleSetsParams{
+ Context: ctx,
+ }
+}
+
+// NewListEnterpriseScaleSetsParamsWithHTTPClient creates a new ListEnterpriseScaleSetsParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewListEnterpriseScaleSetsParamsWithHTTPClient(client *http.Client) *ListEnterpriseScaleSetsParams {
+ return &ListEnterpriseScaleSetsParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+ListEnterpriseScaleSetsParams contains all the parameters to send to the API endpoint
+
+ for the list enterprise scale sets operation.
+
+ Typically these are written to a http.Request.
+*/
+type ListEnterpriseScaleSetsParams struct {
+
+ /* EnterpriseID.
+
+ Enterprise ID.
+ */
+ EnterpriseID string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the list enterprise scale sets params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *ListEnterpriseScaleSetsParams) WithDefaults() *ListEnterpriseScaleSetsParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the list enterprise scale sets params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *ListEnterpriseScaleSetsParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the list enterprise scale sets params
+func (o *ListEnterpriseScaleSetsParams) WithTimeout(timeout time.Duration) *ListEnterpriseScaleSetsParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the list enterprise scale sets params
+func (o *ListEnterpriseScaleSetsParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the list enterprise scale sets params
+func (o *ListEnterpriseScaleSetsParams) WithContext(ctx context.Context) *ListEnterpriseScaleSetsParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the list enterprise scale sets params
+func (o *ListEnterpriseScaleSetsParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the list enterprise scale sets params
+func (o *ListEnterpriseScaleSetsParams) WithHTTPClient(client *http.Client) *ListEnterpriseScaleSetsParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the list enterprise scale sets params
+func (o *ListEnterpriseScaleSetsParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithEnterpriseID adds the enterpriseID to the list enterprise scale sets params
+func (o *ListEnterpriseScaleSetsParams) WithEnterpriseID(enterpriseID string) *ListEnterpriseScaleSetsParams {
+ o.SetEnterpriseID(enterpriseID)
+ return o
+}
+
+// SetEnterpriseID adds the enterpriseId to the list enterprise scale sets params
+func (o *ListEnterpriseScaleSetsParams) SetEnterpriseID(enterpriseID string) {
+ o.EnterpriseID = enterpriseID
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *ListEnterpriseScaleSetsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ // path param enterpriseID
+ if err := r.SetPathParam("enterpriseID", o.EnterpriseID); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/enterprises/list_enterprise_scale_sets_responses.go b/client/enterprises/list_enterprise_scale_sets_responses.go
new file mode 100644
index 00000000..9c2564c2
--- /dev/null
+++ b/client/enterprises/list_enterprise_scale_sets_responses.go
@@ -0,0 +1,184 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package enterprises
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// ListEnterpriseScaleSetsReader is a Reader for the ListEnterpriseScaleSets structure.
+type ListEnterpriseScaleSetsReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *ListEnterpriseScaleSetsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewListEnterpriseScaleSetsOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ result := NewListEnterpriseScaleSetsDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+ }
+}
+
+// NewListEnterpriseScaleSetsOK creates a ListEnterpriseScaleSetsOK with default headers values
+func NewListEnterpriseScaleSetsOK() *ListEnterpriseScaleSetsOK {
+ return &ListEnterpriseScaleSetsOK{}
+}
+
+/*
+ListEnterpriseScaleSetsOK describes a response with status code 200, with default header values.
+
+ScaleSets
+*/
+type ListEnterpriseScaleSetsOK struct {
+ Payload garm_params.ScaleSets
+}
+
+// IsSuccess returns true when this list enterprise scale sets o k response has a 2xx status code
+func (o *ListEnterpriseScaleSetsOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this list enterprise scale sets o k response has a 3xx status code
+func (o *ListEnterpriseScaleSetsOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this list enterprise scale sets o k response has a 4xx status code
+func (o *ListEnterpriseScaleSetsOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this list enterprise scale sets o k response has a 5xx status code
+func (o *ListEnterpriseScaleSetsOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this list enterprise scale sets o k response has a status code equal to that given
+func (o *ListEnterpriseScaleSetsOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the list enterprise scale sets o k response
+func (o *ListEnterpriseScaleSetsOK) Code() int {
+ return 200
+}
+
+func (o *ListEnterpriseScaleSetsOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /enterprises/{enterpriseID}/scalesets][%d] listEnterpriseScaleSetsOK %s", 200, payload)
+}
+
+func (o *ListEnterpriseScaleSetsOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /enterprises/{enterpriseID}/scalesets][%d] listEnterpriseScaleSetsOK %s", 200, payload)
+}
+
+func (o *ListEnterpriseScaleSetsOK) GetPayload() garm_params.ScaleSets {
+ return o.Payload
+}
+
+func (o *ListEnterpriseScaleSetsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewListEnterpriseScaleSetsDefault creates a ListEnterpriseScaleSetsDefault with default headers values
+func NewListEnterpriseScaleSetsDefault(code int) *ListEnterpriseScaleSetsDefault {
+ return &ListEnterpriseScaleSetsDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+ListEnterpriseScaleSetsDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type ListEnterpriseScaleSetsDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this list enterprise scale sets default response has a 2xx status code
+func (o *ListEnterpriseScaleSetsDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this list enterprise scale sets default response has a 3xx status code
+func (o *ListEnterpriseScaleSetsDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this list enterprise scale sets default response has a 4xx status code
+func (o *ListEnterpriseScaleSetsDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this list enterprise scale sets default response has a 5xx status code
+func (o *ListEnterpriseScaleSetsDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this list enterprise scale sets default response has a status code equal to that given
+func (o *ListEnterpriseScaleSetsDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the list enterprise scale sets default response
+func (o *ListEnterpriseScaleSetsDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *ListEnterpriseScaleSetsDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /enterprises/{enterpriseID}/scalesets][%d] ListEnterpriseScaleSets default %s", o._statusCode, payload)
+}
+
+func (o *ListEnterpriseScaleSetsDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /enterprises/{enterpriseID}/scalesets][%d] ListEnterpriseScaleSets default %s", o._statusCode, payload)
+}
+
+func (o *ListEnterpriseScaleSetsDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *ListEnterpriseScaleSetsDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
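// Review annotation (not part of the patch): the generated Default response
// implements error through its Error() method, so callers can recover the
// typed API error with the standard library's errors.As (import "errors");
// the APIErrorResponse field name below is an assumption about the
// apiserver params package.
func describeScaleSetError(err error) {
	var apiErr *enterprises.ListEnterpriseScaleSetsDefault
	if errors.As(err, &apiErr) {
		fmt.Printf("status %d: %s\n", apiErr.Code(), apiErr.GetPayload().Error)
	}
}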
diff --git a/client/enterprises/list_enterprises_parameters.go b/client/enterprises/list_enterprises_parameters.go
index 83291c5f..44ba108b 100644
--- a/client/enterprises/list_enterprises_parameters.go
+++ b/client/enterprises/list_enterprises_parameters.go
@@ -60,6 +60,19 @@ ListEnterprisesParams contains all the parameters to send to the API endpoint
Typically these are written to a http.Request.
*/
type ListEnterprisesParams struct {
+
+ /* Endpoint.
+
+ Exact endpoint name to filter by
+ */
+ Endpoint *string
+
+ /* Name.
+
+ Exact enterprise name to filter by
+ */
+ Name *string
+
timeout time.Duration
Context context.Context
HTTPClient *http.Client
@@ -113,6 +126,28 @@ func (o *ListEnterprisesParams) SetHTTPClient(client *http.Client) {
o.HTTPClient = client
}
+// WithEndpoint adds the endpoint to the list enterprises params
+func (o *ListEnterprisesParams) WithEndpoint(endpoint *string) *ListEnterprisesParams {
+ o.SetEndpoint(endpoint)
+ return o
+}
+
+// SetEndpoint adds the endpoint to the list enterprises params
+func (o *ListEnterprisesParams) SetEndpoint(endpoint *string) {
+ o.Endpoint = endpoint
+}
+
+// WithName adds the name to the list enterprises params
+func (o *ListEnterprisesParams) WithName(name *string) *ListEnterprisesParams {
+ o.SetName(name)
+ return o
+}
+
+// SetName adds the name to the list enterprises params
+func (o *ListEnterprisesParams) SetName(name *string) {
+ o.Name = name
+}
+
// WriteToRequest writes these params to a swagger request
func (o *ListEnterprisesParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
@@ -121,6 +156,40 @@ func (o *ListEnterprisesParams) WriteToRequest(r runtime.ClientRequest, reg strf
}
var res []error
+ if o.Endpoint != nil {
+
+ // query param endpoint
+ var qrEndpoint string
+
+ if o.Endpoint != nil {
+ qrEndpoint = *o.Endpoint
+ }
+ qEndpoint := qrEndpoint
+ if qEndpoint != "" {
+
+ if err := r.SetQueryParam("endpoint", qEndpoint); err != nil {
+ return err
+ }
+ }
+ }
+
+ if o.Name != nil {
+
+ // query param name
+ var qrName string
+
+ if o.Name != nil {
+ qrName = *o.Name
+ }
+ qName := qrName
+ if qName != "" {
+
+ if err := r.SetQueryParam("name", qName); err != nil {
+ return err
+ }
+ }
+ }
+
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
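// Review annotation (not part of the patch): the new `name` and `endpoint`
// query parameters filter enterprises by exact match on the server side.
// Because WriteToRequest skips empty strings, leaving a filter nil or empty
// keeps the query string untouched. Values below are placeholders.
func listFilteredEnterprises(cli enterprises.ClientService) (*enterprises.ListEnterprisesOK, error) {
	name := "my-enterprise"
	endpoint := "github.com"
	params := enterprises.NewListEnterprisesParams().
		WithName(&name).
		WithEndpoint(&endpoint)
	return cli.ListEnterprises(params, nil)
}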
diff --git a/client/enterprises/list_enterprises_responses.go b/client/enterprises/list_enterprises_responses.go
index 1bb58307..6a2ec69d 100644
--- a/client/enterprises/list_enterprises_responses.go
+++ b/client/enterprises/list_enterprises_responses.go
@@ -6,6 +6,7 @@ package enterprises
// Editing this file might prove futile when you re-run the swagger generate command
import (
+ "encoding/json"
"fmt"
"io"
@@ -87,11 +88,13 @@ func (o *ListEnterprisesOK) Code() int {
}
func (o *ListEnterprisesOK) Error() string {
- return fmt.Sprintf("[GET /enterprises][%d] listEnterprisesOK %+v", 200, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /enterprises][%d] listEnterprisesOK %s", 200, payload)
}
func (o *ListEnterprisesOK) String() string {
- return fmt.Sprintf("[GET /enterprises][%d] listEnterprisesOK %+v", 200, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /enterprises][%d] listEnterprisesOK %s", 200, payload)
}
func (o *ListEnterprisesOK) GetPayload() garm_params.Enterprises {
@@ -157,11 +160,13 @@ func (o *ListEnterprisesDefault) Code() int {
}
func (o *ListEnterprisesDefault) Error() string {
- return fmt.Sprintf("[GET /enterprises][%d] ListEnterprises default %+v", o._statusCode, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /enterprises][%d] ListEnterprises default %s", o._statusCode, payload)
}
func (o *ListEnterprisesDefault) String() string {
- return fmt.Sprintf("[GET /enterprises][%d] ListEnterprises default %+v", o._statusCode, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /enterprises][%d] ListEnterprises default %s", o._statusCode, payload)
}
func (o *ListEnterprisesDefault) GetPayload() apiserver_params.APIErrorResponse {
diff --git a/client/enterprises/update_enterprise_pool_responses.go b/client/enterprises/update_enterprise_pool_responses.go
index e20a51c8..25a19974 100644
--- a/client/enterprises/update_enterprise_pool_responses.go
+++ b/client/enterprises/update_enterprise_pool_responses.go
@@ -6,6 +6,7 @@ package enterprises
// Editing this file might prove futile when you re-run the swagger generate command
import (
+ "encoding/json"
"fmt"
"io"
@@ -87,11 +88,13 @@ func (o *UpdateEnterprisePoolOK) Code() int {
}
func (o *UpdateEnterprisePoolOK) Error() string {
- return fmt.Sprintf("[PUT /enterprises/{enterpriseID}/pools/{poolID}][%d] updateEnterprisePoolOK %+v", 200, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /enterprises/{enterpriseID}/pools/{poolID}][%d] updateEnterprisePoolOK %s", 200, payload)
}
func (o *UpdateEnterprisePoolOK) String() string {
- return fmt.Sprintf("[PUT /enterprises/{enterpriseID}/pools/{poolID}][%d] updateEnterprisePoolOK %+v", 200, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /enterprises/{enterpriseID}/pools/{poolID}][%d] updateEnterprisePoolOK %s", 200, payload)
}
func (o *UpdateEnterprisePoolOK) GetPayload() garm_params.Pool {
@@ -157,11 +160,13 @@ func (o *UpdateEnterprisePoolDefault) Code() int {
}
func (o *UpdateEnterprisePoolDefault) Error() string {
- return fmt.Sprintf("[PUT /enterprises/{enterpriseID}/pools/{poolID}][%d] UpdateEnterprisePool default %+v", o._statusCode, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /enterprises/{enterpriseID}/pools/{poolID}][%d] UpdateEnterprisePool default %s", o._statusCode, payload)
}
func (o *UpdateEnterprisePoolDefault) String() string {
- return fmt.Sprintf("[PUT /enterprises/{enterpriseID}/pools/{poolID}][%d] UpdateEnterprisePool default %+v", o._statusCode, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /enterprises/{enterpriseID}/pools/{poolID}][%d] UpdateEnterprisePool default %s", o._statusCode, payload)
}
func (o *UpdateEnterprisePoolDefault) GetPayload() apiserver_params.APIErrorResponse {
diff --git a/client/enterprises/update_enterprise_responses.go b/client/enterprises/update_enterprise_responses.go
index 3bea3347..f6d34781 100644
--- a/client/enterprises/update_enterprise_responses.go
+++ b/client/enterprises/update_enterprise_responses.go
@@ -6,6 +6,7 @@ package enterprises
// Editing this file might prove futile when you re-run the swagger generate command
import (
+ "encoding/json"
"fmt"
"io"
@@ -87,11 +88,13 @@ func (o *UpdateEnterpriseOK) Code() int {
}
func (o *UpdateEnterpriseOK) Error() string {
- return fmt.Sprintf("[PUT /enterprises/{enterpriseID}][%d] updateEnterpriseOK %+v", 200, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /enterprises/{enterpriseID}][%d] updateEnterpriseOK %s", 200, payload)
}
func (o *UpdateEnterpriseOK) String() string {
- return fmt.Sprintf("[PUT /enterprises/{enterpriseID}][%d] updateEnterpriseOK %+v", 200, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /enterprises/{enterpriseID}][%d] updateEnterpriseOK %s", 200, payload)
}
func (o *UpdateEnterpriseOK) GetPayload() garm_params.Enterprise {
@@ -157,11 +160,13 @@ func (o *UpdateEnterpriseDefault) Code() int {
}
func (o *UpdateEnterpriseDefault) Error() string {
- return fmt.Sprintf("[PUT /enterprises/{enterpriseID}][%d] UpdateEnterprise default %+v", o._statusCode, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /enterprises/{enterpriseID}][%d] UpdateEnterprise default %s", o._statusCode, payload)
}
func (o *UpdateEnterpriseDefault) String() string {
- return fmt.Sprintf("[PUT /enterprises/{enterpriseID}][%d] UpdateEnterprise default %+v", o._statusCode, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /enterprises/{enterpriseID}][%d] UpdateEnterprise default %s", o._statusCode, payload)
}
func (o *UpdateEnterpriseDefault) GetPayload() apiserver_params.APIErrorResponse {
diff --git a/client/first_run/first_run_client.go b/client/first_run/first_run_client.go
index 9066a56e..09792ad5 100644
--- a/client/first_run/first_run_client.go
+++ b/client/first_run/first_run_client.go
@@ -9,6 +9,7 @@ import (
"fmt"
"github.com/go-openapi/runtime"
+ httptransport "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
)
@@ -17,6 +18,31 @@ func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientServi
return &Client{transport: transport, formats: formats}
}
+// NewClientWithBasicAuth creates a new first run API client with basic auth credentials.
+// It takes the following parameters:
+// - host: http host (github.com).
+// - basePath: any base path for the API client ("/v1", "/v3").
+// - scheme: http scheme ("http", "https").
+// - user: user for basic authentication header.
+// - password: password for basic authentication header.
+func NewClientWithBasicAuth(host, basePath, scheme, user, password string) ClientService {
+ transport := httptransport.New(host, basePath, []string{scheme})
+ transport.DefaultAuthentication = httptransport.BasicAuth(user, password)
+ return &Client{transport: transport, formats: strfmt.Default}
+}
+
+// NewClientWithBearerToken creates a new first run API client with a bearer token for authentication.
+// It takes the following parameters:
+// - host: http host (github.com).
+// - basePath: any base path for the API client ("/v1", "/v3").
+// - scheme: http scheme ("http", "https").
+// - bearerToken: bearer token for Bearer authentication header.
+func NewClientWithBearerToken(host, basePath, scheme, bearerToken string) ClientService {
+ transport := httptransport.New(host, basePath, []string{scheme})
+ transport.DefaultAuthentication = httptransport.BearerToken(bearerToken)
+ return &Client{transport: transport, formats: strfmt.Default}
+}
+
/*
Client for first run API
*/
@@ -25,7 +51,7 @@ type Client struct {
formats strfmt.Registry
}
-// ClientOption is the option for Client methods
+// ClientOption may be used to customize the behavior of Client methods.
type ClientOption func(*runtime.ClientOperation)
// ClientService is the interface for Client methods
diff --git a/client/first_run/first_run_responses.go b/client/first_run/first_run_responses.go
index 9057d7ee..d0d7c2e7 100644
--- a/client/first_run/first_run_responses.go
+++ b/client/first_run/first_run_responses.go
@@ -6,6 +6,7 @@ package first_run
// Editing this file might prove futile when you re-run the swagger generate command
import (
+ "encoding/json"
"fmt"
"io"
@@ -86,11 +87,13 @@ func (o *FirstRunOK) Code() int {
}
func (o *FirstRunOK) Error() string {
- return fmt.Sprintf("[POST /first-run][%d] firstRunOK %+v", 200, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /first-run][%d] firstRunOK %s", 200, payload)
}
func (o *FirstRunOK) String() string {
- return fmt.Sprintf("[POST /first-run][%d] firstRunOK %+v", 200, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /first-run][%d] firstRunOK %s", 200, payload)
}
func (o *FirstRunOK) GetPayload() garm_params.User {
@@ -152,11 +155,13 @@ func (o *FirstRunBadRequest) Code() int {
}
func (o *FirstRunBadRequest) Error() string {
- return fmt.Sprintf("[POST /first-run][%d] firstRunBadRequest %+v", 400, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /first-run][%d] firstRunBadRequest %s", 400, payload)
}
func (o *FirstRunBadRequest) String() string {
- return fmt.Sprintf("[POST /first-run][%d] firstRunBadRequest %+v", 400, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /first-run][%d] firstRunBadRequest %s", 400, payload)
}
func (o *FirstRunBadRequest) GetPayload() apiserver_params.APIErrorResponse {
diff --git a/client/garm_api_client.go b/client/garm_api_client.go
index 0f9a208a..f5bc51b2 100644
--- a/client/garm_api_client.go
+++ b/client/garm_api_client.go
@@ -10,7 +10,10 @@ import (
httptransport "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
+ "github.com/cloudbase/garm/client/controller"
+ "github.com/cloudbase/garm/client/controller_info"
"github.com/cloudbase/garm/client/credentials"
+ "github.com/cloudbase/garm/client/endpoints"
"github.com/cloudbase/garm/client/enterprises"
"github.com/cloudbase/garm/client/first_run"
"github.com/cloudbase/garm/client/instances"
@@ -21,6 +24,7 @@ import (
"github.com/cloudbase/garm/client/pools"
"github.com/cloudbase/garm/client/providers"
"github.com/cloudbase/garm/client/repositories"
+ "github.com/cloudbase/garm/client/scalesets"
)
// Default garm API HTTP client.
@@ -65,7 +69,10 @@ func New(transport runtime.ClientTransport, formats strfmt.Registry) *GarmAPI {
cli := new(GarmAPI)
cli.Transport = transport
+ cli.Controller = controller.New(transport, formats)
+ cli.ControllerInfo = controller_info.New(transport, formats)
cli.Credentials = credentials.New(transport, formats)
+ cli.Endpoints = endpoints.New(transport, formats)
cli.Enterprises = enterprises.New(transport, formats)
cli.FirstRun = first_run.New(transport, formats)
cli.Instances = instances.New(transport, formats)
@@ -76,6 +83,7 @@ func New(transport runtime.ClientTransport, formats strfmt.Registry) *GarmAPI {
cli.Pools = pools.New(transport, formats)
cli.Providers = providers.New(transport, formats)
cli.Repositories = repositories.New(transport, formats)
+ cli.Scalesets = scalesets.New(transport, formats)
return cli
}
@@ -120,8 +128,14 @@ func (cfg *TransportConfig) WithSchemes(schemes []string) *TransportConfig {
// GarmAPI is a client for garm API
type GarmAPI struct {
+ Controller controller.ClientService
+
+ ControllerInfo controller_info.ClientService
+
Credentials credentials.ClientService
+ Endpoints endpoints.ClientService
+
Enterprises enterprises.ClientService
FirstRun first_run.ClientService
@@ -142,13 +156,18 @@ type GarmAPI struct {
Repositories repositories.ClientService
+ Scalesets scalesets.ClientService
+
Transport runtime.ClientTransport
}
// SetTransport changes the transport on the client and all its subresources
func (c *GarmAPI) SetTransport(transport runtime.ClientTransport) {
c.Transport = transport
+ c.Controller.SetTransport(transport)
+ c.ControllerInfo.SetTransport(transport)
c.Credentials.SetTransport(transport)
+ c.Endpoints.SetTransport(transport)
c.Enterprises.SetTransport(transport)
c.FirstRun.SetTransport(transport)
c.Instances.SetTransport(transport)
@@ -159,4 +178,5 @@ func (c *GarmAPI) SetTransport(transport runtime.ClientTransport) {
c.Pools.SetTransport(transport)
c.Providers.SetTransport(transport)
c.Repositories.SetTransport(transport)
+ c.Scalesets.SetTransport(transport)
}
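// Review annotation (not part of the patch): sketch of wiring the extended
// GarmAPI; the import alias and host are assumptions. All subresources,
// including the newly added Controller, ControllerInfo, Endpoints and
// Scalesets clients, share one authenticated transport, and SetTransport
// propagates any replacement to each of them.
package main

import (
	httptransport "github.com/go-openapi/runtime/client"
	"github.com/go-openapi/strfmt"

	garmclient "github.com/cloudbase/garm/client"
)

func newGarmAPI(token string) *garmclient.GarmAPI {
	transport := httptransport.New("garm.example.com", "/api/v1", []string{"https"})
	transport.DefaultAuthentication = httptransport.BearerToken(token)
	return garmclient.New(transport, strfmt.Default)
}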
diff --git a/client/instances/delete_instance_parameters.go b/client/instances/delete_instance_parameters.go
index 34cb6eee..4a88ea5b 100644
--- a/client/instances/delete_instance_parameters.go
+++ b/client/instances/delete_instance_parameters.go
@@ -14,6 +14,7 @@ import (
"github.com/go-openapi/runtime"
cr "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
)
// NewDeleteInstanceParams creates a new DeleteInstanceParams object,
@@ -61,6 +62,18 @@ DeleteInstanceParams contains all the parameters to send to the API endpoint
*/
type DeleteInstanceParams struct {
+ /* BypassGHUnauthorized.
+
+ If true, GARM will ignore unauthorized errors returned by GitHub when removing a runner. This is useful if you want to clean up runners when your credentials have expired.
+ */
+ BypassGHUnauthorized *bool
+
+ /* ForceRemove.
+
+ If true, GARM will ignore any provider error when removing the runner and will continue to remove it from GitHub and the GARM database.
+ */
+ ForceRemove *bool
+
/* InstanceName.
Runner instance name.
@@ -120,6 +133,28 @@ func (o *DeleteInstanceParams) SetHTTPClient(client *http.Client) {
o.HTTPClient = client
}
+// WithBypassGHUnauthorized adds the bypassGHUnauthorized to the delete instance params
+func (o *DeleteInstanceParams) WithBypassGHUnauthorized(bypassGHUnauthorized *bool) *DeleteInstanceParams {
+ o.SetBypassGHUnauthorized(bypassGHUnauthorized)
+ return o
+}
+
+// SetBypassGHUnauthorized adds the bypassGHUnauthorized to the delete instance params
+func (o *DeleteInstanceParams) SetBypassGHUnauthorized(bypassGHUnauthorized *bool) {
+ o.BypassGHUnauthorized = bypassGHUnauthorized
+}
+
+// WithForceRemove adds the forceRemove to the delete instance params
+func (o *DeleteInstanceParams) WithForceRemove(forceRemove *bool) *DeleteInstanceParams {
+ o.SetForceRemove(forceRemove)
+ return o
+}
+
+// SetForceRemove adds the forceRemove to the delete instance params
+func (o *DeleteInstanceParams) SetForceRemove(forceRemove *bool) {
+ o.ForceRemove = forceRemove
+}
+
// WithInstanceName adds the instanceName to the delete instance params
func (o *DeleteInstanceParams) WithInstanceName(instanceName string) *DeleteInstanceParams {
o.SetInstanceName(instanceName)
@@ -139,6 +174,40 @@ func (o *DeleteInstanceParams) WriteToRequest(r runtime.ClientRequest, reg strfm
}
var res []error
+ if o.BypassGHUnauthorized != nil {
+
+ // query param bypassGHUnauthorized
+ var qrBypassGHUnauthorized bool
+
+ if o.BypassGHUnauthorized != nil {
+ qrBypassGHUnauthorized = *o.BypassGHUnauthorized
+ }
+ qBypassGHUnauthorized := swag.FormatBool(qrBypassGHUnauthorized)
+ if qBypassGHUnauthorized != "" {
+
+ if err := r.SetQueryParam("bypassGHUnauthorized", qBypassGHUnauthorized); err != nil {
+ return err
+ }
+ }
+ }
+
+ if o.ForceRemove != nil {
+
+ // query param forceRemove
+ var qrForceRemove bool
+
+ if o.ForceRemove != nil {
+ qrForceRemove = *o.ForceRemove
+ }
+ qForceRemove := swag.FormatBool(qrForceRemove)
+ if qForceRemove != "" {
+
+ if err := r.SetQueryParam("forceRemove", qForceRemove); err != nil {
+ return err
+ }
+ }
+ }
+
// path param instanceName
if err := r.SetPathParam("instanceName", o.InstanceName); err != nil {
return err
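// Review annotation (not part of the patch): sketch of the two new delete
// flags, assuming `cli` is an instances.ClientService (import
// "github.com/cloudbase/garm/client/instances") and that DeleteInstance
// returns only an error, matching the other delete operations in this
// client; the runner name is a placeholder.
func forceDeleteRunner(cli instances.ClientService) error {
	force := true
	bypass := true
	params := instances.NewDeleteInstanceParams().
		WithInstanceName("garm-runner-0001").
		WithForceRemove(&force).
		WithBypassGHUnauthorized(&bypass)
	// forceRemove ignores provider errors; bypassGHUnauthorized additionally
	// ignores unauthorized errors from GitHub when stored credentials expired.
	return cli.DeleteInstance(params, nil)
}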
diff --git a/client/instances/delete_instance_responses.go b/client/instances/delete_instance_responses.go
index 6779c44e..1748f66e 100644
--- a/client/instances/delete_instance_responses.go
+++ b/client/instances/delete_instance_responses.go
@@ -6,6 +6,7 @@ package instances
// Editing this file might prove futile when you re-run the swagger generate command
import (
+ "encoding/json"
"fmt"
"io"
@@ -81,11 +82,13 @@ func (o *DeleteInstanceDefault) Code() int {
}
func (o *DeleteInstanceDefault) Error() string {
- return fmt.Sprintf("[DELETE /instances/{instanceName}][%d] DeleteInstance default %+v", o._statusCode, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[DELETE /instances/{instanceName}][%d] DeleteInstance default %s", o._statusCode, payload)
}
func (o *DeleteInstanceDefault) String() string {
- return fmt.Sprintf("[DELETE /instances/{instanceName}][%d] DeleteInstance default %+v", o._statusCode, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[DELETE /instances/{instanceName}][%d] DeleteInstance default %s", o._statusCode, payload)
}
func (o *DeleteInstanceDefault) GetPayload() apiserver_params.APIErrorResponse {
diff --git a/client/instances/get_instance_responses.go b/client/instances/get_instance_responses.go
index 3f26f1f8..abdbc131 100644
--- a/client/instances/get_instance_responses.go
+++ b/client/instances/get_instance_responses.go
@@ -6,6 +6,7 @@ package instances
// Editing this file might prove futile when you re-run the swagger generate command
import (
+ "encoding/json"
"fmt"
"io"
@@ -87,11 +88,13 @@ func (o *GetInstanceOK) Code() int {
}
func (o *GetInstanceOK) Error() string {
- return fmt.Sprintf("[GET /instances/{instanceName}][%d] getInstanceOK %+v", 200, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /instances/{instanceName}][%d] getInstanceOK %s", 200, payload)
}
func (o *GetInstanceOK) String() string {
- return fmt.Sprintf("[GET /instances/{instanceName}][%d] getInstanceOK %+v", 200, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /instances/{instanceName}][%d] getInstanceOK %s", 200, payload)
}
func (o *GetInstanceOK) GetPayload() garm_params.Instance {
@@ -157,11 +160,13 @@ func (o *GetInstanceDefault) Code() int {
}
func (o *GetInstanceDefault) Error() string {
- return fmt.Sprintf("[GET /instances/{instanceName}][%d] GetInstance default %+v", o._statusCode, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /instances/{instanceName}][%d] GetInstance default %s", o._statusCode, payload)
}
func (o *GetInstanceDefault) String() string {
- return fmt.Sprintf("[GET /instances/{instanceName}][%d] GetInstance default %+v", o._statusCode, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /instances/{instanceName}][%d] GetInstance default %s", o._statusCode, payload)
}
func (o *GetInstanceDefault) GetPayload() apiserver_params.APIErrorResponse {
diff --git a/client/instances/instances_client.go b/client/instances/instances_client.go
index fae1d7da..2c41f919 100644
--- a/client/instances/instances_client.go
+++ b/client/instances/instances_client.go
@@ -7,6 +7,7 @@ package instances
import (
"github.com/go-openapi/runtime"
+ httptransport "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
)
@@ -15,6 +16,31 @@ func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientServi
return &Client{transport: transport, formats: formats}
}
+// NewClientWithBasicAuth creates a new instances API client with basic auth credentials.
+// It takes the following parameters:
+// - host: http host (github.com).
+// - basePath: any base path for the API client ("/v1", "/v3").
+// - scheme: http scheme ("http", "https").
+// - user: user for basic authentication header.
+// - password: password for basic authentication header.
+func NewClientWithBasicAuth(host, basePath, scheme, user, password string) ClientService {
+ transport := httptransport.New(host, basePath, []string{scheme})
+ transport.DefaultAuthentication = httptransport.BasicAuth(user, password)
+ return &Client{transport: transport, formats: strfmt.Default}
+}
+
+// NewClientWithBearerToken creates a new instances API client with a bearer token for authentication.
+// It takes the following parameters:
+// - host: http host (github.com).
+// - basePath: any base path for the API client ("/v1", "/v3").
+// - scheme: http scheme ("http", "https").
+// - bearerToken: bearer token for Bearer authentication header.
+func NewClientWithBearerToken(host, basePath, scheme, bearerToken string) ClientService {
+ transport := httptransport.New(host, basePath, []string{scheme})
+ transport.DefaultAuthentication = httptransport.BearerToken(bearerToken)
+ return &Client{transport: transport, formats: strfmt.Default}
+}
+
/*
Client for instances API
*/
@@ -23,7 +49,7 @@ type Client struct {
formats strfmt.Registry
}
-// ClientOption is the option for Client methods
+// ClientOption may be used to customize the behavior of Client methods.
type ClientOption func(*runtime.ClientOperation)
// ClientService is the interface for Client methods
@@ -36,6 +62,8 @@ type ClientService interface {
ListPoolInstances(params *ListPoolInstancesParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ListPoolInstancesOK, error)
+ ListScaleSetInstances(params *ListScaleSetInstancesParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ListScaleSetInstancesOK, error)
+
SetTransport(transport runtime.ClientTransport)
}
@@ -185,6 +213,44 @@ func (a *Client) ListPoolInstances(params *ListPoolInstancesParams, authInfo run
return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
}
+/*
+ListScaleSetInstances lists runner instances in a scale set
+*/
+func (a *Client) ListScaleSetInstances(params *ListScaleSetInstancesParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ListScaleSetInstancesOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewListScaleSetInstancesParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "ListScaleSetInstances",
+ Method: "GET",
+ PathPattern: "/scalesets/{scalesetID}/instances",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &ListScaleSetInstancesReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*ListScaleSetInstancesOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ unexpectedSuccess := result.(*ListScaleSetInstancesDefault)
+ return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
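// Review annotation (not part of the patch): sketch of listing the runners in
// a scale set via the new operation; the scale set ID comes from the caller
// and the Instance.Name field is assumed from the garm params package.
func scaleSetRunners(cli instances.ClientService, scaleSetID string) error {
	params := instances.NewListScaleSetInstancesParams().WithScalesetID(scaleSetID)
	resp, err := cli.ListScaleSetInstances(params, nil)
	if err != nil {
		return err
	}
	for _, inst := range resp.Payload {
		fmt.Println(inst.Name)
	}
	return nil
}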
// SetTransport changes the transport on the client
func (a *Client) SetTransport(transport runtime.ClientTransport) {
a.transport = transport
diff --git a/client/instances/list_instances_responses.go b/client/instances/list_instances_responses.go
index 5f9953cb..c81d3cf6 100644
--- a/client/instances/list_instances_responses.go
+++ b/client/instances/list_instances_responses.go
@@ -6,6 +6,7 @@ package instances
// Editing this file might prove futile when you re-run the swagger generate command
import (
+ "encoding/json"
"fmt"
"io"
@@ -87,11 +88,13 @@ func (o *ListInstancesOK) Code() int {
}
func (o *ListInstancesOK) Error() string {
- return fmt.Sprintf("[GET /instances][%d] listInstancesOK %+v", 200, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /instances][%d] listInstancesOK %s", 200, payload)
}
func (o *ListInstancesOK) String() string {
- return fmt.Sprintf("[GET /instances][%d] listInstancesOK %+v", 200, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /instances][%d] listInstancesOK %s", 200, payload)
}
func (o *ListInstancesOK) GetPayload() garm_params.Instances {
@@ -157,11 +160,13 @@ func (o *ListInstancesDefault) Code() int {
}
func (o *ListInstancesDefault) Error() string {
- return fmt.Sprintf("[GET /instances][%d] ListInstances default %+v", o._statusCode, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /instances][%d] ListInstances default %s", o._statusCode, payload)
}
func (o *ListInstancesDefault) String() string {
- return fmt.Sprintf("[GET /instances][%d] ListInstances default %+v", o._statusCode, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /instances][%d] ListInstances default %s", o._statusCode, payload)
}
func (o *ListInstancesDefault) GetPayload() apiserver_params.APIErrorResponse {
diff --git a/client/instances/list_pool_instances_responses.go b/client/instances/list_pool_instances_responses.go
index 2a61ca06..22e8d313 100644
--- a/client/instances/list_pool_instances_responses.go
+++ b/client/instances/list_pool_instances_responses.go
@@ -6,6 +6,7 @@ package instances
// Editing this file might prove futile when you re-run the swagger generate command
import (
+ "encoding/json"
"fmt"
"io"
@@ -87,11 +88,13 @@ func (o *ListPoolInstancesOK) Code() int {
}
func (o *ListPoolInstancesOK) Error() string {
- return fmt.Sprintf("[GET /pools/{poolID}/instances][%d] listPoolInstancesOK %+v", 200, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /pools/{poolID}/instances][%d] listPoolInstancesOK %s", 200, payload)
}
func (o *ListPoolInstancesOK) String() string {
- return fmt.Sprintf("[GET /pools/{poolID}/instances][%d] listPoolInstancesOK %+v", 200, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /pools/{poolID}/instances][%d] listPoolInstancesOK %s", 200, payload)
}
func (o *ListPoolInstancesOK) GetPayload() garm_params.Instances {
@@ -157,11 +160,13 @@ func (o *ListPoolInstancesDefault) Code() int {
}
func (o *ListPoolInstancesDefault) Error() string {
- return fmt.Sprintf("[GET /pools/{poolID}/instances][%d] ListPoolInstances default %+v", o._statusCode, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /pools/{poolID}/instances][%d] ListPoolInstances default %s", o._statusCode, payload)
}
func (o *ListPoolInstancesDefault) String() string {
- return fmt.Sprintf("[GET /pools/{poolID}/instances][%d] ListPoolInstances default %+v", o._statusCode, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /pools/{poolID}/instances][%d] ListPoolInstances default %s", o._statusCode, payload)
}
func (o *ListPoolInstancesDefault) GetPayload() apiserver_params.APIErrorResponse {
diff --git a/client/instances/list_scale_set_instances_parameters.go b/client/instances/list_scale_set_instances_parameters.go
new file mode 100644
index 00000000..7b38ef82
--- /dev/null
+++ b/client/instances/list_scale_set_instances_parameters.go
@@ -0,0 +1,151 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package instances
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewListScaleSetInstancesParams creates a new ListScaleSetInstancesParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewListScaleSetInstancesParams() *ListScaleSetInstancesParams {
+ return &ListScaleSetInstancesParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewListScaleSetInstancesParamsWithTimeout creates a new ListScaleSetInstancesParams object
+// with the ability to set a timeout on a request.
+func NewListScaleSetInstancesParamsWithTimeout(timeout time.Duration) *ListScaleSetInstancesParams {
+ return &ListScaleSetInstancesParams{
+ timeout: timeout,
+ }
+}
+
+// NewListScaleSetInstancesParamsWithContext creates a new ListScaleSetInstancesParams object
+// with the ability to set a context for a request.
+func NewListScaleSetInstancesParamsWithContext(ctx context.Context) *ListScaleSetInstancesParams {
+ return &ListScaleSetInstancesParams{
+ Context: ctx,
+ }
+}
+
+// NewListScaleSetInstancesParamsWithHTTPClient creates a new ListScaleSetInstancesParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewListScaleSetInstancesParamsWithHTTPClient(client *http.Client) *ListScaleSetInstancesParams {
+ return &ListScaleSetInstancesParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+ListScaleSetInstancesParams contains all the parameters to send to the API endpoint
+
+ for the list scale set instances operation.
+
+ Typically these are written to a http.Request.
+*/
+type ListScaleSetInstancesParams struct {
+
+ /* ScalesetID.
+
+ Runner scale set ID.
+ */
+ ScalesetID string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the list scale set instances params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *ListScaleSetInstancesParams) WithDefaults() *ListScaleSetInstancesParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the list scale set instances params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *ListScaleSetInstancesParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the list scale set instances params
+func (o *ListScaleSetInstancesParams) WithTimeout(timeout time.Duration) *ListScaleSetInstancesParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the list scale set instances params
+func (o *ListScaleSetInstancesParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the list scale set instances params
+func (o *ListScaleSetInstancesParams) WithContext(ctx context.Context) *ListScaleSetInstancesParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the list scale set instances params
+func (o *ListScaleSetInstancesParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the list scale set instances params
+func (o *ListScaleSetInstancesParams) WithHTTPClient(client *http.Client) *ListScaleSetInstancesParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the list scale set instances params
+func (o *ListScaleSetInstancesParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithScalesetID adds the scalesetID to the list scale set instances params
+func (o *ListScaleSetInstancesParams) WithScalesetID(scalesetID string) *ListScaleSetInstancesParams {
+ o.SetScalesetID(scalesetID)
+ return o
+}
+
+// SetScalesetID adds the scalesetId to the list scale set instances params
+func (o *ListScaleSetInstancesParams) SetScalesetID(scalesetID string) {
+ o.ScalesetID = scalesetID
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *ListScaleSetInstancesParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ // path param scalesetID
+ if err := r.SetPathParam("scalesetID", o.ScalesetID); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/instances/list_scale_set_instances_responses.go b/client/instances/list_scale_set_instances_responses.go
new file mode 100644
index 00000000..a966a9e7
--- /dev/null
+++ b/client/instances/list_scale_set_instances_responses.go
@@ -0,0 +1,184 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package instances
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// ListScaleSetInstancesReader is a Reader for the ListScaleSetInstances structure.
+type ListScaleSetInstancesReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *ListScaleSetInstancesReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewListScaleSetInstancesOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ result := NewListScaleSetInstancesDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+ }
+}
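+
+// Note: any non-200 status is decoded as *ListScaleSetInstancesDefault; per
+// the switch above it is returned as the result for 2xx codes and as the
+// error otherwise, so callers can recover the API error payload with a type
+// assertion:
+//
+//	if def, ok := err.(*ListScaleSetInstancesDefault); ok {
+//		_ = def.GetPayload() // apiserver_params.APIErrorResponse
+//	}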
+
+// NewListScaleSetInstancesOK creates a ListScaleSetInstancesOK with default headers values
+func NewListScaleSetInstancesOK() *ListScaleSetInstancesOK {
+ return &ListScaleSetInstancesOK{}
+}
+
+/*
+ListScaleSetInstancesOK describes a response with status code 200, with default header values.
+
+Instances
+*/
+type ListScaleSetInstancesOK struct {
+ Payload garm_params.Instances
+}
+
+// IsSuccess returns true when this list scale set instances o k response has a 2xx status code
+func (o *ListScaleSetInstancesOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this list scale set instances o k response has a 3xx status code
+func (o *ListScaleSetInstancesOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this list scale set instances o k response has a 4xx status code
+func (o *ListScaleSetInstancesOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this list scale set instances o k response has a 5xx status code
+func (o *ListScaleSetInstancesOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this list scale set instances o k response has a status code equal to that given
+func (o *ListScaleSetInstancesOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the list scale set instances o k response
+func (o *ListScaleSetInstancesOK) Code() int {
+ return 200
+}
+
+func (o *ListScaleSetInstancesOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /scalesets/{scalesetID}/instances][%d] listScaleSetInstancesOK %s", 200, payload)
+}
+
+func (o *ListScaleSetInstancesOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /scalesets/{scalesetID}/instances][%d] listScaleSetInstancesOK %s", 200, payload)
+}
+
+func (o *ListScaleSetInstancesOK) GetPayload() garm_params.Instances {
+ return o.Payload
+}
+
+func (o *ListScaleSetInstancesOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewListScaleSetInstancesDefault creates a ListScaleSetInstancesDefault with default headers values
+func NewListScaleSetInstancesDefault(code int) *ListScaleSetInstancesDefault {
+ return &ListScaleSetInstancesDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+ListScaleSetInstancesDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type ListScaleSetInstancesDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this list scale set instances default response has a 2xx status code
+func (o *ListScaleSetInstancesDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this list scale set instances default response has a 3xx status code
+func (o *ListScaleSetInstancesDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this list scale set instances default response has a 4xx status code
+func (o *ListScaleSetInstancesDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this list scale set instances default response has a 5xx status code
+func (o *ListScaleSetInstancesDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this list scale set instances default response has a status code equal to that given
+func (o *ListScaleSetInstancesDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the list scale set instances default response
+func (o *ListScaleSetInstancesDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *ListScaleSetInstancesDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /scalesets/{scalesetID}/instances][%d] ListScaleSetInstances default %s", o._statusCode, payload)
+}
+
+func (o *ListScaleSetInstancesDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /scalesets/{scalesetID}/instances][%d] ListScaleSetInstances default %s", o._statusCode, payload)
+}
+
+func (o *ListScaleSetInstancesDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *ListScaleSetInstancesDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/jobs/jobs_client.go b/client/jobs/jobs_client.go
index 1058c5c7..0e1e1399 100644
--- a/client/jobs/jobs_client.go
+++ b/client/jobs/jobs_client.go
@@ -9,6 +9,7 @@ import (
"fmt"
"github.com/go-openapi/runtime"
+ httptransport "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
)
@@ -17,6 +18,31 @@ func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientServi
return &Client{transport: transport, formats: formats}
}
+// NewClientWithBasicAuth creates a new jobs API client with basic auth credentials.
+// It takes the following parameters:
+// - host: http host (github.com).
+// - basePath: any base path for the API client ("/v1", "/v3").
+// - scheme: http scheme ("http", "https").
+// - user: user for basic authentication header.
+// - password: password for basic authentication header.
+func NewClientWithBasicAuth(host, basePath, scheme, user, password string) ClientService {
+ transport := httptransport.New(host, basePath, []string{scheme})
+ transport.DefaultAuthentication = httptransport.BasicAuth(user, password)
+ return &Client{transport: transport, formats: strfmt.Default}
+}
+
+// NewClientWithBearerToken creates a new jobs API client with a bearer token for authentication.
+// It takes the following parameters:
+// - host: http host (github.com).
+// - basePath: any base path for the API client ("/v1", "/v3").
+// - scheme: http scheme ("http", "https").
+// - bearerToken: bearer token for Bearer authentication header.
+func NewClientWithBearerToken(host, basePath, scheme, bearerToken string) ClientService {
+ transport := httptransport.New(host, basePath, []string{scheme})
+ transport.DefaultAuthentication = httptransport.BearerToken(bearerToken)
+ return &Client{transport: transport, formats: strfmt.Default}
+}
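+
+// Minimal usage sketch (host, base path and token are placeholders, and the
+// generated method signature ListJobs(params, authInfo, opts...) is assumed):
+//
+//	cli := NewClientWithBearerToken("garm.example.com", "/api/v1", "https", token)
+//	resp, err := cli.ListJobs(NewListJobsParams(), nil)
+//	if err != nil {
+//		return err // may be a typed response such as *ListJobsBadRequest
+//	}
+//	jobs := resp.GetPayload()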
+
/*
Client for jobs API
*/
@@ -25,7 +51,7 @@ type Client struct {
formats strfmt.Registry
}
-// ClientOption is the option for Client methods
+// ClientOption may be used to customize the behavior of Client methods.
type ClientOption func(*runtime.ClientOperation)
// ClientService is the interface for Client methods
diff --git a/client/jobs/list_jobs_responses.go b/client/jobs/list_jobs_responses.go
index 63282dd4..1b8c445a 100644
--- a/client/jobs/list_jobs_responses.go
+++ b/client/jobs/list_jobs_responses.go
@@ -6,6 +6,7 @@ package jobs
// Editing this file might prove futile when you re-run the swagger generate command
import (
+ "encoding/json"
"fmt"
"io"
@@ -86,11 +87,13 @@ func (o *ListJobsOK) Code() int {
}
func (o *ListJobsOK) Error() string {
- return fmt.Sprintf("[GET /jobs][%d] listJobsOK %+v", 200, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /jobs][%d] listJobsOK %s", 200, payload)
}
func (o *ListJobsOK) String() string {
- return fmt.Sprintf("[GET /jobs][%d] listJobsOK %+v", 200, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /jobs][%d] listJobsOK %s", 200, payload)
}
func (o *ListJobsOK) GetPayload() garm_params.Jobs {
@@ -152,11 +155,13 @@ func (o *ListJobsBadRequest) Code() int {
}
func (o *ListJobsBadRequest) Error() string {
- return fmt.Sprintf("[GET /jobs][%d] listJobsBadRequest %+v", 400, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /jobs][%d] listJobsBadRequest %s", 400, payload)
}
func (o *ListJobsBadRequest) String() string {
- return fmt.Sprintf("[GET /jobs][%d] listJobsBadRequest %+v", 400, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /jobs][%d] listJobsBadRequest %s", 400, payload)
}
func (o *ListJobsBadRequest) GetPayload() apiserver_params.APIErrorResponse {
diff --git a/client/login/login_client.go b/client/login/login_client.go
index d976042d..0e9f53de 100644
--- a/client/login/login_client.go
+++ b/client/login/login_client.go
@@ -9,6 +9,7 @@ import (
"fmt"
"github.com/go-openapi/runtime"
+ httptransport "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
)
@@ -17,6 +18,31 @@ func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientServi
return &Client{transport: transport, formats: formats}
}
+// NewClientWithBasicAuth creates a new login API client with basic auth credentials.
+// It takes the following parameters:
+// - host: http host (github.com).
+// - basePath: any base path for the API client ("/v1", "/v3").
+// - scheme: http scheme ("http", "https").
+// - user: user for basic authentication header.
+// - password: password for basic authentication header.
+func NewClientWithBasicAuth(host, basePath, scheme, user, password string) ClientService {
+ transport := httptransport.New(host, basePath, []string{scheme})
+ transport.DefaultAuthentication = httptransport.BasicAuth(user, password)
+ return &Client{transport: transport, formats: strfmt.Default}
+}
+
+// NewClientWithBearerToken creates a new login API client with a bearer token for authentication.
+// It takes the following parameters:
+// - host: http host (github.com).
+// - basePath: any base path for the API client ("/v1", "/v3").
+// - scheme: http scheme ("http", "https").
+// - bearerToken: bearer token for Bearer authentication header.
+func NewClientWithBearerToken(host, basePath, scheme, bearerToken string) ClientService {
+ transport := httptransport.New(host, basePath, []string{scheme})
+ transport.DefaultAuthentication = httptransport.BearerToken(bearerToken)
+ return &Client{transport: transport, formats: strfmt.Default}
+}
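+
+// Sketch of a typical flow: log in once, then reuse the token elsewhere. The
+// host, base path and credentials are placeholders, creds stands in for a
+// garm_params.PasswordLoginParams body, and the Token field on the returned
+// garm_params.JWTResponse is an assumption to verify against that package:
+//
+//	cli := NewClientWithBasicAuth("garm.example.com", "/api/v1", "https", user, pass)
+//	ok, err := cli.Login(NewLoginParams().WithBody(creds), nil)
+//	if err == nil {
+//		_ = NewClientWithBearerToken("garm.example.com", "/api/v1", "https", ok.GetPayload().Token)
+//	}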
+
/*
Client for login API
*/
@@ -25,7 +51,7 @@ type Client struct {
formats strfmt.Registry
}
-// ClientOption is the option for Client methods
+// ClientOption may be used to customize the behavior of Client methods.
type ClientOption func(*runtime.ClientOperation)
// ClientService is the interface for Client methods
diff --git a/client/login/login_responses.go b/client/login/login_responses.go
index 7506e909..7aae2a69 100644
--- a/client/login/login_responses.go
+++ b/client/login/login_responses.go
@@ -6,6 +6,7 @@ package login
// Editing this file might prove futile when you re-run the swagger generate command
import (
+ "encoding/json"
"fmt"
"io"
@@ -86,11 +87,13 @@ func (o *LoginOK) Code() int {
}
func (o *LoginOK) Error() string {
- return fmt.Sprintf("[POST /auth/login][%d] loginOK %+v", 200, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /auth/login][%d] loginOK %s", 200, payload)
}
func (o *LoginOK) String() string {
- return fmt.Sprintf("[POST /auth/login][%d] loginOK %+v", 200, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /auth/login][%d] loginOK %s", 200, payload)
}
func (o *LoginOK) GetPayload() garm_params.JWTResponse {
@@ -152,11 +155,13 @@ func (o *LoginBadRequest) Code() int {
}
func (o *LoginBadRequest) Error() string {
- return fmt.Sprintf("[POST /auth/login][%d] loginBadRequest %+v", 400, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /auth/login][%d] loginBadRequest %s", 400, payload)
}
func (o *LoginBadRequest) String() string {
- return fmt.Sprintf("[POST /auth/login][%d] loginBadRequest %+v", 400, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /auth/login][%d] loginBadRequest %s", 400, payload)
}
func (o *LoginBadRequest) GetPayload() apiserver_params.APIErrorResponse {
diff --git a/client/metrics_token/get_metrics_token_responses.go b/client/metrics_token/get_metrics_token_responses.go
index 0625afb0..ea371cc4 100644
--- a/client/metrics_token/get_metrics_token_responses.go
+++ b/client/metrics_token/get_metrics_token_responses.go
@@ -6,6 +6,7 @@ package metrics_token
// Editing this file might prove futile when you re-run the swagger generate command
import (
+ "encoding/json"
"fmt"
"io"
@@ -86,11 +87,13 @@ func (o *GetMetricsTokenOK) Code() int {
}
func (o *GetMetricsTokenOK) Error() string {
- return fmt.Sprintf("[GET /metrics-token][%d] getMetricsTokenOK %+v", 200, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /metrics-token][%d] getMetricsTokenOK %s", 200, payload)
}
func (o *GetMetricsTokenOK) String() string {
- return fmt.Sprintf("[GET /metrics-token][%d] getMetricsTokenOK %+v", 200, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /metrics-token][%d] getMetricsTokenOK %s", 200, payload)
}
func (o *GetMetricsTokenOK) GetPayload() garm_params.JWTResponse {
@@ -152,11 +155,13 @@ func (o *GetMetricsTokenUnauthorized) Code() int {
}
func (o *GetMetricsTokenUnauthorized) Error() string {
- return fmt.Sprintf("[GET /metrics-token][%d] getMetricsTokenUnauthorized %+v", 401, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /metrics-token][%d] getMetricsTokenUnauthorized %s", 401, payload)
}
func (o *GetMetricsTokenUnauthorized) String() string {
- return fmt.Sprintf("[GET /metrics-token][%d] getMetricsTokenUnauthorized %+v", 401, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /metrics-token][%d] getMetricsTokenUnauthorized %s", 401, payload)
}
func (o *GetMetricsTokenUnauthorized) GetPayload() apiserver_params.APIErrorResponse {
diff --git a/client/metrics_token/metrics_token_client.go b/client/metrics_token/metrics_token_client.go
index 60baae75..d4d7c2d9 100644
--- a/client/metrics_token/metrics_token_client.go
+++ b/client/metrics_token/metrics_token_client.go
@@ -9,6 +9,7 @@ import (
"fmt"
"github.com/go-openapi/runtime"
+ httptransport "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
)
@@ -17,6 +18,31 @@ func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientServi
return &Client{transport: transport, formats: formats}
}
+// NewClientWithBasicAuth creates a new metrics token API client with basic auth credentials.
+// It takes the following parameters:
+// - host: http host (github.com).
+// - basePath: any base path for the API client ("/v1", "/v3").
+// - scheme: http scheme ("http", "https").
+// - user: user for basic authentication header.
+// - password: password for basic authentication header.
+func NewClientWithBasicAuth(host, basePath, scheme, user, password string) ClientService {
+ transport := httptransport.New(host, basePath, []string{scheme})
+ transport.DefaultAuthentication = httptransport.BasicAuth(user, password)
+ return &Client{transport: transport, formats: strfmt.Default}
+}
+
+// NewClientWithBearerToken creates a new metrics token API client with a bearer token for authentication.
+// It takes the following parameters:
+// - host: http host (github.com).
+// - basePath: any base path for the API client ("/v1", "/v3").
+// - scheme: http scheme ("http", "https").
+// - bearerToken: bearer token for Bearer authentication header.
+func NewClientWithBearerToken(host, basePath, scheme, bearerToken string) ClientService {
+ transport := httptransport.New(host, basePath, []string{scheme})
+ transport.DefaultAuthentication = httptransport.BearerToken(bearerToken)
+ return &Client{transport: transport, formats: strfmt.Default}
+}
+
/*
Client for metrics token API
*/
@@ -25,7 +51,7 @@ type Client struct {
formats strfmt.Registry
}
-// ClientOption is the option for Client methods
+// ClientOption may be used to customize the behavior of Client methods.
type ClientOption func(*runtime.ClientOperation)
// ClientService is the interface for Client methods
diff --git a/client/organizations/create_org_pool_responses.go b/client/organizations/create_org_pool_responses.go
index d9d77022..7ebf9a07 100644
--- a/client/organizations/create_org_pool_responses.go
+++ b/client/organizations/create_org_pool_responses.go
@@ -6,6 +6,7 @@ package organizations
// Editing this file might prove futile when you re-run the swagger generate command
import (
+ "encoding/json"
"fmt"
"io"
@@ -87,11 +88,13 @@ func (o *CreateOrgPoolOK) Code() int {
}
func (o *CreateOrgPoolOK) Error() string {
- return fmt.Sprintf("[POST /organizations/{orgID}/pools][%d] createOrgPoolOK %+v", 200, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /organizations/{orgID}/pools][%d] createOrgPoolOK %s", 200, payload)
}
func (o *CreateOrgPoolOK) String() string {
- return fmt.Sprintf("[POST /organizations/{orgID}/pools][%d] createOrgPoolOK %+v", 200, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /organizations/{orgID}/pools][%d] createOrgPoolOK %s", 200, payload)
}
func (o *CreateOrgPoolOK) GetPayload() garm_params.Pool {
@@ -157,11 +160,13 @@ func (o *CreateOrgPoolDefault) Code() int {
}
func (o *CreateOrgPoolDefault) Error() string {
- return fmt.Sprintf("[POST /organizations/{orgID}/pools][%d] CreateOrgPool default %+v", o._statusCode, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /organizations/{orgID}/pools][%d] CreateOrgPool default %s", o._statusCode, payload)
}
func (o *CreateOrgPoolDefault) String() string {
- return fmt.Sprintf("[POST /organizations/{orgID}/pools][%d] CreateOrgPool default %+v", o._statusCode, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /organizations/{orgID}/pools][%d] CreateOrgPool default %s", o._statusCode, payload)
}
func (o *CreateOrgPoolDefault) GetPayload() apiserver_params.APIErrorResponse {
diff --git a/client/organizations/create_org_responses.go b/client/organizations/create_org_responses.go
index f8858716..e960e253 100644
--- a/client/organizations/create_org_responses.go
+++ b/client/organizations/create_org_responses.go
@@ -6,6 +6,7 @@ package organizations
// Editing this file might prove futile when you re-run the swagger generate command
import (
+ "encoding/json"
"fmt"
"io"
@@ -87,11 +88,13 @@ func (o *CreateOrgOK) Code() int {
}
func (o *CreateOrgOK) Error() string {
- return fmt.Sprintf("[POST /organizations][%d] createOrgOK %+v", 200, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /organizations][%d] createOrgOK %s", 200, payload)
}
func (o *CreateOrgOK) String() string {
- return fmt.Sprintf("[POST /organizations][%d] createOrgOK %+v", 200, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /organizations][%d] createOrgOK %s", 200, payload)
}
func (o *CreateOrgOK) GetPayload() garm_params.Organization {
@@ -157,11 +160,13 @@ func (o *CreateOrgDefault) Code() int {
}
func (o *CreateOrgDefault) Error() string {
- return fmt.Sprintf("[POST /organizations][%d] CreateOrg default %+v", o._statusCode, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /organizations][%d] CreateOrg default %s", o._statusCode, payload)
}
func (o *CreateOrgDefault) String() string {
- return fmt.Sprintf("[POST /organizations][%d] CreateOrg default %+v", o._statusCode, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /organizations][%d] CreateOrg default %s", o._statusCode, payload)
}
func (o *CreateOrgDefault) GetPayload() apiserver_params.APIErrorResponse {
diff --git a/client/organizations/create_org_scale_set_parameters.go b/client/organizations/create_org_scale_set_parameters.go
new file mode 100644
index 00000000..0e222693
--- /dev/null
+++ b/client/organizations/create_org_scale_set_parameters.go
@@ -0,0 +1,173 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package organizations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// NewCreateOrgScaleSetParams creates a new CreateOrgScaleSetParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameters, use SetDefaults or WithDefaults.
+func NewCreateOrgScaleSetParams() *CreateOrgScaleSetParams {
+ return &CreateOrgScaleSetParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewCreateOrgScaleSetParamsWithTimeout creates a new CreateOrgScaleSetParams object
+// with the ability to set a timeout on a request.
+func NewCreateOrgScaleSetParamsWithTimeout(timeout time.Duration) *CreateOrgScaleSetParams {
+ return &CreateOrgScaleSetParams{
+ timeout: timeout,
+ }
+}
+
+// NewCreateOrgScaleSetParamsWithContext creates a new CreateOrgScaleSetParams object
+// with the ability to set a context for a request.
+func NewCreateOrgScaleSetParamsWithContext(ctx context.Context) *CreateOrgScaleSetParams {
+ return &CreateOrgScaleSetParams{
+ Context: ctx,
+ }
+}
+
+// NewCreateOrgScaleSetParamsWithHTTPClient creates a new CreateOrgScaleSetParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewCreateOrgScaleSetParamsWithHTTPClient(client *http.Client) *CreateOrgScaleSetParams {
+ return &CreateOrgScaleSetParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+CreateOrgScaleSetParams contains all the parameters to send to the API endpoint
+
+ for the create org scale set operation.
+
+ Typically these are written to a http.Request.
+*/
+type CreateOrgScaleSetParams struct {
+
+ /* Body.
+
+ Parameters used when creating the organization scale set.
+ */
+ Body garm_params.CreateScaleSetParams
+
+ /* OrgID.
+
+ Organization ID.
+ */
+ OrgID string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the create org scale set params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *CreateOrgScaleSetParams) WithDefaults() *CreateOrgScaleSetParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the create org scale set params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *CreateOrgScaleSetParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the create org scale set params
+func (o *CreateOrgScaleSetParams) WithTimeout(timeout time.Duration) *CreateOrgScaleSetParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the create org scale set params
+func (o *CreateOrgScaleSetParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the create org scale set params
+func (o *CreateOrgScaleSetParams) WithContext(ctx context.Context) *CreateOrgScaleSetParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the create org scale set params
+func (o *CreateOrgScaleSetParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the create org scale set params
+func (o *CreateOrgScaleSetParams) WithHTTPClient(client *http.Client) *CreateOrgScaleSetParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the create org scale set params
+func (o *CreateOrgScaleSetParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithBody adds the body to the create org scale set params
+func (o *CreateOrgScaleSetParams) WithBody(body garm_params.CreateScaleSetParams) *CreateOrgScaleSetParams {
+ o.SetBody(body)
+ return o
+}
+
+// SetBody adds the body to the create org scale set params
+func (o *CreateOrgScaleSetParams) SetBody(body garm_params.CreateScaleSetParams) {
+ o.Body = body
+}
+
+// WithOrgID adds the orgID to the create org scale set params
+func (o *CreateOrgScaleSetParams) WithOrgID(orgID string) *CreateOrgScaleSetParams {
+ o.SetOrgID(orgID)
+ return o
+}
+
+// SetOrgID adds the orgId to the create org scale set params
+func (o *CreateOrgScaleSetParams) SetOrgID(orgID string) {
+ o.OrgID = orgID
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *CreateOrgScaleSetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+ if err := r.SetBodyParam(o.Body); err != nil {
+ return err
+ }
+
+ // path param orgID
+ if err := r.SetPathParam("orgID", o.OrgID); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
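+
+// Construction sketch (the org ID and body fields are placeholders; see
+// garm_params.CreateScaleSetParams for the real fields):
+//
+//	params := NewCreateOrgScaleSetParams().
+//		WithOrgID("org-uuid").
+//		WithBody(garm_params.CreateScaleSetParams{ /* name, image, flavor... */ })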
diff --git a/client/organizations/create_org_scale_set_responses.go b/client/organizations/create_org_scale_set_responses.go
new file mode 100644
index 00000000..3a91d03f
--- /dev/null
+++ b/client/organizations/create_org_scale_set_responses.go
@@ -0,0 +1,184 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package organizations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// CreateOrgScaleSetReader is a Reader for the CreateOrgScaleSet structure.
+type CreateOrgScaleSetReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the receiver, o.
+func (o *CreateOrgScaleSetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewCreateOrgScaleSetOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ result := NewCreateOrgScaleSetDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+ }
+}
+
+// NewCreateOrgScaleSetOK creates a CreateOrgScaleSetOK with default headers values
+func NewCreateOrgScaleSetOK() *CreateOrgScaleSetOK {
+ return &CreateOrgScaleSetOK{}
+}
+
+/*
+CreateOrgScaleSetOK describes a response with status code 200, with default header values.
+
+ScaleSet
+*/
+type CreateOrgScaleSetOK struct {
+ Payload garm_params.ScaleSet
+}
+
+// IsSuccess returns true when this create org scale set o k response has a 2xx status code
+func (o *CreateOrgScaleSetOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this create org scale set o k response has a 3xx status code
+func (o *CreateOrgScaleSetOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this create org scale set o k response has a 4xx status code
+func (o *CreateOrgScaleSetOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this create org scale set o k response has a 5xx status code
+func (o *CreateOrgScaleSetOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this create org scale set o k response has a status code equal to that given
+func (o *CreateOrgScaleSetOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the create org scale set o k response
+func (o *CreateOrgScaleSetOK) Code() int {
+ return 200
+}
+
+func (o *CreateOrgScaleSetOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /organizations/{orgID}/scalesets][%d] createOrgScaleSetOK %s", 200, payload)
+}
+
+func (o *CreateOrgScaleSetOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /organizations/{orgID}/scalesets][%d] createOrgScaleSetOK %s", 200, payload)
+}
+
+func (o *CreateOrgScaleSetOK) GetPayload() garm_params.ScaleSet {
+ return o.Payload
+}
+
+func (o *CreateOrgScaleSetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewCreateOrgScaleSetDefault creates a CreateOrgScaleSetDefault with default headers values
+func NewCreateOrgScaleSetDefault(code int) *CreateOrgScaleSetDefault {
+ return &CreateOrgScaleSetDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+CreateOrgScaleSetDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type CreateOrgScaleSetDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this create org scale set default response has a 2xx status code
+func (o *CreateOrgScaleSetDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this create org scale set default response has a 3xx status code
+func (o *CreateOrgScaleSetDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this create org scale set default response has a 4xx status code
+func (o *CreateOrgScaleSetDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this create org scale set default response has a 5xx status code
+func (o *CreateOrgScaleSetDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this create org scale set default response has a status code equal to that given
+func (o *CreateOrgScaleSetDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the create org scale set default response
+func (o *CreateOrgScaleSetDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *CreateOrgScaleSetDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /organizations/{orgID}/scalesets][%d] CreateOrgScaleSet default %s", o._statusCode, payload)
+}
+
+func (o *CreateOrgScaleSetDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /organizations/{orgID}/scalesets][%d] CreateOrgScaleSet default %s", o._statusCode, payload)
+}
+
+func (o *CreateOrgScaleSetDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *CreateOrgScaleSetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
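+
+// Error-handling sketch (assumes the generated client method returns
+// (*CreateOrgScaleSetOK, error), the usual go-swagger shape):
+//
+//	ok, err := cli.CreateOrgScaleSet(params, nil)
+//	if err != nil {
+//		if def, isDef := err.(*CreateOrgScaleSetDefault); isDef {
+//			_ = def.GetPayload() // apiserver_params.APIErrorResponse
+//		}
+//		return err
+//	}
+//	scaleSet := ok.GetPayload()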
diff --git a/client/organizations/delete_org_parameters.go b/client/organizations/delete_org_parameters.go
index 92e27495..daf36813 100644
--- a/client/organizations/delete_org_parameters.go
+++ b/client/organizations/delete_org_parameters.go
@@ -14,6 +14,7 @@ import (
"github.com/go-openapi/runtime"
cr "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
)
// NewDeleteOrgParams creates a new DeleteOrgParams object,
@@ -61,6 +62,12 @@ DeleteOrgParams contains all the parameters to send to the API endpoint
*/
type DeleteOrgParams struct {
+ /* KeepWebhook.
+
+ If true and a webhook is installed for this organization, it will not be removed.
+ */
+ KeepWebhook *bool
+
/* OrgID.
ID of the organization to delete.
@@ -120,6 +127,17 @@ func (o *DeleteOrgParams) SetHTTPClient(client *http.Client) {
o.HTTPClient = client
}
+// WithKeepWebhook adds the keepWebhook to the delete org params
+func (o *DeleteOrgParams) WithKeepWebhook(keepWebhook *bool) *DeleteOrgParams {
+ o.SetKeepWebhook(keepWebhook)
+ return o
+}
+
+// SetKeepWebhook adds the keepWebhook to the delete org params
+func (o *DeleteOrgParams) SetKeepWebhook(keepWebhook *bool) {
+ o.KeepWebhook = keepWebhook
+}
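+
+// Caller-side sketch (swag.Bool, from github.com/go-openapi/swag, returns a
+// *bool; the org ID is a placeholder):
+//
+//	params := NewDeleteOrgParams().
+//		WithOrgID("org-uuid").
+//		WithKeepWebhook(swag.Bool(true))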
+
// WithOrgID adds the orgID to the delete org params
func (o *DeleteOrgParams) WithOrgID(orgID string) *DeleteOrgParams {
o.SetOrgID(orgID)
@@ -139,6 +157,23 @@ func (o *DeleteOrgParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Reg
}
var res []error
+ if o.KeepWebhook != nil {
+
+ // query param keepWebhook
+ var qrKeepWebhook bool
+
+ if o.KeepWebhook != nil {
+ qrKeepWebhook = *o.KeepWebhook
+ }
+ qKeepWebhook := swag.FormatBool(qrKeepWebhook)
+ if qKeepWebhook != "" {
+
+ if err := r.SetQueryParam("keepWebhook", qKeepWebhook); err != nil {
+ return err
+ }
+ }
+ }
+
// path param orgID
if err := r.SetPathParam("orgID", o.OrgID); err != nil {
return err
diff --git a/client/organizations/delete_org_pool_responses.go b/client/organizations/delete_org_pool_responses.go
index 82bde7bc..9bca3f30 100644
--- a/client/organizations/delete_org_pool_responses.go
+++ b/client/organizations/delete_org_pool_responses.go
@@ -6,6 +6,7 @@ package organizations
// Editing this file might prove futile when you re-run the swagger generate command
import (
+ "encoding/json"
"fmt"
"io"
@@ -81,11 +82,13 @@ func (o *DeleteOrgPoolDefault) Code() int {
}
func (o *DeleteOrgPoolDefault) Error() string {
- return fmt.Sprintf("[DELETE /organizations/{orgID}/pools/{poolID}][%d] DeleteOrgPool default %+v", o._statusCode, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[DELETE /organizations/{orgID}/pools/{poolID}][%d] DeleteOrgPool default %s", o._statusCode, payload)
}
func (o *DeleteOrgPoolDefault) String() string {
- return fmt.Sprintf("[DELETE /organizations/{orgID}/pools/{poolID}][%d] DeleteOrgPool default %+v", o._statusCode, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[DELETE /organizations/{orgID}/pools/{poolID}][%d] DeleteOrgPool default %s", o._statusCode, payload)
}
func (o *DeleteOrgPoolDefault) GetPayload() apiserver_params.APIErrorResponse {
diff --git a/client/organizations/delete_org_responses.go b/client/organizations/delete_org_responses.go
index 11e5ea2a..87d4ff19 100644
--- a/client/organizations/delete_org_responses.go
+++ b/client/organizations/delete_org_responses.go
@@ -6,6 +6,7 @@ package organizations
// Editing this file might prove futile when you re-run the swagger generate command
import (
+ "encoding/json"
"fmt"
"io"
@@ -81,11 +82,13 @@ func (o *DeleteOrgDefault) Code() int {
}
func (o *DeleteOrgDefault) Error() string {
- return fmt.Sprintf("[DELETE /organizations/{orgID}][%d] DeleteOrg default %+v", o._statusCode, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[DELETE /organizations/{orgID}][%d] DeleteOrg default %s", o._statusCode, payload)
}
func (o *DeleteOrgDefault) String() string {
- return fmt.Sprintf("[DELETE /organizations/{orgID}][%d] DeleteOrg default %+v", o._statusCode, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[DELETE /organizations/{orgID}][%d] DeleteOrg default %s", o._statusCode, payload)
}
func (o *DeleteOrgDefault) GetPayload() apiserver_params.APIErrorResponse {
diff --git a/client/organizations/get_org_pool_responses.go b/client/organizations/get_org_pool_responses.go
index 0036870f..dba3ed27 100644
--- a/client/organizations/get_org_pool_responses.go
+++ b/client/organizations/get_org_pool_responses.go
@@ -6,6 +6,7 @@ package organizations
// Editing this file might prove futile when you re-run the swagger generate command
import (
+ "encoding/json"
"fmt"
"io"
@@ -87,11 +88,13 @@ func (o *GetOrgPoolOK) Code() int {
}
func (o *GetOrgPoolOK) Error() string {
- return fmt.Sprintf("[GET /organizations/{orgID}/pools/{poolID}][%d] getOrgPoolOK %+v", 200, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /organizations/{orgID}/pools/{poolID}][%d] getOrgPoolOK %s", 200, payload)
}
func (o *GetOrgPoolOK) String() string {
- return fmt.Sprintf("[GET /organizations/{orgID}/pools/{poolID}][%d] getOrgPoolOK %+v", 200, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /organizations/{orgID}/pools/{poolID}][%d] getOrgPoolOK %s", 200, payload)
}
func (o *GetOrgPoolOK) GetPayload() garm_params.Pool {
@@ -157,11 +160,13 @@ func (o *GetOrgPoolDefault) Code() int {
}
func (o *GetOrgPoolDefault) Error() string {
- return fmt.Sprintf("[GET /organizations/{orgID}/pools/{poolID}][%d] GetOrgPool default %+v", o._statusCode, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /organizations/{orgID}/pools/{poolID}][%d] GetOrgPool default %s", o._statusCode, payload)
}
func (o *GetOrgPoolDefault) String() string {
- return fmt.Sprintf("[GET /organizations/{orgID}/pools/{poolID}][%d] GetOrgPool default %+v", o._statusCode, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /organizations/{orgID}/pools/{poolID}][%d] GetOrgPool default %s", o._statusCode, payload)
}
func (o *GetOrgPoolDefault) GetPayload() apiserver_params.APIErrorResponse {
diff --git a/client/organizations/get_org_responses.go b/client/organizations/get_org_responses.go
index 926a088d..2c6df58d 100644
--- a/client/organizations/get_org_responses.go
+++ b/client/organizations/get_org_responses.go
@@ -6,6 +6,7 @@ package organizations
// Editing this file might prove futile when you re-run the swagger generate command
import (
+ "encoding/json"
"fmt"
"io"
@@ -87,11 +88,13 @@ func (o *GetOrgOK) Code() int {
}
func (o *GetOrgOK) Error() string {
- return fmt.Sprintf("[GET /organizations/{orgID}][%d] getOrgOK %+v", 200, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /organizations/{orgID}][%d] getOrgOK %s", 200, payload)
}
func (o *GetOrgOK) String() string {
- return fmt.Sprintf("[GET /organizations/{orgID}][%d] getOrgOK %+v", 200, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /organizations/{orgID}][%d] getOrgOK %s", 200, payload)
}
func (o *GetOrgOK) GetPayload() garm_params.Organization {
@@ -157,11 +160,13 @@ func (o *GetOrgDefault) Code() int {
}
func (o *GetOrgDefault) Error() string {
- return fmt.Sprintf("[GET /organizations/{orgID}][%d] GetOrg default %+v", o._statusCode, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /organizations/{orgID}][%d] GetOrg default %s", o._statusCode, payload)
}
func (o *GetOrgDefault) String() string {
- return fmt.Sprintf("[GET /organizations/{orgID}][%d] GetOrg default %+v", o._statusCode, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /organizations/{orgID}][%d] GetOrg default %s", o._statusCode, payload)
}
func (o *GetOrgDefault) GetPayload() apiserver_params.APIErrorResponse {
diff --git a/client/organizations/get_org_webhook_info_parameters.go b/client/organizations/get_org_webhook_info_parameters.go
new file mode 100644
index 00000000..fe67c584
--- /dev/null
+++ b/client/organizations/get_org_webhook_info_parameters.go
@@ -0,0 +1,151 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package organizations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewGetOrgWebhookInfoParams creates a new GetOrgWebhookInfoParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameters, use SetDefaults or WithDefaults.
+func NewGetOrgWebhookInfoParams() *GetOrgWebhookInfoParams {
+ return &GetOrgWebhookInfoParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewGetOrgWebhookInfoParamsWithTimeout creates a new GetOrgWebhookInfoParams object
+// with the ability to set a timeout on a request.
+func NewGetOrgWebhookInfoParamsWithTimeout(timeout time.Duration) *GetOrgWebhookInfoParams {
+ return &GetOrgWebhookInfoParams{
+ timeout: timeout,
+ }
+}
+
+// NewGetOrgWebhookInfoParamsWithContext creates a new GetOrgWebhookInfoParams object
+// with the ability to set a context for a request.
+func NewGetOrgWebhookInfoParamsWithContext(ctx context.Context) *GetOrgWebhookInfoParams {
+ return &GetOrgWebhookInfoParams{
+ Context: ctx,
+ }
+}
+
+// NewGetOrgWebhookInfoParamsWithHTTPClient creates a new GetOrgWebhookInfoParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewGetOrgWebhookInfoParamsWithHTTPClient(client *http.Client) *GetOrgWebhookInfoParams {
+ return &GetOrgWebhookInfoParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+GetOrgWebhookInfoParams contains all the parameters to send to the API endpoint
+
+ for the get org webhook info operation.
+
+ Typically these are written to a http.Request.
+*/
+type GetOrgWebhookInfoParams struct {
+
+ /* OrgID.
+
+ Organization ID.
+ */
+ OrgID string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the get org webhook info params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetOrgWebhookInfoParams) WithDefaults() *GetOrgWebhookInfoParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the get org webhook info params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetOrgWebhookInfoParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the get org webhook info params
+func (o *GetOrgWebhookInfoParams) WithTimeout(timeout time.Duration) *GetOrgWebhookInfoParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the get org webhook info params
+func (o *GetOrgWebhookInfoParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the get org webhook info params
+func (o *GetOrgWebhookInfoParams) WithContext(ctx context.Context) *GetOrgWebhookInfoParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the get org webhook info params
+func (o *GetOrgWebhookInfoParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the get org webhook info params
+func (o *GetOrgWebhookInfoParams) WithHTTPClient(client *http.Client) *GetOrgWebhookInfoParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the get org webhook info params
+func (o *GetOrgWebhookInfoParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithOrgID adds the orgID to the get org webhook info params
+func (o *GetOrgWebhookInfoParams) WithOrgID(orgID string) *GetOrgWebhookInfoParams {
+ o.SetOrgID(orgID)
+ return o
+}
+
+// SetOrgID adds the orgId to the get org webhook info params
+func (o *GetOrgWebhookInfoParams) SetOrgID(orgID string) {
+ o.OrgID = orgID
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *GetOrgWebhookInfoParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ // path param orgID
+ if err := r.SetPathParam("orgID", o.OrgID); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/organizations/get_org_webhook_info_responses.go b/client/organizations/get_org_webhook_info_responses.go
new file mode 100644
index 00000000..9cebf511
--- /dev/null
+++ b/client/organizations/get_org_webhook_info_responses.go
@@ -0,0 +1,184 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package organizations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// GetOrgWebhookInfoReader is a Reader for the GetOrgWebhookInfo structure.
+type GetOrgWebhookInfoReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the receiver, o.
+func (o *GetOrgWebhookInfoReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewGetOrgWebhookInfoOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ result := NewGetOrgWebhookInfoDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+ }
+}
+
+// NewGetOrgWebhookInfoOK creates a GetOrgWebhookInfoOK with default headers values
+func NewGetOrgWebhookInfoOK() *GetOrgWebhookInfoOK {
+ return &GetOrgWebhookInfoOK{}
+}
+
+/*
+GetOrgWebhookInfoOK describes a response with status code 200, with default header values.
+
+HookInfo
+*/
+type GetOrgWebhookInfoOK struct {
+ Payload garm_params.HookInfo
+}
+
+// IsSuccess returns true when this get org webhook info o k response has a 2xx status code
+func (o *GetOrgWebhookInfoOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this get org webhook info o k response has a 3xx status code
+func (o *GetOrgWebhookInfoOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get org webhook info o k response has a 4xx status code
+func (o *GetOrgWebhookInfoOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this get org webhook info o k response has a 5xx status code
+func (o *GetOrgWebhookInfoOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get org webhook info o k response has a status code equal to that given
+func (o *GetOrgWebhookInfoOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the get org webhook info o k response
+func (o *GetOrgWebhookInfoOK) Code() int {
+ return 200
+}
+
+func (o *GetOrgWebhookInfoOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /organizations/{orgID}/webhook][%d] getOrgWebhookInfoOK %s", 200, payload)
+}
+
+func (o *GetOrgWebhookInfoOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /organizations/{orgID}/webhook][%d] getOrgWebhookInfoOK %s", 200, payload)
+}
+
+func (o *GetOrgWebhookInfoOK) GetPayload() garm_params.HookInfo {
+ return o.Payload
+}
+
+func (o *GetOrgWebhookInfoOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewGetOrgWebhookInfoDefault creates a GetOrgWebhookInfoDefault with default headers values
+func NewGetOrgWebhookInfoDefault(code int) *GetOrgWebhookInfoDefault {
+ return &GetOrgWebhookInfoDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+GetOrgWebhookInfoDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type GetOrgWebhookInfoDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this get org webhook info default response has a 2xx status code
+func (o *GetOrgWebhookInfoDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this get org webhook info default response has a 3xx status code
+func (o *GetOrgWebhookInfoDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this get org webhook info default response has a 4xx status code
+func (o *GetOrgWebhookInfoDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this get org webhook info default response has a 5xx status code
+func (o *GetOrgWebhookInfoDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this get org webhook info default response has a status code equal to that given
+func (o *GetOrgWebhookInfoDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the get org webhook info default response
+func (o *GetOrgWebhookInfoDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *GetOrgWebhookInfoDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /organizations/{orgID}/webhook][%d] GetOrgWebhookInfo default %s", o._statusCode, payload)
+}
+
+func (o *GetOrgWebhookInfoDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /organizations/{orgID}/webhook][%d] GetOrgWebhookInfo default %s", o._statusCode, payload)
+}
+
+func (o *GetOrgWebhookInfoDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *GetOrgWebhookInfoDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/organizations/install_org_webhook_parameters.go b/client/organizations/install_org_webhook_parameters.go
new file mode 100644
index 00000000..b28de742
--- /dev/null
+++ b/client/organizations/install_org_webhook_parameters.go
@@ -0,0 +1,173 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package organizations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// NewInstallOrgWebhookParams creates a new InstallOrgWebhookParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameters, use SetDefaults or WithDefaults.
+func NewInstallOrgWebhookParams() *InstallOrgWebhookParams {
+ return &InstallOrgWebhookParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewInstallOrgWebhookParamsWithTimeout creates a new InstallOrgWebhookParams object
+// with the ability to set a timeout on a request.
+func NewInstallOrgWebhookParamsWithTimeout(timeout time.Duration) *InstallOrgWebhookParams {
+ return &InstallOrgWebhookParams{
+ timeout: timeout,
+ }
+}
+
+// NewInstallOrgWebhookParamsWithContext creates a new InstallOrgWebhookParams object
+// with the ability to set a context for a request.
+func NewInstallOrgWebhookParamsWithContext(ctx context.Context) *InstallOrgWebhookParams {
+ return &InstallOrgWebhookParams{
+ Context: ctx,
+ }
+}
+
+// NewInstallOrgWebhookParamsWithHTTPClient creates a new InstallOrgWebhookParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewInstallOrgWebhookParamsWithHTTPClient(client *http.Client) *InstallOrgWebhookParams {
+ return &InstallOrgWebhookParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+InstallOrgWebhookParams contains all the parameters to send to the API endpoint
+
+ for the install org webhook operation.
+
+ Typically these are written to a http.Request.
+*/
+type InstallOrgWebhookParams struct {
+
+ /* Body.
+
+ Parameters used when creating the organization webhook.
+ */
+ Body garm_params.InstallWebhookParams
+
+ /* OrgID.
+
+ Organization ID.
+ */
+ OrgID string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the install org webhook params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *InstallOrgWebhookParams) WithDefaults() *InstallOrgWebhookParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the install org webhook params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *InstallOrgWebhookParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the install org webhook params
+func (o *InstallOrgWebhookParams) WithTimeout(timeout time.Duration) *InstallOrgWebhookParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the install org webhook params
+func (o *InstallOrgWebhookParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the install org webhook params
+func (o *InstallOrgWebhookParams) WithContext(ctx context.Context) *InstallOrgWebhookParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the install org webhook params
+func (o *InstallOrgWebhookParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the install org webhook params
+func (o *InstallOrgWebhookParams) WithHTTPClient(client *http.Client) *InstallOrgWebhookParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the install org webhook params
+func (o *InstallOrgWebhookParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithBody adds the body to the install org webhook params
+func (o *InstallOrgWebhookParams) WithBody(body garm_params.InstallWebhookParams) *InstallOrgWebhookParams {
+ o.SetBody(body)
+ return o
+}
+
+// SetBody adds the body to the install org webhook params
+func (o *InstallOrgWebhookParams) SetBody(body garm_params.InstallWebhookParams) {
+ o.Body = body
+}
+
+// WithOrgID adds the orgID to the install org webhook params
+func (o *InstallOrgWebhookParams) WithOrgID(orgID string) *InstallOrgWebhookParams {
+ o.SetOrgID(orgID)
+ return o
+}
+
+// SetOrgID adds the orgId to the install org webhook params
+func (o *InstallOrgWebhookParams) SetOrgID(orgID string) {
+ o.OrgID = orgID
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *InstallOrgWebhookParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+ if err := r.SetBodyParam(o.Body); err != nil {
+ return err
+ }
+
+ // path param orgID
+ if err := r.SetPathParam("orgID", o.OrgID); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/organizations/install_org_webhook_responses.go b/client/organizations/install_org_webhook_responses.go
new file mode 100644
index 00000000..338b7c08
--- /dev/null
+++ b/client/organizations/install_org_webhook_responses.go
@@ -0,0 +1,184 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package organizations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// InstallOrgWebhookReader is a Reader for the InstallOrgWebhook structure.
+type InstallOrgWebhookReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the receiver, o.
+func (o *InstallOrgWebhookReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewInstallOrgWebhookOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ result := NewInstallOrgWebhookDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+ }
+}
+
+// NewInstallOrgWebhookOK creates a InstallOrgWebhookOK with default headers values
+func NewInstallOrgWebhookOK() *InstallOrgWebhookOK {
+ return &InstallOrgWebhookOK{}
+}
+
+/*
+InstallOrgWebhookOK describes a response with status code 200, with default header values.
+
+HookInfo
+*/
+type InstallOrgWebhookOK struct {
+ Payload garm_params.HookInfo
+}
+
+// IsSuccess returns true when this install org webhook o k response has a 2xx status code
+func (o *InstallOrgWebhookOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this install org webhook o k response has a 3xx status code
+func (o *InstallOrgWebhookOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this install org webhook o k response has a 4xx status code
+func (o *InstallOrgWebhookOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this install org webhook o k response has a 5xx status code
+func (o *InstallOrgWebhookOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this install org webhook o k response has a status code equal to that given
+func (o *InstallOrgWebhookOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the install org webhook o k response
+func (o *InstallOrgWebhookOK) Code() int {
+ return 200
+}
+
+func (o *InstallOrgWebhookOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /organizations/{orgID}/webhook][%d] installOrgWebhookOK %s", 200, payload)
+}
+
+func (o *InstallOrgWebhookOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /organizations/{orgID}/webhook][%d] installOrgWebhookOK %s", 200, payload)
+}
+
+func (o *InstallOrgWebhookOK) GetPayload() garm_params.HookInfo {
+ return o.Payload
+}
+
+func (o *InstallOrgWebhookOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewInstallOrgWebhookDefault creates a InstallOrgWebhookDefault with default headers values
+func NewInstallOrgWebhookDefault(code int) *InstallOrgWebhookDefault {
+ return &InstallOrgWebhookDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+InstallOrgWebhookDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type InstallOrgWebhookDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this install org webhook default response has a 2xx status code
+func (o *InstallOrgWebhookDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this install org webhook default response has a 3xx status code
+func (o *InstallOrgWebhookDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this install org webhook default response has a 4xx status code
+func (o *InstallOrgWebhookDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this install org webhook default response has a 5xx status code
+func (o *InstallOrgWebhookDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this install org webhook default response has a status code equal to that given
+func (o *InstallOrgWebhookDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the install org webhook default response
+func (o *InstallOrgWebhookDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *InstallOrgWebhookDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /organizations/{orgID}/webhook][%d] InstallOrgWebhook default %s", o._statusCode, payload)
+}
+
+func (o *InstallOrgWebhookDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /organizations/{orgID}/webhook][%d] InstallOrgWebhook default %s", o._statusCode, payload)
+}
+
+func (o *InstallOrgWebhookDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *InstallOrgWebhookDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
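
For any non-2xx status, the reader above returns `*InstallOrgWebhookDefault` as the error value, so callers can recover the structured `APIErrorResponse` instead of parsing the error string. A hedged sketch (the function name is illustrative):

```go
package example

import (
	"errors"
	"fmt"

	"github.com/cloudbase/garm/client/organizations"
)

// reportInstallError unwraps the generated default (non-2xx) response,
// which InstallOrgWebhook returns as an error value.
func reportInstallError(err error) {
	var apiErr *organizations.InstallOrgWebhookDefault
	if errors.As(err, &apiErr) {
		// GetPayload returns the server-side APIErrorResponse.
		fmt.Printf("API error, status %d: %+v\n", apiErr.Code(), apiErr.GetPayload())
		return
	}
	fmt.Printf("transport error: %v\n", err)
}
```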
diff --git a/client/organizations/list_org_instances_responses.go b/client/organizations/list_org_instances_responses.go
index 1c0f48e0..2c6ad86e 100644
--- a/client/organizations/list_org_instances_responses.go
+++ b/client/organizations/list_org_instances_responses.go
@@ -6,6 +6,7 @@ package organizations
// Editing this file might prove futile when you re-run the swagger generate command
import (
+ "encoding/json"
"fmt"
"io"
@@ -87,11 +88,13 @@ func (o *ListOrgInstancesOK) Code() int {
}
func (o *ListOrgInstancesOK) Error() string {
- return fmt.Sprintf("[GET /organizations/{orgID}/instances][%d] listOrgInstancesOK %+v", 200, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /organizations/{orgID}/instances][%d] listOrgInstancesOK %s", 200, payload)
}
func (o *ListOrgInstancesOK) String() string {
- return fmt.Sprintf("[GET /organizations/{orgID}/instances][%d] listOrgInstancesOK %+v", 200, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /organizations/{orgID}/instances][%d] listOrgInstancesOK %s", 200, payload)
}
func (o *ListOrgInstancesOK) GetPayload() garm_params.Instances {
@@ -157,11 +160,13 @@ func (o *ListOrgInstancesDefault) Code() int {
}
func (o *ListOrgInstancesDefault) Error() string {
- return fmt.Sprintf("[GET /organizations/{orgID}/instances][%d] ListOrgInstances default %+v", o._statusCode, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /organizations/{orgID}/instances][%d] ListOrgInstances default %s", o._statusCode, payload)
}
func (o *ListOrgInstancesDefault) String() string {
- return fmt.Sprintf("[GET /organizations/{orgID}/instances][%d] ListOrgInstances default %+v", o._statusCode, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /organizations/{orgID}/instances][%d] ListOrgInstances default %s", o._statusCode, payload)
}
func (o *ListOrgInstancesDefault) GetPayload() apiserver_params.APIErrorResponse {
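
This hunk is the first of many in this diff with the same mechanical change: `Error()` and `String()` now render the payload with `json.Marshal` instead of `%+v`, so error strings carry the JSON body rather than a Go struct dump. A small stand-in illustrating the difference (`demoPayload` is hypothetical, not a GARM type):

```go
package example

import (
	"encoding/json"
	"fmt"
)

// demoPayload is a hypothetical stand-in for a generated payload type.
type demoPayload struct {
	Error   string `json:"error"`
	Details string `json:"details"`
}

// formatOldNew contrasts the previous %+v formatting with the new
// json.Marshal-based formatting used by the regenerated Error methods.
func formatOldNew() (string, string) {
	p := demoPayload{Error: "conflict", Details: "pool already exists"}
	before := fmt.Sprintf("ListOrgInstances default %+v", p)
	// e.g. "ListOrgInstances default {Error:conflict Details:pool already exists}"
	b, _ := json.Marshal(p)
	after := fmt.Sprintf("ListOrgInstances default %s", b)
	// e.g. `ListOrgInstances default {"error":"conflict","details":"pool already exists"}`
	return before, after
}
```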
diff --git a/client/organizations/list_org_pools_responses.go b/client/organizations/list_org_pools_responses.go
index b39944fd..cdbe7f7b 100644
--- a/client/organizations/list_org_pools_responses.go
+++ b/client/organizations/list_org_pools_responses.go
@@ -6,6 +6,7 @@ package organizations
// Editing this file might prove futile when you re-run the swagger generate command
import (
+ "encoding/json"
"fmt"
"io"
@@ -87,11 +88,13 @@ func (o *ListOrgPoolsOK) Code() int {
}
func (o *ListOrgPoolsOK) Error() string {
- return fmt.Sprintf("[GET /organizations/{orgID}/pools][%d] listOrgPoolsOK %+v", 200, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /organizations/{orgID}/pools][%d] listOrgPoolsOK %s", 200, payload)
}
func (o *ListOrgPoolsOK) String() string {
- return fmt.Sprintf("[GET /organizations/{orgID}/pools][%d] listOrgPoolsOK %+v", 200, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /organizations/{orgID}/pools][%d] listOrgPoolsOK %s", 200, payload)
}
func (o *ListOrgPoolsOK) GetPayload() garm_params.Pools {
@@ -157,11 +160,13 @@ func (o *ListOrgPoolsDefault) Code() int {
}
func (o *ListOrgPoolsDefault) Error() string {
- return fmt.Sprintf("[GET /organizations/{orgID}/pools][%d] ListOrgPools default %+v", o._statusCode, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /organizations/{orgID}/pools][%d] ListOrgPools default %s", o._statusCode, payload)
}
func (o *ListOrgPoolsDefault) String() string {
- return fmt.Sprintf("[GET /organizations/{orgID}/pools][%d] ListOrgPools default %+v", o._statusCode, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /organizations/{orgID}/pools][%d] ListOrgPools default %s", o._statusCode, payload)
}
func (o *ListOrgPoolsDefault) GetPayload() apiserver_params.APIErrorResponse {
diff --git a/client/organizations/list_org_scale_sets_parameters.go b/client/organizations/list_org_scale_sets_parameters.go
new file mode 100644
index 00000000..711ec788
--- /dev/null
+++ b/client/organizations/list_org_scale_sets_parameters.go
@@ -0,0 +1,151 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package organizations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewListOrgScaleSetsParams creates a new ListOrgScaleSetsParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewListOrgScaleSetsParams() *ListOrgScaleSetsParams {
+ return &ListOrgScaleSetsParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewListOrgScaleSetsParamsWithTimeout creates a new ListOrgScaleSetsParams object
+// with the ability to set a timeout on a request.
+func NewListOrgScaleSetsParamsWithTimeout(timeout time.Duration) *ListOrgScaleSetsParams {
+ return &ListOrgScaleSetsParams{
+ timeout: timeout,
+ }
+}
+
+// NewListOrgScaleSetsParamsWithContext creates a new ListOrgScaleSetsParams object
+// with the ability to set a context for a request.
+func NewListOrgScaleSetsParamsWithContext(ctx context.Context) *ListOrgScaleSetsParams {
+ return &ListOrgScaleSetsParams{
+ Context: ctx,
+ }
+}
+
+// NewListOrgScaleSetsParamsWithHTTPClient creates a new ListOrgScaleSetsParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewListOrgScaleSetsParamsWithHTTPClient(client *http.Client) *ListOrgScaleSetsParams {
+ return &ListOrgScaleSetsParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+ListOrgScaleSetsParams contains all the parameters to send to the API endpoint
+
+ for the list org scale sets operation.
+
+ Typically these are written to a http.Request.
+*/
+type ListOrgScaleSetsParams struct {
+
+ /* OrgID.
+
+ Organization ID.
+ */
+ OrgID string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the list org scale sets params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *ListOrgScaleSetsParams) WithDefaults() *ListOrgScaleSetsParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the list org scale sets params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *ListOrgScaleSetsParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the list org scale sets params
+func (o *ListOrgScaleSetsParams) WithTimeout(timeout time.Duration) *ListOrgScaleSetsParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the list org scale sets params
+func (o *ListOrgScaleSetsParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the list org scale sets params
+func (o *ListOrgScaleSetsParams) WithContext(ctx context.Context) *ListOrgScaleSetsParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the list org scale sets params
+func (o *ListOrgScaleSetsParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the list org scale sets params
+func (o *ListOrgScaleSetsParams) WithHTTPClient(client *http.Client) *ListOrgScaleSetsParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the list org scale sets params
+func (o *ListOrgScaleSetsParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithOrgID adds the orgID to the list org scale sets params
+func (o *ListOrgScaleSetsParams) WithOrgID(orgID string) *ListOrgScaleSetsParams {
+ o.SetOrgID(orgID)
+ return o
+}
+
+// SetOrgID adds the orgId to the list org scale sets params
+func (o *ListOrgScaleSetsParams) SetOrgID(orgID string) {
+ o.OrgID = orgID
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *ListOrgScaleSetsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ // path param orgID
+ if err := r.SetPathParam("orgID", o.OrgID); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/organizations/list_org_scale_sets_responses.go b/client/organizations/list_org_scale_sets_responses.go
new file mode 100644
index 00000000..0b470fa1
--- /dev/null
+++ b/client/organizations/list_org_scale_sets_responses.go
@@ -0,0 +1,184 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package organizations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// ListOrgScaleSetsReader is a Reader for the ListOrgScaleSets structure.
+type ListOrgScaleSetsReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *ListOrgScaleSetsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewListOrgScaleSetsOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ result := NewListOrgScaleSetsDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+ }
+}
+
+// NewListOrgScaleSetsOK creates a ListOrgScaleSetsOK with default headers values
+func NewListOrgScaleSetsOK() *ListOrgScaleSetsOK {
+ return &ListOrgScaleSetsOK{}
+}
+
+/*
+ListOrgScaleSetsOK describes a response with status code 200, with default header values.
+
+ScaleSets
+*/
+type ListOrgScaleSetsOK struct {
+ Payload garm_params.ScaleSets
+}
+
+// IsSuccess returns true when this list org scale sets o k response has a 2xx status code
+func (o *ListOrgScaleSetsOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this list org scale sets o k response has a 3xx status code
+func (o *ListOrgScaleSetsOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this list org scale sets o k response has a 4xx status code
+func (o *ListOrgScaleSetsOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this list org scale sets o k response has a 5xx status code
+func (o *ListOrgScaleSetsOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this list org scale sets o k response has a status code equal to that given
+func (o *ListOrgScaleSetsOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the list org scale sets o k response
+func (o *ListOrgScaleSetsOK) Code() int {
+ return 200
+}
+
+func (o *ListOrgScaleSetsOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /organizations/{orgID}/scalesets][%d] listOrgScaleSetsOK %s", 200, payload)
+}
+
+func (o *ListOrgScaleSetsOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /organizations/{orgID}/scalesets][%d] listOrgScaleSetsOK %s", 200, payload)
+}
+
+func (o *ListOrgScaleSetsOK) GetPayload() garm_params.ScaleSets {
+ return o.Payload
+}
+
+func (o *ListOrgScaleSetsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewListOrgScaleSetsDefault creates a ListOrgScaleSetsDefault with default headers values
+func NewListOrgScaleSetsDefault(code int) *ListOrgScaleSetsDefault {
+ return &ListOrgScaleSetsDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+ListOrgScaleSetsDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type ListOrgScaleSetsDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this list org scale sets default response has a 2xx status code
+func (o *ListOrgScaleSetsDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this list org scale sets default response has a 3xx status code
+func (o *ListOrgScaleSetsDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this list org scale sets default response has a 4xx status code
+func (o *ListOrgScaleSetsDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this list org scale sets default response has a 5xx status code
+func (o *ListOrgScaleSetsDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this list org scale sets default response has a status code equal to that given
+func (o *ListOrgScaleSetsDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the list org scale sets default response
+func (o *ListOrgScaleSetsDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *ListOrgScaleSetsDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /organizations/{orgID}/scalesets][%d] ListOrgScaleSets default %s", o._statusCode, payload)
+}
+
+func (o *ListOrgScaleSetsDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /organizations/{orgID}/scalesets][%d] ListOrgScaleSets default %s", o._statusCode, payload)
+}
+
+func (o *ListOrgScaleSetsDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *ListOrgScaleSetsDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
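
With both generated files in place, calling the new operation looks like the sketch below; `cli` and `orgID` are assumed, and `authInfo` is nil on the assumption that authentication lives on the transport:

```go
package example

import (
	"fmt"

	"github.com/cloudbase/garm/client/organizations"
)

// listScaleSets sketches the new ListOrgScaleSets operation against a
// configured organizations.ClientService.
func listScaleSets(cli organizations.ClientService, orgID string) error {
	params := organizations.NewListOrgScaleSetsParams().WithOrgID(orgID)
	resp, err := cli.ListOrgScaleSets(params, nil)
	if err != nil {
		return err
	}
	// garm_params.ScaleSets is assumed to be a slice type, like the other
	// generated list payloads.
	for _, ss := range resp.GetPayload() {
		fmt.Printf("scale set: %+v\n", ss)
	}
	return nil
}
```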
diff --git a/client/organizations/list_orgs_parameters.go b/client/organizations/list_orgs_parameters.go
index 1441722f..af4c19c8 100644
--- a/client/organizations/list_orgs_parameters.go
+++ b/client/organizations/list_orgs_parameters.go
@@ -60,6 +60,19 @@ ListOrgsParams contains all the parameters to send to the API endpoint
Typically these are written to a http.Request.
*/
type ListOrgsParams struct {
+
+ /* Endpoint.
+
+ Exact endpoint name to filter by
+ */
+ Endpoint *string
+
+ /* Name.
+
+ Exact organization name to filter by
+ */
+ Name *string
+
timeout time.Duration
Context context.Context
HTTPClient *http.Client
@@ -113,6 +126,28 @@ func (o *ListOrgsParams) SetHTTPClient(client *http.Client) {
o.HTTPClient = client
}
+// WithEndpoint adds the endpoint to the list orgs params
+func (o *ListOrgsParams) WithEndpoint(endpoint *string) *ListOrgsParams {
+ o.SetEndpoint(endpoint)
+ return o
+}
+
+// SetEndpoint adds the endpoint to the list orgs params
+func (o *ListOrgsParams) SetEndpoint(endpoint *string) {
+ o.Endpoint = endpoint
+}
+
+// WithName adds the name to the list orgs params
+func (o *ListOrgsParams) WithName(name *string) *ListOrgsParams {
+ o.SetName(name)
+ return o
+}
+
+// SetName adds the name to the list orgs params
+func (o *ListOrgsParams) SetName(name *string) {
+ o.Name = name
+}
+
// WriteToRequest writes these params to a swagger request
func (o *ListOrgsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
@@ -121,6 +156,40 @@ func (o *ListOrgsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Regi
}
var res []error
+ if o.Endpoint != nil {
+
+ // query param endpoint
+ var qrEndpoint string
+
+ if o.Endpoint != nil {
+ qrEndpoint = *o.Endpoint
+ }
+ qEndpoint := qrEndpoint
+ if qEndpoint != "" {
+
+ if err := r.SetQueryParam("endpoint", qEndpoint); err != nil {
+ return err
+ }
+ }
+ }
+
+ if o.Name != nil {
+
+ // query param name
+ var qrName string
+
+ if o.Name != nil {
+ qrName = *o.Name
+ }
+ qName := qrName
+ if qName != "" {
+
+ if err := r.SetQueryParam("name", qName); err != nil {
+ return err
+ }
+ }
+ }
+
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
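
Note that the generated `WriteToRequest` above only emits the `endpoint` and `name` query parameters when the pointers are non-nil and the values are non-empty, so passing an empty string is equivalent to omitting the filter. A usage sketch with illustrative values:

```go
package example

import (
	"github.com/cloudbase/garm/client/organizations"
)

// listOrgsFiltered sketches the new server-side filters. Both are
// optional pointers; the name and endpoint values here are illustrative.
func listOrgsFiltered(cli organizations.ClientService) (*organizations.ListOrgsOK, error) {
	name := "my-org"
	endpoint := "github.com"
	params := organizations.NewListOrgsParams().
		WithName(&name).
		WithEndpoint(&endpoint)
	return cli.ListOrgs(params, nil)
}
```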
diff --git a/client/organizations/list_orgs_responses.go b/client/organizations/list_orgs_responses.go
index b29343e3..c72f11cb 100644
--- a/client/organizations/list_orgs_responses.go
+++ b/client/organizations/list_orgs_responses.go
@@ -6,6 +6,7 @@ package organizations
// Editing this file might prove futile when you re-run the swagger generate command
import (
+ "encoding/json"
"fmt"
"io"
@@ -87,11 +88,13 @@ func (o *ListOrgsOK) Code() int {
}
func (o *ListOrgsOK) Error() string {
- return fmt.Sprintf("[GET /organizations][%d] listOrgsOK %+v", 200, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /organizations][%d] listOrgsOK %s", 200, payload)
}
func (o *ListOrgsOK) String() string {
- return fmt.Sprintf("[GET /organizations][%d] listOrgsOK %+v", 200, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /organizations][%d] listOrgsOK %s", 200, payload)
}
func (o *ListOrgsOK) GetPayload() garm_params.Organizations {
@@ -157,11 +160,13 @@ func (o *ListOrgsDefault) Code() int {
}
func (o *ListOrgsDefault) Error() string {
- return fmt.Sprintf("[GET /organizations][%d] ListOrgs default %+v", o._statusCode, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /organizations][%d] ListOrgs default %s", o._statusCode, payload)
}
func (o *ListOrgsDefault) String() string {
- return fmt.Sprintf("[GET /organizations][%d] ListOrgs default %+v", o._statusCode, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /organizations][%d] ListOrgs default %s", o._statusCode, payload)
}
func (o *ListOrgsDefault) GetPayload() apiserver_params.APIErrorResponse {
diff --git a/client/organizations/organizations_client.go b/client/organizations/organizations_client.go
index 3d75cd86..cd3e1211 100644
--- a/client/organizations/organizations_client.go
+++ b/client/organizations/organizations_client.go
@@ -7,6 +7,7 @@ package organizations
import (
"github.com/go-openapi/runtime"
+ httptransport "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
)
@@ -15,6 +16,31 @@ func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientServi
return &Client{transport: transport, formats: formats}
}
+// NewClientWithBasicAuth creates a new organizations API client with basic auth credentials.
+// It takes the following parameters:
+// - host: http host (github.com).
+// - basePath: any base path for the API client ("/v1", "/v3").
+// - scheme: http scheme ("http", "https").
+// - user: user for basic authentication header.
+// - password: password for basic authentication header.
+func NewClientWithBasicAuth(host, basePath, scheme, user, password string) ClientService {
+ transport := httptransport.New(host, basePath, []string{scheme})
+ transport.DefaultAuthentication = httptransport.BasicAuth(user, password)
+ return &Client{transport: transport, formats: strfmt.Default}
+}
+
+// NewClientWithBearerToken creates a new organizations API client with a bearer token for authentication.
+// It takes the following parameters:
+// - host: http host (github.com).
+// - basePath: any base path for the API client ("/v1", "/v3").
+// - scheme: http scheme ("http", "https").
+// - bearerToken: bearer token for Bearer authentication header.
+func NewClientWithBearerToken(host, basePath, scheme, bearerToken string) ClientService {
+ transport := httptransport.New(host, basePath, []string{scheme})
+ transport.DefaultAuthentication = httptransport.BearerToken(bearerToken)
+ return &Client{transport: transport, formats: strfmt.Default}
+}
+
/*
Client for organizations API
*/
@@ -23,7 +49,7 @@ type Client struct {
formats strfmt.Registry
}
-// ClientOption is the option for Client methods
+// ClientOption may be used to customize the behavior of Client methods.
type ClientOption func(*runtime.ClientOperation)
// ClientService is the interface for Client methods
@@ -32,6 +58,8 @@ type ClientService interface {
CreateOrgPool(params *CreateOrgPoolParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*CreateOrgPoolOK, error)
+ CreateOrgScaleSet(params *CreateOrgScaleSetParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*CreateOrgScaleSetOK, error)
+
DeleteOrg(params *DeleteOrgParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) error
DeleteOrgPool(params *DeleteOrgPoolParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) error
@@ -40,12 +68,20 @@ type ClientService interface {
GetOrgPool(params *GetOrgPoolParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*GetOrgPoolOK, error)
+ GetOrgWebhookInfo(params *GetOrgWebhookInfoParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*GetOrgWebhookInfoOK, error)
+
+ InstallOrgWebhook(params *InstallOrgWebhookParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*InstallOrgWebhookOK, error)
+
ListOrgInstances(params *ListOrgInstancesParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ListOrgInstancesOK, error)
ListOrgPools(params *ListOrgPoolsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ListOrgPoolsOK, error)
+ ListOrgScaleSets(params *ListOrgScaleSetsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ListOrgScaleSetsOK, error)
+
ListOrgs(params *ListOrgsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ListOrgsOK, error)
+ UninstallOrgWebhook(params *UninstallOrgWebhookParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) error
+
UpdateOrg(params *UpdateOrgParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*UpdateOrgOK, error)
UpdateOrgPool(params *UpdateOrgPoolParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*UpdateOrgPoolOK, error)
@@ -129,6 +165,44 @@ func (a *Client) CreateOrgPool(params *CreateOrgPoolParams, authInfo runtime.Cli
return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
}
+/*
+CreateOrgScaleSet creates organization scale set with the parameters given
+*/
+func (a *Client) CreateOrgScaleSet(params *CreateOrgScaleSetParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*CreateOrgScaleSetOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewCreateOrgScaleSetParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "CreateOrgScaleSet",
+ Method: "POST",
+ PathPattern: "/organizations/{orgID}/scalesets",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &CreateOrgScaleSetReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*CreateOrgScaleSetOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ unexpectedSuccess := result.(*CreateOrgScaleSetDefault)
+ return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
/*
DeleteOrg deletes organization by ID
*/
@@ -269,6 +343,84 @@ func (a *Client) GetOrgPool(params *GetOrgPoolParams, authInfo runtime.ClientAut
return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
}
+/*
+GetOrgWebhookInfo gets information about the GARM installed webhook on an organization
+*/
+func (a *Client) GetOrgWebhookInfo(params *GetOrgWebhookInfoParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*GetOrgWebhookInfoOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewGetOrgWebhookInfoParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "GetOrgWebhookInfo",
+ Method: "GET",
+ PathPattern: "/organizations/{orgID}/webhook",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &GetOrgWebhookInfoReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*GetOrgWebhookInfoOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ unexpectedSuccess := result.(*GetOrgWebhookInfoDefault)
+ return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+InstallOrgWebhook installs the GARM webhook for an organization. The secret
+configured on the organization will be used to validate the requests.
+*/
+func (a *Client) InstallOrgWebhook(params *InstallOrgWebhookParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*InstallOrgWebhookOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewInstallOrgWebhookParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "InstallOrgWebhook",
+ Method: "POST",
+ PathPattern: "/organizations/{orgID}/webhook",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &InstallOrgWebhookReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*InstallOrgWebhookOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ unexpectedSuccess := result.(*InstallOrgWebhookDefault)
+ return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
/*
ListOrgInstances lists organization instances
*/
@@ -345,6 +497,44 @@ func (a *Client) ListOrgPools(params *ListOrgPoolsParams, authInfo runtime.Clien
return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
}
+/*
+ListOrgScaleSets lists organization scale sets
+*/
+func (a *Client) ListOrgScaleSets(params *ListOrgScaleSetsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ListOrgScaleSetsOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewListOrgScaleSetsParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "ListOrgScaleSets",
+ Method: "GET",
+ PathPattern: "/organizations/{orgID}/scalesets",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &ListOrgScaleSetsReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*ListOrgScaleSetsOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ unexpectedSuccess := result.(*ListOrgScaleSetsDefault)
+ return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
/*
ListOrgs lists organizations
*/
@@ -383,6 +573,38 @@ func (a *Client) ListOrgs(params *ListOrgsParams, authInfo runtime.ClientAuthInf
return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
}
+/*
+UninstallOrgWebhook uninstalls organization webhook
+*/
+func (a *Client) UninstallOrgWebhook(params *UninstallOrgWebhookParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) error {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewUninstallOrgWebhookParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "UninstallOrgWebhook",
+ Method: "DELETE",
+ PathPattern: "/organizations/{orgID}/webhook",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &UninstallOrgWebhookReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ _, err := a.transport.Submit(op)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
/*
UpdateOrg updates organization with the parameters given
*/
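
The two constructors added above (and mirrored in the pools and providers clients below) wire `DefaultAuthentication` into the transport, so the per-call `authInfo` can be nil. A hedged sketch with placeholder host, base path and credentials:

```go
package example

import (
	"github.com/cloudbase/garm/client/organizations"
)

// newClients shows both helper constructors; host, base path and
// credentials are placeholders.
func newClients() (basic, bearer organizations.ClientService) {
	basic = organizations.NewClientWithBasicAuth(
		"garm.example.com", "/api/v1", "https", "admin", "s3cr3t")
	bearer = organizations.NewClientWithBearerToken(
		"garm.example.com", "/api/v1", "https", "my-jwt-token")
	return basic, bearer
}
```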
diff --git a/client/organizations/uninstall_org_webhook_parameters.go b/client/organizations/uninstall_org_webhook_parameters.go
new file mode 100644
index 00000000..3914d9c7
--- /dev/null
+++ b/client/organizations/uninstall_org_webhook_parameters.go
@@ -0,0 +1,151 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package organizations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewUninstallOrgWebhookParams creates a new UninstallOrgWebhookParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewUninstallOrgWebhookParams() *UninstallOrgWebhookParams {
+ return &UninstallOrgWebhookParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewUninstallOrgWebhookParamsWithTimeout creates a new UninstallOrgWebhookParams object
+// with the ability to set a timeout on a request.
+func NewUninstallOrgWebhookParamsWithTimeout(timeout time.Duration) *UninstallOrgWebhookParams {
+ return &UninstallOrgWebhookParams{
+ timeout: timeout,
+ }
+}
+
+// NewUninstallOrgWebhookParamsWithContext creates a new UninstallOrgWebhookParams object
+// with the ability to set a context for a request.
+func NewUninstallOrgWebhookParamsWithContext(ctx context.Context) *UninstallOrgWebhookParams {
+ return &UninstallOrgWebhookParams{
+ Context: ctx,
+ }
+}
+
+// NewUninstallOrgWebhookParamsWithHTTPClient creates a new UninstallOrgWebhookParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewUninstallOrgWebhookParamsWithHTTPClient(client *http.Client) *UninstallOrgWebhookParams {
+ return &UninstallOrgWebhookParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+UninstallOrgWebhookParams contains all the parameters to send to the API endpoint
+
+ for the uninstall org webhook operation.
+
+ Typically these are written to a http.Request.
+*/
+type UninstallOrgWebhookParams struct {
+
+ /* OrgID.
+
+ Organization ID.
+ */
+ OrgID string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the uninstall org webhook params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *UninstallOrgWebhookParams) WithDefaults() *UninstallOrgWebhookParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the uninstall org webhook params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *UninstallOrgWebhookParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the uninstall org webhook params
+func (o *UninstallOrgWebhookParams) WithTimeout(timeout time.Duration) *UninstallOrgWebhookParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the uninstall org webhook params
+func (o *UninstallOrgWebhookParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the uninstall org webhook params
+func (o *UninstallOrgWebhookParams) WithContext(ctx context.Context) *UninstallOrgWebhookParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the uninstall org webhook params
+func (o *UninstallOrgWebhookParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the uninstall org webhook params
+func (o *UninstallOrgWebhookParams) WithHTTPClient(client *http.Client) *UninstallOrgWebhookParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the uninstall org webhook params
+func (o *UninstallOrgWebhookParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithOrgID adds the orgID to the uninstall org webhook params
+func (o *UninstallOrgWebhookParams) WithOrgID(orgID string) *UninstallOrgWebhookParams {
+ o.SetOrgID(orgID)
+ return o
+}
+
+// SetOrgID adds the orgId to the uninstall org webhook params
+func (o *UninstallOrgWebhookParams) SetOrgID(orgID string) {
+ o.OrgID = orgID
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *UninstallOrgWebhookParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ // path param orgID
+ if err := r.SetPathParam("orgID", o.OrgID); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/organizations/uninstall_org_webhook_responses.go b/client/organizations/uninstall_org_webhook_responses.go
new file mode 100644
index 00000000..6f1bceac
--- /dev/null
+++ b/client/organizations/uninstall_org_webhook_responses.go
@@ -0,0 +1,106 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package organizations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+)
+
+// UninstallOrgWebhookReader is a Reader for the UninstallOrgWebhook structure.
+type UninstallOrgWebhookReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *UninstallOrgWebhookReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ result := NewUninstallOrgWebhookDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+}
+
+// NewUninstallOrgWebhookDefault creates a UninstallOrgWebhookDefault with default headers values
+func NewUninstallOrgWebhookDefault(code int) *UninstallOrgWebhookDefault {
+ return &UninstallOrgWebhookDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+UninstallOrgWebhookDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type UninstallOrgWebhookDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this uninstall org webhook default response has a 2xx status code
+func (o *UninstallOrgWebhookDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this uninstall org webhook default response has a 3xx status code
+func (o *UninstallOrgWebhookDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this uninstall org webhook default response has a 4xx status code
+func (o *UninstallOrgWebhookDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this uninstall org webhook default response has a 5xx status code
+func (o *UninstallOrgWebhookDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this uninstall org webhook default response has a status code equal to that given
+func (o *UninstallOrgWebhookDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the uninstall org webhook default response
+func (o *UninstallOrgWebhookDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *UninstallOrgWebhookDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[DELETE /organizations/{orgID}/webhook][%d] UninstallOrgWebhook default %s", o._statusCode, payload)
+}
+
+func (o *UninstallOrgWebhookDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[DELETE /organizations/{orgID}/webhook][%d] UninstallOrgWebhook default %s", o._statusCode, payload)
+}
+
+func (o *UninstallOrgWebhookDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *UninstallOrgWebhookDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
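
Unlike the install operation, a successful uninstall carries no payload, so the generated client method returns only an error. A minimal sketch (names are illustrative):

```go
package example

import (
	"github.com/cloudbase/garm/client/organizations"
)

// uninstallWebhook sketches the delete call; a successful uninstall has
// no payload, so only an error comes back.
func uninstallWebhook(cli organizations.ClientService, orgID string) error {
	params := organizations.NewUninstallOrgWebhookParams().WithOrgID(orgID)
	return cli.UninstallOrgWebhook(params, nil)
}
```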
diff --git a/client/organizations/update_org_pool_responses.go b/client/organizations/update_org_pool_responses.go
index 761195aa..cad49146 100644
--- a/client/organizations/update_org_pool_responses.go
+++ b/client/organizations/update_org_pool_responses.go
@@ -6,6 +6,7 @@ package organizations
// Editing this file might prove futile when you re-run the swagger generate command
import (
+ "encoding/json"
"fmt"
"io"
@@ -87,11 +88,13 @@ func (o *UpdateOrgPoolOK) Code() int {
}
func (o *UpdateOrgPoolOK) Error() string {
- return fmt.Sprintf("[PUT /organizations/{orgID}/pools/{poolID}][%d] updateOrgPoolOK %+v", 200, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /organizations/{orgID}/pools/{poolID}][%d] updateOrgPoolOK %s", 200, payload)
}
func (o *UpdateOrgPoolOK) String() string {
- return fmt.Sprintf("[PUT /organizations/{orgID}/pools/{poolID}][%d] updateOrgPoolOK %+v", 200, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /organizations/{orgID}/pools/{poolID}][%d] updateOrgPoolOK %s", 200, payload)
}
func (o *UpdateOrgPoolOK) GetPayload() garm_params.Pool {
@@ -157,11 +160,13 @@ func (o *UpdateOrgPoolDefault) Code() int {
}
func (o *UpdateOrgPoolDefault) Error() string {
- return fmt.Sprintf("[PUT /organizations/{orgID}/pools/{poolID}][%d] UpdateOrgPool default %+v", o._statusCode, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /organizations/{orgID}/pools/{poolID}][%d] UpdateOrgPool default %s", o._statusCode, payload)
}
func (o *UpdateOrgPoolDefault) String() string {
- return fmt.Sprintf("[PUT /organizations/{orgID}/pools/{poolID}][%d] UpdateOrgPool default %+v", o._statusCode, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /organizations/{orgID}/pools/{poolID}][%d] UpdateOrgPool default %s", o._statusCode, payload)
}
func (o *UpdateOrgPoolDefault) GetPayload() apiserver_params.APIErrorResponse {
diff --git a/client/organizations/update_org_responses.go b/client/organizations/update_org_responses.go
index 2275a545..d6483c54 100644
--- a/client/organizations/update_org_responses.go
+++ b/client/organizations/update_org_responses.go
@@ -6,6 +6,7 @@ package organizations
// Editing this file might prove futile when you re-run the swagger generate command
import (
+ "encoding/json"
"fmt"
"io"
@@ -87,11 +88,13 @@ func (o *UpdateOrgOK) Code() int {
}
func (o *UpdateOrgOK) Error() string {
- return fmt.Sprintf("[PUT /organizations/{orgID}][%d] updateOrgOK %+v", 200, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /organizations/{orgID}][%d] updateOrgOK %s", 200, payload)
}
func (o *UpdateOrgOK) String() string {
- return fmt.Sprintf("[PUT /organizations/{orgID}][%d] updateOrgOK %+v", 200, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /organizations/{orgID}][%d] updateOrgOK %s", 200, payload)
}
func (o *UpdateOrgOK) GetPayload() garm_params.Organization {
@@ -157,11 +160,13 @@ func (o *UpdateOrgDefault) Code() int {
}
func (o *UpdateOrgDefault) Error() string {
- return fmt.Sprintf("[PUT /organizations/{orgID}][%d] UpdateOrg default %+v", o._statusCode, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /organizations/{orgID}][%d] UpdateOrg default %s", o._statusCode, payload)
}
func (o *UpdateOrgDefault) String() string {
- return fmt.Sprintf("[PUT /organizations/{orgID}][%d] UpdateOrg default %+v", o._statusCode, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /organizations/{orgID}][%d] UpdateOrg default %s", o._statusCode, payload)
}
func (o *UpdateOrgDefault) GetPayload() apiserver_params.APIErrorResponse {
diff --git a/client/pools/delete_pool_responses.go b/client/pools/delete_pool_responses.go
index a556eaef..18a3aee3 100644
--- a/client/pools/delete_pool_responses.go
+++ b/client/pools/delete_pool_responses.go
@@ -6,6 +6,7 @@ package pools
// Editing this file might prove futile when you re-run the swagger generate command
import (
+ "encoding/json"
"fmt"
"io"
@@ -81,11 +82,13 @@ func (o *DeletePoolDefault) Code() int {
}
func (o *DeletePoolDefault) Error() string {
- return fmt.Sprintf("[DELETE /pools/{poolID}][%d] DeletePool default %+v", o._statusCode, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[DELETE /pools/{poolID}][%d] DeletePool default %s", o._statusCode, payload)
}
func (o *DeletePoolDefault) String() string {
- return fmt.Sprintf("[DELETE /pools/{poolID}][%d] DeletePool default %+v", o._statusCode, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[DELETE /pools/{poolID}][%d] DeletePool default %s", o._statusCode, payload)
}
func (o *DeletePoolDefault) GetPayload() apiserver_params.APIErrorResponse {
diff --git a/client/pools/get_pool_responses.go b/client/pools/get_pool_responses.go
index 22ee22d5..8638dd37 100644
--- a/client/pools/get_pool_responses.go
+++ b/client/pools/get_pool_responses.go
@@ -6,6 +6,7 @@ package pools
// Editing this file might prove futile when you re-run the swagger generate command
import (
+ "encoding/json"
"fmt"
"io"
@@ -87,11 +88,13 @@ func (o *GetPoolOK) Code() int {
}
func (o *GetPoolOK) Error() string {
- return fmt.Sprintf("[GET /pools/{poolID}][%d] getPoolOK %+v", 200, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /pools/{poolID}][%d] getPoolOK %s", 200, payload)
}
func (o *GetPoolOK) String() string {
- return fmt.Sprintf("[GET /pools/{poolID}][%d] getPoolOK %+v", 200, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /pools/{poolID}][%d] getPoolOK %s", 200, payload)
}
func (o *GetPoolOK) GetPayload() garm_params.Pool {
@@ -157,11 +160,13 @@ func (o *GetPoolDefault) Code() int {
}
func (o *GetPoolDefault) Error() string {
- return fmt.Sprintf("[GET /pools/{poolID}][%d] GetPool default %+v", o._statusCode, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /pools/{poolID}][%d] GetPool default %s", o._statusCode, payload)
}
func (o *GetPoolDefault) String() string {
- return fmt.Sprintf("[GET /pools/{poolID}][%d] GetPool default %+v", o._statusCode, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /pools/{poolID}][%d] GetPool default %s", o._statusCode, payload)
}
func (o *GetPoolDefault) GetPayload() apiserver_params.APIErrorResponse {
diff --git a/client/pools/list_pools_responses.go b/client/pools/list_pools_responses.go
index fa2701d6..9cc36c8f 100644
--- a/client/pools/list_pools_responses.go
+++ b/client/pools/list_pools_responses.go
@@ -6,6 +6,7 @@ package pools
// Editing this file might prove futile when you re-run the swagger generate command
import (
+ "encoding/json"
"fmt"
"io"
@@ -87,11 +88,13 @@ func (o *ListPoolsOK) Code() int {
}
func (o *ListPoolsOK) Error() string {
- return fmt.Sprintf("[GET /pools][%d] listPoolsOK %+v", 200, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /pools][%d] listPoolsOK %s", 200, payload)
}
func (o *ListPoolsOK) String() string {
- return fmt.Sprintf("[GET /pools][%d] listPoolsOK %+v", 200, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /pools][%d] listPoolsOK %s", 200, payload)
}
func (o *ListPoolsOK) GetPayload() garm_params.Pools {
@@ -157,11 +160,13 @@ func (o *ListPoolsDefault) Code() int {
}
func (o *ListPoolsDefault) Error() string {
- return fmt.Sprintf("[GET /pools][%d] ListPools default %+v", o._statusCode, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /pools][%d] ListPools default %s", o._statusCode, payload)
}
func (o *ListPoolsDefault) String() string {
- return fmt.Sprintf("[GET /pools][%d] ListPools default %+v", o._statusCode, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /pools][%d] ListPools default %s", o._statusCode, payload)
}
func (o *ListPoolsDefault) GetPayload() apiserver_params.APIErrorResponse {
diff --git a/client/pools/pools_client.go b/client/pools/pools_client.go
index f8b18359..604a2e46 100644
--- a/client/pools/pools_client.go
+++ b/client/pools/pools_client.go
@@ -7,6 +7,7 @@ package pools
import (
"github.com/go-openapi/runtime"
+ httptransport "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
)
@@ -15,6 +16,31 @@ func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientServi
return &Client{transport: transport, formats: formats}
}
+// NewClientWithBasicAuth creates a new pools API client with basic auth credentials.
+// It takes the following parameters:
+// - host: http host (github.com).
+// - basePath: any base path for the API client ("/v1", "/v3").
+// - scheme: http scheme ("http", "https").
+// - user: user for basic authentication header.
+// - password: password for basic authentication header.
+func NewClientWithBasicAuth(host, basePath, scheme, user, password string) ClientService {
+ transport := httptransport.New(host, basePath, []string{scheme})
+ transport.DefaultAuthentication = httptransport.BasicAuth(user, password)
+ return &Client{transport: transport, formats: strfmt.Default}
+}
+
+// NewClientWithBearerToken creates a new pools API client with a bearer token for authentication.
+// It takes the following parameters:
+// - host: http host (github.com).
+// - basePath: any base path for the API client ("/v1", "/v3").
+// - scheme: http scheme ("http", "https").
+// - bearerToken: bearer token for Bearer authentication header.
+func NewClientWithBearerToken(host, basePath, scheme, bearerToken string) ClientService {
+ transport := httptransport.New(host, basePath, []string{scheme})
+ transport.DefaultAuthentication = httptransport.BearerToken(bearerToken)
+ return &Client{transport: transport, formats: strfmt.Default}
+}
+
/*
Client for pools API
*/
@@ -23,7 +49,7 @@ type Client struct {
formats strfmt.Registry
}
-// ClientOption is the option for Client methods
+// ClientOption may be used to customize the behavior of Client methods.
type ClientOption func(*runtime.ClientOperation)
// ClientService is the interface for Client methods
diff --git a/client/pools/update_pool_responses.go b/client/pools/update_pool_responses.go
index 62e83a95..baf5a2bf 100644
--- a/client/pools/update_pool_responses.go
+++ b/client/pools/update_pool_responses.go
@@ -6,6 +6,7 @@ package pools
// Editing this file might prove futile when you re-run the swagger generate command
import (
+ "encoding/json"
"fmt"
"io"
@@ -87,11 +88,13 @@ func (o *UpdatePoolOK) Code() int {
}
func (o *UpdatePoolOK) Error() string {
- return fmt.Sprintf("[PUT /pools/{poolID}][%d] updatePoolOK %+v", 200, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /pools/{poolID}][%d] updatePoolOK %s", 200, payload)
}
func (o *UpdatePoolOK) String() string {
- return fmt.Sprintf("[PUT /pools/{poolID}][%d] updatePoolOK %+v", 200, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /pools/{poolID}][%d] updatePoolOK %s", 200, payload)
}
func (o *UpdatePoolOK) GetPayload() garm_params.Pool {
@@ -157,11 +160,13 @@ func (o *UpdatePoolDefault) Code() int {
}
func (o *UpdatePoolDefault) Error() string {
- return fmt.Sprintf("[PUT /pools/{poolID}][%d] UpdatePool default %+v", o._statusCode, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /pools/{poolID}][%d] UpdatePool default %s", o._statusCode, payload)
}
func (o *UpdatePoolDefault) String() string {
- return fmt.Sprintf("[PUT /pools/{poolID}][%d] UpdatePool default %+v", o._statusCode, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /pools/{poolID}][%d] UpdatePool default %s", o._statusCode, payload)
}
func (o *UpdatePoolDefault) GetPayload() apiserver_params.APIErrorResponse {
diff --git a/client/providers/list_providers_responses.go b/client/providers/list_providers_responses.go
index e80551d1..14a042c9 100644
--- a/client/providers/list_providers_responses.go
+++ b/client/providers/list_providers_responses.go
@@ -6,6 +6,7 @@ package providers
// Editing this file might prove futile when you re-run the swagger generate command
import (
+ "encoding/json"
"fmt"
"io"
@@ -86,11 +87,13 @@ func (o *ListProvidersOK) Code() int {
}
func (o *ListProvidersOK) Error() string {
- return fmt.Sprintf("[GET /providers][%d] listProvidersOK %+v", 200, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /providers][%d] listProvidersOK %s", 200, payload)
}
func (o *ListProvidersOK) String() string {
- return fmt.Sprintf("[GET /providers][%d] listProvidersOK %+v", 200, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /providers][%d] listProvidersOK %s", 200, payload)
}
func (o *ListProvidersOK) GetPayload() garm_params.Providers {
@@ -152,11 +155,13 @@ func (o *ListProvidersBadRequest) Code() int {
}
func (o *ListProvidersBadRequest) Error() string {
- return fmt.Sprintf("[GET /providers][%d] listProvidersBadRequest %+v", 400, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /providers][%d] listProvidersBadRequest %s", 400, payload)
}
func (o *ListProvidersBadRequest) String() string {
- return fmt.Sprintf("[GET /providers][%d] listProvidersBadRequest %+v", 400, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /providers][%d] listProvidersBadRequest %s", 400, payload)
}
func (o *ListProvidersBadRequest) GetPayload() apiserver_params.APIErrorResponse {
diff --git a/client/providers/providers_client.go b/client/providers/providers_client.go
index 3ddfead8..ab2600e8 100644
--- a/client/providers/providers_client.go
+++ b/client/providers/providers_client.go
@@ -9,6 +9,7 @@ import (
"fmt"
"github.com/go-openapi/runtime"
+ httptransport "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
)
@@ -17,6 +18,31 @@ func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientServi
return &Client{transport: transport, formats: formats}
}
+// NewClientWithBasicAuth creates a new providers API client with basic auth credentials.
+// It takes the following parameters:
+// - host: http host (github.com).
+// - basePath: any base path for the API client ("/v1", "/v3").
+// - scheme: http scheme ("http", "https").
+// - user: user for basic authentication header.
+// - password: password for basic authentication header.
+func NewClientWithBasicAuth(host, basePath, scheme, user, password string) ClientService {
+ transport := httptransport.New(host, basePath, []string{scheme})
+ transport.DefaultAuthentication = httptransport.BasicAuth(user, password)
+ return &Client{transport: transport, formats: strfmt.Default}
+}
+
+// NewClientWithBearerToken creates a new providers API client with a bearer token for authentication.
+// It takes the following parameters:
+// - host: http host (github.com).
+// - basePath: any base path for the API client ("/v1", "/v3").
+// - scheme: http scheme ("http", "https").
+// - bearerToken: bearer token for Bearer authentication header.
+func NewClientWithBearerToken(host, basePath, scheme, bearerToken string) ClientService {
+ transport := httptransport.New(host, basePath, []string{scheme})
+ transport.DefaultAuthentication = httptransport.BearerToken(bearerToken)
+ return &Client{transport: transport, formats: strfmt.Default}
+}
+
/*
Client for providers API
*/
@@ -25,7 +51,7 @@ type Client struct {
formats strfmt.Registry
}
-// ClientOption is the option for Client methods
+// ClientOption may be used to customize the behavior of Client methods.
type ClientOption func(*runtime.ClientOperation)
// ClientService is the interface for Client methods
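
The two constructors added above remove the need to hand-wire an httptransport when all a caller wants is basic or bearer authentication. A minimal usage sketch; the host, base path, credentials, and token below are placeholders, not values taken from this repository:

```go
package main

import (
	"fmt"

	"github.com/cloudbase/garm/client/providers"
)

func main() {
	// Bearer-token variant; host and token are assumptions.
	bearerClient := providers.NewClientWithBearerToken(
		"garm.example.com", "/api/v1", "https", "my-jwt-token")

	// Basic-auth variant; user and password are assumptions.
	basicClient := providers.NewClientWithBasicAuth(
		"garm.example.com", "/api/v1", "https", "admin", "s3cr3t")

	fmt.Println(bearerClient != nil, basicClient != nil)
}
```
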
diff --git a/client/repositories/create_repo_pool_responses.go b/client/repositories/create_repo_pool_responses.go
index e366c67a..2008f1e5 100644
--- a/client/repositories/create_repo_pool_responses.go
+++ b/client/repositories/create_repo_pool_responses.go
@@ -6,6 +6,7 @@ package repositories
// Editing this file might prove futile when you re-run the swagger generate command
import (
+ "encoding/json"
"fmt"
"io"
@@ -87,11 +88,13 @@ func (o *CreateRepoPoolOK) Code() int {
}
func (o *CreateRepoPoolOK) Error() string {
- return fmt.Sprintf("[POST /repositories/{repoID}/pools][%d] createRepoPoolOK %+v", 200, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /repositories/{repoID}/pools][%d] createRepoPoolOK %s", 200, payload)
}
func (o *CreateRepoPoolOK) String() string {
- return fmt.Sprintf("[POST /repositories/{repoID}/pools][%d] createRepoPoolOK %+v", 200, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /repositories/{repoID}/pools][%d] createRepoPoolOK %s", 200, payload)
}
func (o *CreateRepoPoolOK) GetPayload() garm_params.Pool {
@@ -157,11 +160,13 @@ func (o *CreateRepoPoolDefault) Code() int {
}
func (o *CreateRepoPoolDefault) Error() string {
- return fmt.Sprintf("[POST /repositories/{repoID}/pools][%d] CreateRepoPool default %+v", o._statusCode, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /repositories/{repoID}/pools][%d] CreateRepoPool default %s", o._statusCode, payload)
}
func (o *CreateRepoPoolDefault) String() string {
- return fmt.Sprintf("[POST /repositories/{repoID}/pools][%d] CreateRepoPool default %+v", o._statusCode, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /repositories/{repoID}/pools][%d] CreateRepoPool default %s", o._statusCode, payload)
}
func (o *CreateRepoPoolDefault) GetPayload() apiserver_params.APIErrorResponse {
diff --git a/client/repositories/create_repo_responses.go b/client/repositories/create_repo_responses.go
index 347e9e0a..c5556097 100644
--- a/client/repositories/create_repo_responses.go
+++ b/client/repositories/create_repo_responses.go
@@ -6,6 +6,7 @@ package repositories
// Editing this file might prove futile when you re-run the swagger generate command
import (
+ "encoding/json"
"fmt"
"io"
@@ -87,11 +88,13 @@ func (o *CreateRepoOK) Code() int {
}
func (o *CreateRepoOK) Error() string {
- return fmt.Sprintf("[POST /repositories][%d] createRepoOK %+v", 200, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /repositories][%d] createRepoOK %s", 200, payload)
}
func (o *CreateRepoOK) String() string {
- return fmt.Sprintf("[POST /repositories][%d] createRepoOK %+v", 200, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /repositories][%d] createRepoOK %s", 200, payload)
}
func (o *CreateRepoOK) GetPayload() garm_params.Repository {
@@ -157,11 +160,13 @@ func (o *CreateRepoDefault) Code() int {
}
func (o *CreateRepoDefault) Error() string {
- return fmt.Sprintf("[POST /repositories][%d] CreateRepo default %+v", o._statusCode, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /repositories][%d] CreateRepo default %s", o._statusCode, payload)
}
func (o *CreateRepoDefault) String() string {
- return fmt.Sprintf("[POST /repositories][%d] CreateRepo default %+v", o._statusCode, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /repositories][%d] CreateRepo default %s", o._statusCode, payload)
}
func (o *CreateRepoDefault) GetPayload() apiserver_params.APIErrorResponse {
diff --git a/client/repositories/create_repo_scale_set_parameters.go b/client/repositories/create_repo_scale_set_parameters.go
new file mode 100644
index 00000000..9b8784dc
--- /dev/null
+++ b/client/repositories/create_repo_scale_set_parameters.go
@@ -0,0 +1,173 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package repositories
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// NewCreateRepoScaleSetParams creates a new CreateRepoScaleSetParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewCreateRepoScaleSetParams() *CreateRepoScaleSetParams {
+ return &CreateRepoScaleSetParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewCreateRepoScaleSetParamsWithTimeout creates a new CreateRepoScaleSetParams object
+// with the ability to set a timeout on a request.
+func NewCreateRepoScaleSetParamsWithTimeout(timeout time.Duration) *CreateRepoScaleSetParams {
+ return &CreateRepoScaleSetParams{
+ timeout: timeout,
+ }
+}
+
+// NewCreateRepoScaleSetParamsWithContext creates a new CreateRepoScaleSetParams object
+// with the ability to set a context for a request.
+func NewCreateRepoScaleSetParamsWithContext(ctx context.Context) *CreateRepoScaleSetParams {
+ return &CreateRepoScaleSetParams{
+ Context: ctx,
+ }
+}
+
+// NewCreateRepoScaleSetParamsWithHTTPClient creates a new CreateRepoScaleSetParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewCreateRepoScaleSetParamsWithHTTPClient(client *http.Client) *CreateRepoScaleSetParams {
+ return &CreateRepoScaleSetParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+CreateRepoScaleSetParams contains all the parameters to send to the API endpoint
+
+ for the create repo scale set operation.
+
+ Typically these are written to a http.Request.
+*/
+type CreateRepoScaleSetParams struct {
+
+ /* Body.
+
+ Parameters used when creating the repository scale set.
+ */
+ Body garm_params.CreateScaleSetParams
+
+ /* RepoID.
+
+ Repository ID.
+ */
+ RepoID string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the create repo scale set params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *CreateRepoScaleSetParams) WithDefaults() *CreateRepoScaleSetParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the create repo scale set params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *CreateRepoScaleSetParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the create repo scale set params
+func (o *CreateRepoScaleSetParams) WithTimeout(timeout time.Duration) *CreateRepoScaleSetParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the create repo scale set params
+func (o *CreateRepoScaleSetParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the create repo scale set params
+func (o *CreateRepoScaleSetParams) WithContext(ctx context.Context) *CreateRepoScaleSetParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the create repo scale set params
+func (o *CreateRepoScaleSetParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the create repo scale set params
+func (o *CreateRepoScaleSetParams) WithHTTPClient(client *http.Client) *CreateRepoScaleSetParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the create repo scale set params
+func (o *CreateRepoScaleSetParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithBody adds the body to the create repo scale set params
+func (o *CreateRepoScaleSetParams) WithBody(body garm_params.CreateScaleSetParams) *CreateRepoScaleSetParams {
+ o.SetBody(body)
+ return o
+}
+
+// SetBody adds the body to the create repo scale set params
+func (o *CreateRepoScaleSetParams) SetBody(body garm_params.CreateScaleSetParams) {
+ o.Body = body
+}
+
+// WithRepoID adds the repoID to the create repo scale set params
+func (o *CreateRepoScaleSetParams) WithRepoID(repoID string) *CreateRepoScaleSetParams {
+ o.SetRepoID(repoID)
+ return o
+}
+
+// SetRepoID adds the repoId to the create repo scale set params
+func (o *CreateRepoScaleSetParams) SetRepoID(repoID string) {
+ o.RepoID = repoID
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *CreateRepoScaleSetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+ if err := r.SetBodyParam(o.Body); err != nil {
+ return err
+ }
+
+ // path param repoID
+ if err := r.SetPathParam("repoID", o.RepoID); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
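
Like the rest of the generated parameter types, `CreateRepoScaleSetParams` is meant to be built fluently. A sketch, assuming a placeholder repository ID and a zero-value body (the `CreateScaleSetParams` field names live in github.com/cloudbase/garm/params and are not shown in this diff):

```go
package main

import (
	"time"

	"github.com/cloudbase/garm/client/repositories"
	garm_params "github.com/cloudbase/garm/params"
)

func main() {
	// Hypothetical repository ID; the zero-value body leaves all scale set
	// fields to server-side defaults.
	params := repositories.NewCreateRepoScaleSetParams().
		WithTimeout(30 * time.Second).
		WithRepoID("repo-uuid-placeholder").
		WithBody(garm_params.CreateScaleSetParams{})
	_ = params
}
```
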
diff --git a/client/repositories/create_repo_scale_set_responses.go b/client/repositories/create_repo_scale_set_responses.go
new file mode 100644
index 00000000..4d02d5c1
--- /dev/null
+++ b/client/repositories/create_repo_scale_set_responses.go
@@ -0,0 +1,184 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package repositories
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// CreateRepoScaleSetReader is a Reader for the CreateRepoScaleSet structure.
+type CreateRepoScaleSetReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *CreateRepoScaleSetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewCreateRepoScaleSetOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ result := NewCreateRepoScaleSetDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+ }
+}
+
+// NewCreateRepoScaleSetOK creates a CreateRepoScaleSetOK with default headers values
+func NewCreateRepoScaleSetOK() *CreateRepoScaleSetOK {
+ return &CreateRepoScaleSetOK{}
+}
+
+/*
+CreateRepoScaleSetOK describes a response with status code 200, with default header values.
+
+ScaleSet
+*/
+type CreateRepoScaleSetOK struct {
+ Payload garm_params.ScaleSet
+}
+
+// IsSuccess returns true when this create repo scale set o k response has a 2xx status code
+func (o *CreateRepoScaleSetOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this create repo scale set o k response has a 3xx status code
+func (o *CreateRepoScaleSetOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this create repo scale set o k response has a 4xx status code
+func (o *CreateRepoScaleSetOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this create repo scale set o k response has a 5xx status code
+func (o *CreateRepoScaleSetOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this create repo scale set o k response has a status code equal to that given
+func (o *CreateRepoScaleSetOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the create repo scale set o k response
+func (o *CreateRepoScaleSetOK) Code() int {
+ return 200
+}
+
+func (o *CreateRepoScaleSetOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /repositories/{repoID}/scalesets][%d] createRepoScaleSetOK %s", 200, payload)
+}
+
+func (o *CreateRepoScaleSetOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /repositories/{repoID}/scalesets][%d] createRepoScaleSetOK %s", 200, payload)
+}
+
+func (o *CreateRepoScaleSetOK) GetPayload() garm_params.ScaleSet {
+ return o.Payload
+}
+
+func (o *CreateRepoScaleSetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewCreateRepoScaleSetDefault creates a CreateRepoScaleSetDefault with default headers values
+func NewCreateRepoScaleSetDefault(code int) *CreateRepoScaleSetDefault {
+ return &CreateRepoScaleSetDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+CreateRepoScaleSetDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type CreateRepoScaleSetDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this create repo scale set default response has a 2xx status code
+func (o *CreateRepoScaleSetDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this create repo scale set default response has a 3xx status code
+func (o *CreateRepoScaleSetDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this create repo scale set default response has a 4xx status code
+func (o *CreateRepoScaleSetDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this create repo scale set default response has a 5xx status code
+func (o *CreateRepoScaleSetDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this create repo scale set default response has a status code equal to that given
+func (o *CreateRepoScaleSetDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the create repo scale set default response
+func (o *CreateRepoScaleSetDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *CreateRepoScaleSetDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /repositories/{repoID}/scalesets][%d] CreateRepoScaleSet default %s", o._statusCode, payload)
+}
+
+func (o *CreateRepoScaleSetDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /repositories/{repoID}/scalesets][%d] CreateRepoScaleSet default %s", o._statusCode, payload)
+}
+
+func (o *CreateRepoScaleSetDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *CreateRepoScaleSetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/repositories/delete_repo_parameters.go b/client/repositories/delete_repo_parameters.go
index 08ea11d9..8bfd54eb 100644
--- a/client/repositories/delete_repo_parameters.go
+++ b/client/repositories/delete_repo_parameters.go
@@ -14,6 +14,7 @@ import (
"github.com/go-openapi/runtime"
cr "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
)
// NewDeleteRepoParams creates a new DeleteRepoParams object,
@@ -61,6 +62,12 @@ DeleteRepoParams contains all the parameters to send to the API endpoint
*/
type DeleteRepoParams struct {
+ /* KeepWebhook.
+
+ If true and a webhook is installed for this repo, it will not be removed.
+ */
+ KeepWebhook *bool
+
/* RepoID.
ID of the repository to delete.
@@ -120,6 +127,17 @@ func (o *DeleteRepoParams) SetHTTPClient(client *http.Client) {
o.HTTPClient = client
}
+// WithKeepWebhook adds the keepWebhook to the delete repo params
+func (o *DeleteRepoParams) WithKeepWebhook(keepWebhook *bool) *DeleteRepoParams {
+ o.SetKeepWebhook(keepWebhook)
+ return o
+}
+
+// SetKeepWebhook adds the keepWebhook to the delete repo params
+func (o *DeleteRepoParams) SetKeepWebhook(keepWebhook *bool) {
+ o.KeepWebhook = keepWebhook
+}
+
// WithRepoID adds the repoID to the delete repo params
func (o *DeleteRepoParams) WithRepoID(repoID string) *DeleteRepoParams {
o.SetRepoID(repoID)
@@ -139,6 +157,23 @@ func (o *DeleteRepoParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Re
}
var res []error
+ if o.KeepWebhook != nil {
+
+ // query param keepWebhook
+ var qrKeepWebhook bool
+
+ if o.KeepWebhook != nil {
+ qrKeepWebhook = *o.KeepWebhook
+ }
+ qKeepWebhook := swag.FormatBool(qrKeepWebhook)
+ if qKeepWebhook != "" {
+
+ if err := r.SetQueryParam("keepWebhook", qKeepWebhook); err != nil {
+ return err
+ }
+ }
+ }
+
// path param repoID
if err := r.SetPathParam("repoID", o.RepoID); err != nil {
return err
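
The new `keepWebhook` query parameter is a `*bool`, so callers typically set it through `swag.Bool`. A sketch with a placeholder repository ID:

```go
package main

import (
	"github.com/go-openapi/swag"

	"github.com/cloudbase/garm/client/repositories"
)

func main() {
	// Delete the repository but leave its GitHub webhook in place.
	// The repo ID is a placeholder.
	params := repositories.NewDeleteRepoParams().
		WithRepoID("repo-uuid-placeholder").
		WithKeepWebhook(swag.Bool(true))
	_ = params
}
```
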
diff --git a/client/repositories/delete_repo_pool_responses.go b/client/repositories/delete_repo_pool_responses.go
index 41f0e9fe..dd34884f 100644
--- a/client/repositories/delete_repo_pool_responses.go
+++ b/client/repositories/delete_repo_pool_responses.go
@@ -6,6 +6,7 @@ package repositories
// Editing this file might prove futile when you re-run the swagger generate command
import (
+ "encoding/json"
"fmt"
"io"
@@ -81,11 +82,13 @@ func (o *DeleteRepoPoolDefault) Code() int {
}
func (o *DeleteRepoPoolDefault) Error() string {
- return fmt.Sprintf("[DELETE /repositories/{repoID}/pools/{poolID}][%d] DeleteRepoPool default %+v", o._statusCode, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[DELETE /repositories/{repoID}/pools/{poolID}][%d] DeleteRepoPool default %s", o._statusCode, payload)
}
func (o *DeleteRepoPoolDefault) String() string {
- return fmt.Sprintf("[DELETE /repositories/{repoID}/pools/{poolID}][%d] DeleteRepoPool default %+v", o._statusCode, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[DELETE /repositories/{repoID}/pools/{poolID}][%d] DeleteRepoPool default %s", o._statusCode, payload)
}
func (o *DeleteRepoPoolDefault) GetPayload() apiserver_params.APIErrorResponse {
diff --git a/client/repositories/delete_repo_responses.go b/client/repositories/delete_repo_responses.go
index a49de666..195d46e7 100644
--- a/client/repositories/delete_repo_responses.go
+++ b/client/repositories/delete_repo_responses.go
@@ -6,6 +6,7 @@ package repositories
// Editing this file might prove futile when you re-run the swagger generate command
import (
+ "encoding/json"
"fmt"
"io"
@@ -81,11 +82,13 @@ func (o *DeleteRepoDefault) Code() int {
}
func (o *DeleteRepoDefault) Error() string {
- return fmt.Sprintf("[DELETE /repositories/{repoID}][%d] DeleteRepo default %+v", o._statusCode, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[DELETE /repositories/{repoID}][%d] DeleteRepo default %s", o._statusCode, payload)
}
func (o *DeleteRepoDefault) String() string {
- return fmt.Sprintf("[DELETE /repositories/{repoID}][%d] DeleteRepo default %+v", o._statusCode, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[DELETE /repositories/{repoID}][%d] DeleteRepo default %s", o._statusCode, payload)
}
func (o *DeleteRepoDefault) GetPayload() apiserver_params.APIErrorResponse {
diff --git a/client/repositories/get_repo_pool_responses.go b/client/repositories/get_repo_pool_responses.go
index 0c8fce74..eb6e73d3 100644
--- a/client/repositories/get_repo_pool_responses.go
+++ b/client/repositories/get_repo_pool_responses.go
@@ -6,6 +6,7 @@ package repositories
// Editing this file might prove futile when you re-run the swagger generate command
import (
+ "encoding/json"
"fmt"
"io"
@@ -87,11 +88,13 @@ func (o *GetRepoPoolOK) Code() int {
}
func (o *GetRepoPoolOK) Error() string {
- return fmt.Sprintf("[GET /repositories/{repoID}/pools/{poolID}][%d] getRepoPoolOK %+v", 200, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /repositories/{repoID}/pools/{poolID}][%d] getRepoPoolOK %s", 200, payload)
}
func (o *GetRepoPoolOK) String() string {
- return fmt.Sprintf("[GET /repositories/{repoID}/pools/{poolID}][%d] getRepoPoolOK %+v", 200, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /repositories/{repoID}/pools/{poolID}][%d] getRepoPoolOK %s", 200, payload)
}
func (o *GetRepoPoolOK) GetPayload() garm_params.Pool {
@@ -157,11 +160,13 @@ func (o *GetRepoPoolDefault) Code() int {
}
func (o *GetRepoPoolDefault) Error() string {
- return fmt.Sprintf("[GET /repositories/{repoID}/pools/{poolID}][%d] GetRepoPool default %+v", o._statusCode, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /repositories/{repoID}/pools/{poolID}][%d] GetRepoPool default %s", o._statusCode, payload)
}
func (o *GetRepoPoolDefault) String() string {
- return fmt.Sprintf("[GET /repositories/{repoID}/pools/{poolID}][%d] GetRepoPool default %+v", o._statusCode, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /repositories/{repoID}/pools/{poolID}][%d] GetRepoPool default %s", o._statusCode, payload)
}
func (o *GetRepoPoolDefault) GetPayload() apiserver_params.APIErrorResponse {
diff --git a/client/repositories/get_repo_responses.go b/client/repositories/get_repo_responses.go
index 4b04f1df..70f25fc8 100644
--- a/client/repositories/get_repo_responses.go
+++ b/client/repositories/get_repo_responses.go
@@ -6,6 +6,7 @@ package repositories
// Editing this file might prove futile when you re-run the swagger generate command
import (
+ "encoding/json"
"fmt"
"io"
@@ -87,11 +88,13 @@ func (o *GetRepoOK) Code() int {
}
func (o *GetRepoOK) Error() string {
- return fmt.Sprintf("[GET /repositories/{repoID}][%d] getRepoOK %+v", 200, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /repositories/{repoID}][%d] getRepoOK %s", 200, payload)
}
func (o *GetRepoOK) String() string {
- return fmt.Sprintf("[GET /repositories/{repoID}][%d] getRepoOK %+v", 200, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /repositories/{repoID}][%d] getRepoOK %s", 200, payload)
}
func (o *GetRepoOK) GetPayload() garm_params.Repository {
@@ -157,11 +160,13 @@ func (o *GetRepoDefault) Code() int {
}
func (o *GetRepoDefault) Error() string {
- return fmt.Sprintf("[GET /repositories/{repoID}][%d] GetRepo default %+v", o._statusCode, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /repositories/{repoID}][%d] GetRepo default %s", o._statusCode, payload)
}
func (o *GetRepoDefault) String() string {
- return fmt.Sprintf("[GET /repositories/{repoID}][%d] GetRepo default %+v", o._statusCode, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /repositories/{repoID}][%d] GetRepo default %s", o._statusCode, payload)
}
func (o *GetRepoDefault) GetPayload() apiserver_params.APIErrorResponse {
diff --git a/client/repositories/get_repo_webhook_info_parameters.go b/client/repositories/get_repo_webhook_info_parameters.go
new file mode 100644
index 00000000..b4c9e515
--- /dev/null
+++ b/client/repositories/get_repo_webhook_info_parameters.go
@@ -0,0 +1,151 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package repositories
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewGetRepoWebhookInfoParams creates a new GetRepoWebhookInfoParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewGetRepoWebhookInfoParams() *GetRepoWebhookInfoParams {
+ return &GetRepoWebhookInfoParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewGetRepoWebhookInfoParamsWithTimeout creates a new GetRepoWebhookInfoParams object
+// with the ability to set a timeout on a request.
+func NewGetRepoWebhookInfoParamsWithTimeout(timeout time.Duration) *GetRepoWebhookInfoParams {
+ return &GetRepoWebhookInfoParams{
+ timeout: timeout,
+ }
+}
+
+// NewGetRepoWebhookInfoParamsWithContext creates a new GetRepoWebhookInfoParams object
+// with the ability to set a context for a request.
+func NewGetRepoWebhookInfoParamsWithContext(ctx context.Context) *GetRepoWebhookInfoParams {
+ return &GetRepoWebhookInfoParams{
+ Context: ctx,
+ }
+}
+
+// NewGetRepoWebhookInfoParamsWithHTTPClient creates a new GetRepoWebhookInfoParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewGetRepoWebhookInfoParamsWithHTTPClient(client *http.Client) *GetRepoWebhookInfoParams {
+ return &GetRepoWebhookInfoParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+GetRepoWebhookInfoParams contains all the parameters to send to the API endpoint
+
+ for the get repo webhook info operation.
+
+ Typically these are written to a http.Request.
+*/
+type GetRepoWebhookInfoParams struct {
+
+ /* RepoID.
+
+ Repository ID.
+ */
+ RepoID string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the get repo webhook info params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetRepoWebhookInfoParams) WithDefaults() *GetRepoWebhookInfoParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the get repo webhook info params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetRepoWebhookInfoParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the get repo webhook info params
+func (o *GetRepoWebhookInfoParams) WithTimeout(timeout time.Duration) *GetRepoWebhookInfoParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the get repo webhook info params
+func (o *GetRepoWebhookInfoParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the get repo webhook info params
+func (o *GetRepoWebhookInfoParams) WithContext(ctx context.Context) *GetRepoWebhookInfoParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the get repo webhook info params
+func (o *GetRepoWebhookInfoParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the get repo webhook info params
+func (o *GetRepoWebhookInfoParams) WithHTTPClient(client *http.Client) *GetRepoWebhookInfoParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the get repo webhook info params
+func (o *GetRepoWebhookInfoParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithRepoID adds the repoID to the get repo webhook info params
+func (o *GetRepoWebhookInfoParams) WithRepoID(repoID string) *GetRepoWebhookInfoParams {
+ o.SetRepoID(repoID)
+ return o
+}
+
+// SetRepoID adds the repoId to the get repo webhook info params
+func (o *GetRepoWebhookInfoParams) SetRepoID(repoID string) {
+ o.RepoID = repoID
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *GetRepoWebhookInfoParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ // path param repoID
+ if err := r.SetPathParam("repoID", o.RepoID); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/repositories/get_repo_webhook_info_responses.go b/client/repositories/get_repo_webhook_info_responses.go
new file mode 100644
index 00000000..c72d3815
--- /dev/null
+++ b/client/repositories/get_repo_webhook_info_responses.go
@@ -0,0 +1,184 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package repositories
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// GetRepoWebhookInfoReader is a Reader for the GetRepoWebhookInfo structure.
+type GetRepoWebhookInfoReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *GetRepoWebhookInfoReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewGetRepoWebhookInfoOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ result := NewGetRepoWebhookInfoDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+ }
+}
+
+// NewGetRepoWebhookInfoOK creates a GetRepoWebhookInfoOK with default headers values
+func NewGetRepoWebhookInfoOK() *GetRepoWebhookInfoOK {
+ return &GetRepoWebhookInfoOK{}
+}
+
+/*
+GetRepoWebhookInfoOK describes a response with status code 200, with default header values.
+
+HookInfo
+*/
+type GetRepoWebhookInfoOK struct {
+ Payload garm_params.HookInfo
+}
+
+// IsSuccess returns true when this get repo webhook info o k response has a 2xx status code
+func (o *GetRepoWebhookInfoOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this get repo webhook info o k response has a 3xx status code
+func (o *GetRepoWebhookInfoOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get repo webhook info o k response has a 4xx status code
+func (o *GetRepoWebhookInfoOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this get repo webhook info o k response has a 5xx status code
+func (o *GetRepoWebhookInfoOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get repo webhook info o k response has a status code equal to that given
+func (o *GetRepoWebhookInfoOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the get repo webhook info o k response
+func (o *GetRepoWebhookInfoOK) Code() int {
+ return 200
+}
+
+func (o *GetRepoWebhookInfoOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /repositories/{repoID}/webhook][%d] getRepoWebhookInfoOK %s", 200, payload)
+}
+
+func (o *GetRepoWebhookInfoOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /repositories/{repoID}/webhook][%d] getRepoWebhookInfoOK %s", 200, payload)
+}
+
+func (o *GetRepoWebhookInfoOK) GetPayload() garm_params.HookInfo {
+ return o.Payload
+}
+
+func (o *GetRepoWebhookInfoOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewGetRepoWebhookInfoDefault creates a GetRepoWebhookInfoDefault with default headers values
+func NewGetRepoWebhookInfoDefault(code int) *GetRepoWebhookInfoDefault {
+ return &GetRepoWebhookInfoDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+GetRepoWebhookInfoDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type GetRepoWebhookInfoDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this get repo webhook info default response has a 2xx status code
+func (o *GetRepoWebhookInfoDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this get repo webhook info default response has a 3xx status code
+func (o *GetRepoWebhookInfoDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this get repo webhook info default response has a 4xx status code
+func (o *GetRepoWebhookInfoDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this get repo webhook info default response has a 5xx status code
+func (o *GetRepoWebhookInfoDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this get repo webhook info default response has a status code equal to that given
+func (o *GetRepoWebhookInfoDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the get repo webhook info default response
+func (o *GetRepoWebhookInfoDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *GetRepoWebhookInfoDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /repositories/{repoID}/webhook][%d] GetRepoWebhookInfo default %s", o._statusCode, payload)
+}
+
+func (o *GetRepoWebhookInfoDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /repositories/{repoID}/webhook][%d] GetRepoWebhookInfo default %s", o._statusCode, payload)
+}
+
+func (o *GetRepoWebhookInfoDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *GetRepoWebhookInfoDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
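
Because `ReadResponse` returns the `Default` response as an error for non-2xx codes, callers can recover the HTTP status and payload with `errors.As`. A sketch that fabricates a 404 default locally to show the unwrapping pattern (in real use the error would come back from the generated client method, which is not part of this diff):

```go
package main

import (
	"errors"
	"fmt"

	"github.com/cloudbase/garm/client/repositories"
)

func describe(err error) {
	var apiErr *repositories.GetRepoWebhookInfoDefault
	if errors.As(err, &apiErr) {
		// Code() carries the HTTP status; Error() includes the JSON payload.
		fmt.Printf("status %d: %s\n", apiErr.Code(), apiErr.Error())
	}
}

func main() {
	describe(repositories.NewGetRepoWebhookInfoDefault(404))
}
```
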
diff --git a/client/repositories/install_repo_webhook_parameters.go b/client/repositories/install_repo_webhook_parameters.go
new file mode 100644
index 00000000..933ff1b6
--- /dev/null
+++ b/client/repositories/install_repo_webhook_parameters.go
@@ -0,0 +1,173 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package repositories
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// NewInstallRepoWebhookParams creates a new InstallRepoWebhookParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewInstallRepoWebhookParams() *InstallRepoWebhookParams {
+ return &InstallRepoWebhookParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewInstallRepoWebhookParamsWithTimeout creates a new InstallRepoWebhookParams object
+// with the ability to set a timeout on a request.
+func NewInstallRepoWebhookParamsWithTimeout(timeout time.Duration) *InstallRepoWebhookParams {
+ return &InstallRepoWebhookParams{
+ timeout: timeout,
+ }
+}
+
+// NewInstallRepoWebhookParamsWithContext creates a new InstallRepoWebhookParams object
+// with the ability to set a context for a request.
+func NewInstallRepoWebhookParamsWithContext(ctx context.Context) *InstallRepoWebhookParams {
+ return &InstallRepoWebhookParams{
+ Context: ctx,
+ }
+}
+
+// NewInstallRepoWebhookParamsWithHTTPClient creates a new InstallRepoWebhookParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewInstallRepoWebhookParamsWithHTTPClient(client *http.Client) *InstallRepoWebhookParams {
+ return &InstallRepoWebhookParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+InstallRepoWebhookParams contains all the parameters to send to the API endpoint
+
+ for the install repo webhook operation.
+
+ Typically these are written to a http.Request.
+*/
+type InstallRepoWebhookParams struct {
+
+ /* Body.
+
+ Parameters used when creating the repository webhook.
+ */
+ Body garm_params.InstallWebhookParams
+
+ /* RepoID.
+
+ Repository ID.
+ */
+ RepoID string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the install repo webhook params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *InstallRepoWebhookParams) WithDefaults() *InstallRepoWebhookParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the install repo webhook params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *InstallRepoWebhookParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the install repo webhook params
+func (o *InstallRepoWebhookParams) WithTimeout(timeout time.Duration) *InstallRepoWebhookParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the install repo webhook params
+func (o *InstallRepoWebhookParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the install repo webhook params
+func (o *InstallRepoWebhookParams) WithContext(ctx context.Context) *InstallRepoWebhookParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the install repo webhook params
+func (o *InstallRepoWebhookParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the install repo webhook params
+func (o *InstallRepoWebhookParams) WithHTTPClient(client *http.Client) *InstallRepoWebhookParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the install repo webhook params
+func (o *InstallRepoWebhookParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithBody adds the body to the install repo webhook params
+func (o *InstallRepoWebhookParams) WithBody(body garm_params.InstallWebhookParams) *InstallRepoWebhookParams {
+ o.SetBody(body)
+ return o
+}
+
+// SetBody adds the body to the install repo webhook params
+func (o *InstallRepoWebhookParams) SetBody(body garm_params.InstallWebhookParams) {
+ o.Body = body
+}
+
+// WithRepoID adds the repoID to the install repo webhook params
+func (o *InstallRepoWebhookParams) WithRepoID(repoID string) *InstallRepoWebhookParams {
+ o.SetRepoID(repoID)
+ return o
+}
+
+// SetRepoID adds the repoId to the install repo webhook params
+func (o *InstallRepoWebhookParams) SetRepoID(repoID string) {
+ o.RepoID = repoID
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *InstallRepoWebhookParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+ if err := r.SetBodyParam(o.Body); err != nil {
+ return err
+ }
+
+ // path param repoID
+ if err := r.SetPathParam("repoID", o.RepoID); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
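
For per-request cancellation, the context-aware constructor can replace the client's default timeout. A sketch with placeholder values; the `InstallWebhookParams` fields are defined in github.com/cloudbase/garm/params, outside this diff:

```go
package main

import (
	"context"
	"time"

	"github.com/cloudbase/garm/client/repositories"
	garm_params "github.com/cloudbase/garm/params"
)

func main() {
	// Bound the webhook installation with a context instead of relying on
	// the client's default timeout.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	params := repositories.NewInstallRepoWebhookParamsWithContext(ctx).
		WithRepoID("repo-uuid-placeholder").
		WithBody(garm_params.InstallWebhookParams{})
	_ = params
}
```
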
diff --git a/client/repositories/install_repo_webhook_responses.go b/client/repositories/install_repo_webhook_responses.go
new file mode 100644
index 00000000..c8690bcc
--- /dev/null
+++ b/client/repositories/install_repo_webhook_responses.go
@@ -0,0 +1,184 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package repositories
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// InstallRepoWebhookReader is a Reader for the InstallRepoWebhook structure.
+type InstallRepoWebhookReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *InstallRepoWebhookReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewInstallRepoWebhookOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ result := NewInstallRepoWebhookDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+ }
+}
+
+// NewInstallRepoWebhookOK creates a InstallRepoWebhookOK with default headers values
+func NewInstallRepoWebhookOK() *InstallRepoWebhookOK {
+ return &InstallRepoWebhookOK{}
+}
+
+/*
+InstallRepoWebhookOK describes a response with status code 200, with default header values.
+
+HookInfo
+*/
+type InstallRepoWebhookOK struct {
+ Payload garm_params.HookInfo
+}
+
+// IsSuccess returns true when this install repo webhook o k response has a 2xx status code
+func (o *InstallRepoWebhookOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this install repo webhook o k response has a 3xx status code
+func (o *InstallRepoWebhookOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this install repo webhook o k response has a 4xx status code
+func (o *InstallRepoWebhookOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this install repo webhook o k response has a 5xx status code
+func (o *InstallRepoWebhookOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this install repo webhook o k response has a status code equal to that given
+func (o *InstallRepoWebhookOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the install repo webhook o k response
+func (o *InstallRepoWebhookOK) Code() int {
+ return 200
+}
+
+func (o *InstallRepoWebhookOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /repositories/{repoID}/webhook][%d] installRepoWebhookOK %s", 200, payload)
+}
+
+func (o *InstallRepoWebhookOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /repositories/{repoID}/webhook][%d] installRepoWebhookOK %s", 200, payload)
+}
+
+func (o *InstallRepoWebhookOK) GetPayload() garm_params.HookInfo {
+ return o.Payload
+}
+
+func (o *InstallRepoWebhookOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewInstallRepoWebhookDefault creates a InstallRepoWebhookDefault with default headers values
+func NewInstallRepoWebhookDefault(code int) *InstallRepoWebhookDefault {
+ return &InstallRepoWebhookDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+InstallRepoWebhookDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type InstallRepoWebhookDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this install repo webhook default response has a 2xx status code
+func (o *InstallRepoWebhookDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this install repo webhook default response has a 3xx status code
+func (o *InstallRepoWebhookDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this install repo webhook default response has a 4xx status code
+func (o *InstallRepoWebhookDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this install repo webhook default response has a 5xx status code
+func (o *InstallRepoWebhookDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this install repo webhook default response has a status code equal to that given
+func (o *InstallRepoWebhookDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the install repo webhook default response
+func (o *InstallRepoWebhookDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *InstallRepoWebhookDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /repositories/{repoID}/webhook][%d] InstallRepoWebhook default %s", o._statusCode, payload)
+}
+
+func (o *InstallRepoWebhookDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /repositories/{repoID}/webhook][%d] InstallRepoWebhook default %s", o._statusCode, payload)
+}
+
+func (o *InstallRepoWebhookDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *InstallRepoWebhookDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/repositories/list_repo_instances_responses.go b/client/repositories/list_repo_instances_responses.go
index 39c39eb8..5c49b701 100644
--- a/client/repositories/list_repo_instances_responses.go
+++ b/client/repositories/list_repo_instances_responses.go
@@ -6,6 +6,7 @@ package repositories
// Editing this file might prove futile when you re-run the swagger generate command
import (
+ "encoding/json"
"fmt"
"io"
@@ -87,11 +88,13 @@ func (o *ListRepoInstancesOK) Code() int {
}
func (o *ListRepoInstancesOK) Error() string {
- return fmt.Sprintf("[GET /repositories/{repoID}/instances][%d] listRepoInstancesOK %+v", 200, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /repositories/{repoID}/instances][%d] listRepoInstancesOK %s", 200, payload)
}
func (o *ListRepoInstancesOK) String() string {
- return fmt.Sprintf("[GET /repositories/{repoID}/instances][%d] listRepoInstancesOK %+v", 200, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /repositories/{repoID}/instances][%d] listRepoInstancesOK %s", 200, payload)
}
func (o *ListRepoInstancesOK) GetPayload() garm_params.Instances {
@@ -157,11 +160,13 @@ func (o *ListRepoInstancesDefault) Code() int {
}
func (o *ListRepoInstancesDefault) Error() string {
- return fmt.Sprintf("[GET /repositories/{repoID}/instances][%d] ListRepoInstances default %+v", o._statusCode, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /repositories/{repoID}/instances][%d] ListRepoInstances default %s", o._statusCode, payload)
}
func (o *ListRepoInstancesDefault) String() string {
- return fmt.Sprintf("[GET /repositories/{repoID}/instances][%d] ListRepoInstances default %+v", o._statusCode, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /repositories/{repoID}/instances][%d] ListRepoInstances default %s", o._statusCode, payload)
}
func (o *ListRepoInstancesDefault) GetPayload() apiserver_params.APIErrorResponse {
diff --git a/client/repositories/list_repo_pools_responses.go b/client/repositories/list_repo_pools_responses.go
index c6ba64ca..c16777cb 100644
--- a/client/repositories/list_repo_pools_responses.go
+++ b/client/repositories/list_repo_pools_responses.go
@@ -6,6 +6,7 @@ package repositories
// Editing this file might prove futile when you re-run the swagger generate command
import (
+ "encoding/json"
"fmt"
"io"
@@ -87,11 +88,13 @@ func (o *ListRepoPoolsOK) Code() int {
}
func (o *ListRepoPoolsOK) Error() string {
- return fmt.Sprintf("[GET /repositories/{repoID}/pools][%d] listRepoPoolsOK %+v", 200, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /repositories/{repoID}/pools][%d] listRepoPoolsOK %s", 200, payload)
}
func (o *ListRepoPoolsOK) String() string {
- return fmt.Sprintf("[GET /repositories/{repoID}/pools][%d] listRepoPoolsOK %+v", 200, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /repositories/{repoID}/pools][%d] listRepoPoolsOK %s", 200, payload)
}
func (o *ListRepoPoolsOK) GetPayload() garm_params.Pools {
@@ -157,11 +160,13 @@ func (o *ListRepoPoolsDefault) Code() int {
}
func (o *ListRepoPoolsDefault) Error() string {
- return fmt.Sprintf("[GET /repositories/{repoID}/pools][%d] ListRepoPools default %+v", o._statusCode, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /repositories/{repoID}/pools][%d] ListRepoPools default %s", o._statusCode, payload)
}
func (o *ListRepoPoolsDefault) String() string {
- return fmt.Sprintf("[GET /repositories/{repoID}/pools][%d] ListRepoPools default %+v", o._statusCode, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /repositories/{repoID}/pools][%d] ListRepoPools default %s", o._statusCode, payload)
}
func (o *ListRepoPoolsDefault) GetPayload() apiserver_params.APIErrorResponse {
diff --git a/client/repositories/list_repo_scale_sets_parameters.go b/client/repositories/list_repo_scale_sets_parameters.go
new file mode 100644
index 00000000..2582c498
--- /dev/null
+++ b/client/repositories/list_repo_scale_sets_parameters.go
@@ -0,0 +1,151 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package repositories
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewListRepoScaleSetsParams creates a new ListRepoScaleSetsParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewListRepoScaleSetsParams() *ListRepoScaleSetsParams {
+ return &ListRepoScaleSetsParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewListRepoScaleSetsParamsWithTimeout creates a new ListRepoScaleSetsParams object
+// with the ability to set a timeout on a request.
+func NewListRepoScaleSetsParamsWithTimeout(timeout time.Duration) *ListRepoScaleSetsParams {
+ return &ListRepoScaleSetsParams{
+ timeout: timeout,
+ }
+}
+
+// NewListRepoScaleSetsParamsWithContext creates a new ListRepoScaleSetsParams object
+// with the ability to set a context for a request.
+func NewListRepoScaleSetsParamsWithContext(ctx context.Context) *ListRepoScaleSetsParams {
+ return &ListRepoScaleSetsParams{
+ Context: ctx,
+ }
+}
+
+// NewListRepoScaleSetsParamsWithHTTPClient creates a new ListRepoScaleSetsParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewListRepoScaleSetsParamsWithHTTPClient(client *http.Client) *ListRepoScaleSetsParams {
+ return &ListRepoScaleSetsParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+ListRepoScaleSetsParams contains all the parameters to send to the API endpoint
+
+ for the list repo scale sets operation.
+
+ Typically these are written to a http.Request.
+*/
+type ListRepoScaleSetsParams struct {
+
+ /* RepoID.
+
+ Repository ID.
+ */
+ RepoID string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the list repo scale sets params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *ListRepoScaleSetsParams) WithDefaults() *ListRepoScaleSetsParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the list repo scale sets params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *ListRepoScaleSetsParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the list repo scale sets params
+func (o *ListRepoScaleSetsParams) WithTimeout(timeout time.Duration) *ListRepoScaleSetsParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the list repo scale sets params
+func (o *ListRepoScaleSetsParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the list repo scale sets params
+func (o *ListRepoScaleSetsParams) WithContext(ctx context.Context) *ListRepoScaleSetsParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the list repo scale sets params
+func (o *ListRepoScaleSetsParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the list repo scale sets params
+func (o *ListRepoScaleSetsParams) WithHTTPClient(client *http.Client) *ListRepoScaleSetsParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the list repo scale sets params
+func (o *ListRepoScaleSetsParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithRepoID adds the repoID to the list repo scale sets params
+func (o *ListRepoScaleSetsParams) WithRepoID(repoID string) *ListRepoScaleSetsParams {
+ o.SetRepoID(repoID)
+ return o
+}
+
+// SetRepoID adds the repoId to the list repo scale sets params
+func (o *ListRepoScaleSetsParams) SetRepoID(repoID string) {
+ o.RepoID = repoID
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *ListRepoScaleSetsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ // path param repoID
+ if err := r.SetPathParam("repoID", o.RepoID); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/repositories/list_repo_scale_sets_responses.go b/client/repositories/list_repo_scale_sets_responses.go
new file mode 100644
index 00000000..4e2d98a2
--- /dev/null
+++ b/client/repositories/list_repo_scale_sets_responses.go
@@ -0,0 +1,184 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package repositories
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// ListRepoScaleSetsReader is a Reader for the ListRepoScaleSets structure.
+type ListRepoScaleSetsReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *ListRepoScaleSetsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewListRepoScaleSetsOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ result := NewListRepoScaleSetsDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+ }
+}
+
+// NewListRepoScaleSetsOK creates a ListRepoScaleSetsOK with default headers values
+func NewListRepoScaleSetsOK() *ListRepoScaleSetsOK {
+ return &ListRepoScaleSetsOK{}
+}
+
+/*
+ListRepoScaleSetsOK describes a response with status code 200, with default header values.
+
+ScaleSets
+*/
+type ListRepoScaleSetsOK struct {
+ Payload garm_params.ScaleSets
+}
+
+// IsSuccess returns true when this list repo scale sets o k response has a 2xx status code
+func (o *ListRepoScaleSetsOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this list repo scale sets o k response has a 3xx status code
+func (o *ListRepoScaleSetsOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this list repo scale sets o k response has a 4xx status code
+func (o *ListRepoScaleSetsOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this list repo scale sets o k response has a 5xx status code
+func (o *ListRepoScaleSetsOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this list repo scale sets o k response has a status code equal to that given
+func (o *ListRepoScaleSetsOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the list repo scale sets o k response
+func (o *ListRepoScaleSetsOK) Code() int {
+ return 200
+}
+
+func (o *ListRepoScaleSetsOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /repositories/{repoID}/scalesets][%d] listRepoScaleSetsOK %s", 200, payload)
+}
+
+func (o *ListRepoScaleSetsOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /repositories/{repoID}/scalesets][%d] listRepoScaleSetsOK %s", 200, payload)
+}
+
+func (o *ListRepoScaleSetsOK) GetPayload() garm_params.ScaleSets {
+ return o.Payload
+}
+
+func (o *ListRepoScaleSetsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewListRepoScaleSetsDefault creates a ListRepoScaleSetsDefault with default headers values
+func NewListRepoScaleSetsDefault(code int) *ListRepoScaleSetsDefault {
+ return &ListRepoScaleSetsDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+ListRepoScaleSetsDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type ListRepoScaleSetsDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this list repo scale sets default response has a 2xx status code
+func (o *ListRepoScaleSetsDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this list repo scale sets default response has a 3xx status code
+func (o *ListRepoScaleSetsDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this list repo scale sets default response has a 4xx status code
+func (o *ListRepoScaleSetsDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this list repo scale sets default response has a 5xx status code
+func (o *ListRepoScaleSetsDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this list repo scale sets default response has a status code equal to that given
+func (o *ListRepoScaleSetsDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the list repo scale sets default response
+func (o *ListRepoScaleSetsDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *ListRepoScaleSetsDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /repositories/{repoID}/scalesets][%d] ListRepoScaleSets default %s", o._statusCode, payload)
+}
+
+func (o *ListRepoScaleSetsDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /repositories/{repoID}/scalesets][%d] ListRepoScaleSets default %s", o._statusCode, payload)
+}
+
+func (o *ListRepoScaleSetsDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *ListRepoScaleSetsDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
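
Because ReadResponse returns *ListRepoScaleSetsDefault as the error for any non-2xx status, callers can recover the decoded APIErrorResponse with errors.As. A sketch, assuming the standard imports (errors, log, github.com/go-openapi/runtime) plus the garm client and params packages:

    func listScaleSets(cli repositories.ClientService, repoID string, authInfo runtime.ClientAuthInfoWriter) (garm_params.ScaleSets, error) {
        params := repositories.NewListRepoScaleSetsParams().WithRepoID(repoID)
        ok, err := cli.ListRepoScaleSets(params, authInfo)
        if err != nil {
            var apiErr *repositories.ListRepoScaleSetsDefault
            if errors.As(err, &apiErr) {
                // The payload carries the server-side error details.
                log.Printf("listing scale sets failed with HTTP %d: %+v", apiErr.Code(), apiErr.GetPayload())
            }
            return garm_params.ScaleSets{}, err
        }
        return ok.GetPayload(), nil
    }
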
diff --git a/client/repositories/list_repos_parameters.go b/client/repositories/list_repos_parameters.go
index f4e17d79..9998a1ba 100644
--- a/client/repositories/list_repos_parameters.go
+++ b/client/repositories/list_repos_parameters.go
@@ -60,6 +60,25 @@ ListReposParams contains all the parameters to send to the API endpoint
Typically these are written to a http.Request.
*/
type ListReposParams struct {
+
+ /* Endpoint.
+
+ Exact endpoint name to filter by
+ */
+ Endpoint *string
+
+ /* Name.
+
+ Exact repository name to filter by
+ */
+ Name *string
+
+ /* Owner.
+
+ Exact owner name to filter by
+ */
+ Owner *string
+
timeout time.Duration
Context context.Context
HTTPClient *http.Client
@@ -113,6 +132,39 @@ func (o *ListReposParams) SetHTTPClient(client *http.Client) {
o.HTTPClient = client
}
+// WithEndpoint adds the endpoint to the list repos params
+func (o *ListReposParams) WithEndpoint(endpoint *string) *ListReposParams {
+ o.SetEndpoint(endpoint)
+ return o
+}
+
+// SetEndpoint adds the endpoint to the list repos params
+func (o *ListReposParams) SetEndpoint(endpoint *string) {
+ o.Endpoint = endpoint
+}
+
+// WithName adds the name to the list repos params
+func (o *ListReposParams) WithName(name *string) *ListReposParams {
+ o.SetName(name)
+ return o
+}
+
+// SetName adds the name to the list repos params
+func (o *ListReposParams) SetName(name *string) {
+ o.Name = name
+}
+
+// WithOwner adds the owner to the list repos params
+func (o *ListReposParams) WithOwner(owner *string) *ListReposParams {
+ o.SetOwner(owner)
+ return o
+}
+
+// SetOwner adds the owner to the list repos params
+func (o *ListReposParams) SetOwner(owner *string) {
+ o.Owner = owner
+}
+
// WriteToRequest writes these params to a swagger request
func (o *ListReposParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
@@ -121,6 +173,57 @@ func (o *ListReposParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Reg
}
var res []error
+ if o.Endpoint != nil {
+
+ // query param endpoint
+ var qrEndpoint string
+
+ if o.Endpoint != nil {
+ qrEndpoint = *o.Endpoint
+ }
+ qEndpoint := qrEndpoint
+ if qEndpoint != "" {
+
+ if err := r.SetQueryParam("endpoint", qEndpoint); err != nil {
+ return err
+ }
+ }
+ }
+
+ if o.Name != nil {
+
+ // query param name
+ var qrName string
+
+ if o.Name != nil {
+ qrName = *o.Name
+ }
+ qName := qrName
+ if qName != "" {
+
+ if err := r.SetQueryParam("name", qName); err != nil {
+ return err
+ }
+ }
+ }
+
+ if o.Owner != nil {
+
+ // query param owner
+ var qrOwner string
+
+ if o.Owner != nil {
+ qrOwner = *o.Owner
+ }
+ qOwner := qrOwner
+ if qOwner != "" {
+
+ if err := r.SetQueryParam("owner", qOwner); err != nil {
+ return err
+ }
+ }
+ }
+
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
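
Because each filter above is written only when its pointer is non-nil and its value non-empty, callers opt into exactly the filters they need. A sketch with placeholder values (NewListReposParams follows the same constructor convention as the rest of the package):

    owner := "example-org"
    name := "example-repo"
    params := repositories.NewListReposParams()
    params.SetOwner(&owner)
    params.SetName(&name)
    // Endpoint stays nil, so no "endpoint" query parameter is emitted.
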
diff --git a/client/repositories/list_repos_responses.go b/client/repositories/list_repos_responses.go
index 4346f0b5..a45e2c0d 100644
--- a/client/repositories/list_repos_responses.go
+++ b/client/repositories/list_repos_responses.go
@@ -6,6 +6,7 @@ package repositories
// Editing this file might prove futile when you re-run the swagger generate command
import (
+ "encoding/json"
"fmt"
"io"
@@ -87,11 +88,13 @@ func (o *ListReposOK) Code() int {
}
func (o *ListReposOK) Error() string {
- return fmt.Sprintf("[GET /repositories][%d] listReposOK %+v", 200, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /repositories][%d] listReposOK %s", 200, payload)
}
func (o *ListReposOK) String() string {
- return fmt.Sprintf("[GET /repositories][%d] listReposOK %+v", 200, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /repositories][%d] listReposOK %s", 200, payload)
}
func (o *ListReposOK) GetPayload() garm_params.Repositories {
@@ -157,11 +160,13 @@ func (o *ListReposDefault) Code() int {
}
func (o *ListReposDefault) Error() string {
- return fmt.Sprintf("[GET /repositories][%d] ListRepos default %+v", o._statusCode, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /repositories][%d] ListRepos default %s", o._statusCode, payload)
}
func (o *ListReposDefault) String() string {
- return fmt.Sprintf("[GET /repositories][%d] ListRepos default %+v", o._statusCode, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /repositories][%d] ListRepos default %s", o._statusCode, payload)
}
func (o *ListReposDefault) GetPayload() apiserver_params.APIErrorResponse {
diff --git a/client/repositories/repositories_client.go b/client/repositories/repositories_client.go
index e9d2619d..017bf0f8 100644
--- a/client/repositories/repositories_client.go
+++ b/client/repositories/repositories_client.go
@@ -7,6 +7,7 @@ package repositories
import (
"github.com/go-openapi/runtime"
+ httptransport "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
)
@@ -15,6 +16,31 @@ func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientServi
return &Client{transport: transport, formats: formats}
}
+// NewClientWithBasicAuth creates a new repositories API client with basic auth credentials.
+// It takes the following parameters:
+// - host: http host (github.com).
+// - basePath: any base path for the API client ("/v1", "/v3").
+// - scheme: http scheme ("http", "https").
+// - user: user for basic authentication header.
+// - password: password for basic authentication header.
+func NewClientWithBasicAuth(host, basePath, scheme, user, password string) ClientService {
+ transport := httptransport.New(host, basePath, []string{scheme})
+ transport.DefaultAuthentication = httptransport.BasicAuth(user, password)
+ return &Client{transport: transport, formats: strfmt.Default}
+}
+
+// NewClientWithBearerToken creates a new repositories API client with a bearer token for authentication.
+// It takes the following parameters:
+// - host: http host (github.com).
+// - basePath: any base path for the API client ("/v1", "/v3").
+// - scheme: http scheme ("http", "https").
+// - bearerToken: bearer token for Bearer authentication header.
+func NewClientWithBearerToken(host, basePath, scheme, bearerToken string) ClientService {
+ transport := httptransport.New(host, basePath, []string{scheme})
+ transport.DefaultAuthentication = httptransport.BearerToken(bearerToken)
+ return &Client{transport: transport, formats: strfmt.Default}
+}
+
/*
Client for repositories API
*/
@@ -23,7 +49,7 @@ type Client struct {
formats strfmt.Registry
}
-// ClientOption is the option for Client methods
+// ClientOption may be used to customize the behavior of Client methods.
type ClientOption func(*runtime.ClientOperation)
// ClientService is the interface for Client methods
@@ -32,6 +58,8 @@ type ClientService interface {
CreateRepoPool(params *CreateRepoPoolParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*CreateRepoPoolOK, error)
+ CreateRepoScaleSet(params *CreateRepoScaleSetParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*CreateRepoScaleSetOK, error)
+
DeleteRepo(params *DeleteRepoParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) error
DeleteRepoPool(params *DeleteRepoPoolParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) error
@@ -40,12 +68,20 @@ type ClientService interface {
GetRepoPool(params *GetRepoPoolParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*GetRepoPoolOK, error)
+ GetRepoWebhookInfo(params *GetRepoWebhookInfoParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*GetRepoWebhookInfoOK, error)
+
+ InstallRepoWebhook(params *InstallRepoWebhookParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*InstallRepoWebhookOK, error)
+
ListRepoInstances(params *ListRepoInstancesParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ListRepoInstancesOK, error)
ListRepoPools(params *ListRepoPoolsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ListRepoPoolsOK, error)
+ ListRepoScaleSets(params *ListRepoScaleSetsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ListRepoScaleSetsOK, error)
+
ListRepos(params *ListReposParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ListReposOK, error)
+ UninstallRepoWebhook(params *UninstallRepoWebhookParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) error
+
UpdateRepo(params *UpdateRepoParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*UpdateRepoOK, error)
UpdateRepoPool(params *UpdateRepoPoolParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*UpdateRepoPoolOK, error)
@@ -129,6 +165,44 @@ func (a *Client) CreateRepoPool(params *CreateRepoPoolParams, authInfo runtime.C
return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
}
+/*
+CreateRepoScaleSet creates repository scale set with the parameters given
+*/
+func (a *Client) CreateRepoScaleSet(params *CreateRepoScaleSetParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*CreateRepoScaleSetOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewCreateRepoScaleSetParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "CreateRepoScaleSet",
+ Method: "POST",
+ PathPattern: "/repositories/{repoID}/scalesets",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &CreateRepoScaleSetReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*CreateRepoScaleSetOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ unexpectedSuccess := result.(*CreateRepoScaleSetDefault)
+ return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
/*
DeleteRepo deletes repository by ID
*/
@@ -269,6 +343,84 @@ func (a *Client) GetRepoPool(params *GetRepoPoolParams, authInfo runtime.ClientA
return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
}
+/*
+GetRepoWebhookInfo gets information about the GARM-installed webhook on a repository
+*/
+func (a *Client) GetRepoWebhookInfo(params *GetRepoWebhookInfoParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*GetRepoWebhookInfoOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewGetRepoWebhookInfoParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "GetRepoWebhookInfo",
+ Method: "GET",
+ PathPattern: "/repositories/{repoID}/webhook",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &GetRepoWebhookInfoReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*GetRepoWebhookInfoOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ unexpectedSuccess := result.(*GetRepoWebhookInfoDefault)
+ return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+	InstallRepoWebhook Install the GARM webhook for a repository. The secret configured on the repository will
+
+be used to validate the requests.
+*/
+func (a *Client) InstallRepoWebhook(params *InstallRepoWebhookParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*InstallRepoWebhookOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewInstallRepoWebhookParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "InstallRepoWebhook",
+ Method: "POST",
+ PathPattern: "/repositories/{repoID}/webhook",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &InstallRepoWebhookReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*InstallRepoWebhookOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ unexpectedSuccess := result.(*InstallRepoWebhookDefault)
+ return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
/*
ListRepoInstances lists repository instances
*/
@@ -345,6 +497,44 @@ func (a *Client) ListRepoPools(params *ListRepoPoolsParams, authInfo runtime.Cli
return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
}
+/*
+ListRepoScaleSets lists repository scale sets
+*/
+func (a *Client) ListRepoScaleSets(params *ListRepoScaleSetsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ListRepoScaleSetsOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewListRepoScaleSetsParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "ListRepoScaleSets",
+ Method: "GET",
+ PathPattern: "/repositories/{repoID}/scalesets",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &ListRepoScaleSetsReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*ListRepoScaleSetsOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ unexpectedSuccess := result.(*ListRepoScaleSetsDefault)
+ return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
/*
ListRepos lists repositories
*/
@@ -383,6 +573,38 @@ func (a *Client) ListRepos(params *ListReposParams, authInfo runtime.ClientAuthI
return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
}
+/*
+UninstallRepoWebhook uninstalls the repository webhook
+*/
+func (a *Client) UninstallRepoWebhook(params *UninstallRepoWebhookParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) error {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewUninstallRepoWebhookParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "UninstallRepoWebhook",
+ Method: "DELETE",
+ PathPattern: "/repositories/{repoID}/webhook",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &UninstallRepoWebhookReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ _, err := a.transport.Submit(op)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
/*
UpdateRepo updates repository with the parameters given
*/
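
Putting the new constructor and operations together, a token-authenticated flow might look like the sketch below; host, base path, and scheme are placeholder values, and because the transport carries DefaultAuthentication the per-call authInfo can be nil:

    func scaleSetsForRepo(token, repoID string) error {
        cli := repositories.NewClientWithBearerToken(
            "garm.example.com", "/api/v1", "https", token) // placeholders
        ok, err := cli.ListRepoScaleSets(
            repositories.NewListRepoScaleSetsParams().WithRepoID(repoID), nil)
        if err != nil {
            return err
        }
        fmt.Printf("scale sets: %+v\n", ok.GetPayload())
        return nil
    }
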
diff --git a/client/repositories/uninstall_repo_webhook_parameters.go b/client/repositories/uninstall_repo_webhook_parameters.go
new file mode 100644
index 00000000..acefd615
--- /dev/null
+++ b/client/repositories/uninstall_repo_webhook_parameters.go
@@ -0,0 +1,151 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package repositories
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewUninstallRepoWebhookParams creates a new UninstallRepoWebhookParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewUninstallRepoWebhookParams() *UninstallRepoWebhookParams {
+ return &UninstallRepoWebhookParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewUninstallRepoWebhookParamsWithTimeout creates a new UninstallRepoWebhookParams object
+// with the ability to set a timeout on a request.
+func NewUninstallRepoWebhookParamsWithTimeout(timeout time.Duration) *UninstallRepoWebhookParams {
+ return &UninstallRepoWebhookParams{
+ timeout: timeout,
+ }
+}
+
+// NewUninstallRepoWebhookParamsWithContext creates a new UninstallRepoWebhookParams object
+// with the ability to set a context for a request.
+func NewUninstallRepoWebhookParamsWithContext(ctx context.Context) *UninstallRepoWebhookParams {
+ return &UninstallRepoWebhookParams{
+ Context: ctx,
+ }
+}
+
+// NewUninstallRepoWebhookParamsWithHTTPClient creates a new UninstallRepoWebhookParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewUninstallRepoWebhookParamsWithHTTPClient(client *http.Client) *UninstallRepoWebhookParams {
+ return &UninstallRepoWebhookParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+UninstallRepoWebhookParams contains all the parameters to send to the API endpoint
+
+ for the uninstall repo webhook operation.
+
+ Typically these are written to a http.Request.
+*/
+type UninstallRepoWebhookParams struct {
+
+ /* RepoID.
+
+ Repository ID.
+ */
+ RepoID string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the uninstall repo webhook params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *UninstallRepoWebhookParams) WithDefaults() *UninstallRepoWebhookParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the uninstall repo webhook params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *UninstallRepoWebhookParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the uninstall repo webhook params
+func (o *UninstallRepoWebhookParams) WithTimeout(timeout time.Duration) *UninstallRepoWebhookParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the uninstall repo webhook params
+func (o *UninstallRepoWebhookParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the uninstall repo webhook params
+func (o *UninstallRepoWebhookParams) WithContext(ctx context.Context) *UninstallRepoWebhookParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the uninstall repo webhook params
+func (o *UninstallRepoWebhookParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the uninstall repo webhook params
+func (o *UninstallRepoWebhookParams) WithHTTPClient(client *http.Client) *UninstallRepoWebhookParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the uninstall repo webhook params
+func (o *UninstallRepoWebhookParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithRepoID adds the repoID to the uninstall repo webhook params
+func (o *UninstallRepoWebhookParams) WithRepoID(repoID string) *UninstallRepoWebhookParams {
+ o.SetRepoID(repoID)
+ return o
+}
+
+// SetRepoID adds the repoId to the uninstall repo webhook params
+func (o *UninstallRepoWebhookParams) SetRepoID(repoID string) {
+ o.RepoID = repoID
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *UninstallRepoWebhookParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ // path param repoID
+ if err := r.SetPathParam("repoID", o.RepoID); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/repositories/uninstall_repo_webhook_responses.go b/client/repositories/uninstall_repo_webhook_responses.go
new file mode 100644
index 00000000..54a66cf1
--- /dev/null
+++ b/client/repositories/uninstall_repo_webhook_responses.go
@@ -0,0 +1,106 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package repositories
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+)
+
+// UninstallRepoWebhookReader is a Reader for the UninstallRepoWebhook structure.
+type UninstallRepoWebhookReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *UninstallRepoWebhookReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ result := NewUninstallRepoWebhookDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+}
+
+// NewUninstallRepoWebhookDefault creates a UninstallRepoWebhookDefault with default headers values
+func NewUninstallRepoWebhookDefault(code int) *UninstallRepoWebhookDefault {
+ return &UninstallRepoWebhookDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+UninstallRepoWebhookDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type UninstallRepoWebhookDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this uninstall repo webhook default response has a 2xx status code
+func (o *UninstallRepoWebhookDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this uninstall repo webhook default response has a 3xx status code
+func (o *UninstallRepoWebhookDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this uninstall repo webhook default response has a 4xx status code
+func (o *UninstallRepoWebhookDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this uninstall repo webhook default response has a 5xx status code
+func (o *UninstallRepoWebhookDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this uninstall repo webhook default response has a status code equal to that given
+func (o *UninstallRepoWebhookDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the uninstall repo webhook default response
+func (o *UninstallRepoWebhookDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *UninstallRepoWebhookDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[DELETE /repositories/{repoID}/webhook][%d] UninstallRepoWebhook default %s", o._statusCode, payload)
+}
+
+func (o *UninstallRepoWebhookDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[DELETE /repositories/{repoID}/webhook][%d] UninstallRepoWebhook default %s", o._statusCode, payload)
+}
+
+func (o *UninstallRepoWebhookDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *UninstallRepoWebhookDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
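
Since UninstallRepoWebhook returns only an error, any API failure surfaces directly as *UninstallRepoWebhookDefault. A sketch of inspecting it (client setup as above, authInfo left nil):

    func removeWebhook(cli repositories.ClientService, repoID string) error {
        params := repositories.NewUninstallRepoWebhookParams().WithRepoID(repoID)
        if err := cli.UninstallRepoWebhook(params, nil); err != nil {
            var apiErr *repositories.UninstallRepoWebhookDefault
            if errors.As(err, &apiErr) && apiErr.IsClientError() {
                // 4xx: a request-side problem, e.g. an unknown repository ID.
                log.Printf("webhook uninstall rejected: %+v", apiErr.GetPayload())
            }
            return err
        }
        return nil
    }
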
diff --git a/client/repositories/update_repo_pool_responses.go b/client/repositories/update_repo_pool_responses.go
index e9f7a7e4..8d5da3f7 100644
--- a/client/repositories/update_repo_pool_responses.go
+++ b/client/repositories/update_repo_pool_responses.go
@@ -6,6 +6,7 @@ package repositories
// Editing this file might prove futile when you re-run the swagger generate command
import (
+ "encoding/json"
"fmt"
"io"
@@ -87,11 +88,13 @@ func (o *UpdateRepoPoolOK) Code() int {
}
func (o *UpdateRepoPoolOK) Error() string {
- return fmt.Sprintf("[PUT /repositories/{repoID}/pools/{poolID}][%d] updateRepoPoolOK %+v", 200, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /repositories/{repoID}/pools/{poolID}][%d] updateRepoPoolOK %s", 200, payload)
}
func (o *UpdateRepoPoolOK) String() string {
- return fmt.Sprintf("[PUT /repositories/{repoID}/pools/{poolID}][%d] updateRepoPoolOK %+v", 200, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /repositories/{repoID}/pools/{poolID}][%d] updateRepoPoolOK %s", 200, payload)
}
func (o *UpdateRepoPoolOK) GetPayload() garm_params.Pool {
@@ -157,11 +160,13 @@ func (o *UpdateRepoPoolDefault) Code() int {
}
func (o *UpdateRepoPoolDefault) Error() string {
- return fmt.Sprintf("[PUT /repositories/{repoID}/pools/{poolID}][%d] UpdateRepoPool default %+v", o._statusCode, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /repositories/{repoID}/pools/{poolID}][%d] UpdateRepoPool default %s", o._statusCode, payload)
}
func (o *UpdateRepoPoolDefault) String() string {
- return fmt.Sprintf("[PUT /repositories/{repoID}/pools/{poolID}][%d] UpdateRepoPool default %+v", o._statusCode, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /repositories/{repoID}/pools/{poolID}][%d] UpdateRepoPool default %s", o._statusCode, payload)
}
func (o *UpdateRepoPoolDefault) GetPayload() apiserver_params.APIErrorResponse {
diff --git a/client/repositories/update_repo_responses.go b/client/repositories/update_repo_responses.go
index 26b6fd8a..117d6bb9 100644
--- a/client/repositories/update_repo_responses.go
+++ b/client/repositories/update_repo_responses.go
@@ -6,6 +6,7 @@ package repositories
// Editing this file might prove futile when you re-run the swagger generate command
import (
+ "encoding/json"
"fmt"
"io"
@@ -87,11 +88,13 @@ func (o *UpdateRepoOK) Code() int {
}
func (o *UpdateRepoOK) Error() string {
- return fmt.Sprintf("[PUT /repositories/{repoID}][%d] updateRepoOK %+v", 200, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /repositories/{repoID}][%d] updateRepoOK %s", 200, payload)
}
func (o *UpdateRepoOK) String() string {
- return fmt.Sprintf("[PUT /repositories/{repoID}][%d] updateRepoOK %+v", 200, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /repositories/{repoID}][%d] updateRepoOK %s", 200, payload)
}
func (o *UpdateRepoOK) GetPayload() garm_params.Repository {
@@ -157,11 +160,13 @@ func (o *UpdateRepoDefault) Code() int {
}
func (o *UpdateRepoDefault) Error() string {
- return fmt.Sprintf("[PUT /repositories/{repoID}][%d] UpdateRepo default %+v", o._statusCode, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /repositories/{repoID}][%d] UpdateRepo default %s", o._statusCode, payload)
}
func (o *UpdateRepoDefault) String() string {
- return fmt.Sprintf("[PUT /repositories/{repoID}][%d] UpdateRepo default %+v", o._statusCode, o.Payload)
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /repositories/{repoID}][%d] UpdateRepo default %s", o._statusCode, payload)
}
func (o *UpdateRepoDefault) GetPayload() apiserver_params.APIErrorResponse {
diff --git a/client/scalesets/delete_scale_set_parameters.go b/client/scalesets/delete_scale_set_parameters.go
new file mode 100644
index 00000000..640f95a8
--- /dev/null
+++ b/client/scalesets/delete_scale_set_parameters.go
@@ -0,0 +1,151 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package scalesets
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewDeleteScaleSetParams creates a new DeleteScaleSetParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewDeleteScaleSetParams() *DeleteScaleSetParams {
+ return &DeleteScaleSetParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewDeleteScaleSetParamsWithTimeout creates a new DeleteScaleSetParams object
+// with the ability to set a timeout on a request.
+func NewDeleteScaleSetParamsWithTimeout(timeout time.Duration) *DeleteScaleSetParams {
+ return &DeleteScaleSetParams{
+ timeout: timeout,
+ }
+}
+
+// NewDeleteScaleSetParamsWithContext creates a new DeleteScaleSetParams object
+// with the ability to set a context for a request.
+func NewDeleteScaleSetParamsWithContext(ctx context.Context) *DeleteScaleSetParams {
+ return &DeleteScaleSetParams{
+ Context: ctx,
+ }
+}
+
+// NewDeleteScaleSetParamsWithHTTPClient creates a new DeleteScaleSetParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewDeleteScaleSetParamsWithHTTPClient(client *http.Client) *DeleteScaleSetParams {
+ return &DeleteScaleSetParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+DeleteScaleSetParams contains all the parameters to send to the API endpoint
+
+ for the delete scale set operation.
+
+ Typically these are written to a http.Request.
+*/
+type DeleteScaleSetParams struct {
+
+ /* ScalesetID.
+
+ ID of the scale set to delete.
+ */
+ ScalesetID string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the delete scale set params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *DeleteScaleSetParams) WithDefaults() *DeleteScaleSetParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the delete scale set params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *DeleteScaleSetParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the delete scale set params
+func (o *DeleteScaleSetParams) WithTimeout(timeout time.Duration) *DeleteScaleSetParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the delete scale set params
+func (o *DeleteScaleSetParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the delete scale set params
+func (o *DeleteScaleSetParams) WithContext(ctx context.Context) *DeleteScaleSetParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the delete scale set params
+func (o *DeleteScaleSetParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the delete scale set params
+func (o *DeleteScaleSetParams) WithHTTPClient(client *http.Client) *DeleteScaleSetParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the delete scale set params
+func (o *DeleteScaleSetParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithScalesetID adds the scalesetID to the delete scale set params
+func (o *DeleteScaleSetParams) WithScalesetID(scalesetID string) *DeleteScaleSetParams {
+ o.SetScalesetID(scalesetID)
+ return o
+}
+
+// SetScalesetID adds the scalesetId to the delete scale set params
+func (o *DeleteScaleSetParams) SetScalesetID(scalesetID string) {
+ o.ScalesetID = scalesetID
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *DeleteScaleSetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ // path param scalesetID
+ if err := r.SetPathParam("scalesetID", o.ScalesetID); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
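
The scalesets params follow the pattern established above. A construction sketch (the ID is a placeholder; the client method that consumes these params lives in the package's client file, which is outside this hunk):

    params := scalesets.NewDeleteScaleSetParams().
        WithScalesetID("17"). // placeholder scale set ID
        WithTimeout(10 * time.Second)
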
diff --git a/client/scalesets/delete_scale_set_responses.go b/client/scalesets/delete_scale_set_responses.go
new file mode 100644
index 00000000..dd0f7334
--- /dev/null
+++ b/client/scalesets/delete_scale_set_responses.go
@@ -0,0 +1,106 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package scalesets
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+)
+
+// DeleteScaleSetReader is a Reader for the DeleteScaleSet structure.
+type DeleteScaleSetReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *DeleteScaleSetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ result := NewDeleteScaleSetDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+}
+
+// NewDeleteScaleSetDefault creates a DeleteScaleSetDefault with default headers values
+func NewDeleteScaleSetDefault(code int) *DeleteScaleSetDefault {
+ return &DeleteScaleSetDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+DeleteScaleSetDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type DeleteScaleSetDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this delete scale set default response has a 2xx status code
+func (o *DeleteScaleSetDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this delete scale set default response has a 3xx status code
+func (o *DeleteScaleSetDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this delete scale set default response has a 4xx status code
+func (o *DeleteScaleSetDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this delete scale set default response has a 5xx status code
+func (o *DeleteScaleSetDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this delete scale set default response has a status code equal to that given
+func (o *DeleteScaleSetDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the delete scale set default response
+func (o *DeleteScaleSetDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *DeleteScaleSetDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[DELETE /scalesets/{scalesetID}][%d] DeleteScaleSet default %s", o._statusCode, payload)
+}
+
+func (o *DeleteScaleSetDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[DELETE /scalesets/{scalesetID}][%d] DeleteScaleSet default %s", o._statusCode, payload)
+}
+
+func (o *DeleteScaleSetDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *DeleteScaleSetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/scalesets/get_scale_set_parameters.go b/client/scalesets/get_scale_set_parameters.go
new file mode 100644
index 00000000..9e31b46e
--- /dev/null
+++ b/client/scalesets/get_scale_set_parameters.go
@@ -0,0 +1,151 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package scalesets
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewGetScaleSetParams creates a new GetScaleSetParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewGetScaleSetParams() *GetScaleSetParams {
+ return &GetScaleSetParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewGetScaleSetParamsWithTimeout creates a new GetScaleSetParams object
+// with the ability to set a timeout on a request.
+func NewGetScaleSetParamsWithTimeout(timeout time.Duration) *GetScaleSetParams {
+ return &GetScaleSetParams{
+ timeout: timeout,
+ }
+}
+
+// NewGetScaleSetParamsWithContext creates a new GetScaleSetParams object
+// with the ability to set a context for a request.
+func NewGetScaleSetParamsWithContext(ctx context.Context) *GetScaleSetParams {
+ return &GetScaleSetParams{
+ Context: ctx,
+ }
+}
+
+// NewGetScaleSetParamsWithHTTPClient creates a new GetScaleSetParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewGetScaleSetParamsWithHTTPClient(client *http.Client) *GetScaleSetParams {
+ return &GetScaleSetParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+GetScaleSetParams contains all the parameters to send to the API endpoint
+
+ for the get scale set operation.
+
+ Typically these are written to a http.Request.
+*/
+type GetScaleSetParams struct {
+
+ /* ScalesetID.
+
+ ID of the scale set to fetch.
+ */
+ ScalesetID string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the get scale set params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetScaleSetParams) WithDefaults() *GetScaleSetParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the get scale set params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetScaleSetParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the get scale set params
+func (o *GetScaleSetParams) WithTimeout(timeout time.Duration) *GetScaleSetParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the get scale set params
+func (o *GetScaleSetParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the get scale set params
+func (o *GetScaleSetParams) WithContext(ctx context.Context) *GetScaleSetParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the get scale set params
+func (o *GetScaleSetParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the get scale set params
+func (o *GetScaleSetParams) WithHTTPClient(client *http.Client) *GetScaleSetParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the get scale set params
+func (o *GetScaleSetParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithScalesetID adds the scalesetID to the get scale set params
+func (o *GetScaleSetParams) WithScalesetID(scalesetID string) *GetScaleSetParams {
+ o.SetScalesetID(scalesetID)
+ return o
+}
+
+// SetScalesetID adds the scalesetId to the get scale set params
+func (o *GetScaleSetParams) SetScalesetID(scalesetID string) {
+ o.ScalesetID = scalesetID
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *GetScaleSetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ // path param scalesetID
+ if err := r.SetPathParam("scalesetID", o.ScalesetID); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/scalesets/get_scale_set_responses.go b/client/scalesets/get_scale_set_responses.go
new file mode 100644
index 00000000..5b30e16f
--- /dev/null
+++ b/client/scalesets/get_scale_set_responses.go
@@ -0,0 +1,184 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package scalesets
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// GetScaleSetReader is a Reader for the GetScaleSet structure.
+type GetScaleSetReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *GetScaleSetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewGetScaleSetOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ result := NewGetScaleSetDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+ }
+}
+
+// NewGetScaleSetOK creates a GetScaleSetOK with default headers values
+func NewGetScaleSetOK() *GetScaleSetOK {
+ return &GetScaleSetOK{}
+}
+
+/*
+GetScaleSetOK describes a response with status code 200, with default header values.
+
+ScaleSet
+*/
+type GetScaleSetOK struct {
+ Payload garm_params.ScaleSet
+}
+
+// IsSuccess returns true when this get scale set o k response has a 2xx status code
+func (o *GetScaleSetOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this get scale set o k response has a 3xx status code
+func (o *GetScaleSetOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get scale set o k response has a 4xx status code
+func (o *GetScaleSetOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this get scale set o k response has a 5xx status code
+func (o *GetScaleSetOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get scale set o k response has a status code equal to that given
+func (o *GetScaleSetOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the get scale set o k response
+func (o *GetScaleSetOK) Code() int {
+ return 200
+}
+
+func (o *GetScaleSetOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /scalesets/{scalesetID}][%d] getScaleSetOK %s", 200, payload)
+}
+
+func (o *GetScaleSetOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /scalesets/{scalesetID}][%d] getScaleSetOK %s", 200, payload)
+}
+
+func (o *GetScaleSetOK) GetPayload() garm_params.ScaleSet {
+ return o.Payload
+}
+
+func (o *GetScaleSetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewGetScaleSetDefault creates a GetScaleSetDefault with default headers values
+func NewGetScaleSetDefault(code int) *GetScaleSetDefault {
+ return &GetScaleSetDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+GetScaleSetDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type GetScaleSetDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this get scale set default response has a 2xx status code
+func (o *GetScaleSetDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this get scale set default response has a 3xx status code
+func (o *GetScaleSetDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this get scale set default response has a 4xx status code
+func (o *GetScaleSetDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this get scale set default response has a 5xx status code
+func (o *GetScaleSetDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this get scale set default response has a status code equal to that given
+func (o *GetScaleSetDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the get scale set default response
+func (o *GetScaleSetDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *GetScaleSetDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /scalesets/{scalesetID}][%d] GetScaleSet default %s", o._statusCode, payload)
+}
+
+func (o *GetScaleSetDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /scalesets/{scalesetID}][%d] GetScaleSet default %s", o._statusCode, payload)
+}
+
+func (o *GetScaleSetDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *GetScaleSetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
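
The Is* helpers make retry policies easy to express. A sketch, under the stated assumption that only server-side (5xx) failures are transient:

    func retryable(err error) bool {
        var apiErr *scalesets.GetScaleSetDefault
        if errors.As(err, &apiErr) {
            return apiErr.IsServerError() // 5xx only; a 4xx needs a corrected request
        }
        return false
    }
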
diff --git a/client/scalesets/list_scalesets_parameters.go b/client/scalesets/list_scalesets_parameters.go
new file mode 100644
index 00000000..b6fd1ccb
--- /dev/null
+++ b/client/scalesets/list_scalesets_parameters.go
@@ -0,0 +1,128 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package scalesets
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewListScalesetsParams creates a new ListScalesetsParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewListScalesetsParams() *ListScalesetsParams {
+ return &ListScalesetsParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewListScalesetsParamsWithTimeout creates a new ListScalesetsParams object
+// with the ability to set a timeout on a request.
+func NewListScalesetsParamsWithTimeout(timeout time.Duration) *ListScalesetsParams {
+ return &ListScalesetsParams{
+ timeout: timeout,
+ }
+}
+
+// NewListScalesetsParamsWithContext creates a new ListScalesetsParams object
+// with the ability to set a context for a request.
+func NewListScalesetsParamsWithContext(ctx context.Context) *ListScalesetsParams {
+ return &ListScalesetsParams{
+ Context: ctx,
+ }
+}
+
+// NewListScalesetsParamsWithHTTPClient creates a new ListScalesetsParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewListScalesetsParamsWithHTTPClient(client *http.Client) *ListScalesetsParams {
+ return &ListScalesetsParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+ListScalesetsParams contains all the parameters to send to the API endpoint
+
+ for the list scalesets operation.
+
+ Typically these are written to a http.Request.
+*/
+type ListScalesetsParams struct {
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the list scalesets params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *ListScalesetsParams) WithDefaults() *ListScalesetsParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the list scalesets params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *ListScalesetsParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the list scalesets params
+func (o *ListScalesetsParams) WithTimeout(timeout time.Duration) *ListScalesetsParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the list scalesets params
+func (o *ListScalesetsParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the list scalesets params
+func (o *ListScalesetsParams) WithContext(ctx context.Context) *ListScalesetsParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the list scalesets params
+func (o *ListScalesetsParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the list scalesets params
+func (o *ListScalesetsParams) WithHTTPClient(client *http.Client) *ListScalesetsParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the list scalesets params
+func (o *ListScalesetsParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *ListScalesetsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
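
ListScalesetsParams carries no operation-specific fields, so the only knobs are the shared timeout, context, and HTTP client. A sketch of threading a cancellable context through the call:

    ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
    defer cancel()
    params := scalesets.NewListScalesetsParamsWithContext(ctx)
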
diff --git a/client/scalesets/list_scalesets_responses.go b/client/scalesets/list_scalesets_responses.go
new file mode 100644
index 00000000..05064308
--- /dev/null
+++ b/client/scalesets/list_scalesets_responses.go
@@ -0,0 +1,184 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package scalesets
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// ListScalesetsReader is a Reader for the ListScalesets structure.
+type ListScalesetsReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *ListScalesetsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewListScalesetsOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ result := NewListScalesetsDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+ }
+}
+
+// NewListScalesetsOK creates a ListScalesetsOK with default headers values
+func NewListScalesetsOK() *ListScalesetsOK {
+ return &ListScalesetsOK{}
+}
+
+/*
+ListScalesetsOK describes a response with status code 200, with default header values.
+
+ScaleSets
+*/
+type ListScalesetsOK struct {
+ Payload garm_params.ScaleSets
+}
+
+// IsSuccess returns true when this list scalesets o k response has a 2xx status code
+func (o *ListScalesetsOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this list scalesets o k response has a 3xx status code
+func (o *ListScalesetsOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this list scalesets o k response has a 4xx status code
+func (o *ListScalesetsOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this list scalesets o k response has a 5xx status code
+func (o *ListScalesetsOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this list scalesets o k response has a status code equal to that given
+func (o *ListScalesetsOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the list scalesets o k response
+func (o *ListScalesetsOK) Code() int {
+ return 200
+}
+
+func (o *ListScalesetsOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /scalesets][%d] listScalesetsOK %s", 200, payload)
+}
+
+func (o *ListScalesetsOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /scalesets][%d] listScalesetsOK %s", 200, payload)
+}
+
+func (o *ListScalesetsOK) GetPayload() garm_params.ScaleSets {
+ return o.Payload
+}
+
+func (o *ListScalesetsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewListScalesetsDefault creates a ListScalesetsDefault with default headers values
+func NewListScalesetsDefault(code int) *ListScalesetsDefault {
+ return &ListScalesetsDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+ListScalesetsDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type ListScalesetsDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this list scalesets default response has a 2xx status code
+func (o *ListScalesetsDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this list scalesets default response has a 3xx status code
+func (o *ListScalesetsDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this list scalesets default response has a 4xx status code
+func (o *ListScalesetsDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this list scalesets default response has a 5xx status code
+func (o *ListScalesetsDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this list scalesets default response has a status code equal to that given
+func (o *ListScalesetsDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the list scalesets default response
+func (o *ListScalesetsDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *ListScalesetsDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /scalesets][%d] ListScalesets default %s", o._statusCode, payload)
+}
+
+func (o *ListScalesetsDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /scalesets][%d] ListScalesets default %s", o._statusCode, payload)
+}
+
+func (o *ListScalesetsDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *ListScalesetsDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/scalesets/scalesets_client.go b/client/scalesets/scalesets_client.go
new file mode 100644
index 00000000..5375750d
--- /dev/null
+++ b/client/scalesets/scalesets_client.go
@@ -0,0 +1,217 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package scalesets
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "github.com/go-openapi/runtime"
+ httptransport "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// New creates a new scalesets API client.
+func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService {
+ return &Client{transport: transport, formats: formats}
+}
+
+// NewClientWithBasicAuth creates a new scalesets API client with basic auth credentials.
+// It takes the following parameters:
+// - host: http host (github.com).
+// - basePath: any base path for the API client ("/v1", "/v3").
+// - scheme: http scheme ("http", "https").
+// - user: user for basic authentication header.
+// - password: password for basic authentication header.
+func NewClientWithBasicAuth(host, basePath, scheme, user, password string) ClientService {
+ transport := httptransport.New(host, basePath, []string{scheme})
+ transport.DefaultAuthentication = httptransport.BasicAuth(user, password)
+ return &Client{transport: transport, formats: strfmt.Default}
+}
+
+// NewClientWithBearerToken creates a new scalesets API client with a bearer token for authentication.
+// It takes the following parameters:
+// - host: http host (github.com).
+// - basePath: any base path for the API client ("/v1", "/v3").
+// - scheme: http scheme ("http", "https").
+// - bearerToken: bearer token for Bearer authentication header.
+func NewClientWithBearerToken(host, basePath, scheme, bearerToken string) ClientService {
+ transport := httptransport.New(host, basePath, []string{scheme})
+ transport.DefaultAuthentication = httptransport.BearerToken(bearerToken)
+ return &Client{transport: transport, formats: strfmt.Default}
+}
+
+/*
+Client for scalesets API
+*/
+type Client struct {
+ transport runtime.ClientTransport
+ formats strfmt.Registry
+}
+
+// ClientOption may be used to customize the behavior of Client methods.
+type ClientOption func(*runtime.ClientOperation)
+
+// ClientService is the interface for Client methods
+type ClientService interface {
+ DeleteScaleSet(params *DeleteScaleSetParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) error
+
+ GetScaleSet(params *GetScaleSetParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*GetScaleSetOK, error)
+
+ ListScalesets(params *ListScalesetsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ListScalesetsOK, error)
+
+ UpdateScaleSet(params *UpdateScaleSetParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*UpdateScaleSetOK, error)
+
+ SetTransport(transport runtime.ClientTransport)
+}
+
+/*
+DeleteScaleSet deletes scale set by ID
+*/
+func (a *Client) DeleteScaleSet(params *DeleteScaleSetParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) error {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewDeleteScaleSetParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "DeleteScaleSet",
+ Method: "DELETE",
+ PathPattern: "/scalesets/{scalesetID}",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &DeleteScaleSetReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ _, err := a.transport.Submit(op)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+/*
+GetScaleSet gets scale set by ID
+*/
+func (a *Client) GetScaleSet(params *GetScaleSetParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*GetScaleSetOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewGetScaleSetParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "GetScaleSet",
+ Method: "GET",
+ PathPattern: "/scalesets/{scalesetID}",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &GetScaleSetReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*GetScaleSetOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ unexpectedSuccess := result.(*GetScaleSetDefault)
+ return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+ListScalesets lists all scalesets
+*/
+func (a *Client) ListScalesets(params *ListScalesetsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ListScalesetsOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewListScalesetsParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "ListScalesets",
+ Method: "GET",
+ PathPattern: "/scalesets",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &ListScalesetsReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*ListScalesetsOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ unexpectedSuccess := result.(*ListScalesetsDefault)
+ return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+UpdateScaleSet updates scale set by ID
+*/
+func (a *Client) UpdateScaleSet(params *UpdateScaleSetParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*UpdateScaleSetOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewUpdateScaleSetParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "UpdateScaleSet",
+ Method: "PUT",
+ PathPattern: "/scalesets/{scalesetID}",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &UpdateScaleSetReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*UpdateScaleSetOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ unexpectedSuccess := result.(*UpdateScaleSetDefault)
+ return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+// SetTransport changes the transport on the client
+func (a *Client) SetTransport(transport runtime.ClientTransport) {
+ a.transport = transport
+}
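A reviewer-oriented usage sketch, not part of this patch: it shows one way the generated scalesets client above could be exercised. The host, base path, scheme and token are placeholders, and it assumes garm_params.ScaleSets is a slice that can be ranged over.

    package main

    import (
        "fmt"
        "log"

        "github.com/cloudbase/garm/client/scalesets"
    )

    func main() {
        // Placeholder connection details; substitute your deployment's values.
        cli := scalesets.NewClientWithBearerToken("garm.example.com", "/api/v1", "http", "<jwt-token>")

        // NewClientWithBearerToken attaches auth to the transport, so the
        // per-call authInfo writer can be nil here.
        resp, err := cli.ListScalesets(scalesets.NewListScalesetsParams(), nil)
        if err != nil {
            log.Fatal(err)
        }
        for _, scaleSet := range resp.Payload {
            fmt.Printf("%+v\n", scaleSet)
        }
    }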
diff --git a/client/scalesets/update_scale_set_parameters.go b/client/scalesets/update_scale_set_parameters.go
new file mode 100644
index 00000000..39668e9b
--- /dev/null
+++ b/client/scalesets/update_scale_set_parameters.go
@@ -0,0 +1,173 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package scalesets
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// NewUpdateScaleSetParams creates a new UpdateScaleSetParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewUpdateScaleSetParams() *UpdateScaleSetParams {
+ return &UpdateScaleSetParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewUpdateScaleSetParamsWithTimeout creates a new UpdateScaleSetParams object
+// with the ability to set a timeout on a request.
+func NewUpdateScaleSetParamsWithTimeout(timeout time.Duration) *UpdateScaleSetParams {
+ return &UpdateScaleSetParams{
+ timeout: timeout,
+ }
+}
+
+// NewUpdateScaleSetParamsWithContext creates a new UpdateScaleSetParams object
+// with the ability to set a context for a request.
+func NewUpdateScaleSetParamsWithContext(ctx context.Context) *UpdateScaleSetParams {
+ return &UpdateScaleSetParams{
+ Context: ctx,
+ }
+}
+
+// NewUpdateScaleSetParamsWithHTTPClient creates a new UpdateScaleSetParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewUpdateScaleSetParamsWithHTTPClient(client *http.Client) *UpdateScaleSetParams {
+ return &UpdateScaleSetParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+UpdateScaleSetParams contains all the parameters to send to the API endpoint
+
+ for the update scale set operation.
+
+ Typically these are written to a http.Request.
+*/
+type UpdateScaleSetParams struct {
+
+ /* Body.
+
+ Parameters to update the scale set with.
+ */
+ Body garm_params.UpdateScaleSetParams
+
+ /* ScalesetID.
+
+ ID of the scale set to update.
+ */
+ ScalesetID string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the update scale set params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *UpdateScaleSetParams) WithDefaults() *UpdateScaleSetParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the update scale set params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *UpdateScaleSetParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the update scale set params
+func (o *UpdateScaleSetParams) WithTimeout(timeout time.Duration) *UpdateScaleSetParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the update scale set params
+func (o *UpdateScaleSetParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the update scale set params
+func (o *UpdateScaleSetParams) WithContext(ctx context.Context) *UpdateScaleSetParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the update scale set params
+func (o *UpdateScaleSetParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the update scale set params
+func (o *UpdateScaleSetParams) WithHTTPClient(client *http.Client) *UpdateScaleSetParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the update scale set params
+func (o *UpdateScaleSetParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithBody adds the body to the update scale set params
+func (o *UpdateScaleSetParams) WithBody(body garm_params.UpdateScaleSetParams) *UpdateScaleSetParams {
+ o.SetBody(body)
+ return o
+}
+
+// SetBody adds the body to the update scale set params
+func (o *UpdateScaleSetParams) SetBody(body garm_params.UpdateScaleSetParams) {
+ o.Body = body
+}
+
+// WithScalesetID adds the scalesetID to the update scale set params
+func (o *UpdateScaleSetParams) WithScalesetID(scalesetID string) *UpdateScaleSetParams {
+ o.SetScalesetID(scalesetID)
+ return o
+}
+
+// SetScalesetID adds the scalesetId to the update scale set params
+func (o *UpdateScaleSetParams) SetScalesetID(scalesetID string) {
+ o.ScalesetID = scalesetID
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *UpdateScaleSetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+ if err := r.SetBodyParam(o.Body); err != nil {
+ return err
+ }
+
+ // path param scalesetID
+ if err := r.SetPathParam("scalesetID", o.ScalesetID); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
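Continuing the sketch above (again, not part of the patch): the fluent setters defined in this file chain naturally when updating a scale set. The ID "5" is a placeholder and the body is left zero-valued; its fields come from garm_params "github.com/cloudbase/garm/params".

    // Assumes cli from the previous sketch.
    body := garm_params.UpdateScaleSetParams{} // fields elided; see the params package
    updateReq := scalesets.NewUpdateScaleSetParams().
        WithScalesetID("5").
        WithBody(body)
    updated, err := cli.UpdateScaleSet(updateReq, nil)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("%+v\n", updated.Payload)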
diff --git a/client/scalesets/update_scale_set_responses.go b/client/scalesets/update_scale_set_responses.go
new file mode 100644
index 00000000..666e8256
--- /dev/null
+++ b/client/scalesets/update_scale_set_responses.go
@@ -0,0 +1,184 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package scalesets
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// UpdateScaleSetReader is a Reader for the UpdateScaleSet structure.
+type UpdateScaleSetReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *UpdateScaleSetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewUpdateScaleSetOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ result := NewUpdateScaleSetDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+ }
+}
+
+// NewUpdateScaleSetOK creates a UpdateScaleSetOK with default headers values
+func NewUpdateScaleSetOK() *UpdateScaleSetOK {
+ return &UpdateScaleSetOK{}
+}
+
+/*
+UpdateScaleSetOK describes a response with status code 200, with default header values.
+
+ScaleSet
+*/
+type UpdateScaleSetOK struct {
+ Payload garm_params.ScaleSet
+}
+
+// IsSuccess returns true when this update scale set o k response has a 2xx status code
+func (o *UpdateScaleSetOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this update scale set o k response has a 3xx status code
+func (o *UpdateScaleSetOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this update scale set o k response has a 4xx status code
+func (o *UpdateScaleSetOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this update scale set o k response has a 5xx status code
+func (o *UpdateScaleSetOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this update scale set o k response has a status code equal to that given
+func (o *UpdateScaleSetOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the update scale set o k response
+func (o *UpdateScaleSetOK) Code() int {
+ return 200
+}
+
+func (o *UpdateScaleSetOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /scalesets/{scalesetID}][%d] updateScaleSetOK %s", 200, payload)
+}
+
+func (o *UpdateScaleSetOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /scalesets/{scalesetID}][%d] updateScaleSetOK %s", 200, payload)
+}
+
+func (o *UpdateScaleSetOK) GetPayload() garm_params.ScaleSet {
+ return o.Payload
+}
+
+func (o *UpdateScaleSetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewUpdateScaleSetDefault creates a UpdateScaleSetDefault with default headers values
+func NewUpdateScaleSetDefault(code int) *UpdateScaleSetDefault {
+ return &UpdateScaleSetDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+UpdateScaleSetDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type UpdateScaleSetDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this update scale set default response has a 2xx status code
+func (o *UpdateScaleSetDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this update scale set default response has a 3xx status code
+func (o *UpdateScaleSetDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this update scale set default response has a 4xx status code
+func (o *UpdateScaleSetDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this update scale set default response has a 5xx status code
+func (o *UpdateScaleSetDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this update scale set default response has a status code equal to that given
+func (o *UpdateScaleSetDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the update scale set default response
+func (o *UpdateScaleSetDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *UpdateScaleSetDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /scalesets/{scalesetID}][%d] UpdateScaleSet default %s", o._statusCode, payload)
+}
+
+func (o *UpdateScaleSetDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /scalesets/{scalesetID}][%d] UpdateScaleSet default %s", o._statusCode, payload)
+}
+
+func (o *UpdateScaleSetDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *UpdateScaleSetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/cmd/garm-cli/cmd/controller.go b/cmd/garm-cli/cmd/controller.go
new file mode 100644
index 00000000..c1326943
--- /dev/null
+++ b/cmd/garm-cli/cmd/controller.go
@@ -0,0 +1,188 @@
+// Copyright 2023 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package cmd
+
+import (
+ "fmt"
+
+ "github.com/jedib0t/go-pretty/v6/table"
+ "github.com/spf13/cobra"
+
+ apiClientController "github.com/cloudbase/garm/client/controller"
+ apiClientControllerInfo "github.com/cloudbase/garm/client/controller_info"
+ "github.com/cloudbase/garm/cmd/garm-cli/common"
+ "github.com/cloudbase/garm/params"
+)
+
+var controllerCmd = &cobra.Command{
+ Use: "controller",
+ Aliases: []string{"controller-info"},
+ SilenceUsage: true,
+ Short: "Controller operations",
+ Long: `Query or update information about the current controller.`,
+ Run: nil,
+}
+
+var controllerShowCmd = &cobra.Command{
+ Use: "show",
+ Short: "Show information",
+ Long: `Show information about the current controller.`,
+ SilenceUsage: true,
+ RunE: func(_ *cobra.Command, _ []string) error {
+ if needsInit {
+ return errNeedsInitError
+ }
+
+ showInfo := apiClientControllerInfo.NewControllerInfoParams()
+ response, err := apiCli.ControllerInfo.ControllerInfo(showInfo, authToken)
+ if err != nil {
+ return err
+ }
+ return formatInfo(response.Payload)
+ },
+}
+
+var controllerUpdateCmd = &cobra.Command{
+ Use: "update",
+ Short: "Update controller information",
+ Long: `Update information about the current controller.
+
+Warning: Dragons ahead, please read carefully.
+
+Changing the URLs for the controller metadata, callback and webhooks will
+impact the controller's ability to manage webhooks and runners.
+
+As GARM can sit behind a reverse proxy or several layers of network address
+translation or load balancing, we need to explicitly tell GARM the URLs at
+which these endpoints are reachable from the outside. Internally, GARM sets
+up the API endpoints as follows:
+
+ * /webhooks - the base URL for the webhooks. GitHub needs to reach this URL.
+ * /api/v1/metadata - the metadata URL. Your runners need to be able to reach this URL.
+ * /api/v1/callbacks - the callback URL. Your runners need to be able to reach this URL.
+
+You need to expose these endpoints to the interested parties (GitHub or
+your runners), then update the controller with the URLs you set up.
+
+For example, if your reverse proxy exposes https://garm.example.com/garm-hooks
+and routes it to the "/webhooks" endpoint on the GARM backend, the controller
+info must record the public URL, so you would set it to
+https://garm.example.com/garm-hooks using:
+
+ garm-cli controller update --webhook-url=https://garm.example.com/garm-hooks
+
+If you expose GARM to the outside world directly, or if you don't rewrite the
+URLs above in your reverse proxy config, use the three endpoints above unchanged,
+substituting garm.example.com with the correct hostname or IP address.
+
+In most cases, you will have a GARM backend (say 192.168.100.10) and a reverse
+proxy in front of it exposed as https://garm.example.com. If you don't rewrite
+the URLs in the reverse proxy, and you just point to your backend, you can set
+up the GARM controller URLs as:
+
+ garm-cli controller update \
+ --webhook-url=https://garm.example.com/webhooks \
+ --metadata-url=https://garm.example.com/api/v1/metadata \
+ --callback-url=https://garm.example.com/api/v1/callbacks
+`,
+ SilenceUsage: true,
+ RunE: func(cmd *cobra.Command, _ []string) error {
+ if needsInit {
+ return errNeedsInitError
+ }
+
+		updateParams := params.UpdateControllerParams{}
+		if cmd.Flags().Changed("metadata-url") {
+			updateParams.MetadataURL = &metadataURL
+		}
+		if cmd.Flags().Changed("callback-url") {
+			updateParams.CallbackURL = &callbackURL
+		}
+		if cmd.Flags().Changed("webhook-url") {
+			updateParams.WebhookURL = &webhookURL
+		}
+
+		if cmd.Flags().Changed("minimum-job-age-backoff") {
+			updateParams.MinimumJobAgeBackoff = &minimumJobAgeBackoff
+		}
+
+		if updateParams.WebhookURL == nil && updateParams.MetadataURL == nil && updateParams.CallbackURL == nil && updateParams.MinimumJobAgeBackoff == nil {
+			cmd.Help() //nolint
+			return fmt.Errorf("at least one of minimum-job-age-backoff, metadata-url, callback-url or webhook-url must be provided")
+		}
+
+		updateUrlsReq := apiClientController.NewUpdateControllerParams()
+		updateUrlsReq.Body = updateParams
+
+ info, err := apiCli.Controller.UpdateController(updateUrlsReq, authToken)
+ if err != nil {
+ return fmt.Errorf("error updating controller: %w", err)
+ }
+		return formatInfo(info.Payload)
+ },
+}
+
+func renderControllerInfoTable(info params.ControllerInfo) string {
+ t := table.NewWriter()
+ header := table.Row{"Field", "Value"}
+
+ if info.WebhookURL == "" {
+ info.WebhookURL = "N/A"
+ }
+
+ if info.ControllerWebhookURL == "" {
+ info.ControllerWebhookURL = "N/A"
+ }
+ serverVersion := "v0.0.0-unknown"
+ if info.Version != "" {
+ serverVersion = info.Version
+ }
+ t.AppendHeader(header)
+ t.AppendRow(table.Row{"Controller ID", info.ControllerID})
+ if info.Hostname != "" {
+ t.AppendRow(table.Row{"Hostname", info.Hostname})
+ }
+ t.AppendRow(table.Row{"Metadata URL", info.MetadataURL})
+ t.AppendRow(table.Row{"Callback URL", info.CallbackURL})
+ t.AppendRow(table.Row{"Webhook Base URL", info.WebhookURL})
+ t.AppendRow(table.Row{"Controller Webhook URL", info.ControllerWebhookURL})
+ t.AppendRow(table.Row{"Minimum Job Age Backoff", info.MinimumJobAgeBackoff})
+ t.AppendRow(table.Row{"Version", serverVersion})
+ return t.Render()
+}
+
+func formatInfo(info params.ControllerInfo) error {
+ if outputFormat == common.OutputFormatJSON {
+ printAsJSON(info)
+ return nil
+ }
+ fmt.Println(renderControllerInfoTable(info))
+ return nil
+}
+
+func init() {
+	controllerUpdateCmd.Flags().StringVarP(&metadataURL, "metadata-url", "m", "", "The metadata URL for the controller (e.g. https://garm.example.com/api/v1/metadata)")
+	controllerUpdateCmd.Flags().StringVarP(&callbackURL, "callback-url", "c", "", "The callback URL for the controller (e.g. https://garm.example.com/api/v1/callbacks)")
+	controllerUpdateCmd.Flags().StringVarP(&webhookURL, "webhook-url", "w", "", "The webhook URL for the controller (e.g. https://garm.example.com/webhooks)")
+ controllerUpdateCmd.Flags().UintVarP(&minimumJobAgeBackoff, "minimum-job-age-backoff", "b", 0, "The minimum job age backoff for the controller")
+
+ controllerCmd.AddCommand(
+ controllerShowCmd,
+ controllerUpdateCmd,
+ )
+
+ rootCmd.AddCommand(controllerCmd)
+}
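The same update can also be issued programmatically through the generated controller client; a short sketch, reusing the CLI's package-level apiCli and authToken from this file, with a placeholder webhook URL:

    // Illustrative only: update the controller webhook URL via the API client.
    webhook := "https://garm.example.com/garm-hooks"
    req := apiClientController.NewUpdateControllerParams()
    req.Body = params.UpdateControllerParams{WebhookURL: &webhook}
    info, err := apiCli.Controller.UpdateController(req, authToken)
    if err != nil {
        return fmt.Errorf("error updating controller: %w", err)
    }
    return formatInfo(info.Payload)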
diff --git a/cmd/garm-cli/cmd/credentials.go b/cmd/garm-cli/cmd/credentials.go
deleted file mode 100644
index ec73c95f..00000000
--- a/cmd/garm-cli/cmd/credentials.go
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright 2022 Cloudbase Solutions SRL
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package cmd
-
-import (
- "fmt"
-
- apiClientCreds "github.com/cloudbase/garm/client/credentials"
- "github.com/cloudbase/garm/params"
-
- "github.com/jedib0t/go-pretty/v6/table"
- "github.com/spf13/cobra"
-)
-
-// credentialsCmd represents the credentials command
-var credentialsCmd = &cobra.Command{
- Use: "credentials",
- Aliases: []string{"creds"},
- Short: "List configured credentials",
- Long: `List all available credentials configured in the service
-config file.
-
-Currently, github personal tokens are configured statically in the config file
-of the garm service. This command lists the names of those credentials,
-which in turn can be used to define pools of runners withing repositories.`,
- Run: nil,
-}
-
-func init() {
- credentialsCmd.AddCommand(
- &cobra.Command{
- Use: "list",
- Aliases: []string{"ls"},
- Short: "List configured github credentials",
- Long: `List the names of the github personal access tokens availabe to the garm.`,
- SilenceUsage: true,
- RunE: func(cmd *cobra.Command, args []string) error {
- if needsInit {
- return errNeedsInitError
- }
-
- listCredsReq := apiClientCreds.NewListCredentialsParams()
- response, err := apiCli.Credentials.ListCredentials(listCredsReq, authToken)
- if err != nil {
- return err
- }
- formatGithubCredentials(response.Payload)
- return nil
- },
- })
-
- rootCmd.AddCommand(credentialsCmd)
-}
-
-func formatGithubCredentials(creds []params.GithubCredentials) {
- t := table.NewWriter()
- header := table.Row{"Name", "Description", "Base URL", "API URL", "Upload URL"}
- t.AppendHeader(header)
- for _, val := range creds {
- t.AppendRow(table.Row{val.Name, val.Description, val.BaseURL, val.APIBaseURL, val.UploadBaseURL})
- t.AppendSeparator()
- }
- fmt.Println(t.Render())
-}
diff --git a/cmd/garm-cli/cmd/enterprise.go b/cmd/garm-cli/cmd/enterprise.go
index c63e0c44..5c937b81 100644
--- a/cmd/garm-cli/cmd/enterprise.go
+++ b/cmd/garm-cli/cmd/enterprise.go
@@ -16,16 +16,19 @@ package cmd
import (
"fmt"
-
- apiClientEnterprises "github.com/cloudbase/garm/client/enterprises"
- "github.com/cloudbase/garm/params"
+ "strings"
"github.com/jedib0t/go-pretty/v6/table"
"github.com/spf13/cobra"
+
+ apiClientEnterprises "github.com/cloudbase/garm/client/enterprises"
+ "github.com/cloudbase/garm/cmd/garm-cli/common"
+ "github.com/cloudbase/garm/params"
)
var (
enterpriseName string
+ enterpriseEndpoint string
enterpriseWebhookSecret string
enterpriseCreds string
)
@@ -50,16 +53,17 @@ var enterpriseAddCmd = &cobra.Command{
Short: "Add enterprise",
Long: `Add a new enterprise to the manager.`,
SilenceUsage: true,
- RunE: func(cmd *cobra.Command, args []string) error {
+ RunE: func(_ *cobra.Command, _ []string) error {
if needsInit {
return errNeedsInitError
}
newEnterpriseReq := apiClientEnterprises.NewCreateEnterpriseParams()
newEnterpriseReq.Body = params.CreateEnterpriseParams{
- Name: enterpriseName,
- WebhookSecret: enterpriseWebhookSecret,
- CredentialsName: enterpriseCreds,
+ Name: enterpriseName,
+ WebhookSecret: enterpriseWebhookSecret,
+ CredentialsName: enterpriseCreds,
+ PoolBalancerType: params.PoolBalancerType(poolBalancerType),
}
response, err := apiCli.Enterprises.CreateEnterprise(newEnterpriseReq, authToken)
if err != nil {
@@ -76,12 +80,14 @@ var enterpriseListCmd = &cobra.Command{
Short: "List enterprises",
Long: `List all configured enterprises that are currently managed.`,
SilenceUsage: true,
- RunE: func(cmd *cobra.Command, args []string) error {
+ RunE: func(_ *cobra.Command, _ []string) error {
if needsInit {
return errNeedsInitError
}
listEnterprisesReq := apiClientEnterprises.NewListEnterprisesParams()
+ listEnterprisesReq.Name = &enterpriseName
+ listEnterprisesReq.Endpoint = &enterpriseEndpoint
response, err := apiCli.Enterprises.ListEnterprises(listEnterprisesReq, authToken)
if err != nil {
return err
@@ -96,7 +102,7 @@ var enterpriseShowCmd = &cobra.Command{
Short: "Show details for one enterprise",
Long: `Displays detailed information about a single enterprise.`,
SilenceUsage: true,
- RunE: func(cmd *cobra.Command, args []string) error {
+ RunE: func(_ *cobra.Command, args []string) error {
if needsInit {
return errNeedsInitError
}
@@ -106,8 +112,14 @@ var enterpriseShowCmd = &cobra.Command{
if len(args) > 1 {
return fmt.Errorf("too many arguments")
}
+
+ enterpriseID, err := resolveEnterprise(args[0], enterpriseEndpoint)
+ if err != nil {
+ return err
+ }
+
showEnterpriseReq := apiClientEnterprises.NewGetEnterpriseParams()
- showEnterpriseReq.EnterpriseID = args[0]
+ showEnterpriseReq.EnterpriseID = enterpriseID
response, err := apiCli.Enterprises.GetEnterprise(showEnterpriseReq, authToken)
if err != nil {
return err
@@ -123,7 +135,7 @@ var enterpriseDeleteCmd = &cobra.Command{
Short: "Removes one enterprise",
Long: `Delete one enterprise from the manager.`,
SilenceUsage: true,
- RunE: func(cmd *cobra.Command, args []string) error {
+ RunE: func(_ *cobra.Command, args []string) error {
if needsInit {
return errNeedsInitError
}
@@ -133,8 +145,14 @@ var enterpriseDeleteCmd = &cobra.Command{
if len(args) > 1 {
return fmt.Errorf("too many arguments")
}
+
+ enterpriseID, err := resolveEnterprise(args[0], enterpriseEndpoint)
+ if err != nil {
+ return err
+ }
+
deleteEnterpriseReq := apiClientEnterprises.NewDeleteEnterpriseParams()
- deleteEnterpriseReq.EnterpriseID = args[0]
+ deleteEnterpriseReq.EnterpriseID = enterpriseID
if err := apiCli.Enterprises.DeleteEnterprise(deleteEnterpriseReq, authToken); err != nil {
return err
}
@@ -147,7 +165,7 @@ var enterpriseUpdateCmd = &cobra.Command{
Short: "Update enterprise",
Long: `Update enterprise credentials or webhook secret.`,
SilenceUsage: true,
- RunE: func(cmd *cobra.Command, args []string) error {
+ RunE: func(_ *cobra.Command, args []string) error {
if needsInit {
return errNeedsInitError
}
@@ -159,12 +177,18 @@ var enterpriseUpdateCmd = &cobra.Command{
if len(args) > 1 {
return fmt.Errorf("too many arguments")
}
+ enterpriseID, err := resolveEnterprise(args[0], enterpriseEndpoint)
+ if err != nil {
+ return err
+ }
+
updateEnterpriseReq := apiClientEnterprises.NewUpdateEnterpriseParams()
updateEnterpriseReq.Body = params.UpdateEntityParams{
- WebhookSecret: repoWebhookSecret,
- CredentialsName: repoCreds,
+		WebhookSecret:    enterpriseWebhookSecret,
+		CredentialsName:  enterpriseCreds,
+ PoolBalancerType: params.PoolBalancerType(poolBalancerType),
}
- updateEnterpriseReq.EnterpriseID = args[0]
+ updateEnterpriseReq.EnterpriseID = enterpriseID
response, err := apiCli.Enterprises.UpdateEnterprise(updateEnterpriseReq, authToken)
if err != nil {
return err
@@ -175,14 +199,24 @@ var enterpriseUpdateCmd = &cobra.Command{
}
func init() {
-
enterpriseAddCmd.Flags().StringVar(&enterpriseName, "name", "", "The name of the enterprise")
enterpriseAddCmd.Flags().StringVar(&enterpriseWebhookSecret, "webhook-secret", "", "The webhook secret for this enterprise")
enterpriseAddCmd.Flags().StringVar(&enterpriseCreds, "credentials", "", "Credentials name. See credentials list.")
+ enterpriseAddCmd.Flags().StringVar(&poolBalancerType, "pool-balancer-type", string(params.PoolBalancerTypeRoundRobin), "The balancing strategy to use when creating runners in pools matching requested labels.")
+
+ enterpriseListCmd.Flags().BoolVarP(&long, "long", "l", false, "Include additional info.")
+ enterpriseListCmd.Flags().StringVarP(&enterpriseName, "name", "n", "", "Exact enterprise name to filter by.")
+ enterpriseListCmd.Flags().StringVarP(&enterpriseEndpoint, "endpoint", "e", "", "Exact endpoint name to filter by.")
+
enterpriseAddCmd.MarkFlagRequired("credentials") //nolint
enterpriseAddCmd.MarkFlagRequired("name") //nolint
enterpriseUpdateCmd.Flags().StringVar(&enterpriseWebhookSecret, "webhook-secret", "", "The webhook secret for this enterprise")
enterpriseUpdateCmd.Flags().StringVar(&enterpriseCreds, "credentials", "", "Credentials name. See credentials list.")
+ enterpriseUpdateCmd.Flags().StringVar(&poolBalancerType, "pool-balancer-type", "", "The balancing strategy to use when creating runners in pools matching requested labels.")
+ enterpriseUpdateCmd.Flags().StringVar(&enterpriseEndpoint, "endpoint", "", "When using the name of the enterprise, the endpoint must be specified when multiple enterprises with the same name exist.")
+
+ enterpriseDeleteCmd.Flags().StringVar(&enterpriseEndpoint, "endpoint", "", "When using the name of the enterprise, the endpoint must be specified when multiple enterprises with the same name exist.")
+ enterpriseShowCmd.Flags().StringVar(&enterpriseEndpoint, "endpoint", "", "When using the name of the enterprise, the endpoint must be specified when multiple enterprises with the same name exist.")
enterpriseCmd.AddCommand(
enterpriseListCmd,
@@ -196,24 +230,43 @@ func init() {
}
func formatEnterprises(enterprises []params.Enterprise) {
+ if outputFormat == common.OutputFormatJSON {
+ printAsJSON(enterprises)
+ return
+ }
t := table.NewWriter()
- header := table.Row{"ID", "Name", "Credentials name", "Pool mgr running"}
+ header := table.Row{"ID", "Name", "Endpoint", "Credentials name", "Pool Balancer Type", "Pool mgr running"}
+ if long {
+ header = append(header, "Created At", "Updated At")
+ }
t.AppendHeader(header)
for _, val := range enterprises {
- t.AppendRow(table.Row{val.ID, val.Name, val.CredentialsName, val.PoolManagerStatus.IsRunning})
+ row := table.Row{val.ID, val.Name, val.Endpoint.Name, val.Credentials.Name, val.GetBalancerType(), val.PoolManagerStatus.IsRunning}
+ if long {
+ row = append(row, val.CreatedAt, val.UpdatedAt)
+ }
+ t.AppendRow(row)
t.AppendSeparator()
}
fmt.Println(t.Render())
}
func formatOneEnterprise(enterprise params.Enterprise) {
+ if outputFormat == common.OutputFormatJSON {
+ printAsJSON(enterprise)
+ return
+ }
t := table.NewWriter()
rowConfigAutoMerge := table.RowConfig{AutoMerge: true}
header := table.Row{"Field", "Value"}
t.AppendHeader(header)
t.AppendRow(table.Row{"ID", enterprise.ID})
+ t.AppendRow(table.Row{"Created At", enterprise.CreatedAt})
+ t.AppendRow(table.Row{"Updated At", enterprise.UpdatedAt})
t.AppendRow(table.Row{"Name", enterprise.Name})
- t.AppendRow(table.Row{"Credentials", enterprise.CredentialsName})
+ t.AppendRow(table.Row{"Endpoint", enterprise.Endpoint.Name})
+ t.AppendRow(table.Row{"Pool balancer type", enterprise.GetBalancerType()})
+ t.AppendRow(table.Row{"Credentials", enterprise.Credentials.Name})
t.AppendRow(table.Row{"Pool manager running", enterprise.PoolManagerStatus.IsRunning})
if !enterprise.PoolManagerStatus.IsRunning {
t.AppendRow(table.Row{"Failure reason", enterprise.PoolManagerStatus.FailureReason})
@@ -224,9 +277,15 @@ func formatOneEnterprise(enterprise params.Enterprise) {
t.AppendRow(table.Row{"Pools", pool.ID}, rowConfigAutoMerge)
}
}
+
+ if len(enterprise.Events) > 0 {
+ for _, event := range enterprise.Events {
+ t.AppendRow(table.Row{"Events", fmt.Sprintf("%s %s: %s", event.CreatedAt.Format("2006-01-02T15:04:05"), strings.ToUpper(string(event.EventLevel)), event.Message)}, rowConfigAutoMerge)
+ }
+ }
t.SetColumnConfigs([]table.ColumnConfig{
{Number: 1, AutoMerge: true},
- {Number: 2, AutoMerge: false},
+ {Number: 2, AutoMerge: false, WidthMax: 100},
})
fmt.Println(t.Render())
diff --git a/cmd/garm-cli/cmd/events.go b/cmd/garm-cli/cmd/events.go
new file mode 100644
index 00000000..da44732a
--- /dev/null
+++ b/cmd/garm-cli/cmd/events.go
@@ -0,0 +1,65 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+package cmd
+
+import (
+ "context"
+ "os"
+ "os/signal"
+ "syscall"
+
+ "github.com/gorilla/websocket"
+ "github.com/spf13/cobra"
+
+ garmWs "github.com/cloudbase/garm-provider-common/util/websocket"
+ "github.com/cloudbase/garm/cmd/garm-cli/common"
+)
+
+var signals = []os.Signal{
+ os.Interrupt,
+ syscall.SIGTERM,
+}
+
+var eventsCmd = &cobra.Command{
+ Use: "debug-events",
+ SilenceUsage: true,
+ Short: "Stream garm events",
+ Long: `Stream all garm events to the terminal.`,
+ RunE: func(_ *cobra.Command, _ []string) error {
+ ctx, stop := signal.NotifyContext(context.Background(), signals...)
+ defer stop()
+
+ reader, err := garmWs.NewReader(ctx, mgr.BaseURL, "/api/v1/ws/events", mgr.Token, common.PrintWebsocketMessage)
+ if err != nil {
+ return err
+ }
+
+ if err := reader.Start(); err != nil {
+ return err
+ }
+
+ if eventsFilters != "" {
+ if err := reader.WriteMessage(websocket.TextMessage, []byte(eventsFilters)); err != nil {
+ return err
+ }
+ }
+ <-reader.Done()
+ return nil
+ },
+}
+
+func init() {
+	eventsCmd.Flags().StringVarP(&eventsFilters, "filters", "m", "", "JSON-encoded event filters to apply")
+ rootCmd.AddCommand(eventsCmd)
+}
diff --git a/cmd/garm-cli/cmd/gitea.go b/cmd/garm-cli/cmd/gitea.go
new file mode 100644
index 00000000..6627fd6f
--- /dev/null
+++ b/cmd/garm-cli/cmd/gitea.go
@@ -0,0 +1,34 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+package cmd
+
+import "github.com/spf13/cobra"
+
+// giteaCmd represents the gitea command. This command has a set
+// of subcommands that allow configuring and managing Gitea endpoints
+// and credentials.
+var giteaCmd = &cobra.Command{
+ Use: "gitea",
+ Aliases: []string{"gt"},
+ SilenceUsage: true,
+ Short: "Manage Gitea resources",
+	Long: `Manage Gitea-related resources.
+
+This command allows you to configure and manage Gitea endpoints and credentials.`,
+ Run: nil,
+}
+
+func init() {
+ rootCmd.AddCommand(giteaCmd)
+}
diff --git a/cmd/garm-cli/cmd/gitea_credentials.go b/cmd/garm-cli/cmd/gitea_credentials.go
new file mode 100644
index 00000000..d26f95ed
--- /dev/null
+++ b/cmd/garm-cli/cmd/gitea_credentials.go
@@ -0,0 +1,317 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package cmd
+
+import (
+ "fmt"
+ "strconv"
+
+ "github.com/jedib0t/go-pretty/v6/table"
+ "github.com/spf13/cobra"
+
+ apiClientCreds "github.com/cloudbase/garm/client/credentials"
+ "github.com/cloudbase/garm/cmd/garm-cli/common"
+ "github.com/cloudbase/garm/params"
+)
+
+// giteaCredentialsCmd represents the gitea credentials command
+var giteaCredentialsCmd = &cobra.Command{
+ Use: "credentials",
+ Aliases: []string{"creds"},
+ Short: "Manage gitea credentials",
+ Long: `Manage Gitea credentials stored in GARM.
+
+This command allows you to add, update, list and delete Gitea credentials.`,
+ Run: nil,
+}
+
+var giteaCredentialsListCmd = &cobra.Command{
+ Use: "list",
+ Aliases: []string{"ls"},
+ Short: "List configured gitea credentials",
+	Long:         `List the names of the Gitea personal access tokens available to GARM.`,
+ SilenceUsage: true,
+ RunE: func(_ *cobra.Command, _ []string) error {
+ if needsInit {
+ return errNeedsInitError
+ }
+
+ listCredsReq := apiClientCreds.NewListGiteaCredentialsParams()
+ response, err := apiCli.Credentials.ListGiteaCredentials(listCredsReq, authToken)
+ if err != nil {
+ return err
+ }
+ formatGiteaCredentials(response.Payload)
+ return nil
+ },
+}
+
+var giteaCredentialsShowCmd = &cobra.Command{
+ Use: "show",
+ Aliases: []string{"get"},
+ Short: "Show details of a configured gitea credential",
+ Long: `Show the details of a configured gitea credential.`,
+ SilenceUsage: true,
+ RunE: func(_ *cobra.Command, args []string) error {
+ if needsInit {
+ return errNeedsInitError
+ }
+
+ if len(args) < 1 {
+ return fmt.Errorf("missing required argument: credential ID")
+ }
+
+ credID, err := strconv.ParseInt(args[0], 10, 64)
+ if err != nil {
+ return fmt.Errorf("invalid credential ID: %s", args[0])
+ }
+ showCredsReq := apiClientCreds.NewGetGiteaCredentialsParams().WithID(credID)
+ response, err := apiCli.Credentials.GetGiteaCredentials(showCredsReq, authToken)
+ if err != nil {
+ return err
+ }
+ formatOneGiteaCredential(response.Payload)
+ return nil
+ },
+}
+
+var giteaCredentialsUpdateCmd = &cobra.Command{
+ Use: "update",
+ Short: "Update a gitea credential",
+ Long: "Update a gitea credential",
+ SilenceUsage: true,
+ RunE: func(_ *cobra.Command, args []string) error {
+ if needsInit {
+ return errNeedsInitError
+ }
+
+ if len(args) < 1 {
+ return fmt.Errorf("missing required argument: credential ID")
+ }
+
+ if len(args) > 1 {
+ return fmt.Errorf("too many arguments")
+ }
+
+ credID, err := strconv.ParseInt(args[0], 10, 64)
+ if err != nil {
+ return fmt.Errorf("invalid credential ID: %s", args[0])
+ }
+
+ updateParams, err := parseGiteaCredentialsUpdateParams()
+ if err != nil {
+ return err
+ }
+
+ updateCredsReq := apiClientCreds.NewUpdateGiteaCredentialsParams().WithID(credID)
+ updateCredsReq.Body = updateParams
+
+ response, err := apiCli.Credentials.UpdateGiteaCredentials(updateCredsReq, authToken)
+ if err != nil {
+ return err
+ }
+ formatOneGiteaCredential(response.Payload)
+ return nil
+ },
+}
+
+var giteaCredentialsDeleteCmd = &cobra.Command{
+ Use: "delete",
+ Aliases: []string{"remove", "rm"},
+ Short: "Delete a gitea credential",
+ Long: "Delete a gitea credential",
+ SilenceUsage: true,
+ RunE: func(_ *cobra.Command, args []string) error {
+ if needsInit {
+ return errNeedsInitError
+ }
+
+ if len(args) < 1 {
+ return fmt.Errorf("missing required argument: credential ID")
+ }
+
+ if len(args) > 1 {
+ return fmt.Errorf("too many arguments")
+ }
+
+ credID, err := strconv.ParseInt(args[0], 10, 64)
+ if err != nil {
+ return fmt.Errorf("invalid credential ID: %s", args[0])
+ }
+
+ deleteCredsReq := apiClientCreds.NewDeleteGiteaCredentialsParams().WithID(credID)
+ if err := apiCli.Credentials.DeleteGiteaCredentials(deleteCredsReq, authToken); err != nil {
+ return err
+ }
+ return nil
+ },
+}
+
+var giteaCredentialsAddCmd = &cobra.Command{
+ Use: "add",
+ Short: "Add a gitea credential",
+ Long: "Add a gitea credential",
+ SilenceUsage: true,
+ RunE: func(_ *cobra.Command, args []string) error {
+ if needsInit {
+ return errNeedsInitError
+ }
+
+ if len(args) > 0 {
+ return fmt.Errorf("too many arguments")
+ }
+
+ addParams, err := parseGiteaCredentialsAddParams()
+ if err != nil {
+ return err
+ }
+
+ addCredsReq := apiClientCreds.NewCreateGiteaCredentialsParams()
+ addCredsReq.Body = addParams
+
+ response, err := apiCli.Credentials.CreateGiteaCredentials(addCredsReq, authToken)
+ if err != nil {
+ return err
+ }
+ formatOneGiteaCredential(response.Payload)
+ return nil
+ },
+}
+
+func init() {
+ giteaCredentialsUpdateCmd.Flags().StringVar(&credentialsName, "name", "", "Name of the credential")
+ giteaCredentialsUpdateCmd.Flags().StringVar(&credentialsDescription, "description", "", "Description of the credential")
+ giteaCredentialsUpdateCmd.Flags().StringVar(&credentialsOAuthToken, "pat-oauth-token", "", "If the credential is a personal access token, the OAuth token")
+
+ giteaCredentialsListCmd.Flags().BoolVarP(&long, "long", "l", false, "Include additional info.")
+
+ giteaCredentialsAddCmd.Flags().StringVar(&credentialsName, "name", "", "Name of the credential")
+ giteaCredentialsAddCmd.Flags().StringVar(&credentialsDescription, "description", "", "Description of the credential")
+ giteaCredentialsAddCmd.Flags().StringVar(&credentialsOAuthToken, "pat-oauth-token", "", "If the credential is a personal access token, the OAuth token")
+ giteaCredentialsAddCmd.Flags().StringVar(&credentialsType, "auth-type", "", "The type of the credential")
+ giteaCredentialsAddCmd.Flags().StringVar(&credentialsEndpoint, "endpoint", "", "The endpoint to associate the credential with")
+
+ giteaCredentialsAddCmd.MarkFlagRequired("name")
+ giteaCredentialsAddCmd.MarkFlagRequired("auth-type")
+ giteaCredentialsAddCmd.MarkFlagRequired("description")
+ giteaCredentialsAddCmd.MarkFlagRequired("endpoint")
+
+ giteaCredentialsCmd.AddCommand(
+ giteaCredentialsListCmd,
+ giteaCredentialsShowCmd,
+ giteaCredentialsUpdateCmd,
+ giteaCredentialsDeleteCmd,
+ giteaCredentialsAddCmd,
+ )
+ giteaCmd.AddCommand(giteaCredentialsCmd)
+}
+
+func parseGiteaCredentialsAddParams() (ret params.CreateGiteaCredentialsParams, err error) {
+ ret.Name = credentialsName
+ ret.Description = credentialsDescription
+ ret.AuthType = params.ForgeAuthType(credentialsType)
+ ret.Endpoint = credentialsEndpoint
+ switch ret.AuthType {
+ case params.ForgeAuthTypePAT:
+ ret.PAT.OAuth2Token = credentialsOAuthToken
+ default:
+ return params.CreateGiteaCredentialsParams{}, fmt.Errorf("invalid auth type: %s (supported are: pat)", credentialsType)
+ }
+
+ return ret, nil
+}
+
+func parseGiteaCredentialsUpdateParams() (params.UpdateGiteaCredentialsParams, error) {
+ var updateParams params.UpdateGiteaCredentialsParams
+
+ if credentialsName != "" {
+ updateParams.Name = &credentialsName
+ }
+
+ if credentialsDescription != "" {
+ updateParams.Description = &credentialsDescription
+ }
+
+ if credentialsOAuthToken != "" {
+ if updateParams.PAT == nil {
+			updateParams.PAT = &params.GithubPAT{}
+ }
+ updateParams.PAT.OAuth2Token = credentialsOAuthToken
+ }
+
+ return updateParams, nil
+}
+
+func formatGiteaCredentials(creds []params.ForgeCredentials) {
+ if outputFormat == common.OutputFormatJSON {
+ printAsJSON(creds)
+ return
+ }
+ t := table.NewWriter()
+ header := table.Row{"ID", "Name", "Description", "Base URL", "API URL", "Type"}
+ if long {
+ header = append(header, "Created At", "Updated At")
+ }
+ t.AppendHeader(header)
+ for _, val := range creds {
+ row := table.Row{val.ID, val.Name, val.Description, val.BaseURL, val.APIBaseURL, val.AuthType}
+ if long {
+ row = append(row, val.CreatedAt, val.UpdatedAt)
+ }
+ t.AppendRow(row)
+ t.AppendSeparator()
+ }
+ fmt.Println(t.Render())
+}
+
+func formatOneGiteaCredential(cred params.ForgeCredentials) {
+ if outputFormat == common.OutputFormatJSON {
+ printAsJSON(cred)
+ return
+ }
+ t := table.NewWriter()
+ header := table.Row{"Field", "Value"}
+ t.AppendHeader(header)
+
+ t.AppendRow(table.Row{"ID", cred.ID})
+ t.AppendRow(table.Row{"Created At", cred.CreatedAt})
+ t.AppendRow(table.Row{"Updated At", cred.UpdatedAt})
+ t.AppendRow(table.Row{"Name", cred.Name})
+ t.AppendRow(table.Row{"Description", cred.Description})
+ t.AppendRow(table.Row{"Base URL", cred.BaseURL})
+ t.AppendRow(table.Row{"API URL", cred.APIBaseURL})
+ t.AppendRow(table.Row{"Type", cred.AuthType})
+ t.AppendRow(table.Row{"Endpoint", cred.Endpoint.Name})
+
+ if len(cred.Repositories) > 0 {
+ t.AppendRow(table.Row{"", ""})
+ for _, repo := range cred.Repositories {
+ t.AppendRow(table.Row{"Repositories", repo.String()})
+ }
+ }
+
+ if len(cred.Organizations) > 0 {
+ t.AppendRow(table.Row{"", ""})
+ for _, org := range cred.Organizations {
+ t.AppendRow(table.Row{"Organizations", org.Name})
+ }
+ }
+
+ t.SetColumnConfigs([]table.ColumnConfig{
+ {Number: 1, AutoMerge: true},
+ {Number: 2, AutoMerge: false, WidthMax: 100},
+ })
+ fmt.Println(t.Render())
+}
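A hypothetical end-to-end invocation of the new command, mirroring the flags registered above; the endpoint name and token are placeholders, and the endpoint itself would be created first with the endpoint subcommand added below:

    garm-cli gitea credentials add \
        --name=gitea-pat \
        --description="PAT for the internal Gitea" \
        --auth-type=pat \
        --pat-oauth-token=<token> \
        --endpoint=gitea.internal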
diff --git a/cmd/garm-cli/cmd/gitea_endpoints.go b/cmd/garm-cli/cmd/gitea_endpoints.go
new file mode 100644
index 00000000..55fa09c9
--- /dev/null
+++ b/cmd/garm-cli/cmd/gitea_endpoints.go
@@ -0,0 +1,231 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+package cmd
+
+import (
+ "fmt"
+
+ "github.com/spf13/cobra"
+
+ apiClientEndpoints "github.com/cloudbase/garm/client/endpoints"
+ "github.com/cloudbase/garm/params"
+)
+
+var giteaEndpointCmd = &cobra.Command{
+ Use: "endpoint",
+ SilenceUsage: true,
+ Short: "Manage Gitea endpoints",
+ Long: `Manage Gitea endpoints.
+
+This command allows you to configure and manage Gitea endpoints.`,
+ Run: nil,
+}
+
+var giteaEndpointListCmd = &cobra.Command{
+ Use: "list",
+ Aliases: []string{"ls"},
+ SilenceUsage: true,
+ Short: "List Gitea endpoints",
+ Long: `List all configured Gitea endpoints.`,
+ RunE: func(_ *cobra.Command, _ []string) error {
+ if needsInit {
+ return errNeedsInitError
+ }
+
+ newListReq := apiClientEndpoints.NewListGiteaEndpointsParams()
+ response, err := apiCli.Endpoints.ListGiteaEndpoints(newListReq, authToken)
+ if err != nil {
+ return err
+ }
+ formatEndpoints(response.Payload)
+ return nil
+ },
+}
+
+var giteaEndpointShowCmd = &cobra.Command{
+ Use: "show",
+ Aliases: []string{"get"},
+ SilenceUsage: true,
+ Short: "Show Gitea endpoint",
+ Long: `Show details of a Gitea endpoint.`,
+ RunE: func(_ *cobra.Command, args []string) error {
+ if needsInit {
+ return errNeedsInitError
+ }
+ if len(args) == 0 {
+ return fmt.Errorf("requires an endpoint name")
+ }
+ if len(args) > 1 {
+ return fmt.Errorf("too many arguments")
+ }
+
+ newShowReq := apiClientEndpoints.NewGetGiteaEndpointParams()
+ newShowReq.Name = args[0]
+ response, err := apiCli.Endpoints.GetGiteaEndpoint(newShowReq, authToken)
+ if err != nil {
+ return err
+ }
+ formatOneEndpoint(response.Payload)
+ return nil
+ },
+}
+
+var giteaEndpointCreateCmd = &cobra.Command{
+ Use: "create",
+ SilenceUsage: true,
+ Short: "Create Gitea endpoint",
+ Long: `Create a new Gitea endpoint.`,
+ RunE: func(_ *cobra.Command, _ []string) error {
+ if needsInit {
+ return errNeedsInitError
+ }
+
+ createParams, err := parseGiteaCreateParams()
+ if err != nil {
+ return err
+ }
+
+ newCreateReq := apiClientEndpoints.NewCreateGiteaEndpointParams()
+ newCreateReq.Body = createParams
+
+ response, err := apiCli.Endpoints.CreateGiteaEndpoint(newCreateReq, authToken)
+ if err != nil {
+ return err
+ }
+ formatOneEndpoint(response.Payload)
+ return nil
+ },
+}
+
+var giteaEndpointDeleteCmd = &cobra.Command{
+ Use: "delete",
+ Aliases: []string{"remove", "rm"},
+ SilenceUsage: true,
+ Short: "Delete Gitea endpoint",
+ Long: "Delete a Gitea endpoint",
+ RunE: func(_ *cobra.Command, args []string) error {
+ if needsInit {
+ return errNeedsInitError
+ }
+ if len(args) == 0 {
+ return fmt.Errorf("requires an endpoint name")
+ }
+ if len(args) > 1 {
+ return fmt.Errorf("too many arguments")
+ }
+
+ newDeleteReq := apiClientEndpoints.NewDeleteGiteaEndpointParams()
+ newDeleteReq.Name = args[0]
+ if err := apiCli.Endpoints.DeleteGiteaEndpoint(newDeleteReq, authToken); err != nil {
+ return err
+ }
+ return nil
+ },
+}
+
+var giteaEndpointUpdateCmd = &cobra.Command{
+ Use: "update",
+ Short: "Update Gitea endpoint",
+ Long: "Update a Gitea endpoint",
+ SilenceUsage: true,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ if needsInit {
+ return errNeedsInitError
+ }
+ if len(args) == 0 {
+ return fmt.Errorf("requires an endpoint name")
+ }
+ if len(args) > 1 {
+ return fmt.Errorf("too many arguments")
+ }
+
+ updateParams := params.UpdateGiteaEndpointParams{}
+
+ if cmd.Flags().Changed("ca-cert-path") {
+ cert, err := parseAndReadCABundle()
+ if err != nil {
+ return err
+ }
+ updateParams.CACertBundle = cert
+ }
+
+ if cmd.Flags().Changed("description") {
+ updateParams.Description = &endpointDescription
+ }
+
+ if cmd.Flags().Changed("base-url") {
+ updateParams.BaseURL = &endpointBaseURL
+ }
+
+ if cmd.Flags().Changed("api-base-url") {
+ updateParams.APIBaseURL = &endpointAPIBaseURL
+ }
+
+ newEndpointUpdateReq := apiClientEndpoints.NewUpdateGiteaEndpointParams()
+ newEndpointUpdateReq.Name = args[0]
+ newEndpointUpdateReq.Body = updateParams
+
+ response, err := apiCli.Endpoints.UpdateGiteaEndpoint(newEndpointUpdateReq, authToken)
+ if err != nil {
+ return err
+ }
+ formatOneEndpoint(response.Payload)
+ return nil
+ },
+}
+
+func init() {
+ giteaEndpointCreateCmd.Flags().StringVar(&endpointName, "name", "", "Name of the Gitea endpoint")
+ giteaEndpointCreateCmd.Flags().StringVar(&endpointDescription, "description", "", "Description for the Gitea endpoint")
+ giteaEndpointCreateCmd.Flags().StringVar(&endpointBaseURL, "base-url", "", "Base URL of the Gitea endpoint")
+ giteaEndpointCreateCmd.Flags().StringVar(&endpointAPIBaseURL, "api-base-url", "", "API Base URL of the Gitea endpoint")
+ giteaEndpointCreateCmd.Flags().StringVar(&endpointCACertPath, "ca-cert-path", "", "CA Cert Path of the Gitea endpoint")
+
+ giteaEndpointListCmd.Flags().BoolVarP(&long, "long", "l", false, "Include additional info.")
+
+ giteaEndpointCreateCmd.MarkFlagRequired("name")
+ giteaEndpointCreateCmd.MarkFlagRequired("base-url")
+ giteaEndpointCreateCmd.MarkFlagRequired("api-base-url")
+
+ giteaEndpointUpdateCmd.Flags().StringVar(&endpointDescription, "description", "", "Description for the Gitea endpoint")
+ giteaEndpointUpdateCmd.Flags().StringVar(&endpointBaseURL, "base-url", "", "Base URL of the Gitea endpoint")
+ giteaEndpointUpdateCmd.Flags().StringVar(&endpointAPIBaseURL, "api-base-url", "", "API Base URL of the Gitea endpoint")
+ giteaEndpointUpdateCmd.Flags().StringVar(&endpointCACertPath, "ca-cert-path", "", "CA Cert Path of the Gitea endpoint")
+
+ giteaEndpointCmd.AddCommand(
+ giteaEndpointListCmd,
+ giteaEndpointShowCmd,
+ giteaEndpointCreateCmd,
+ giteaEndpointDeleteCmd,
+ giteaEndpointUpdateCmd,
+ )
+
+ giteaCmd.AddCommand(giteaEndpointCmd)
+}
+
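+// parseGiteaCreateParams builds the create parameters for a new Gitea
+// endpoint from the CLI flags, reading and validating the optional CA cert
+// bundle from disk.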
+func parseGiteaCreateParams() (params.CreateGiteaEndpointParams, error) {
+ certBundleBytes, err := parseAndReadCABundle()
+ if err != nil {
+ return params.CreateGiteaEndpointParams{}, err
+ }
+
+ ret := params.CreateGiteaEndpointParams{
+ Name: endpointName,
+ BaseURL: endpointBaseURL,
+ APIBaseURL: endpointAPIBaseURL,
+ Description: endpointDescription,
+ CACertBundle: certBundleBytes,
+ }
+ return ret, nil
+}
diff --git a/cmd/garm-cli/cmd/github.go b/cmd/garm-cli/cmd/github.go
new file mode 100644
index 00000000..71342026
--- /dev/null
+++ b/cmd/garm-cli/cmd/github.go
@@ -0,0 +1,43 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+package cmd
+
+import "github.com/spf13/cobra"
+
+var (
+ endpointName string
+ endpointBaseURL string
+ endpointUploadURL string
+ endpointAPIBaseURL string
+ endpointCACertPath string
+ endpointDescription string
+)
+
+// githubCmd represents the github command. This command has a set
+// of subcommands that allow configuring and managing GitHub endpoints
+// and credentials.
+var githubCmd = &cobra.Command{
+ Use: "github",
+ Aliases: []string{"gh"},
+ SilenceUsage: true,
+ Short: "Manage GitHub resources",
+ Long: `Manage GitHub related resources.
+
+This command allows you to configure and manage GitHub endpoints and credentials.`,
+ Run: nil,
+}
+
+func init() {
+ rootCmd.AddCommand(githubCmd)
+}
diff --git a/cmd/garm-cli/cmd/github_credentials.go b/cmd/garm-cli/cmd/github_credentials.go
new file mode 100644
index 00000000..6f9b6409
--- /dev/null
+++ b/cmd/garm-cli/cmd/github_credentials.go
@@ -0,0 +1,425 @@
+// Copyright 2022 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package cmd
+
+import (
+ "crypto/x509"
+ "encoding/pem"
+ "fmt"
+ "os"
+ "strconv"
+
+ "github.com/jedib0t/go-pretty/v6/table"
+ "github.com/spf13/cobra"
+
+ apiClientCreds "github.com/cloudbase/garm/client/credentials"
+ "github.com/cloudbase/garm/cmd/garm-cli/common"
+ "github.com/cloudbase/garm/params"
+)
+
+var (
+ credentialsName string
+ credentialsDescription string
+ credentialsOAuthToken string
+ credentialsAppInstallationID int64
+ credentialsAppID int64
+ credentialsPrivateKeyPath string
+ credentialsType string
+ credentialsEndpoint string
+)
+
+// credentialsCmd represents the credentials command
+var credentialsCmd = &cobra.Command{
+ Use: "credentials",
+ Aliases: []string{"creds"},
+ Short: "List configured credentials. This is an alias for the github credentials command.",
+ Long: `List all available GitHub credentials.
+
+This command is an alias for the garm-cli github credentials command.`,
+ Run: nil,
+}
+
+// githubCredentialsCmd represents the github credentials command
+var githubCredentialsCmd = &cobra.Command{
+ Use: "credentials",
+ Aliases: []string{"creds"},
+ Short: "Manage github credentials",
+ Long: `Manage GitHub credentials stored in GARM.
+
+This command allows you to add, update, list and delete GitHub credentials.`,
+ Run: nil,
+}
+
+var githubCredentialsListCmd = &cobra.Command{
+ Use: "list",
+ Aliases: []string{"ls"},
+ Short: "List configured github credentials",
+ Long: `List the names of the github personal access tokens available to the garm.`,
+ SilenceUsage: true,
+ RunE: func(_ *cobra.Command, _ []string) error {
+ if needsInit {
+ return errNeedsInitError
+ }
+
+ listCredsReq := apiClientCreds.NewListCredentialsParams()
+ response, err := apiCli.Credentials.ListCredentials(listCredsReq, authToken)
+ if err != nil {
+ return err
+ }
+ formatGithubCredentials(response.Payload)
+ return nil
+ },
+}
+
+var githubCredentialsShowCmd = &cobra.Command{
+ Use: "show",
+ Aliases: []string{"get"},
+ Short: "Show details of a configured github credential",
+ Long: `Show the details of a configured github credential.`,
+ SilenceUsage: true,
+ RunE: func(_ *cobra.Command, args []string) error {
+ if needsInit {
+ return errNeedsInitError
+ }
+
+ if len(args) < 1 {
+ return fmt.Errorf("missing required argument: credential ID")
+ }
+
+ credID, err := strconv.ParseInt(args[0], 10, 64)
+ if err != nil {
+ return fmt.Errorf("invalid credential ID: %s", args[0])
+ }
+ showCredsReq := apiClientCreds.NewGetCredentialsParams().WithID(credID)
+ response, err := apiCli.Credentials.GetCredentials(showCredsReq, authToken)
+ if err != nil {
+ return err
+ }
+ formatOneGithubCredential(response.Payload)
+ return nil
+ },
+}
+
+var githubCredentialsUpdateCmd = &cobra.Command{
+ Use: "update",
+ Short: "Update a github credential",
+ Long: "Update a github credential",
+ SilenceUsage: true,
+ RunE: func(_ *cobra.Command, args []string) error {
+ if needsInit {
+ return errNeedsInitError
+ }
+
+ if len(args) < 1 {
+ return fmt.Errorf("missing required argument: credential ID")
+ }
+
+ if len(args) > 1 {
+ return fmt.Errorf("too many arguments")
+ }
+
+ credID, err := strconv.ParseInt(args[0], 10, 64)
+ if err != nil {
+ return fmt.Errorf("invalid credential ID: %s", args[0])
+ }
+
+ updateParams, err := parseCredentialsUpdateParams()
+ if err != nil {
+ return err
+ }
+
+ updateCredsReq := apiClientCreds.NewUpdateCredentialsParams().WithID(credID)
+ updateCredsReq.Body = updateParams
+
+ response, err := apiCli.Credentials.UpdateCredentials(updateCredsReq, authToken)
+ if err != nil {
+ return err
+ }
+ formatOneGithubCredential(response.Payload)
+ return nil
+ },
+}
+
+var githubCredentialsDeleteCmd = &cobra.Command{
+ Use: "delete",
+ Aliases: []string{"remove", "rm"},
+ Short: "Delete a github credential",
+ Long: "Delete a github credential",
+ SilenceUsage: true,
+ RunE: func(_ *cobra.Command, args []string) error {
+ if needsInit {
+ return errNeedsInitError
+ }
+
+ if len(args) < 1 {
+ return fmt.Errorf("missing required argument: credential ID")
+ }
+
+ if len(args) > 1 {
+ return fmt.Errorf("too many arguments")
+ }
+
+ credID, err := strconv.ParseInt(args[0], 10, 64)
+ if err != nil {
+ return fmt.Errorf("invalid credential ID: %s", args[0])
+ }
+
+ deleteCredsReq := apiClientCreds.NewDeleteCredentialsParams().WithID(credID)
+ if err := apiCli.Credentials.DeleteCredentials(deleteCredsReq, authToken); err != nil {
+ return err
+ }
+ return nil
+ },
+}
+
+var githubCredentialsAddCmd = &cobra.Command{
+ Use: "add",
+ Short: "Add a github credential",
+ Long: "Add a github credential",
+ SilenceUsage: true,
+ RunE: func(_ *cobra.Command, args []string) error {
+ if needsInit {
+ return errNeedsInitError
+ }
+
+ if len(args) > 0 {
+ return fmt.Errorf("too many arguments")
+ }
+
+ addParams, err := parseCredentialsAddParams()
+ if err != nil {
+ return err
+ }
+
+ addCredsReq := apiClientCreds.NewCreateCredentialsParams()
+ addCredsReq.Body = addParams
+
+ response, err := apiCli.Credentials.CreateCredentials(addCredsReq, authToken)
+ if err != nil {
+ return err
+ }
+ formatOneGithubCredential(response.Payload)
+ return nil
+ },
+}
+
+func init() {
+ githubCredentialsUpdateCmd.Flags().StringVar(&credentialsName, "name", "", "Name of the credential")
+ githubCredentialsUpdateCmd.Flags().StringVar(&credentialsDescription, "description", "", "Description of the credential")
+ githubCredentialsUpdateCmd.Flags().StringVar(&credentialsOAuthToken, "pat-oauth-token", "", "If the credential is a personal access token, the OAuth token")
+ githubCredentialsUpdateCmd.Flags().Int64Var(&credentialsAppInstallationID, "app-installation-id", 0, "If the credential is an app, the installation ID")
+ githubCredentialsUpdateCmd.Flags().Int64Var(&credentialsAppID, "app-id", 0, "If the credential is an app, the app ID")
+ githubCredentialsUpdateCmd.Flags().StringVar(&credentialsPrivateKeyPath, "private-key-path", "", "If the credential is an app, the path to the private key file")
+
+ githubCredentialsListCmd.Flags().BoolVarP(&long, "long", "l", false, "Include additional info.")
+
+ githubCredentialsUpdateCmd.MarkFlagsMutuallyExclusive("pat-oauth-token", "app-installation-id")
+ githubCredentialsUpdateCmd.MarkFlagsMutuallyExclusive("pat-oauth-token", "app-id")
+ githubCredentialsUpdateCmd.MarkFlagsMutuallyExclusive("pat-oauth-token", "private-key-path")
+ githubCredentialsUpdateCmd.MarkFlagsRequiredTogether("app-installation-id", "app-id", "private-key-path")
+
+ githubCredentialsAddCmd.Flags().StringVar(&credentialsName, "name", "", "Name of the credential")
+ githubCredentialsAddCmd.Flags().StringVar(&credentialsDescription, "description", "", "Description of the credential")
+ githubCredentialsAddCmd.Flags().StringVar(&credentialsOAuthToken, "pat-oauth-token", "", "If the credential is a personal access token, the OAuth token")
+ githubCredentialsAddCmd.Flags().Int64Var(&credentialsAppInstallationID, "app-installation-id", 0, "If the credential is an app, the installation ID")
+ githubCredentialsAddCmd.Flags().Int64Var(&credentialsAppID, "app-id", 0, "If the credential is an app, the app ID")
+ githubCredentialsAddCmd.Flags().StringVar(&credentialsPrivateKeyPath, "private-key-path", "", "If the credential is an app, the path to the private key file")
+ githubCredentialsAddCmd.Flags().StringVar(&credentialsType, "auth-type", "", "The authentication type of the credential (pat or app)")
+ githubCredentialsAddCmd.Flags().StringVar(&credentialsEndpoint, "endpoint", "", "The endpoint to associate the credential with")
+
+ githubCredentialsAddCmd.MarkFlagsMutuallyExclusive("pat-oauth-token", "app-installation-id")
+ githubCredentialsAddCmd.MarkFlagsMutuallyExclusive("pat-oauth-token", "app-id")
+ githubCredentialsAddCmd.MarkFlagsMutuallyExclusive("pat-oauth-token", "private-key-path")
+ githubCredentialsAddCmd.MarkFlagsRequiredTogether("app-installation-id", "app-id", "private-key-path")
+
+ githubCredentialsAddCmd.MarkFlagRequired("name")
+ githubCredentialsAddCmd.MarkFlagRequired("auth-type")
+ githubCredentialsAddCmd.MarkFlagRequired("description")
+ githubCredentialsAddCmd.MarkFlagRequired("endpoint")
+
+ githubCredentialsCmd.AddCommand(
+ githubCredentialsListCmd,
+ githubCredentialsShowCmd,
+ githubCredentialsUpdateCmd,
+ githubCredentialsDeleteCmd,
+ githubCredentialsAddCmd,
+ )
+ githubCmd.AddCommand(githubCredentialsCmd)
+
+ credentialsCmd.AddCommand(githubCredentialsListCmd)
+ rootCmd.AddCommand(credentialsCmd)
+}
+
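+// parsePrivateKeyFromPath reads the PEM encoded private key at the given path
+// and makes sure it parses as a PKCS#1 RSA private key before returning the
+// raw file contents.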
+func parsePrivateKeyFromPath(path string) ([]byte, error) {
+ if _, err := os.Stat(path); err != nil {
+ return nil, fmt.Errorf("private key file not found: %s", credentialsPrivateKeyPath)
+ }
+ keyContents, err := os.ReadFile(path)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read private key file: %w", err)
+ }
+ pemBlock, _ := pem.Decode(keyContents)
+ if pemBlock == nil {
+ return nil, fmt.Errorf("failed to decode PEM block")
+ }
+ if _, err := x509.ParsePKCS1PrivateKey(pemBlock.Bytes); err != nil {
+ return nil, fmt.Errorf("failed to parse private key: %w", err)
+ }
+ return keyContents, nil
+}
+
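+// parseCredentialsAddParams builds the create parameters for a new GitHub
+// credential from the CLI flags. Depending on the auth type, it populates
+// either the PAT token or the app ID, installation ID and private key.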
+func parseCredentialsAddParams() (ret params.CreateGithubCredentialsParams, err error) {
+ ret.Name = credentialsName
+ ret.Description = credentialsDescription
+ ret.AuthType = params.ForgeAuthType(credentialsType)
+ ret.Endpoint = credentialsEndpoint
+ switch ret.AuthType {
+ case params.ForgeAuthTypePAT:
+ ret.PAT.OAuth2Token = credentialsOAuthToken
+ case params.ForgeAuthTypeApp:
+ ret.App.InstallationID = credentialsAppInstallationID
+ ret.App.AppID = credentialsAppID
+ keyContents, err := parsePrivateKeyFromPath(credentialsPrivateKeyPath)
+ if err != nil {
+ return params.CreateGithubCredentialsParams{}, err
+ }
+ ret.App.PrivateKeyBytes = keyContents
+ default:
+ return params.CreateGithubCredentialsParams{}, fmt.Errorf("invalid auth type: %s (supported are: app, pat)", credentialsType)
+ }
+
+ return ret, nil
+}
+
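+// parseCredentialsUpdateParams builds the update parameters from the CLI
+// flags. Only fields that were set to a non zero value are populated.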
+func parseCredentialsUpdateParams() (params.UpdateGithubCredentialsParams, error) {
+ var updateParams params.UpdateGithubCredentialsParams
+
+ if credentialsAppInstallationID != 0 || credentialsAppID != 0 || credentialsPrivateKeyPath != "" {
+ updateParams.App = &params.GithubApp{}
+ }
+
+ if credentialsName != "" {
+ updateParams.Name = &credentialsName
+ }
+
+ if credentialsDescription != "" {
+ updateParams.Description = &credentialsDescription
+ }
+
+ if credentialsOAuthToken != "" {
+ if updateParams.PAT == nil {
+ updateParams.PAT = &params.GithubPAT{}
+ }
+ updateParams.PAT.OAuth2Token = credentialsOAuthToken
+ }
+
+ if credentialsAppInstallationID != 0 {
+ updateParams.App.InstallationID = credentialsAppInstallationID
+ }
+
+ if credentialsAppID != 0 {
+ updateParams.App.AppID = credentialsAppID
+ }
+
+ if credentialsPrivateKeyPath != "" {
+ keyContents, err := parsePrivateKeyFromPath(credentialsPrivateKeyPath)
+ if err != nil {
+ return params.UpdateGithubCredentialsParams{}, err
+ }
+ updateParams.App.PrivateKeyBytes = keyContents
+ }
+
+ return updateParams, nil
+}
+
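+// formatGithubCredentials renders the given GitHub credentials as JSON when
+// the JSON output format is selected, or as a table otherwise. The --long
+// flag adds the created/updated timestamps.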
+func formatGithubCredentials(creds []params.ForgeCredentials) {
+ if outputFormat == common.OutputFormatJSON {
+ printAsJSON(creds)
+ return
+ }
+ t := table.NewWriter()
+ header := table.Row{"ID", "Name", "Description", "Base URL", "API URL", "Upload URL", "Type"}
+ if long {
+ header = append(header, "Created At", "Updated At")
+ }
+ t.AppendHeader(header)
+ for _, val := range creds {
+ row := table.Row{val.ID, val.Name, val.Description, val.BaseURL, val.APIBaseURL, val.UploadBaseURL, val.AuthType}
+ if long {
+ row = append(row, val.CreatedAt, val.UpdatedAt)
+ }
+ t.AppendRow(row)
+ t.AppendSeparator()
+ }
+ fmt.Println(t.Render())
+}
+
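+// formatOneGithubCredential renders a single GitHub credential as JSON or as
+// a field/value table, including rate limit information (when available) and
+// the repositories, organizations and enterprises associated with it.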
+func formatOneGithubCredential(cred params.ForgeCredentials) {
+ if outputFormat == common.OutputFormatJSON {
+ printAsJSON(cred)
+ return
+ }
+ t := table.NewWriter()
+ header := table.Row{"Field", "Value"}
+ t.AppendHeader(header)
+
+ var resetMinutes float64
+ if cred.RateLimit != nil {
+ resetMinutes = cred.RateLimit.ResetIn().Minutes()
+ }
+
+ t.AppendRow(table.Row{"ID", cred.ID})
+ t.AppendRow(table.Row{"Created At", cred.CreatedAt})
+ t.AppendRow(table.Row{"Updated At", cred.UpdatedAt})
+ t.AppendRow(table.Row{"Name", cred.Name})
+ t.AppendRow(table.Row{"Description", cred.Description})
+ t.AppendRow(table.Row{"Base URL", cred.BaseURL})
+ t.AppendRow(table.Row{"API URL", cred.APIBaseURL})
+ t.AppendRow(table.Row{"Upload URL", cred.UploadBaseURL})
+ t.AppendRow(table.Row{"Type", cred.AuthType})
+ t.AppendRow(table.Row{"Endpoint", cred.Endpoint.Name})
+ if resetMinutes > 0 {
+ t.AppendRow(table.Row{"", ""})
+ t.AppendRow(table.Row{"Remaining API requests", cred.RateLimit.Remaining})
+ t.AppendRow(table.Row{"Rate limit reset", fmt.Sprintf("%d minutes", int64(resetMinutes))})
+ }
+
+ if len(cred.Repositories) > 0 {
+ t.AppendRow(table.Row{"", ""})
+ for _, repo := range cred.Repositories {
+ t.AppendRow(table.Row{"Repositories", repo.String()})
+ }
+ }
+
+ if len(cred.Organizations) > 0 {
+ t.AppendRow(table.Row{"", ""})
+ for _, org := range cred.Organizations {
+ t.AppendRow(table.Row{"Organizations", org.Name})
+ }
+ }
+
+ if len(cred.Enterprises) > 0 {
+ t.AppendRow(table.Row{"", ""})
+ for _, ent := range cred.Enterprises {
+ t.AppendRow(table.Row{"Enterprises", ent.Name})
+ }
+ }
+
+ t.SetColumnConfigs([]table.ColumnConfig{
+ {Number: 1, AutoMerge: true},
+ {Number: 2, AutoMerge: false, WidthMax: 100},
+ })
+ fmt.Println(t.Render())
+}
diff --git a/cmd/garm-cli/cmd/github_endpoints.go b/cmd/garm-cli/cmd/github_endpoints.go
new file mode 100644
index 00000000..61f46810
--- /dev/null
+++ b/cmd/garm-cli/cmd/github_endpoints.go
@@ -0,0 +1,315 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+package cmd
+
+import (
+ "crypto/x509"
+ "encoding/pem"
+ "fmt"
+ "os"
+
+ "github.com/jedib0t/go-pretty/v6/table"
+ "github.com/spf13/cobra"
+
+ apiClientEndpoints "github.com/cloudbase/garm/client/endpoints"
+ "github.com/cloudbase/garm/cmd/garm-cli/common"
+ "github.com/cloudbase/garm/params"
+)
+
+var githubEndpointCmd = &cobra.Command{
+ Use: "endpoint",
+ SilenceUsage: true,
+ Short: "Manage GitHub endpoints",
+ Long: `Manage GitHub endpoints.
+
+This command allows you to configure and manage GitHub endpoints.`,
+ Run: nil,
+}
+
+var githubEndpointListCmd = &cobra.Command{
+ Use: "list",
+ Aliases: []string{"ls"},
+ SilenceUsage: true,
+ Short: "List GitHub endpoints",
+ Long: `List all configured GitHub endpoints.`,
+ RunE: func(_ *cobra.Command, _ []string) error {
+ if needsInit {
+ return errNeedsInitError
+ }
+
+ newGHListReq := apiClientEndpoints.NewListGithubEndpointsParams()
+ response, err := apiCli.Endpoints.ListGithubEndpoints(newGHListReq, authToken)
+ if err != nil {
+ return err
+ }
+ formatEndpoints(response.Payload)
+ return nil
+ },
+}
+
+var githubEndpointShowCmd = &cobra.Command{
+ Use: "show",
+ Aliases: []string{"get"},
+ SilenceUsage: true,
+ Short: "Show GitHub endpoint",
+ Long: `Show details of a GitHub endpoint.`,
+ RunE: func(_ *cobra.Command, args []string) error {
+ if needsInit {
+ return errNeedsInitError
+ }
+ if len(args) == 0 {
+ return fmt.Errorf("requires an endpoint name")
+ }
+ if len(args) > 1 {
+ return fmt.Errorf("too many arguments")
+ }
+
+ newGHShowReq := apiClientEndpoints.NewGetGithubEndpointParams()
+ newGHShowReq.Name = args[0]
+ response, err := apiCli.Endpoints.GetGithubEndpoint(newGHShowReq, authToken)
+ if err != nil {
+ return err
+ }
+ formatOneEndpoint(response.Payload)
+ return nil
+ },
+}
+
+var githubEndpointCreateCmd = &cobra.Command{
+ Use: "create",
+ SilenceUsage: true,
+ Short: "Create GitHub endpoint",
+ Long: `Create a new GitHub endpoint.`,
+ RunE: func(_ *cobra.Command, _ []string) error {
+ if needsInit {
+ return errNeedsInitError
+ }
+
+ createParams, err := parseCreateParams()
+ if err != nil {
+ return err
+ }
+
+ newGHCreateReq := apiClientEndpoints.NewCreateGithubEndpointParams()
+ newGHCreateReq.Body = createParams
+
+ response, err := apiCli.Endpoints.CreateGithubEndpoint(newGHCreateReq, authToken)
+ if err != nil {
+ return err
+ }
+ formatOneEndpoint(response.Payload)
+ return nil
+ },
+}
+
+var githubEndpointDeleteCmd = &cobra.Command{
+ Use: "delete",
+ Aliases: []string{"remove", "rm"},
+ SilenceUsage: true,
+ Short: "Delete GitHub endpoint",
+ Long: "Delete a GitHub endpoint",
+ RunE: func(_ *cobra.Command, args []string) error {
+ if needsInit {
+ return errNeedsInitError
+ }
+ if len(args) == 0 {
+ return fmt.Errorf("requires an endpoint name")
+ }
+ if len(args) > 1 {
+ return fmt.Errorf("too many arguments")
+ }
+
+ newGHDeleteReq := apiClientEndpoints.NewDeleteGithubEndpointParams()
+ newGHDeleteReq.Name = args[0]
+ if err := apiCli.Endpoints.DeleteGithubEndpoint(newGHDeleteReq, authToken); err != nil {
+ return err
+ }
+ return nil
+ },
+}
+
+var githubEndpointUpdateCmd = &cobra.Command{
+ Use: "update",
+ Short: "Update GitHub endpoint",
+ Long: "Update a GitHub endpoint",
+ SilenceUsage: true,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ if needsInit {
+ return errNeedsInitError
+ }
+ if len(args) == 0 {
+ return fmt.Errorf("requires an endpoint name")
+ }
+ if len(args) > 1 {
+ return fmt.Errorf("too many arguments")
+ }
+
+ updateParams := params.UpdateGithubEndpointParams{}
+
+ if cmd.Flags().Changed("ca-cert-path") {
+ cert, err := parseAndReadCABundle()
+ if err != nil {
+ return err
+ }
+ updateParams.CACertBundle = cert
+ }
+
+ if cmd.Flags().Changed("description") {
+ updateParams.Description = &endpointDescription
+ }
+
+ if cmd.Flags().Changed("base-url") {
+ updateParams.BaseURL = &endpointBaseURL
+ }
+
+ if cmd.Flags().Changed("upload-url") {
+ updateParams.UploadBaseURL = &endpointUploadURL
+ }
+
+ if cmd.Flags().Changed("api-base-url") {
+ updateParams.APIBaseURL = &endpointAPIBaseURL
+ }
+
+ newGHEndpointUpdateReq := apiClientEndpoints.NewUpdateGithubEndpointParams()
+ newGHEndpointUpdateReq.Name = args[0]
+ newGHEndpointUpdateReq.Body = updateParams
+
+ response, err := apiCli.Endpoints.UpdateGithubEndpoint(newGHEndpointUpdateReq, authToken)
+ if err != nil {
+ return err
+ }
+ formatOneEndpoint(response.Payload)
+ return nil
+ },
+}
+
+func init() {
+ githubEndpointCreateCmd.Flags().StringVar(&endpointName, "name", "", "Name of the GitHub endpoint")
+ githubEndpointCreateCmd.Flags().StringVar(&endpointDescription, "description", "", "Description for the GitHub endpoint")
+ githubEndpointCreateCmd.Flags().StringVar(&endpointBaseURL, "base-url", "", "Base URL of the GitHub endpoint")
+ githubEndpointCreateCmd.Flags().StringVar(&endpointUploadURL, "upload-url", "", "Upload URL of the GitHub endpoint")
+ githubEndpointCreateCmd.Flags().StringVar(&endpointAPIBaseURL, "api-base-url", "", "API Base URL of the GitHub endpoint")
+ githubEndpointCreateCmd.Flags().StringVar(&endpointCACertPath, "ca-cert-path", "", "CA Cert Path of the GitHub endpoint")
+
+ githubEndpointListCmd.Flags().BoolVarP(&long, "long", "l", false, "Include additional info.")
+
+ githubEndpointCreateCmd.MarkFlagRequired("name")
+ githubEndpointCreateCmd.MarkFlagRequired("base-url")
+ githubEndpointCreateCmd.MarkFlagRequired("api-base-url")
+ githubEndpointCreateCmd.MarkFlagRequired("upload-url")
+
+ githubEndpointUpdateCmd.Flags().StringVar(&endpointDescription, "description", "", "Description for the GitHub endpoint")
+ githubEndpointUpdateCmd.Flags().StringVar(&endpointBaseURL, "base-url", "", "Base URL of the GitHub endpoint")
+ githubEndpointUpdateCmd.Flags().StringVar(&endpointUploadURL, "upload-url", "", "Upload URL of the GitHub endpoint")
+ githubEndpointUpdateCmd.Flags().StringVar(&endpointAPIBaseURL, "api-base-url", "", "API Base URL of the GitHub endpoint")
+ githubEndpointUpdateCmd.Flags().StringVar(&endpointCACertPath, "ca-cert-path", "", "CA Cert Path of the GitHub endpoint")
+
+ githubEndpointCmd.AddCommand(
+ githubEndpointListCmd,
+ githubEndpointShowCmd,
+ githubEndpointCreateCmd,
+ githubEndpointDeleteCmd,
+ githubEndpointUpdateCmd,
+ )
+
+ githubCmd.AddCommand(githubEndpointCmd)
+}
+
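+// parseAndReadCABundle reads the CA bundle indicated by --ca-cert-path (if
+// set) and makes sure it decodes as valid PEM encoded certificate data before
+// returning the raw contents.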
+func parseAndReadCABundle() ([]byte, error) {
+ if endpointCACertPath == "" {
+ return nil, nil
+ }
+
+ if _, err := os.Stat(endpointCACertPath); os.IsNotExist(err) {
+ return nil, fmt.Errorf("CA cert file not found: %s", endpointCACertPath)
+ }
+ contents, err := os.ReadFile(endpointCACertPath)
+ if err != nil {
+ return nil, err
+ }
+ pemBlock, _ := pem.Decode(contents)
+ if pemBlock == nil {
+ return nil, fmt.Errorf("failed to decode PEM block")
+ }
+ if _, err := x509.ParseCertificates(pemBlock.Bytes); err != nil {
+ return nil, fmt.Errorf("failed to parse CA cert bundle: %w", err)
+ }
+ return contents, nil
+}
+
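+// parseCreateParams builds the create parameters for a new GitHub endpoint
+// from the CLI flags, reading and validating the optional CA cert bundle.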
+func parseCreateParams() (params.CreateGithubEndpointParams, error) {
+ certBundleBytes, err := parseAndReadCABundle()
+ if err != nil {
+ return params.CreateGithubEndpointParams{}, err
+ }
+
+ ret := params.CreateGithubEndpointParams{
+ Name: endpointName,
+ BaseURL: endpointBaseURL,
+ UploadBaseURL: endpointUploadURL,
+ APIBaseURL: endpointAPIBaseURL,
+ Description: endpointDescription,
+ CACertBundle: certBundleBytes,
+ }
+ return ret, nil
+}
+
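+// formatEndpoints renders the given endpoints as JSON or as a table. The
+// --long flag adds the created/updated timestamps.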
+func formatEndpoints(endpoints params.ForgeEndpoints) {
+ if outputFormat == common.OutputFormatJSON {
+ printAsJSON(endpoints)
+ return
+ }
+ t := table.NewWriter()
+ header := table.Row{"Name", "Base URL", "Description"}
+ if long {
+ header = append(header, "Created At", "Updated At")
+ }
+ t.AppendHeader(header)
+ for _, val := range endpoints {
+ row := table.Row{val.Name, val.BaseURL, val.Description}
+ if long {
+ row = append(row, val.CreatedAt, val.UpdatedAt)
+ }
+ t.AppendRow(row)
+ t.AppendSeparator()
+ }
+ fmt.Println(t.Render())
+}
+
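+// formatOneEndpoint renders a single endpoint as a field/value table, or as
+// JSON when the JSON output format is selected.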
+func formatOneEndpoint(endpoint params.ForgeEndpoint) {
+ if outputFormat == common.OutputFormatJSON {
+ printAsJSON(endpoint)
+ return
+ }
+ t := table.NewWriter()
+ header := table.Row{"Field", "Value"}
+ t.AppendHeader(header)
+ t.AppendRow([]interface{}{"Name", endpoint.Name})
+ t.AppendRow([]interface{}{"Description", endpoint.Description})
+ t.AppendRow([]interface{}{"Created At", endpoint.CreatedAt})
+ t.AppendRow([]interface{}{"Updated At", endpoint.UpdatedAt})
+ t.AppendRow([]interface{}{"Base URL", endpoint.BaseURL})
+ if endpoint.UploadBaseURL != "" {
+ t.AppendRow([]interface{}{"Upload URL", endpoint.UploadBaseURL})
+ }
+ t.AppendRow([]interface{}{"API Base URL", endpoint.APIBaseURL})
+ if len(endpoint.CACertBundle) > 0 {
+ t.AppendRow([]interface{}{"CA Cert Bundle", string(endpoint.CACertBundle)})
+ }
+ t.SetColumnConfigs([]table.ColumnConfig{
+ {Number: 1, AutoMerge: true},
+ {Number: 2, AutoMerge: false, WidthMax: 100},
+ })
+ fmt.Println(t.Render())
+}
diff --git a/cmd/garm-cli/cmd/init.go b/cmd/garm-cli/cmd/init.go
index b85c5a8f..c544699e 100644
--- a/cmd/garm-cli/cmd/init.go
+++ b/cmd/garm-cli/cmd/init.go
@@ -16,17 +16,26 @@ package cmd
import (
"fmt"
+ "net/url"
"strings"
+ openapiRuntimeClient "github.com/go-openapi/runtime/client"
+ "github.com/jedib0t/go-pretty/v6/table"
+ "github.com/spf13/cobra"
+
+ apiClientController "github.com/cloudbase/garm/client/controller"
+ apiClientFirstRun "github.com/cloudbase/garm/client/first_run"
+ apiClientLogin "github.com/cloudbase/garm/client/login"
"github.com/cloudbase/garm/cmd/garm-cli/common"
"github.com/cloudbase/garm/cmd/garm-cli/config"
"github.com/cloudbase/garm/params"
+)
- apiClientFirstRun "github.com/cloudbase/garm/client/first_run"
- apiClientLogin "github.com/cloudbase/garm/client/login"
- "github.com/jedib0t/go-pretty/v6/table"
- "github.com/pkg/errors"
- "github.com/spf13/cobra"
+var (
+ callbackURL string
+ metadataURL string
+ webhookURL string
+ minimumJobAgeBackoff uint
)
// initCmd represents the init command
@@ -45,17 +54,20 @@ Example usage:
garm-cli init --name=dev --url=https://runner.example.com --username=admin --password=superSecretPassword
`,
- RunE: func(cmd *cobra.Command, args []string) error {
+ RunE: func(_ *cobra.Command, _ []string) error {
if cfg != nil {
if cfg.HasManager(loginProfileName) {
return fmt.Errorf("a manager with name %s already exists in your local config", loginProfileName)
}
}
+ url := strings.TrimSuffix(loginURL, "/")
if err := promptUnsetInitVariables(); err != nil {
return err
}
+ if err := ensureDefaultEndpoints(url); err != nil {
+ return err
+ }
+
newUserReq := apiClientFirstRun.NewFirstRunParams()
newUserReq.Body = params.NewUserParams{
Username: loginUserName,
@@ -63,14 +75,11 @@ garm-cli init --name=dev --url=https://runner.example.com --username=admin --pas
FullName: loginFullName,
Email: loginEmail,
}
-
- url := strings.TrimSuffix(loginURL, "/")
-
- initApiClient(url, "")
+ initAPIClient(url, "")
response, err := apiCli.FirstRun.FirstRun(newUserReq, authToken)
if err != nil {
- return errors.Wrap(err, "initializing manager")
+ return fmt.Errorf("error initializing manager: %w", err)
}
newLoginParamsReq := apiClientLogin.NewLoginParams()
@@ -81,7 +90,7 @@ garm-cli init --name=dev --url=https://runner.example.com --username=admin --pas
token, err := apiCli.Login.Login(newLoginParamsReq, authToken)
if err != nil {
- return errors.Wrap(err, "authenticating")
+ return fmt.Errorf("error authenticating: %w", err)
}
cfg.Managers = append(cfg.Managers, config.Manager{
@@ -90,17 +99,50 @@ garm-cli init --name=dev --url=https://runner.example.com --username=admin --pas
Token: token.Payload.Token,
})
+ authToken = openapiRuntimeClient.BearerToken(token.Payload.Token)
cfg.ActiveManager = loginProfileName
if err := cfg.SaveConfig(); err != nil {
- return errors.Wrap(err, "saving config")
+ return fmt.Errorf("error saving config: %w", err)
}
- renderUserTable(response.Payload)
+ updateUrlsReq := apiClientController.NewUpdateControllerParams()
+ updateUrlsReq.Body = params.UpdateControllerParams{
+ MetadataURL: &metadataURL,
+ CallbackURL: &callbackURL,
+ WebhookURL: &webhookURL,
+ }
+
+ controllerInfoResponse, err := apiCli.Controller.UpdateController(updateUrlsReq, authToken)
+ renderResponseMessage(response.Payload, controllerInfoResponse, err)
return nil
},
}
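+
+// ensureDefaultEndpoints derives the metadata, callback and webhook URLs from
+// the login URL for any of them that were not explicitly set via flags.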
+func ensureDefaultEndpoints(loginURL string) (err error) {
+ if metadataURL == "" {
+ metadataURL, err = url.JoinPath(loginURL, "api/v1/metadata")
+ if err != nil {
+ return err
+ }
+ }
+
+ if callbackURL == "" {
+ callbackURL, err = url.JoinPath(loginURL, "api/v1/callbacks")
+ if err != nil {
+ return err
+ }
+ }
+
+ if webhookURL == "" {
+ webhookURL, err = url.JoinPath(loginURL, "webhooks")
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
func promptUnsetInitVariables() error {
var err error
if loginUserName == "" {
@@ -118,11 +160,18 @@ func promptUnsetInitVariables() error {
}
if loginPassword == "" {
- loginPassword, err = common.PromptPassword("Password")
+ passwd, err := common.PromptPassword("Password", "")
if err != nil {
return err
}
+
+ _, err = common.PromptPassword("Confirm password", passwd)
+ if err != nil {
+ return err
+ }
+ loginPassword = passwd
}
+
return nil
}
@@ -133,13 +182,16 @@ func init() {
initCmd.Flags().StringVarP(&loginURL, "url", "a", "", "The base URL for the runner manager API")
initCmd.Flags().StringVarP(&loginUserName, "username", "u", "", "The desired administrative username")
initCmd.Flags().StringVarP(&loginEmail, "email", "e", "", "Email address")
+ initCmd.Flags().StringVarP(&metadataURL, "metadata-url", "m", "", "The metadata URL for the controller (ie. https://garm.example.com/api/v1/metadata)")
+ initCmd.Flags().StringVarP(&callbackURL, "callback-url", "c", "", "The callback URL for the controller (ie. https://garm.example.com/api/v1/callbacks)")
+ initCmd.Flags().StringVarP(&webhookURL, "webhook-url", "w", "", "The webhook URL for the controller (ie. https://garm.example.com/webhooks)")
initCmd.Flags().StringVarP(&loginFullName, "full-name", "f", "", "Full name of the user")
initCmd.Flags().StringVarP(&loginPassword, "password", "p", "", "The admin password")
initCmd.MarkFlagRequired("name") //nolint
initCmd.MarkFlagRequired("url") //nolint
}
-func renderUserTable(user params.User) {
+func renderUserTable(user params.User) string {
t := table.NewWriter()
header := table.Row{"Field", "Value"}
t.AppendHeader(header)
@@ -148,5 +200,53 @@ func renderUserTable(user params.User) {
t.AppendRow(table.Row{"Username", user.Username})
t.AppendRow(table.Row{"Email", user.Email})
t.AppendRow(table.Row{"Enabled", user.Enabled})
- fmt.Println(t.Render())
+ return t.Render()
+}
+
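+// renderResponseMessage prints the post-init summary: the admin user table,
+// followed by either the controller information or a warning explaining how
+// to set the controller URLs manually if updating them failed.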
+func renderResponseMessage(user params.User, controllerInfo *apiClientController.UpdateControllerOK, controllerURLUpdateErr error) {
+ headerMsg := `Congrats! Your controller is now initialized.
+
+Following are the details of the admin user and details about the controller.
+
+Admin user information:
+
+%s
+`
+
+ controllerMsg := `Controller information:
+
+%s
+
+Make sure that the URLs in the table above are reachable by the relevant parties.
+
+The metadata and callback URLs *must* be accessible by the runners that GARM spins up.
+The base webhook and the controller webhook URLs must be accessible by GitHub or GHES.
+`
+
+ controllerErrorMsg := `WARNING: Failed to set the required controller URLs with error: %q
+
+Please run:
+
+ garm-cli controller show
+
+To make sure that the callback, metadata and webhook URLs are set correctly. If not,
+you must set them up by running:
+
+ garm-cli controller update \
+ --metadata-url= \
+ --callback-url= \
+ --webhook-url=
+
+See the help message for garm-cli controller update for more information.
+`
+ var ctrlMsg string
+ if controllerURLUpdateErr != nil || controllerInfo == nil {
+ ctrlMsg = fmt.Sprintf(controllerErrorMsg, controllerURLUpdateErr)
+ } else {
+ controllerInfoTable := renderControllerInfoTable(controllerInfo.Payload)
+ ctrlMsg = fmt.Sprintf(controllerMsg, controllerInfoTable)
+ }
+
+ userTable := renderUserTable(user)
+ fmt.Printf("%s\n%s\n", fmt.Sprintf(headerMsg, userTable), ctrlMsg)
}
diff --git a/cmd/garm-cli/cmd/jobs.go b/cmd/garm-cli/cmd/jobs.go
index 36f61e8e..1ce372cb 100644
--- a/cmd/garm-cli/cmd/jobs.go
+++ b/cmd/garm-cli/cmd/jobs.go
@@ -18,11 +18,13 @@ import (
"fmt"
"strings"
- apiClientJobs "github.com/cloudbase/garm/client/jobs"
- "github.com/cloudbase/garm/params"
"github.com/google/uuid"
"github.com/jedib0t/go-pretty/v6/table"
"github.com/spf13/cobra"
+
+ apiClientJobs "github.com/cloudbase/garm/client/jobs"
+ "github.com/cloudbase/garm/cmd/garm-cli/common"
+ "github.com/cloudbase/garm/params"
)
// runnerCmd represents the runner command
@@ -40,7 +42,7 @@ var jobsListCmd = &cobra.Command{
Short: "List jobs",
Long: `List all jobs currently recorded in the system.`,
SilenceUsage: true,
- RunE: func(cmd *cobra.Command, args []string) error {
+ RunE: func(_ *cobra.Command, _ []string) error {
if needsInit {
return errNeedsInitError
}
@@ -56,6 +58,10 @@ var jobsListCmd = &cobra.Command{
}
func formatJobs(jobs []params.Job) {
+ if outputFormat == common.OutputFormatJSON {
+ printAsJSON(jobs)
+ return
+ }
t := table.NewWriter()
header := table.Row{"ID", "Name", "Status", "Conclusion", "Runner Name", "Repository", "Requested Labels", "Locked by"}
t.AppendHeader(header)
diff --git a/cmd/garm-cli/cmd/log.go b/cmd/garm-cli/cmd/log.go
index 9e6669ce..a7d2dfba 100644
--- a/cmd/garm-cli/cmd/log.go
+++ b/cmd/garm-cli/cmd/log.go
@@ -1,20 +1,34 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
package cmd
import (
- "encoding/json"
- "fmt"
- "log"
- "net/http"
- "net/url"
- "os"
+ "context"
"os/signal"
- "time"
+ "strings"
- "github.com/cloudbase/garm-provider-common/util"
- apiParams "github.com/cloudbase/garm/apiserver/params"
-
- "github.com/gorilla/websocket"
"github.com/spf13/cobra"
+
+ garmWs "github.com/cloudbase/garm-provider-common/util/websocket"
+ "github.com/cloudbase/garm/cmd/garm-cli/common"
+)
+
+var (
+ eventsFilters string
+ logLevel string
+ filters []string
+ enableColor bool
)
var logCmd = &cobra.Command{
@@ -22,79 +36,40 @@ var logCmd = &cobra.Command{
SilenceUsage: true,
Short: "Stream garm log",
Long: `Stream all garm logging to the terminal.`,
- RunE: func(cmd *cobra.Command, args []string) error {
- interrupt := make(chan os.Signal, 1)
- signal.Notify(interrupt, os.Interrupt)
+ RunE: func(_ *cobra.Command, _ []string) error {
+ ctx, stop := signal.NotifyContext(context.Background(), signals...)
+ defer stop()
- parsedURL, err := url.Parse(mgr.BaseURL)
+ // Parse filters into map
+ attributeFilters := make(map[string]string)
+ for _, filter := range filters {
+ parts := strings.SplitN(filter, "=", 2)
+ if len(parts) == 2 {
+ attributeFilters[parts[0]] = parts[1]
+ }
+ }
+
+ // Create log formatter with filters
+ logFormatter := common.NewLogFormatter(logLevel, attributeFilters, enableColor)
+
+ reader, err := garmWs.NewReader(ctx, mgr.BaseURL, "/api/v1/ws/logs", mgr.Token, logFormatter.FormatWebsocketMessage)
if err != nil {
return err
}
- wsScheme := "ws"
- if parsedURL.Scheme == "https" {
- wsScheme = "wss"
+ if err := reader.Start(); err != nil {
+ return err
}
- u := url.URL{Scheme: wsScheme, Host: parsedURL.Host, Path: "/api/v1/ws"}
- log.Printf("connecting to %s", u.String())
- header := http.Header{}
- header.Add("Authorization", fmt.Sprintf("Bearer %s", mgr.Token))
-
- c, response, err := websocket.DefaultDialer.Dial(u.String(), header)
- if err != nil {
- var resp apiParams.APIErrorResponse
- var msg string
- if err := json.NewDecoder(response.Body).Decode(&resp); err == nil {
- msg = resp.Details
- }
- log.Fatalf("failed to stream logs: %s (%s)", msg, response.Status)
- }
- defer c.Close()
-
- done := make(chan struct{})
-
- go func() {
- defer close(done)
- for {
- _, message, err := c.ReadMessage()
- if err != nil {
- log.Printf("read: %q", err)
- return
- }
- log.Print(util.SanitizeLogEntry(string(message)))
- }
- }()
-
- ticker := time.NewTicker(time.Second)
- defer ticker.Stop()
-
- for {
- select {
- case <-done:
- return nil
- case t := <-ticker.C:
- err := c.WriteMessage(websocket.TextMessage, []byte(t.String()))
- if err != nil {
- return err
- }
- case <-interrupt:
- // Cleanly close the connection by sending a close message and then
- // waiting (with timeout) for the server to close the connection.
- err := c.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, ""))
- if err != nil {
- return err
- }
- select {
- case <-done:
- case <-time.After(time.Second):
- }
- return nil
- }
- }
+ <-reader.Done()
+ return nil
},
}
func init() {
+ logCmd.Flags().StringVar(&logLevel, "log-level", "", "Minimum log level to display (DEBUG, INFO, WARN, ERROR)")
+ logCmd.Flags().StringArrayVar(&filters, "filter", []string{}, "Filter logs by attribute (format: key=value) or message content (msg=text). May be specified multiple times; a log line is displayed if it matches any of the given filters.")
+ logCmd.Flags().BoolVar(&enableColor, "enable-color", true, "Enable color logging (auto-detects terminal support)")
+
rootCmd.AddCommand(logCmd)
}
diff --git a/cmd/garm-cli/cmd/metrics.go b/cmd/garm-cli/cmd/metrics.go
index e79d9456..ea1fd7ca 100644
--- a/cmd/garm-cli/cmd/metrics.go
+++ b/cmd/garm-cli/cmd/metrics.go
@@ -17,8 +17,9 @@ package cmd
import (
"fmt"
- apiClientMetricToken "github.com/cloudbase/garm/client/metrics_token"
"github.com/spf13/cobra"
+
+ apiClientMetricToken "github.com/cloudbase/garm/client/metrics_token"
)
// orgPoolCmd represents the pool command
@@ -35,7 +36,7 @@ var metricsTokenCreateCmd = &cobra.Command{
Short: "Create a metrics token",
Long: `Create a metrics token.`,
SilenceUsage: true,
- RunE: func(cmd *cobra.Command, args []string) error {
+ RunE: func(_ *cobra.Command, _ []string) error {
if needsInit {
return errNeedsInitError
}
diff --git a/cmd/garm-cli/cmd/organization.go b/cmd/garm-cli/cmd/organization.go
index 56de989b..b16812fa 100644
--- a/cmd/garm-cli/cmd/organization.go
+++ b/cmd/garm-cli/cmd/organization.go
@@ -16,18 +16,26 @@ package cmd
import (
"fmt"
-
- apiClientOrgs "github.com/cloudbase/garm/client/organizations"
- "github.com/cloudbase/garm/params"
+ "strings"
"github.com/jedib0t/go-pretty/v6/table"
"github.com/spf13/cobra"
+
+ "github.com/cloudbase/garm-provider-common/util"
+ apiClientOrgs "github.com/cloudbase/garm/client/organizations"
+ "github.com/cloudbase/garm/cmd/garm-cli/common"
+ "github.com/cloudbase/garm/params"
)
var (
- orgName string
- orgWebhookSecret string
- orgCreds string
+ orgName string
+ orgEndpoint string
+ orgWebhookSecret string
+ orgCreds string
+ orgRandomWebhookSecret bool
+ insecureOrgWebhook bool
+ keepOrgWebhook bool
+ installOrgWebhook bool
)
// organizationCmd represents the organization command
@@ -44,28 +52,162 @@ organization for which garm maintains pools of self hosted runners.`,
Run: nil,
}
+var orgWebhookCmd = &cobra.Command{
+ Use: "webhook",
+ Short: "Manage organization webhooks",
+ Long: `Manage organization webhooks.`,
+ SilenceUsage: true,
+ Run: nil,
+}
+
+var orgWebhookInstallCmd = &cobra.Command{
+ Use: "install",
+ Short: "Install webhook",
+ Long: `Install webhook for an organization.`,
+ SilenceUsage: true,
+ RunE: func(_ *cobra.Command, args []string) error {
+ if needsInit {
+ return errNeedsInitError
+ }
+ if len(args) == 0 {
+ return fmt.Errorf("requires an organization ID")
+ }
+ if len(args) > 1 {
+ return fmt.Errorf("too many arguments")
+ }
+
+ orgID, err := resolveOrganization(args[0], orgEndpoint)
+ if err != nil {
+ return err
+ }
+
+ installWebhookReq := apiClientOrgs.NewInstallOrgWebhookParams()
+ installWebhookReq.OrgID = orgID
+ installWebhookReq.Body.InsecureSSL = insecureOrgWebhook
+ installWebhookReq.Body.WebhookEndpointType = params.WebhookEndpointDirect
+
+ response, err := apiCli.Organizations.InstallOrgWebhook(installWebhookReq, authToken)
+ if err != nil {
+ return err
+ }
+ formatOneHookInfo(response.Payload)
+ return nil
+ },
+}
+
+var orgHookInfoShowCmd = &cobra.Command{
+ Use: "show",
+ Short: "Show webhook info",
+ Long: `Show webhook info for an organization.`,
+ SilenceUsage: true,
+ RunE: func(_ *cobra.Command, args []string) error {
+ if needsInit {
+ return errNeedsInitError
+ }
+ if len(args) == 0 {
+ return fmt.Errorf("requires an organization ID")
+ }
+ if len(args) > 1 {
+ return fmt.Errorf("too many arguments")
+ }
+ orgID, err := resolveOrganization(args[0], orgEndpoint)
+ if err != nil {
+ return err
+ }
+ showWebhookInfoReq := apiClientOrgs.NewGetOrgWebhookInfoParams()
+ showWebhookInfoReq.OrgID = orgID
+
+ response, err := apiCli.Organizations.GetOrgWebhookInfo(showWebhookInfoReq, authToken)
+ if err != nil {
+ return err
+ }
+ formatOneHookInfo(response.Payload)
+ return nil
+ },
+}
+
+var orgWebhookUninstallCmd = &cobra.Command{
+ Use: "uninstall",
+ Short: "Uninstall webhook",
+ Long: `Uninstall webhook for an organization.`,
+ SilenceUsage: true,
+ RunE: func(_ *cobra.Command, args []string) error {
+ if needsInit {
+ return errNeedsInitError
+ }
+ if len(args) == 0 {
+ return fmt.Errorf("requires an organization ID")
+ }
+ if len(args) > 1 {
+ return fmt.Errorf("too many arguments")
+ }
+
+ orgID, err := resolveOrganization(args[0], orgEndpoint)
+ if err != nil {
+ return err
+ }
+
+ uninstallWebhookReq := apiClientOrgs.NewUninstallOrgWebhookParams()
+ uninstallWebhookReq.OrgID = orgID
+
+ err = apiCli.Organizations.UninstallOrgWebhook(uninstallWebhookReq, authToken)
+ if err != nil {
+ return err
+ }
+ return nil
+ },
+}
+
var orgAddCmd = &cobra.Command{
Use: "add",
Aliases: []string{"create"},
Short: "Add organization",
Long: `Add a new organization to the manager.`,
SilenceUsage: true,
- RunE: func(cmd *cobra.Command, args []string) error {
+ RunE: func(_ *cobra.Command, _ []string) error {
if needsInit {
return errNeedsInitError
}
+ if orgRandomWebhookSecret {
+ secret, err := util.GetRandomString(32)
+ if err != nil {
+ return err
+ }
+ orgWebhookSecret = secret
+ }
+
newOrgReq := apiClientOrgs.NewCreateOrgParams()
newOrgReq.Body = params.CreateOrgParams{
- Name: orgName,
- WebhookSecret: orgWebhookSecret,
- CredentialsName: orgCreds,
+ Name: orgName,
+ WebhookSecret: orgWebhookSecret,
+ CredentialsName: orgCreds,
+ ForgeType: params.EndpointType(forgeType),
+ PoolBalancerType: params.PoolBalancerType(poolBalancerType),
}
response, err := apiCli.Organizations.CreateOrg(newOrgReq, authToken)
if err != nil {
return err
}
- formatOneOrganization(response.Payload)
+
+ if installOrgWebhook {
+ installWebhookReq := apiClientOrgs.NewInstallOrgWebhookParams()
+ installWebhookReq.OrgID = response.Payload.ID
+ installWebhookReq.Body.WebhookEndpointType = params.WebhookEndpointDirect
+
+ _, err = apiCli.Organizations.InstallOrgWebhook(installWebhookReq, authToken)
+ if err != nil {
+ return err
+ }
+ }
+
+ getOrgRequest := apiClientOrgs.NewGetOrgParams()
+ getOrgRequest.OrgID = response.Payload.ID
+ org, err := apiCli.Organizations.GetOrg(getOrgRequest, authToken)
+ if err != nil {
+ return err
+ }
+ formatOneOrganization(org.Payload)
return nil
},
}
@@ -75,7 +217,7 @@ var orgUpdateCmd = &cobra.Command{
Short: "Update organization",
Long: `Update organization credentials or webhook secret.`,
SilenceUsage: true,
- RunE: func(cmd *cobra.Command, args []string) error {
+ RunE: func(_ *cobra.Command, args []string) error {
if needsInit {
return errNeedsInitError
}
@@ -87,12 +229,19 @@ var orgUpdateCmd = &cobra.Command{
if len(args) > 1 {
return fmt.Errorf("too many arguments")
}
+
+ orgID, err := resolveOrganization(args[0], orgEndpoint)
+ if err != nil {
+ return err
+ }
+
updateOrgReq := apiClientOrgs.NewUpdateOrgParams()
updateOrgReq.Body = params.UpdateEntityParams{
- WebhookSecret: repoWebhookSecret,
- CredentialsName: orgCreds,
+ WebhookSecret: orgWebhookSecret,
+ CredentialsName: orgCreds,
+ PoolBalancerType: params.PoolBalancerType(poolBalancerType),
}
- updateOrgReq.OrgID = args[0]
+ updateOrgReq.OrgID = orgID
response, err := apiCli.Organizations.UpdateOrg(updateOrgReq, authToken)
if err != nil {
return err
@@ -108,12 +257,14 @@ var orgListCmd = &cobra.Command{
Short: "List organizations",
Long: `List all configured organizations that are currently managed.`,
SilenceUsage: true,
- RunE: func(cmd *cobra.Command, args []string) error {
+ RunE: func(_ *cobra.Command, _ []string) error {
if needsInit {
return errNeedsInitError
}
listOrgsReq := apiClientOrgs.NewListOrgsParams()
+ listOrgsReq.Name = &orgName
+ listOrgsReq.Endpoint = &orgEndpoint
response, err := apiCli.Organizations.ListOrgs(listOrgsReq, authToken)
if err != nil {
return err
@@ -128,7 +279,7 @@ var orgShowCmd = &cobra.Command{
Short: "Show details for one organization",
Long: `Displays detailed information about a single organization.`,
SilenceUsage: true,
- RunE: func(cmd *cobra.Command, args []string) error {
+ RunE: func(_ *cobra.Command, args []string) error {
if needsInit {
return errNeedsInitError
}
@@ -138,8 +289,14 @@ var orgShowCmd = &cobra.Command{
if len(args) > 1 {
return fmt.Errorf("too many arguments")
}
+
+ orgID, err := resolveOrganization(args[0], orgEndpoint)
+ if err != nil {
+ return err
+ }
+
showOrgReq := apiClientOrgs.NewGetOrgParams()
- showOrgReq.OrgID = args[0]
+ showOrgReq.OrgID = orgID
response, err := apiCli.Organizations.GetOrg(showOrgReq, authToken)
if err != nil {
return err
@@ -155,7 +312,7 @@ var orgDeleteCmd = &cobra.Command{
Short: "Removes one organization",
Long: `Delete one organization from the manager.`,
SilenceUsage: true,
- RunE: func(cmd *cobra.Command, args []string) error {
+ RunE: func(_ *cobra.Command, args []string) error {
if needsInit {
return errNeedsInitError
}
@@ -165,8 +322,15 @@ var orgDeleteCmd = &cobra.Command{
if len(args) > 1 {
return fmt.Errorf("too many arguments")
}
+
+ orgID, err := resolveOrganization(args[0], orgEndpoint)
+ if err != nil {
+ return err
+ }
+
deleteOrgReq := apiClientOrgs.NewDeleteOrgParams()
- deleteOrgReq.OrgID = args[0]
+ deleteOrgReq.OrgID = orgID
+ deleteOrgReq.KeepWebhook = &keepOrgWebhook
if err := apiCli.Organizations.DeleteOrg(deleteOrgReq, authToken); err != nil {
return err
}
@@ -175,14 +339,45 @@ var orgDeleteCmd = &cobra.Command{
}
func init() {
-
orgAddCmd.Flags().StringVar(&orgName, "name", "", "The name of the organization")
+ orgAddCmd.Flags().StringVar(&poolBalancerType, "pool-balancer-type", string(params.PoolBalancerTypeRoundRobin), "The balancing strategy to use when creating runners in pools matching requested labels.")
orgAddCmd.Flags().StringVar(&orgWebhookSecret, "webhook-secret", "", "The webhook secret for this organization")
+ orgAddCmd.Flags().StringVar(&forgeType, "forge-type", "", "The forge type of the organization. Supported values: github, gitea.")
orgAddCmd.Flags().StringVar(&orgCreds, "credentials", "", "Credentials name. See credentials list.")
+ orgAddCmd.Flags().BoolVar(&orgRandomWebhookSecret, "random-webhook-secret", false, "Generate a random webhook secret for this organization.")
+ orgAddCmd.Flags().BoolVar(&installOrgWebhook, "install-webhook", false, "Install the webhook as part of the add operation.")
+ orgAddCmd.MarkFlagsMutuallyExclusive("webhook-secret", "random-webhook-secret")
+ orgAddCmd.MarkFlagsOneRequired("webhook-secret", "random-webhook-secret")
+
+ orgListCmd.Flags().BoolVarP(&long, "long", "l", false, "Include additional info.")
+ orgListCmd.Flags().StringVarP(&orgName, "name", "n", "", "Exact org name to filter by.")
+ orgListCmd.Flags().StringVarP(&orgEndpoint, "endpoint", "e", "", "Exact endpoint name to filter by.")
+
orgAddCmd.MarkFlagRequired("credentials") //nolint
orgAddCmd.MarkFlagRequired("name") //nolint
+
+ orgDeleteCmd.Flags().BoolVar(&keepOrgWebhook, "keep-webhook", false, "Do not delete any existing webhook when removing the organization from GARM.")
+ orgDeleteCmd.Flags().StringVar(&orgEndpoint, "endpoint", "", "When using the name of the org, the endpoint must be specified when multiple organizations with the same name exist.")
+
+ orgShowCmd.Flags().StringVar(&orgEndpoint, "endpoint", "", "When using the name of the org, the endpoint must be specified when multiple organizations with the same name exist.")
+
orgUpdateCmd.Flags().StringVar(&orgWebhookSecret, "webhook-secret", "", "The webhook secret for this organization")
orgUpdateCmd.Flags().StringVar(&orgCreds, "credentials", "", "Credentials name. See credentials list.")
+ orgUpdateCmd.Flags().StringVar(&poolBalancerType, "pool-balancer-type", "", "The balancing strategy to use when creating runners in pools matching requested labels.")
+ orgUpdateCmd.Flags().StringVar(&orgEndpoint, "endpoint", "", "When using the name of the org, the endpoint must be specified when multiple organizations with the same name exist.")
+
+ orgWebhookInstallCmd.Flags().BoolVar(&insecureOrgWebhook, "insecure", false, "Ignore self signed certificate errors.")
+ orgWebhookInstallCmd.Flags().StringVar(&orgEndpoint, "endpoint", "", "When using the name of the org, the endpoint must be specified when multiple organizations with the same name exist.")
+
+ orgWebhookUninstallCmd.Flags().StringVar(&orgEndpoint, "endpoint", "", "When using the name of the org, the endpoint must be specified when multiple organizations with the same name exist.")
+
+ orgHookInfoShowCmd.Flags().StringVar(&orgEndpoint, "endpoint", "", "When using the name of the org, the endpoint must be specified when multiple organizations with the same name exist.")
+
+ orgWebhookCmd.AddCommand(
+ orgWebhookInstallCmd,
+ orgWebhookUninstallCmd,
+ orgHookInfoShowCmd,
+ )
organizationCmd.AddCommand(
orgListCmd,
@@ -190,29 +385,53 @@ func init() {
orgShowCmd,
orgDeleteCmd,
orgUpdateCmd,
+ orgWebhookCmd,
)
rootCmd.AddCommand(organizationCmd)
}
func formatOrganizations(orgs []params.Organization) {
+ if outputFormat == common.OutputFormatJSON {
+ printAsJSON(orgs)
+ return
+ }
t := table.NewWriter()
- header := table.Row{"ID", "Name", "Credentials name", "Pool mgr running"}
+ header := table.Row{"ID", "Name", "Endpoint", "Credentials name", "Pool Balancer Type", "Forge type", "Pool mgr running"}
+ if long {
+ header = append(header, "Created At", "Updated At")
+ }
t.AppendHeader(header)
for _, val := range orgs {
- t.AppendRow(table.Row{val.ID, val.Name, val.CredentialsName, val.PoolManagerStatus.IsRunning})
+ forgeType := val.Endpoint.EndpointType
+ if forgeType == "" {
+ forgeType = params.GithubEndpointType
+ }
+ row := table.Row{val.ID, val.Name, val.Endpoint.Name, val.CredentialsName, val.GetBalancerType(), forgeType, val.PoolManagerStatus.IsRunning}
+ if long {
+ row = append(row, val.CreatedAt, val.UpdatedAt)
+ }
+ t.AppendRow(row)
t.AppendSeparator()
}
fmt.Println(t.Render())
}
func formatOneOrganization(org params.Organization) {
+ if outputFormat == common.OutputFormatJSON {
+ printAsJSON(org)
+ return
+ }
t := table.NewWriter()
rowConfigAutoMerge := table.RowConfig{AutoMerge: true}
header := table.Row{"Field", "Value"}
t.AppendHeader(header)
t.AppendRow(table.Row{"ID", org.ID})
+ t.AppendRow(table.Row{"Created At", org.CreatedAt})
+ t.AppendRow(table.Row{"Updated At", org.UpdatedAt})
t.AppendRow(table.Row{"Name", org.Name})
+ t.AppendRow(table.Row{"Endpoint", org.Endpoint.Name})
+ t.AppendRow(table.Row{"Pool balancer type", org.GetBalancerType()})
t.AppendRow(table.Row{"Credentials", org.CredentialsName})
t.AppendRow(table.Row{"Pool manager running", org.PoolManagerStatus.IsRunning})
if !org.PoolManagerStatus.IsRunning {
@@ -223,9 +442,14 @@ func formatOneOrganization(org params.Organization) {
t.AppendRow(table.Row{"Pools", pool.ID}, rowConfigAutoMerge)
}
}
+ if len(org.Events) > 0 {
+ for _, event := range org.Events {
+ t.AppendRow(table.Row{"Events", fmt.Sprintf("%s %s: %s", event.CreatedAt.Format("2006-01-02T15:04:05"), strings.ToUpper(string(event.EventLevel)), event.Message)}, rowConfigAutoMerge)
+ }
+ }
t.SetColumnConfigs([]table.ColumnConfig{
{Number: 1, AutoMerge: true},
- {Number: 2, AutoMerge: false},
+ {Number: 2, AutoMerge: false, WidthMax: 100},
})
fmt.Println(t.Render())
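
The add commands in this diff pair cobra's flag-group helpers so that exactly one of --webhook-secret / --random-webhook-secret is accepted. A minimal, self-contained sketch of that pattern; the command below is illustrative only, not part of GARM, and MarkFlagsOneRequired needs cobra v1.8.0 or newer:

package main

import (
	"fmt"
	"os"

	"github.com/spf13/cobra"
)

func main() {
	var secret string
	var random bool

	cmd := &cobra.Command{
		Use: "add",
		RunE: func(_ *cobra.Command, _ []string) error {
			fmt.Printf("secret=%q random=%v\n", secret, random)
			return nil
		},
	}
	cmd.Flags().StringVar(&secret, "webhook-secret", "", "Explicit webhook secret.")
	cmd.Flags().BoolVar(&random, "random-webhook-secret", false, "Generate a random secret.")
	// Cobra enforces these before RunE is invoked: passing both flags,
	// or neither, fails with a usage error.
	cmd.MarkFlagsMutuallyExclusive("webhook-secret", "random-webhook-secret")
	cmd.MarkFlagsOneRequired("webhook-secret", "random-webhook-secret")

	if err := cmd.Execute(); err != nil {
		os.Exit(1)
	}
}

Running it with both flags, or with neither, errors out before RunE is ever called, which is why the add commands above no longer validate the secret flags by hand.
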
diff --git a/cmd/garm-cli/cmd/pool.go b/cmd/garm-cli/cmd/pool.go
index 024a58b7..5b8cadf3 100644
--- a/cmd/garm-cli/cmd/pool.go
+++ b/cmd/garm-cli/cmd/pool.go
@@ -20,18 +20,16 @@ import (
"os"
"strings"
+ "github.com/jedib0t/go-pretty/v6/table"
+ "github.com/spf13/cobra"
+
+ commonParams "github.com/cloudbase/garm-provider-common/params"
apiClientEnterprises "github.com/cloudbase/garm/client/enterprises"
apiClientOrgs "github.com/cloudbase/garm/client/organizations"
apiClientPools "github.com/cloudbase/garm/client/pools"
apiClientRepos "github.com/cloudbase/garm/client/repositories"
-
+ "github.com/cloudbase/garm/cmd/garm-cli/common"
"github.com/cloudbase/garm/params"
-
- "github.com/jedib0t/go-pretty/v6/table"
- "github.com/pkg/errors"
- "github.com/spf13/cobra"
-
- commonParams "github.com/cloudbase/garm-provider-common/params"
)
var (
@@ -53,8 +51,13 @@ var (
poolExtraSpecs string
poolAll bool
poolGitHubRunnerGroup string
+ priority uint
)
+type poolsPayloadGetter interface {
+ GetPayload() params.Pools
+}
+
// poolCmd represents the pool command
var poolCmd = &cobra.Command{
Use: "pool",
@@ -83,9 +86,9 @@ Example:
garm-cli pool list --org=5493e51f-3170-4ce3-9f05-3fe690fc6ec6
List pools from one enterprise:
- garm-cli pool list --org=a8ee4c66-e762-4cbe-a35d-175dba2c9e62
+ garm-cli pool list --enterprise=a8ee4c66-e762-4cbe-a35d-175dba2c9e62
- List all pools from all repos and orgs:
+ List all pools from all repos, orgs and enterprises:
garm-cli pool list --all
`,
@@ -95,37 +98,38 @@ Example:
return errNeedsInitError
}
- var pools []params.Pool
+ var response poolsPayloadGetter
var err error
switch len(args) {
case 0:
if cmd.Flags().Changed("repo") {
- var response *apiClientRepos.ListRepoPoolsOK
+ poolRepository, err = resolveRepository(poolRepository, endpointName)
+ if err != nil {
+ return err
+ }
listRepoPoolsReq := apiClientRepos.NewListRepoPoolsParams()
listRepoPoolsReq.RepoID = poolRepository
response, err = apiCli.Repositories.ListRepoPools(listRepoPoolsReq, authToken)
- pools = response.Payload
} else if cmd.Flags().Changed("org") {
- var response *apiClientOrgs.ListOrgPoolsOK
+ poolOrganization, err = resolveOrganization(poolOrganization, endpointName)
+ if err != nil {
+ return err
+ }
listOrgPoolsReq := apiClientOrgs.NewListOrgPoolsParams()
listOrgPoolsReq.OrgID = poolOrganization
response, err = apiCli.Organizations.ListOrgPools(listOrgPoolsReq, authToken)
- pools = response.Payload
} else if cmd.Flags().Changed("enterprise") {
- var response *apiClientEnterprises.ListEnterprisePoolsOK
+ poolEnterprise, err = resolveEnterprise(poolEnterprise, endpointName)
+ if err != nil {
+ return err
+ }
listEnterprisePoolsReq := apiClientEnterprises.NewListEnterprisePoolsParams()
listEnterprisePoolsReq.EnterpriseID = poolEnterprise
response, err = apiCli.Enterprises.ListEnterprisePools(listEnterprisePoolsReq, authToken)
- pools = response.Payload
- } else if cmd.Flags().Changed("all") {
- var response *apiClientPools.ListPoolsOK
+ } else {
listPoolsReq := apiClientPools.NewListPoolsParams()
response, err = apiCli.Pools.ListPools(listPoolsReq, authToken)
- pools = response.Payload
- } else {
- cmd.Help() //nolint
- os.Exit(0)
}
default:
cmd.Help() //nolint
@@ -135,7 +139,7 @@ Example:
if err != nil {
return err
}
- formatPools(pools)
+ formatPools(response.GetPayload())
return nil
},
}
@@ -145,7 +149,7 @@ var poolShowCmd = &cobra.Command{
Short: "Show details for a runner",
Long: `Displays a detailed view of a single runner.`,
SilenceUsage: true,
- RunE: func(cmd *cobra.Command, args []string) error {
+ RunE: func(_ *cobra.Command, args []string) error {
if needsInit {
return errNeedsInitError
}
@@ -175,7 +179,7 @@ var poolDeleteCmd = &cobra.Command{
Short: "Delete pool by ID",
Long: `Delete one pool by referencing its ID, regardless of repo or org.`,
SilenceUsage: true,
- RunE: func(cmd *cobra.Command, args []string) error {
+ RunE: func(_ *cobra.Command, args []string) error {
if needsInit {
return errNeedsInitError
}
@@ -197,13 +201,17 @@ var poolDeleteCmd = &cobra.Command{
},
}
+type poolPayloadGetter interface {
+ GetPayload() params.Pool
+}
+
var poolAddCmd = &cobra.Command{
Use: "add",
Aliases: []string{"create"},
Short: "Add pool",
Long: `Add a new pool to a repository or organization.`,
SilenceUsage: true,
- RunE: func(cmd *cobra.Command, args []string) error {
+ RunE: func(cmd *cobra.Command, _ []string) error {
if needsInit {
return errNeedsInitError
}
@@ -224,6 +232,7 @@ var poolAddCmd = &cobra.Command{
Enabled: poolEnabled,
RunnerBootstrapTimeout: poolRunnerBootstrapTimeout,
GitHubRunnerGroup: poolGitHubRunnerGroup,
+ Priority: priority,
}
if cmd.Flags().Changed("extra-specs") {
@@ -246,30 +255,35 @@ var poolAddCmd = &cobra.Command{
return err
}
- var pool params.Pool
var err error
-
+ var response poolPayloadGetter
if cmd.Flags().Changed("repo") {
- var response *apiClientRepos.CreateRepoPoolOK
+ poolRepository, err = resolveRepository(poolRepository, endpointName)
+ if err != nil {
+ return err
+ }
newRepoPoolReq := apiClientRepos.NewCreateRepoPoolParams()
newRepoPoolReq.RepoID = poolRepository
newRepoPoolReq.Body = newPoolParams
response, err = apiCli.Repositories.CreateRepoPool(newRepoPoolReq, authToken)
- pool = response.Payload
} else if cmd.Flags().Changed("org") {
- var response *apiClientOrgs.CreateOrgPoolOK
+ poolOrganization, err = resolveOrganization(poolOrganization, endpointName)
+ if err != nil {
+ return err
+ }
newOrgPoolReq := apiClientOrgs.NewCreateOrgPoolParams()
newOrgPoolReq.OrgID = poolOrganization
newOrgPoolReq.Body = newPoolParams
response, err = apiCli.Organizations.CreateOrgPool(newOrgPoolReq, authToken)
- pool = response.Payload
} else if cmd.Flags().Changed("enterprise") {
- var response *apiClientEnterprises.CreateEnterprisePoolOK
+ poolEnterprise, err = resolveEnterprise(poolEnterprise, endpointName)
+ if err != nil {
+ return err
+ }
newEnterprisePoolReq := apiClientEnterprises.NewCreateEnterprisePoolParams()
newEnterprisePoolReq.EnterpriseID = poolEnterprise
newEnterprisePoolReq.Body = newPoolParams
response, err = apiCli.Enterprises.CreateEnterprisePool(newEnterprisePoolReq, authToken)
- pool = response.Payload
} else {
cmd.Help() //nolint
os.Exit(0)
@@ -278,7 +292,8 @@ var poolAddCmd = &cobra.Command{
if err != nil {
return err
}
- formatOnePool(pool)
+
+ formatOnePool(response.GetPayload())
return nil
},
}
@@ -289,7 +304,7 @@ var poolUpdateCmd = &cobra.Command{
Long: `Updates pool characteristics.
This command updates the pool characteristics. Runners already created prior to updating
-the pool, will not be recreated. IF they no longer suit your needs, you will need to
+the pool will not be recreated. If they no longer suit your needs, you will need to
explicitly remove them using the runner delete command.
`,
SilenceUsage: true,
@@ -332,6 +347,9 @@ explicitly remove them using the runner delete command.
if cmd.Flags().Changed("max-runners") {
poolUpdateParams.MaxRunners = &poolMaxRunners
}
+ if cmd.Flags().Changed("priority") {
+ poolUpdateParams.Priority = &priority
+ }
if cmd.Flags().Changed("min-idle-runners") {
poolUpdateParams.MinIdleRunners = &poolMinIdleRunners
@@ -385,12 +403,17 @@ explicitly remove them using the runner delete command.
func init() {
poolListCmd.Flags().StringVarP(&poolRepository, "repo", "r", "", "List all pools within this repository.")
- poolListCmd.Flags().StringVarP(&poolOrganization, "org", "o", "", "List all pools withing this organization.")
- poolListCmd.Flags().StringVarP(&poolEnterprise, "enterprise", "e", "", "List all pools withing this enterprise.")
- poolListCmd.Flags().BoolVarP(&poolAll, "all", "a", false, "List all pools, regardless of org or repo.")
- poolListCmd.MarkFlagsMutuallyExclusive("repo", "org", "all", "enterprise")
+ poolListCmd.Flags().StringVarP(&poolOrganization, "org", "o", "", "List all pools within this organization.")
+ poolListCmd.Flags().StringVarP(&poolEnterprise, "enterprise", "e", "", "List all pools within this enterprise.")
+ poolListCmd.Flags().BoolVarP(&poolAll, "all", "a", true, "List all pools, regardless of org or repo.")
+ poolListCmd.Flags().BoolVarP(&long, "long", "l", false, "Include additional info.")
+ poolListCmd.Flags().StringVar(&endpointName, "endpoint", "", "When using the name of an entity, the endpoint must be specified when multiple entities with the same name exist.")
+
+ poolListCmd.Flags().MarkDeprecated("all", "all pools are listed by default in the absence of --repo, --org or --enterprise.")
+ poolListCmd.MarkFlagsMutuallyExclusive("repo", "org", "enterprise", "all")
poolUpdateCmd.Flags().StringVar(&poolImage, "image", "", "The provider-specific image name to use for runners in this pool.")
+ poolUpdateCmd.Flags().UintVar(&priority, "priority", 0, "When multiple pools match the same labels, priority dictates the order by which they are returned, in descending order.")
poolUpdateCmd.Flags().StringVar(&poolFlavor, "flavor", "", "The flavor to use for this runner.")
poolUpdateCmd.Flags().StringVar(&poolTags, "tags", "", "A comma separated list of tags to assign to this runner.")
poolUpdateCmd.Flags().StringVar(&poolOSType, "os-type", "linux", "Operating system type (windows, linux, etc).")
@@ -406,6 +429,7 @@ func init() {
poolUpdateCmd.MarkFlagsMutuallyExclusive("extra-specs-file", "extra-specs")
poolAddCmd.Flags().StringVar(&poolProvider, "provider-name", "", "The name of the provider where runners will be created.")
+ poolAddCmd.Flags().UintVar(&priority, "priority", 0, "When multiple pools match the same labels, priority dictates the order by which they are returned, in descending order.")
poolAddCmd.Flags().StringVar(&poolImage, "image", "", "The provider-specific image name to use for runners in this pool.")
poolAddCmd.Flags().StringVar(&poolFlavor, "flavor", "", "The flavor to use for this runner.")
poolAddCmd.Flags().StringVar(&poolRunnerPrefix, "runner-prefix", "", "The name prefix to use for runners in this pool.")
@@ -419,14 +443,16 @@ func init() {
poolAddCmd.Flags().UintVar(&poolRunnerBootstrapTimeout, "runner-bootstrap-timeout", 20, "Duration in minutes after which a runner is considered failed if it does not join Github.")
poolAddCmd.Flags().UintVar(&poolMinIdleRunners, "min-idle-runners", 1, "Attempt to maintain a minimum of idle self-hosted runners of this type.")
poolAddCmd.Flags().BoolVar(&poolEnabled, "enabled", false, "Enable this pool.")
+ poolAddCmd.Flags().StringVar(&endpointName, "endpoint", "", "When using the name of an entity, the endpoint must be specified when multiple entities with the same name exist.")
+
poolAddCmd.MarkFlagRequired("provider-name") //nolint
poolAddCmd.MarkFlagRequired("image") //nolint
poolAddCmd.MarkFlagRequired("flavor") //nolint
poolAddCmd.MarkFlagRequired("tags") //nolint
poolAddCmd.Flags().StringVarP(&poolRepository, "repo", "r", "", "Add the new pool within this repository.")
- poolAddCmd.Flags().StringVarP(&poolOrganization, "org", "o", "", "Add the new pool withing this organization.")
- poolAddCmd.Flags().StringVarP(&poolEnterprise, "enterprise", "e", "", "Add the new pool withing this enterprise.")
+ poolAddCmd.Flags().StringVarP(&poolOrganization, "org", "o", "", "Add the new pool within this organization.")
+ poolAddCmd.Flags().StringVarP(&poolEnterprise, "enterprise", "e", "", "Add the new pool within this enterprise.")
poolAddCmd.MarkFlagsMutuallyExclusive("repo", "org", "enterprise")
poolAddCmd.MarkFlagsMutuallyExclusive("extra-specs-file", "extra-specs")
@@ -444,7 +470,7 @@ func init() {
func extraSpecsFromFile(specsFile string) (json.RawMessage, error) {
data, err := os.ReadFile(specsFile)
if err != nil {
- return nil, errors.Wrap(err, "opening specs file")
+ return nil, fmt.Errorf("error opening specs file: %w", err)
}
return asRawMessage(data)
}
@@ -454,21 +480,31 @@ func asRawMessage(data []byte) (json.RawMessage, error) {
// have a valid json.
var unmarshaled interface{}
if err := json.Unmarshal(data, &unmarshaled); err != nil {
- return nil, errors.Wrap(err, "decoding extra specs")
+ return nil, fmt.Errorf("error decoding extra specs: %w", err)
}
- var asRawJson json.RawMessage
+ var asRawJSON json.RawMessage
var err error
- asRawJson, err = json.Marshal(unmarshaled)
+ asRawJSON, err = json.Marshal(unmarshaled)
if err != nil {
- return nil, errors.Wrap(err, "marshaling json")
+ return nil, fmt.Errorf("error marshaling json: %w", err)
}
- return asRawJson, nil
+ return asRawJSON, nil
}
func formatPools(pools []params.Pool) {
+ if outputFormat == common.OutputFormatJSON {
+ printAsJSON(pools)
+ return
+ }
t := table.NewWriter()
- header := table.Row{"ID", "Image", "Flavor", "Tags", "Belongs to", "Level", "Enabled", "Runner Prefix"}
+ t.SetColumnConfigs([]table.ColumnConfig{
+ {Number: 2, WidthMax: 40},
+ })
+ header := table.Row{"ID", "Image", "Flavor", "Tags", "Belongs to", "Endpoint", "Forge Type", "Enabled"}
+ if long {
+ header = append(header, "Level", "Created At", "Updated at", "Runner Prefix", "Priority")
+ }
t.AppendHeader(header)
for _, pool := range pools {
@@ -479,23 +515,32 @@ func formatPools(pools []params.Pool) {
var belongsTo string
var level string
- if pool.RepoID != "" && pool.RepoName != "" {
+ switch {
+ case pool.RepoID != "" && pool.RepoName != "":
belongsTo = pool.RepoName
- level = "repo"
- } else if pool.OrgID != "" && pool.OrgName != "" {
+ level = entityTypeRepo
+ case pool.OrgID != "" && pool.OrgName != "":
belongsTo = pool.OrgName
- level = "org"
- } else if pool.EnterpriseID != "" && pool.EnterpriseName != "" {
+ level = entityTypeOrg
+ case pool.EnterpriseID != "" && pool.EnterpriseName != "":
belongsTo = pool.EnterpriseName
- level = "enterprise"
+ level = entityTypeEnterprise
}
- t.AppendRow(table.Row{pool.ID, pool.Image, pool.Flavor, strings.Join(tags, " "), belongsTo, level, pool.Enabled, pool.GetRunnerPrefix()})
+ row := table.Row{pool.ID, pool.Image, pool.Flavor, strings.Join(tags, " "), belongsTo, pool.Endpoint.Name, pool.Endpoint.EndpointType, pool.Enabled}
+ if long {
+ row = append(row, level, pool.CreatedAt, pool.UpdatedAt, pool.GetRunnerPrefix(), pool.Priority)
+ }
+ t.AppendRow(row)
t.AppendSeparator()
}
fmt.Println(t.Render())
}
func formatOnePool(pool params.Pool) {
+ if outputFormat == common.OutputFormatJSON {
+ printAsJSON(pool)
+ return
+ }
t := table.NewWriter()
rowConfigAutoMerge := table.RowConfig{AutoMerge: true}
@@ -509,20 +554,24 @@ func formatOnePool(pool params.Pool) {
var belongsTo string
var level string
- if pool.RepoID != "" && pool.RepoName != "" {
+ switch {
+ case pool.RepoID != "" && pool.RepoName != "":
belongsTo = pool.RepoName
- level = "repo"
- } else if pool.OrgID != "" && pool.OrgName != "" {
+ level = entityTypeRepo
+ case pool.OrgID != "" && pool.OrgName != "":
belongsTo = pool.OrgName
- level = "org"
- } else if pool.EnterpriseID != "" && pool.EnterpriseName != "" {
+ level = entityTypeOrg
+ case pool.EnterpriseID != "" && pool.EnterpriseName != "":
belongsTo = pool.EnterpriseName
- level = "enterprise"
+ level = entityTypeEnterprise
}
t.AppendHeader(header)
t.AppendRow(table.Row{"ID", pool.ID})
+ t.AppendRow(table.Row{"Created At", pool.CreatedAt})
+ t.AppendRow(table.Row{"Updated At", pool.UpdatedAt})
t.AppendRow(table.Row{"Provider Name", pool.ProviderName})
+ t.AppendRow(table.Row{"Priority", pool.Priority})
t.AppendRow(table.Row{"Image", pool.Image})
t.AppendRow(table.Row{"Flavor", pool.Flavor})
t.AppendRow(table.Row{"OS Type", pool.OSType})
@@ -536,7 +585,9 @@ func formatOnePool(pool params.Pool) {
t.AppendRow(table.Row{"Enabled", pool.Enabled})
t.AppendRow(table.Row{"Runner Prefix", pool.GetRunnerPrefix()})
t.AppendRow(table.Row{"Extra specs", string(pool.ExtraSpecs)})
- t.AppendRow(table.Row{"GitHub Runner Group", string(pool.GitHubRunnerGroup)})
+ t.AppendRow(table.Row{"GitHub Runner Group", pool.GitHubRunnerGroup})
+ t.AppendRow(table.Row{"Forge Type", pool.Endpoint.EndpointType})
+ t.AppendRow(table.Row{"Endpoint Name", pool.Endpoint.Name})
if len(pool.Instances) > 0 {
for _, instance := range pool.Instances {
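
The switch to local poolsPayloadGetter / poolPayloadGetter interfaces is what lets each branch of the RunE above assign into the same response variable: the swagger-generated response types all expose GetPayload with an identical signature, so they satisfy the interface implicitly. A reduced sketch with stand-in types, not the real garm client types:

package main

import "fmt"

type Pool struct{ ID string }
type Pools []Pool

// Mirrors the small interface introduced in pool.go above.
type poolsPayloadGetter interface {
	GetPayload() Pools
}

// Two hypothetical response types standing in for the generated
// ListRepoPoolsOK / ListOrgPoolsOK types.
type listRepoPoolsOK struct{ payload Pools }

func (r *listRepoPoolsOK) GetPayload() Pools { return r.payload }

type listOrgPoolsOK struct{ payload Pools }

func (r *listOrgPoolsOK) GetPayload() Pools { return r.payload }

func main() {
	var response poolsPayloadGetter
	// Either branch can assign to the same variable; the formatter only
	// needs GetPayload, so the per-branch payload extraction collapses.
	response = &listRepoPoolsOK{payload: Pools{{ID: "pool-1"}}}
	fmt.Println(response.GetPayload())
	response = &listOrgPoolsOK{payload: Pools{{ID: "pool-2"}}}
	fmt.Println(response.GetPayload())
}

One subtlety of this pattern: assigning a typed nil pointer into the interface would leave response non-nil, which is why the commands return on err != nil before ever calling GetPayload.
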
diff --git a/cmd/garm-cli/cmd/profile.go b/cmd/garm-cli/cmd/profile.go
index 71843df8..7e3e4d5b 100644
--- a/cmd/garm-cli/cmd/profile.go
+++ b/cmd/garm-cli/cmd/profile.go
@@ -18,13 +18,13 @@ import (
"fmt"
"strings"
+ "github.com/jedib0t/go-pretty/v6/table"
+ "github.com/spf13/cobra"
+
apiClientLogin "github.com/cloudbase/garm/client/login"
"github.com/cloudbase/garm/cmd/garm-cli/common"
"github.com/cloudbase/garm/cmd/garm-cli/config"
"github.com/cloudbase/garm/params"
-
- "github.com/jedib0t/go-pretty/v6/table"
- "github.com/spf13/cobra"
)
var (
@@ -55,7 +55,7 @@ This command will list all currently defined profiles in the local configuration
file of the garm client.
`,
SilenceUsage: true,
- RunE: func(cmd *cobra.Command, args []string) error {
+ RunE: func(_ *cobra.Command, _ []string) error {
if needsInit {
return errNeedsInitError
}
@@ -76,7 +76,7 @@ var profileDeleteCmd = &cobra.Command{
Short: "Delete profile",
Long: `Delete a profile from the local CLI configuration.`,
SilenceUsage: true,
- RunE: func(cmd *cobra.Command, args []string) error {
+ RunE: func(_ *cobra.Command, args []string) error {
if needsInit {
return errNeedsInitError
}
@@ -101,7 +101,7 @@ var poolSwitchCmd = &cobra.Command{
Short: "Switch to a different profile",
Long: `Switch the CLI to a different profile.`,
SilenceUsage: true,
- RunE: func(cmd *cobra.Command, args []string) error {
+ RunE: func(_ *cobra.Command, args []string) error {
if needsInit {
return errNeedsInitError
}
@@ -132,7 +132,7 @@ var profileAddCmd = &cobra.Command{
Short: "Add profile",
Long: `Create a profile for a new garm installation.`,
SilenceUsage: true,
- RunE: func(cmd *cobra.Command, args []string) error {
+ RunE: func(_ *cobra.Command, _ []string) error {
if cfg != nil {
if cfg.HasManager(loginProfileName) {
return fmt.Errorf("a manager with name %s already exists in your local config", loginProfileName)
@@ -145,7 +145,7 @@ var profileAddCmd = &cobra.Command{
url := strings.TrimSuffix(loginURL, "/")
- initApiClient(url, "")
+ initAPIClient(url, "")
newLoginParamsReq := apiClientLogin.NewLoginParams()
newLoginParamsReq.Body = params.PasswordLoginParams{
@@ -180,7 +180,7 @@ This command will refresh the bearer token associated with an already defined ga
installation, by performing a login.
`,
SilenceUsage: true,
- RunE: func(cmd *cobra.Command, args []string) error {
+ RunE: func(_ *cobra.Command, _ []string) error {
if needsInit {
return errNeedsInitError
}
@@ -239,6 +239,10 @@ func init() {
}
func formatProfiles(profiles []config.Manager) {
+ if outputFormat == common.OutputFormatJSON {
+ printAsJSON(profiles)
+ return
+ }
t := table.NewWriter()
header := table.Row{"Name", "Base URL"}
t.AppendHeader(header)
@@ -264,7 +268,7 @@ func promptUnsetLoginVariables() error {
}
if loginPassword == "" {
- loginPassword, err = common.PromptPassword("Password")
+ loginPassword, err = common.PromptPassword("Password", "")
if err != nil {
return err
}
diff --git a/cmd/garm-cli/cmd/provider.go b/cmd/garm-cli/cmd/provider.go
index e9635dd0..b4f05401 100644
--- a/cmd/garm-cli/cmd/provider.go
+++ b/cmd/garm-cli/cmd/provider.go
@@ -17,11 +17,12 @@ package cmd
import (
"fmt"
- apiClientProviders "github.com/cloudbase/garm/client/providers"
- "github.com/cloudbase/garm/params"
-
"github.com/jedib0t/go-pretty/v6/table"
"github.com/spf13/cobra"
+
+ apiClientProviders "github.com/cloudbase/garm/client/providers"
+ "github.com/cloudbase/garm/cmd/garm-cli/common"
+ "github.com/cloudbase/garm/params"
)
// providerCmd represents the provider command
@@ -45,7 +46,7 @@ func init() {
Short: "List all configured providers",
Long: `List all cloud providers configured with the service.`,
SilenceUsage: true,
- RunE: func(cmd *cobra.Command, args []string) error {
+ RunE: func(_ *cobra.Command, _ []string) error {
if needsInit {
return errNeedsInitError
}
@@ -64,6 +65,10 @@ func init() {
}
func formatProviders(providers []params.Provider) {
+ if outputFormat == common.OutputFormatJSON {
+ printAsJSON(providers)
+ return
+ }
t := table.NewWriter()
header := table.Row{"Name", "Description", "Type"}
t.AppendHeader(header)
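
Each format* function in this diff now opens with the same dispatch: emit JSON and return when --format json was requested, otherwise fall through to the go-pretty table. A condensed stand-in showing the shape of that pattern; the Provider type and outputFormat variable here are simplified placeholders for the real params and cmd/garm-cli/common packages:

package main

import (
	"encoding/json"
	"fmt"
	"os"

	"github.com/jedib0t/go-pretty/v6/table"
)

type Provider struct {
	Name        string `json:"name"`
	Description string `json:"description"`
}

var outputFormat = "table" // set from the global --format flag in the real CLI

func formatProviders(providers []Provider) {
	// JSON short-circuit: marshal and return before any table is built.
	if outputFormat == "json" {
		asJSON, err := json.Marshal(providers)
		if err != nil {
			fmt.Printf("Failed to marshal value to json: %s", err)
			os.Exit(1)
		}
		fmt.Println(string(asJSON))
		return
	}
	// Table path, mirroring the go-pretty usage in the diff.
	t := table.NewWriter()
	t.AppendHeader(table.Row{"Name", "Description"})
	for _, p := range providers {
		t.AppendRow(table.Row{p.Name, p.Description})
	}
	fmt.Println(t.Render())
}

func main() {
	formatProviders([]Provider{{Name: "lxd", Description: "LXD provider"}})
}
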
diff --git a/cmd/garm-cli/cmd/repository.go b/cmd/garm-cli/cmd/repository.go
index 8466b318..cca1a7fe 100644
--- a/cmd/garm-cli/cmd/repository.go
+++ b/cmd/garm-cli/cmd/repository.go
@@ -16,19 +16,28 @@ package cmd
import (
"fmt"
-
- apiClientRepos "github.com/cloudbase/garm/client/repositories"
- "github.com/cloudbase/garm/params"
+ "strings"
"github.com/jedib0t/go-pretty/v6/table"
"github.com/spf13/cobra"
+
+ "github.com/cloudbase/garm-provider-common/util"
+ apiClientRepos "github.com/cloudbase/garm/client/repositories"
+ "github.com/cloudbase/garm/cmd/garm-cli/common"
+ "github.com/cloudbase/garm/params"
)
var (
- repoOwner string
- repoName string
- repoWebhookSecret string
- repoCreds string
+ repoOwner string
+ repoName string
+ repoEndpoint string
+ repoWebhookSecret string
+ repoCreds string
+ forgeType string
+ randomWebhookSecret bool
+ insecureRepoWebhook bool
+ keepRepoWebhook bool
+ installRepoWebhook bool
)
// repositoryCmd represents the repository command
@@ -45,29 +54,165 @@ repository for which the garm maintains pools of self hosted runners.`,
Run: nil,
}
+var repoWebhookCmd = &cobra.Command{
+ Use: "webhook",
+ Short: "Manage repository webhooks",
+ Long: `Manage repository webhooks.`,
+ SilenceUsage: true,
+ Run: nil,
+}
+
+var repoWebhookInstallCmd = &cobra.Command{
+ Use: "install",
+ Short: "Install webhook",
+ Long: `Install webhook for a repository.`,
+ SilenceUsage: true,
+ RunE: func(_ *cobra.Command, args []string) error {
+ if needsInit {
+ return errNeedsInitError
+ }
+ if len(args) == 0 {
+ return fmt.Errorf("requires a repository ID")
+ }
+ if len(args) > 1 {
+ return fmt.Errorf("too many arguments")
+ }
+
+ repoID, err := resolveRepository(args[0], repoEndpoint)
+ if err != nil {
+ return err
+ }
+
+ installWebhookReq := apiClientRepos.NewInstallRepoWebhookParams()
+ installWebhookReq.RepoID = repoID
+ installWebhookReq.Body.InsecureSSL = insecureRepoWebhook
+ installWebhookReq.Body.WebhookEndpointType = params.WebhookEndpointDirect
+
+ response, err := apiCli.Repositories.InstallRepoWebhook(installWebhookReq, authToken)
+ if err != nil {
+ return err
+ }
+ formatOneHookInfo(response.Payload)
+ return nil
+ },
+}
+
+var repoHookInfoShowCmd = &cobra.Command{
+ Use: "show",
+ Short: "Show webhook info",
+ Long: `Show webhook info for a repository.`,
+ SilenceUsage: true,
+ RunE: func(_ *cobra.Command, args []string) error {
+ if needsInit {
+ return errNeedsInitError
+ }
+ if len(args) == 0 {
+ return fmt.Errorf("requires a repository ID")
+ }
+ if len(args) > 1 {
+ return fmt.Errorf("too many arguments")
+ }
+
+ repoID, err := resolveRepository(args[0], repoEndpoint)
+ if err != nil {
+ return err
+ }
+
+ showWebhookInfoReq := apiClientRepos.NewGetRepoWebhookInfoParams()
+ showWebhookInfoReq.RepoID = repoID
+
+ response, err := apiCli.Repositories.GetRepoWebhookInfo(showWebhookInfoReq, authToken)
+ if err != nil {
+ return err
+ }
+ formatOneHookInfo(response.Payload)
+ return nil
+ },
+}
+
+var repoWebhookUninstallCmd = &cobra.Command{
+ Use: "uninstall",
+ Short: "Uninstall webhook",
+ Long: `Uninstall webhook for a repository.`,
+ SilenceUsage: true,
+ RunE: func(_ *cobra.Command, args []string) error {
+ if needsInit {
+ return errNeedsInitError
+ }
+ if len(args) == 0 {
+ return fmt.Errorf("requires a repository ID")
+ }
+ if len(args) > 1 {
+ return fmt.Errorf("too many arguments")
+ }
+
+ repoID, err := resolveRepository(args[0], repoEndpoint)
+ if err != nil {
+ return err
+ }
+
+ uninstallWebhookReq := apiClientRepos.NewUninstallRepoWebhookParams()
+ uninstallWebhookReq.RepoID = repoID
+
+ err = apiCli.Repositories.UninstallRepoWebhook(uninstallWebhookReq, authToken)
+ if err != nil {
+ return err
+ }
+ return nil
+ },
+}
+
var repoAddCmd = &cobra.Command{
Use: "add",
Aliases: []string{"create"},
Short: "Add repository",
Long: `Add a new repository to the manager.`,
SilenceUsage: true,
- RunE: func(cmd *cobra.Command, args []string) error {
+ RunE: func(_ *cobra.Command, _ []string) error {
if needsInit {
return errNeedsInitError
}
+ if randomWebhookSecret {
+ secret, err := util.GetRandomString(32)
+ if err != nil {
+ return err
+ }
+ repoWebhookSecret = secret
+ }
+
newRepoReq := apiClientRepos.NewCreateRepoParams()
newRepoReq.Body = params.CreateRepoParams{
- Owner: repoOwner,
- Name: repoName,
- WebhookSecret: repoWebhookSecret,
- CredentialsName: repoCreds,
+ Owner: repoOwner,
+ Name: repoName,
+ WebhookSecret: repoWebhookSecret,
+ CredentialsName: repoCreds,
+ ForgeType: params.EndpointType(forgeType),
+ PoolBalancerType: params.PoolBalancerType(poolBalancerType),
}
response, err := apiCli.Repositories.CreateRepo(newRepoReq, authToken)
if err != nil {
return err
}
- formatOneRepository(response.Payload)
+
+ if installRepoWebhook {
+ installWebhookReq := apiClientRepos.NewInstallRepoWebhookParams()
+ installWebhookReq.RepoID = response.Payload.ID
+ installWebhookReq.Body.WebhookEndpointType = params.WebhookEndpointDirect
+
+ _, err := apiCli.Repositories.InstallRepoWebhook(installWebhookReq, authToken)
+ if err != nil {
+ return err
+ }
+ }
+
+ getRepoReq := apiClientRepos.NewGetRepoParams()
+ getRepoReq.RepoID = response.Payload.ID
+ repo, err := apiCli.Repositories.GetRepo(getRepoReq, authToken)
+ if err != nil {
+ return err
+ }
+ formatOneRepository(repo.Payload)
return nil
},
}
@@ -76,14 +221,17 @@ var repoListCmd = &cobra.Command{
Use: "list",
Aliases: []string{"ls"},
Short: "List repositories",
- Long: `List all configured respositories that are currently managed.`,
+ Long: `List all configured repositories that are currently managed.`,
SilenceUsage: true,
- RunE: func(cmd *cobra.Command, args []string) error {
+ RunE: func(_ *cobra.Command, _ []string) error {
if needsInit {
return errNeedsInitError
}
listReposReq := apiClientRepos.NewListReposParams()
+ listReposReq.Name = &repoName
+ listReposReq.Owner = &repoOwner
+ listReposReq.Endpoint = &repoEndpoint
response, err := apiCli.Repositories.ListRepos(listReposReq, authToken)
if err != nil {
return err
@@ -98,7 +246,7 @@ var repoUpdateCmd = &cobra.Command{
Short: "Update repository",
Long: `Update repository credentials or webhook secret.`,
SilenceUsage: true,
- RunE: func(cmd *cobra.Command, args []string) error {
+ RunE: func(_ *cobra.Command, args []string) error {
if needsInit {
return errNeedsInitError
}
@@ -110,12 +258,19 @@ var repoUpdateCmd = &cobra.Command{
if len(args) > 1 {
return fmt.Errorf("too many arguments")
}
+
+ repoID, err := resolveRepository(args[0], repoEndpoint)
+ if err != nil {
+ return err
+ }
+
updateReposReq := apiClientRepos.NewUpdateRepoParams()
updateReposReq.Body = params.UpdateEntityParams{
- WebhookSecret: repoWebhookSecret,
- CredentialsName: repoCreds,
+ WebhookSecret: repoWebhookSecret,
+ CredentialsName: repoCreds,
+ PoolBalancerType: params.PoolBalancerType(poolBalancerType),
}
- updateReposReq.RepoID = args[0]
+ updateReposReq.RepoID = repoID
response, err := apiCli.Repositories.UpdateRepo(updateReposReq, authToken)
if err != nil {
@@ -131,7 +286,7 @@ var repoShowCmd = &cobra.Command{
Short: "Show details for one repository",
Long: `Displays detailed information about a single repository.`,
SilenceUsage: true,
- RunE: func(cmd *cobra.Command, args []string) error {
+ RunE: func(_ *cobra.Command, args []string) error {
if needsInit {
return errNeedsInitError
}
@@ -141,8 +296,14 @@ var repoShowCmd = &cobra.Command{
if len(args) > 1 {
return fmt.Errorf("too many arguments")
}
+
+ repoID, err := resolveRepository(args[0], repoEndpoint)
+ if err != nil {
+ return err
+ }
+
showRepoReq := apiClientRepos.NewGetRepoParams()
- showRepoReq.RepoID = args[0]
+ showRepoReq.RepoID = repoID
response, err := apiCli.Repositories.GetRepo(showRepoReq, authToken)
if err != nil {
return err
@@ -158,7 +319,7 @@ var repoDeleteCmd = &cobra.Command{
Short: "Removes one repository",
Long: `Delete one repository from the manager.`,
SilenceUsage: true,
- RunE: func(cmd *cobra.Command, args []string) error {
+ RunE: func(_ *cobra.Command, args []string) error {
if needsInit {
return errNeedsInitError
}
@@ -168,8 +329,15 @@ var repoDeleteCmd = &cobra.Command{
if len(args) > 1 {
return fmt.Errorf("too many arguments")
}
+
+ repoID, err := resolveRepository(args[0], repoEndpoint)
+ if err != nil {
+ return err
+ }
+
deleteRepoReq := apiClientRepos.NewDeleteRepoParams()
- deleteRepoReq.RepoID = args[0]
+ deleteRepoReq.RepoID = repoID
+ deleteRepoReq.KeepWebhook = &keepRepoWebhook
if err := apiCli.Repositories.DeleteRepo(deleteRepoReq, authToken); err != nil {
return err
}
@@ -178,16 +346,48 @@ var repoDeleteCmd = &cobra.Command{
}
func init() {
-
repoAddCmd.Flags().StringVar(&repoOwner, "owner", "", "The owner of this repository")
+ repoAddCmd.Flags().StringVar(&poolBalancerType, "pool-balancer-type", string(params.PoolBalancerTypeRoundRobin), "The balancing strategy to use when creating runners in pools matching requested labels.")
repoAddCmd.Flags().StringVar(&repoName, "name", "", "The name of the repository")
+ repoAddCmd.Flags().StringVar(&forgeType, "forge-type", "", "The forge type of the repository. Supported values: github, gitea.")
repoAddCmd.Flags().StringVar(&repoWebhookSecret, "webhook-secret", "", "The webhook secret for this repository")
repoAddCmd.Flags().StringVar(&repoCreds, "credentials", "", "Credentials name. See credentials list.")
+ repoAddCmd.Flags().BoolVar(&randomWebhookSecret, "random-webhook-secret", false, "Generate a random webhook secret for this repository.")
+ repoAddCmd.Flags().BoolVar(&installRepoWebhook, "install-webhook", false, "Install the webhook as part of the add operation.")
+ repoAddCmd.MarkFlagsMutuallyExclusive("webhook-secret", "random-webhook-secret")
+ repoAddCmd.MarkFlagsOneRequired("webhook-secret", "random-webhook-secret")
+
+ repoListCmd.Flags().BoolVarP(&long, "long", "l", false, "Include additional info.")
+ repoListCmd.Flags().StringVarP(&repoName, "name", "n", "", "Exact repo name to filter by.")
+ repoListCmd.Flags().StringVarP(&repoOwner, "owner", "o", "", "Exact repo owner to filter by.")
+ repoListCmd.Flags().StringVarP(&repoEndpoint, "endpoint", "e", "", "Exact endpoint name to filter by.")
+
repoAddCmd.MarkFlagRequired("credentials") //nolint
repoAddCmd.MarkFlagRequired("owner") //nolint
repoAddCmd.MarkFlagRequired("name") //nolint
- repoUpdateCmd.Flags().StringVar(&repoWebhookSecret, "webhook-secret", "", "The webhook secret for this repository")
+
+ repoDeleteCmd.Flags().BoolVar(&keepRepoWebhook, "keep-webhook", false, "Do not delete any existing webhook when removing the repo from GARM.")
+ repoDeleteCmd.Flags().StringVar(&repoEndpoint, "endpoint", "", "When using the name of the repo, the endpoint must be specified when multiple repositories with the same name exist.")
+
+ repoShowCmd.Flags().StringVar(&repoEndpoint, "endpoint", "", "When using the name of the repo, the endpoint must be specified when multiple repositories with the same name exist.")
+
+ repoUpdateCmd.Flags().StringVar(&repoWebhookSecret, "webhook-secret", "", "The webhook secret for this repository. If you update this secret, you will have to manually update the secret in GitHub as well.")
repoUpdateCmd.Flags().StringVar(&repoCreds, "credentials", "", "Credentials name. See credentials list.")
+ repoUpdateCmd.Flags().StringVar(&poolBalancerType, "pool-balancer-type", "", "The balancing strategy to use when creating runners in pools matching requested labels.")
+ repoUpdateCmd.Flags().StringVar(&repoEndpoint, "endpoint", "", "When using the name of the repo, the endpoint must be specified when multiple repositories with the same name exist.")
+
+ repoWebhookInstallCmd.Flags().BoolVar(&insecureRepoWebhook, "insecure", false, "Ignore self signed certificate errors.")
+ repoWebhookInstallCmd.Flags().StringVar(&repoEndpoint, "endpoint", "", "When using the name of the repo, the endpoint must be specified when multiple repositories with the same name exist.")
+
+ repoWebhookUninstallCmd.Flags().StringVar(&repoEndpoint, "endpoint", "", "When using the name of the repo, the endpoint must be specified when multiple repositories with the same name exist.")
+
+ repoHookInfoShowCmd.Flags().StringVar(&repoEndpoint, "endpoint", "", "When using the name of the repo, the endpoint must be specified when multiple repositories with the same name exist.")
+
+ repoWebhookCmd.AddCommand(
+ repoWebhookInstallCmd,
+ repoWebhookUninstallCmd,
+ repoHookInfoShowCmd,
+ )
repositoryCmd.AddCommand(
repoListCmd,
@@ -195,31 +395,55 @@ func init() {
repoShowCmd,
repoDeleteCmd,
repoUpdateCmd,
+ repoWebhookCmd,
)
rootCmd.AddCommand(repositoryCmd)
}
func formatRepositories(repos []params.Repository) {
+ if outputFormat == common.OutputFormatJSON {
+ printAsJSON(repos)
+ return
+ }
t := table.NewWriter()
- header := table.Row{"ID", "Owner", "Name", "Credentials name", "Pool mgr running"}
+ header := table.Row{"ID", "Owner", "Name", "Endpoint", "Credentials name", "Pool Balancer Type", "Forge type", "Pool mgr running"}
+ if long {
+ header = append(header, "Created At", "Updated At")
+ }
t.AppendHeader(header)
for _, val := range repos {
- t.AppendRow(table.Row{val.ID, val.Owner, val.Name, val.CredentialsName, val.PoolManagerStatus.IsRunning})
+ forgeType := val.Endpoint.EndpointType
+ if forgeType == "" {
+ forgeType = params.GithubEndpointType
+ }
+ row := table.Row{val.ID, val.Owner, val.Name, val.Endpoint.Name, val.GetCredentialsName(), val.GetBalancerType(), forgeType, val.PoolManagerStatus.IsRunning}
+ if long {
+ row = append(row, val.CreatedAt, val.UpdatedAt)
+ }
+ t.AppendRow(row)
t.AppendSeparator()
}
fmt.Println(t.Render())
}
func formatOneRepository(repo params.Repository) {
+ if outputFormat == common.OutputFormatJSON {
+ printAsJSON(repo)
+ return
+ }
t := table.NewWriter()
rowConfigAutoMerge := table.RowConfig{AutoMerge: true}
header := table.Row{"Field", "Value"}
t.AppendHeader(header)
t.AppendRow(table.Row{"ID", repo.ID})
+ t.AppendRow(table.Row{"Created At", repo.CreatedAt})
+ t.AppendRow(table.Row{"Updated At", repo.UpdatedAt})
t.AppendRow(table.Row{"Owner", repo.Owner})
t.AppendRow(table.Row{"Name", repo.Name})
- t.AppendRow(table.Row{"Credentials", repo.CredentialsName})
+ t.AppendRow(table.Row{"Endpoint", repo.Endpoint.Name})
+ t.AppendRow(table.Row{"Pool balancer type", repo.GetBalancerType()})
+ t.AppendRow(table.Row{"Credentials", repo.GetCredentialsName()})
t.AppendRow(table.Row{"Pool manager running", repo.PoolManagerStatus.IsRunning})
if !repo.PoolManagerStatus.IsRunning {
t.AppendRow(table.Row{"Failure reason", repo.PoolManagerStatus.FailureReason})
@@ -230,9 +454,16 @@ func formatOneRepository(repo params.Repository) {
t.AppendRow(table.Row{"Pools", pool.ID}, rowConfigAutoMerge)
}
}
+
+ if len(repo.Events) > 0 {
+ for _, event := range repo.Events {
+ t.AppendRow(table.Row{"Events", fmt.Sprintf("%s %s: %s", event.CreatedAt.Format("2006-01-02T15:04:05"), strings.ToUpper(string(event.EventLevel)), event.Message)}, rowConfigAutoMerge)
+ }
+ }
+
t.SetColumnConfigs([]table.ColumnConfig{
{Number: 1, AutoMerge: true},
- {Number: 2, AutoMerge: false},
+ {Number: 2, AutoMerge: false, WidthMax: 100},
})
fmt.Println(t.Render())
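
The --random-webhook-secret flag relies on util.GetRandomString(32) from garm-provider-common. A rough equivalent, assuming the helper returns a crypto/rand-backed string of the requested length; the exact encoding used upstream may differ:

package main

import (
	"crypto/rand"
	"encoding/base64"
	"fmt"
)

// getRandomString is a stand-in for util.GetRandomString: n characters
// derived from cryptographically secure random bytes.
func getRandomString(n int) (string, error) {
	buf := make([]byte, n)
	if _, err := rand.Read(buf); err != nil {
		return "", fmt.Errorf("error reading random bytes: %w", err)
	}
	// base64 expands the input (~4/3), so trimming back to n is safe.
	return base64.RawURLEncoding.EncodeToString(buf)[:n], nil
}

func main() {
	secret, err := getRandomString(32)
	if err != nil {
		panic(err)
	}
	fmt.Println(secret)
}
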
diff --git a/cmd/garm-cli/cmd/root.go b/cmd/garm-cli/cmd/root.go
index c491e263..df3ef11b 100644
--- a/cmd/garm-cli/cmd/root.go
+++ b/cmd/garm-cli/cmd/root.go
@@ -15,19 +15,27 @@
package cmd
import (
+ "encoding/json"
"fmt"
"net/url"
"os"
- apiClient "github.com/cloudbase/garm/client"
- "github.com/cloudbase/garm/cmd/garm-cli/config"
"github.com/go-openapi/runtime"
-
openapiRuntimeClient "github.com/go-openapi/runtime/client"
+ "github.com/jedib0t/go-pretty/v6/table"
"github.com/spf13/cobra"
+
+ apiClient "github.com/cloudbase/garm/client"
+ "github.com/cloudbase/garm/cmd/garm-cli/common"
+ "github.com/cloudbase/garm/cmd/garm-cli/config"
+ "github.com/cloudbase/garm/params"
)
-var Version string
+const (
+ entityTypeOrg string = "org"
+ entityTypeRepo string = "repo"
+ entityTypeEnterprise string = "enterprise"
+)
var (
cfg *config.Config
@@ -36,7 +44,9 @@ var (
authToken runtime.ClientAuthInfoWriter
needsInit bool
debug bool
- errNeedsInitError = fmt.Errorf("please log into a garm installation first")
+ poolBalancerType string
+ outputFormat common.OutputFormat = common.OutputFormatTable
+ errNeedsInitError = fmt.Errorf("please log into a garm installation first")
)
// rootCmd represents the base command when called without any subcommands
@@ -50,6 +60,8 @@ var rootCmd = &cobra.Command{
// This is called by main.main(). It only needs to happen once to the rootCmd.
func Execute() {
rootCmd.PersistentFlags().BoolVar(&debug, "debug", false, "Enable debug on all API calls")
+ rootCmd.PersistentFlags().Var(&outputFormat, "format", "Output format (table, json)")
+
cobra.OnInitialize(initConfig)
err := rootCmd.Execute()
@@ -58,24 +70,24 @@ func Execute() {
}
}
-func initApiClient(baseUrl, token string) {
- baseUrlParsed, err := url.Parse(baseUrl)
+func initAPIClient(baseURL, token string) {
+ baseURLParsed, err := url.Parse(baseURL)
if err != nil {
- fmt.Printf("Failed to parse base url %s: %s", baseUrl, err)
+ fmt.Printf("Failed to parse base url %s: %s", baseURL, err)
os.Exit(1)
}
- apiPath, err := url.JoinPath(baseUrlParsed.Path, apiClient.DefaultBasePath)
+ apiPath, err := url.JoinPath(baseURLParsed.Path, apiClient.DefaultBasePath)
if err != nil {
- fmt.Printf("Failed to join base url path %s with %s: %s", baseUrlParsed.Path, apiClient.DefaultBasePath, err)
+ fmt.Printf("Failed to join base url path %s with %s: %s", baseURLParsed.Path, apiClient.DefaultBasePath, err)
os.Exit(1)
}
if debug {
os.Setenv("SWAGGER_DEBUG", "true")
}
transportCfg := apiClient.DefaultTransportConfig().
- WithHost(baseUrlParsed.Host).
+ WithHost(baseURLParsed.Host).
WithBasePath(apiPath).
- WithSchemes([]string{baseUrlParsed.Scheme})
+ WithSchemes([]string{baseURLParsed.Scheme})
apiCli = apiClient.NewHTTPClientWithConfig(nil, transportCfg)
authToken = openapiRuntimeClient.BearerToken(token)
}
@@ -96,5 +108,28 @@ func initConfig() {
mgr = cfg.Managers[0]
}
}
- initApiClient(mgr.BaseURL, mgr.Token)
+ initAPIClient(mgr.BaseURL, mgr.Token)
+}
+
+func formatOneHookInfo(hook params.HookInfo) {
+ t := table.NewWriter()
+ header := table.Row{"Field", "Value"}
+ t.AppendHeader(header)
+ t.AppendRows([]table.Row{
+ {"ID", hook.ID},
+ {"URL", hook.URL},
+ {"Events", hook.Events},
+ {"Active", hook.Active},
+ {"Insecure SSL", hook.InsecureSSL},
+ })
+ fmt.Println(t.Render())
+}
+
+func printAsJSON(value interface{}) {
+ asJs, err := json.Marshal(value)
+ if err != nil {
+ fmt.Printf("Failed to marshal value to json: %s", err)
+ os.Exit(1)
+ }
+ fmt.Println(string(asJs))
}
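
Registering the global flag with PersistentFlags().Var means common.OutputFormat has to implement pflag.Value (String, Set, Type). A plausible minimal implementation, assuming the real type is a validated string; the constant names mirror the OutputFormatTable/OutputFormatJSON identifiers used in this diff:

package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

type OutputFormat string

const (
	OutputFormatTable OutputFormat = "table"
	OutputFormatJSON  OutputFormat = "json"
)

func (o *OutputFormat) String() string { return string(*o) }

// Set validates and stores the value cobra parsed from --format.
func (o *OutputFormat) Set(value string) error {
	switch value {
	case "table", "json":
		*o = OutputFormat(value)
		return nil
	default:
		return fmt.Errorf("invalid format %q (allowed: table, json)", value)
	}
}

// Type is what cobra prints next to the flag name in help output.
func (o *OutputFormat) Type() string { return "string" }

func main() {
	outputFormat := OutputFormatTable
	cmd := &cobra.Command{
		Use: "garm-cli",
		RunE: func(_ *cobra.Command, _ []string) error {
			fmt.Println("format:", outputFormat)
			return nil
		},
	}
	cmd.PersistentFlags().Var(&outputFormat, "format", "Output format (table, json)")
	_ = cmd.Execute()
}

With this in place, --format rejects anything other than table or json at parse time, before any RunE runs.
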
diff --git a/cmd/garm-cli/cmd/runner.go b/cmd/garm-cli/cmd/runner.go
index b4cfbcfe..44a7b8df 100644
--- a/cmd/garm-cli/cmd/runner.go
+++ b/cmd/garm-cli/cmd/runner.go
@@ -18,22 +18,25 @@ import (
"fmt"
"os"
- "github.com/cloudbase/garm/params"
+ "github.com/jedib0t/go-pretty/v6/table"
+ "github.com/spf13/cobra"
apiClientEnterprises "github.com/cloudbase/garm/client/enterprises"
apiClientInstances "github.com/cloudbase/garm/client/instances"
apiClientOrgs "github.com/cloudbase/garm/client/organizations"
apiClientRepos "github.com/cloudbase/garm/client/repositories"
- "github.com/jedib0t/go-pretty/v6/table"
- "github.com/spf13/cobra"
+ "github.com/cloudbase/garm/cmd/garm-cli/common"
+ "github.com/cloudbase/garm/params"
)
var (
- runnerRepository string
- runnerOrganization string
- runnerEnterprise string
- runnerAll bool
- forceRemove bool
+ runnerRepository string
+ runnerOrganization string
+ runnerEnterprise string
+ runnerAll bool
+ forceRemove bool
+ bypassGHUnauthorized bool
+ long bool
)
// runnerCmd represents the runner command
@@ -47,6 +50,10 @@ list all instances.`,
Run: nil,
}
+type instancesPayloadGetter interface {
+ GetPayload() params.Instances
+}
+
var runnerListCmd = &cobra.Command{
Use: "list",
Aliases: []string{"ls"},
@@ -80,7 +87,7 @@ Example:
return errNeedsInitError
}
- var instances []params.Instance
+ var response instancesPayloadGetter
var err error
switch len(args) {
@@ -92,38 +99,37 @@ Example:
return fmt.Errorf("specifying a pool ID and any of [all org repo enterprise] are mutually exclusive")
}
- var response *apiClientInstances.ListPoolInstancesOK
listPoolInstancesReq := apiClientInstances.NewListPoolInstancesParams()
listPoolInstancesReq.PoolID = args[0]
response, err = apiCli.Instances.ListPoolInstances(listPoolInstancesReq, authToken)
- instances = response.Payload
case 0:
if cmd.Flags().Changed("repo") {
- var response *apiClientRepos.ListRepoInstancesOK
+ runnerRepo, resErr := resolveRepository(runnerRepository, endpointName)
+ if resErr != nil {
+ return resErr
+ }
listRepoInstancesReq := apiClientRepos.NewListRepoInstancesParams()
- listRepoInstancesReq.RepoID = runnerRepository
+ listRepoInstancesReq.RepoID = runnerRepo
response, err = apiCli.Repositories.ListRepoInstances(listRepoInstancesReq, authToken)
- instances = response.Payload
} else if cmd.Flags().Changed("org") {
- var response *apiClientOrgs.ListOrgInstancesOK
+ runnerOrg, resErr := resolveOrganization(runnerOrganization, endpointName)
+ if resErr != nil {
+ return resErr
+ }
listOrgInstancesReq := apiClientOrgs.NewListOrgInstancesParams()
- listOrgInstancesReq.OrgID = runnerOrganization
+ listOrgInstancesReq.OrgID = runnerOrg
response, err = apiCli.Organizations.ListOrgInstances(listOrgInstancesReq, authToken)
- instances = response.Payload
} else if cmd.Flags().Changed("enterprise") {
- var response *apiClientEnterprises.ListEnterpriseInstancesOK
+ runnerEnt, resErr := resolveEnterprise(runnerEnterprise, endpointName)
+ if resErr != nil {
+ return resErr
+ }
listEnterpriseInstancesReq := apiClientEnterprises.NewListEnterpriseInstancesParams()
- listEnterpriseInstancesReq.EnterpriseID = runnerEnterprise
+ listEnterpriseInstancesReq.EnterpriseID = runnerEnt
response, err = apiCli.Enterprises.ListEnterpriseInstances(listEnterpriseInstancesReq, authToken)
- instances = response.Payload
- } else if cmd.Flags().Changed("all") {
- var response *apiClientInstances.ListInstancesOK
+ } else {
listInstancesReq := apiClientInstances.NewListInstancesParams()
response, err = apiCli.Instances.ListInstances(listInstancesReq, authToken)
- instances = response.Payload
- } else {
- cmd.Help() //nolint
- os.Exit(0)
}
default:
cmd.Help() //nolint
@@ -133,7 +139,9 @@ Example:
if err != nil {
return err
}
- formatInstances(instances)
+
+ instances := response.GetPayload()
+ formatInstances(instances, long)
return nil
},
}
@@ -143,7 +151,7 @@ var runnerShowCmd = &cobra.Command{
Short: "Show details for a runner",
Long: `Displays a detailed view of a single runner.`,
SilenceUsage: true,
- RunE: func(cmd *cobra.Command, args []string) error {
+ RunE: func(_ *cobra.Command, args []string) error {
if needsInit {
return errNeedsInitError
}
@@ -182,7 +190,7 @@ NOTE: An active runner cannot be removed from Github. You will have
to either cancel the workflow or wait for it to finish.
`,
SilenceUsage: true,
- RunE: func(cmd *cobra.Command, args []string) error {
+ RunE: func(_ *cobra.Command, args []string) error {
if needsInit {
return errNeedsInitError
}
@@ -191,12 +199,10 @@ to either cancel the workflow or wait for it to finish.
return fmt.Errorf("requires a runner name")
}
- if !forceRemove {
- return fmt.Errorf("use --force-remove-runner=true to remove a runner")
- }
-
deleteInstanceReq := apiClientInstances.NewDeleteInstanceParams()
deleteInstanceReq.InstanceName = args[0]
+ deleteInstanceReq.ForceRemove = &forceRemove
+ deleteInstanceReq.BypassGHUnauthorized = &bypassGHUnauthorized
if err := apiCli.Instances.DeleteInstance(deleteInstanceReq, authToken); err != nil {
return err
}
@@ -206,12 +212,17 @@ to either cancel the workflow or wait for it to finish.
func init() {
runnerListCmd.Flags().StringVarP(&runnerRepository, "repo", "r", "", "List all runners from all pools within this repository.")
- runnerListCmd.Flags().StringVarP(&runnerOrganization, "org", "o", "", "List all runners from all pools withing this organization.")
- runnerListCmd.Flags().StringVarP(&runnerEnterprise, "enterprise", "e", "", "List all runners from all pools withing this enterprise.")
- runnerListCmd.Flags().BoolVarP(&runnerAll, "all", "a", false, "List all runners, regardless of org or repo.")
+ runnerListCmd.Flags().StringVarP(&runnerOrganization, "org", "o", "", "List all runners from all pools within this organization.")
+ runnerListCmd.Flags().StringVarP(&runnerEnterprise, "enterprise", "e", "", "List all runners from all pools within this enterprise.")
+ runnerListCmd.Flags().BoolVarP(&runnerAll, "all", "a", true, "List all runners, regardless of org or repo. (deprecated)")
+ runnerListCmd.Flags().BoolVarP(&long, "long", "l", false, "Include additional info.")
runnerListCmd.MarkFlagsMutuallyExclusive("repo", "org", "enterprise", "all")
+ runnerListCmd.Flags().StringVar(&endpointName, "endpoint", "", "When using the name of an entity, the endpoint must be specified when multiple entities with the same name exist.")
- runnerDeleteCmd.Flags().BoolVarP(&forceRemove, "force-remove-runner", "f", false, "Confirm you want to delete a runner")
+ runnerListCmd.Flags().MarkDeprecated("all", "all runners are listed by default in the absence of --repo, --org or --enterprise.")
+
+ runnerDeleteCmd.Flags().BoolVarP(&forceRemove, "force-remove-runner", "f", false, "Forcefully remove a runner. If set to true, GARM will ignore provider errors when removing the runner.")
+ runnerDeleteCmd.Flags().BoolVarP(&bypassGHUnauthorized, "bypass-github-unauthorized", "b", false, "Ignore Unauthorized errors from GitHub and proceed with removing runner from provider and DB. This is useful when credentials are no longer valid and you want to remove your runners. Warning, this has the potential to leave orphaned runners in GitHub. You will need to update your credentials to properly consolidate.")
runnerDeleteCmd.MarkFlagsMutuallyExclusive("force-remove-runner")
runnerCmd.AddCommand(
@@ -223,25 +234,46 @@ func init() {
rootCmd.AddCommand(runnerCmd)
}
-func formatInstances(param []params.Instance) {
+func formatInstances(param []params.Instance, detailed bool) {
+ if outputFormat == common.OutputFormatJSON {
+ printAsJSON(param)
+ return
+ }
t := table.NewWriter()
- header := table.Row{"Nr", "Name", "Status", "Runner Status", "Pool ID"}
+ header := table.Row{"Nr", "Name", "Status", "Runner Status", "Pool ID", "Scalse Set ID"}
+ if detailed {
+ header = append(header, "Created At", "Updated At", "Job Name", "Started At", "Run ID", "Repository")
+ }
t.AppendHeader(header)
for idx, inst := range param {
- t.AppendRow(table.Row{idx + 1, inst.Name, inst.Status, inst.RunnerStatus, inst.PoolID})
+ row := table.Row{idx + 1, inst.Name, inst.Status, inst.RunnerStatus, inst.PoolID, inst.ScaleSetID}
+ if detailed {
+ row = append(row, inst.CreatedAt, inst.UpdatedAt)
+ if inst.Job != nil {
+ repo := fmt.Sprintf("%s/%s", inst.Job.RepositoryOwner, inst.Job.RepositoryName)
+ row = append(row, inst.Job.Name, inst.Job.StartedAt, inst.Job.RunID, repo)
+ }
+ }
+ t.AppendRow(row)
t.AppendSeparator()
}
fmt.Println(t.Render())
}
func formatSingleInstance(instance params.Instance) {
+ if outputFormat == common.OutputFormatJSON {
+ printAsJSON(instance)
+ return
+ }
t := table.NewWriter()
header := table.Row{"Field", "Value"}
t.AppendHeader(header)
t.AppendRow(table.Row{"ID", instance.ID}, table.RowConfig{AutoMerge: false})
+ t.AppendRow(table.Row{"Created At", instance.CreatedAt})
+ t.AppendRow(table.Row{"Updated At", instance.UpdatedAt})
t.AppendRow(table.Row{"Provider ID", instance.ProviderID}, table.RowConfig{AutoMerge: false})
t.AppendRow(table.Row{"Name", instance.Name}, table.RowConfig{AutoMerge: false})
t.AppendRow(table.Row{"OS Type", instance.OSType}, table.RowConfig{AutoMerge: false})
@@ -250,7 +282,11 @@ func formatSingleInstance(instance params.Instance) {
t.AppendRow(table.Row{"OS Version", instance.OSVersion}, table.RowConfig{AutoMerge: false})
t.AppendRow(table.Row{"Status", instance.Status}, table.RowConfig{AutoMerge: false})
t.AppendRow(table.Row{"Runner Status", instance.RunnerStatus}, table.RowConfig{AutoMerge: false})
- t.AppendRow(table.Row{"Pool ID", instance.PoolID}, table.RowConfig{AutoMerge: false})
+ if instance.PoolID != "" {
+ t.AppendRow(table.Row{"Pool ID", instance.PoolID}, table.RowConfig{AutoMerge: false})
+ } else if instance.ScaleSetID != 0 {
+ t.AppendRow(table.Row{"Scale Set ID", instance.ScaleSetID}, table.RowConfig{AutoMerge: false})
+ }
if len(instance.Addresses) > 0 {
for _, addr := range instance.Addresses {
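
The assignment deleteInstanceReq.ForceRemove = &forceRemove above follows the go-openapi convention of modeling optional query parameters as pointer fields, so the client can tell "not set" apart from an explicit false. A small stand-in illustrating the distinction; the params type below is hypothetical, not the generated one:

package main

import "fmt"

type DeleteInstanceParams struct {
	InstanceName         string
	ForceRemove          *bool
	BypassGHUnauthorized *bool
}

func describe(p DeleteInstanceParams) {
	// nil means the query parameter is omitted entirely, letting the
	// server apply its default; a non-nil false is sent explicitly.
	if p.ForceRemove == nil {
		fmt.Println("forceRemove: omitted")
	} else {
		fmt.Printf("forceRemove: %v\n", *p.ForceRemove)
	}
}

func main() {
	force := false
	describe(DeleteInstanceParams{InstanceName: "runner-1"})
	describe(DeleteInstanceParams{InstanceName: "runner-1", ForceRemove: &force})
}
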
diff --git a/cmd/garm-cli/cmd/scalesets.go b/cmd/garm-cli/cmd/scalesets.go
new file mode 100644
index 00000000..a78fe33f
--- /dev/null
+++ b/cmd/garm-cli/cmd/scalesets.go
@@ -0,0 +1,539 @@
+// Copyright 2022 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package cmd
+
+import (
+ "fmt"
+ "os"
+
+ "github.com/jedib0t/go-pretty/v6/table"
+ "github.com/spf13/cobra"
+
+ commonParams "github.com/cloudbase/garm-provider-common/params"
+ apiClientEnterprises "github.com/cloudbase/garm/client/enterprises"
+ apiClientOrgs "github.com/cloudbase/garm/client/organizations"
+ apiClientRepos "github.com/cloudbase/garm/client/repositories"
+ apiClientScaleSets "github.com/cloudbase/garm/client/scalesets"
+ "github.com/cloudbase/garm/cmd/garm-cli/common"
+ "github.com/cloudbase/garm/params"
+)
+
+var (
+ scalesetProvider string
+ scalesetMaxRunners uint
+ scalesetMinIdleRunners uint
+ scalesetRunnerPrefix string
+ scalesetName string
+ scalesetImage string
+ scalesetFlavor string
+ scalesetOSType string
+ scalesetOSArch string
+ scalesetEnabled bool
+ scalesetRunnerBootstrapTimeout uint
+ scalesetRepository string
+ scalesetOrganization string
+ scalesetEnterprise string
+ scalesetExtraSpecsFile string
+ scalesetExtraSpecs string
+ scalesetGitHubRunnerGroup string
+)
+
+type scalesetPayloadGetter interface {
+ GetPayload() params.ScaleSet
+}
+
+type scalesetsPayloadGetter interface {
+ GetPayload() params.ScaleSets
+}
+
+// scalesetCmd represents the scale set command
+var scalesetCmd = &cobra.Command{
+ Use: "scaleset",
+ SilenceUsage: true,
+ Short: "List scale sets",
+ Long: `Query information or perform operations on scale sets.`,
+ Run: nil,
+}
+
+var scalesetListCmd = &cobra.Command{
+ Use: "list",
+ Aliases: []string{"ls"},
+ Short: "List scale sets",
+ Long: `List scale sets of repositories, orgs, enterprises or all of the above.
+
+This command will list scale sets from one repo, one org, one enterprise or,
+when no filter flag is given, all scale sets on the system. The filter flags
+are mutually exclusive.
+
+Example:
+
+ List scalesets from one repo:
+ garm-cli scaleset list --repo=05e7eac6-4705-486d-89c9-0170bbb576af
+
+ List scalesets from one org:
+ garm-cli scaleset list --org=5493e51f-3170-4ce3-9f05-3fe690fc6ec6
+
+ List scalesets from one enterprise:
+ garm-cli scaleset list --enterprise=a8ee4c66-e762-4cbe-a35d-175dba2c9e62
+
+ List all scalesets from all repos, orgs and enterprises:
+ garm-cli scaleset list
+
+`,
+ SilenceUsage: true,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ if needsInit {
+ return errNeedsInitError
+ }
+
+ var response scalesetsPayloadGetter
+ var err error
+
+ switch len(args) {
+ case 0:
+ if cmd.Flags().Changed("repo") {
+ scalesetRepository, err = resolveRepository(scalesetRepository, endpointName)
+ if err != nil {
+ return err
+ }
+ listRepoScaleSetsReq := apiClientRepos.NewListRepoScaleSetsParams()
+ listRepoScaleSetsReq.RepoID = scalesetRepository
+ response, err = apiCli.Repositories.ListRepoScaleSets(listRepoScaleSetsReq, authToken)
+ } else if cmd.Flags().Changed("org") {
+ scalesetOrganization, err = resolveOrganization(scalesetOrganization, endpointName)
+ if err != nil {
+ return err
+ }
+ listOrgScaleSetsReq := apiClientOrgs.NewListOrgScaleSetsParams()
+ listOrgScaleSetsReq.OrgID = scalesetOrganization
+ response, err = apiCli.Organizations.ListOrgScaleSets(listOrgScaleSetsReq, authToken)
+ } else if cmd.Flags().Changed("enterprise") {
+ scalesetEnterprise, err = resolveEnterprise(scalesetEnterprise, endpointName)
+ if err != nil {
+ return err
+ }
+ listEnterpriseScaleSetsReq := apiClientEnterprises.NewListEnterpriseScaleSetsParams()
+ listEnterpriseScaleSetsReq.EnterpriseID = scalesetEnterprise
+ response, err = apiCli.Enterprises.ListEnterpriseScaleSets(listEnterpriseScaleSetsReq, authToken)
+ } else {
+ listScaleSetsReq := apiClientScaleSets.NewListScalesetsParams()
+ response, err = apiCli.Scalesets.ListScalesets(listScaleSetsReq, authToken)
+ }
+ default:
+ cmd.Help() //nolint
+ os.Exit(0)
+ }
+
+ if err != nil {
+ return err
+ }
+ formatScaleSets(response.GetPayload())
+ return nil
+ },
+}
+
+var scaleSetShowCmd = &cobra.Command{
+ Use: "show",
+ Short: "Show details for a scale set",
+ Long: `Displays a detailed view of a single scale set.`,
+ SilenceUsage: true,
+ RunE: func(_ *cobra.Command, args []string) error {
+ if needsInit {
+ return errNeedsInitError
+ }
+
+ if len(args) == 0 {
+ return fmt.Errorf("requires a scale set ID")
+ }
+
+ if len(args) > 1 {
+ return fmt.Errorf("too many arguments")
+ }
+
+ getScaleSetReq := apiClientScaleSets.NewGetScaleSetParams()
+ getScaleSetReq.ScalesetID = args[0]
+ response, err := apiCli.Scalesets.GetScaleSet(getScaleSetReq, authToken)
+ if err != nil {
+ return err
+ }
+ formatOneScaleSet(response.Payload)
+ return nil
+ },
+}
+
+var scaleSetDeleteCmd = &cobra.Command{
+ Use: "delete",
+ Aliases: []string{"remove", "rm", "del"},
+ Short: "Delete scale set by ID",
+ Long: `Delete one scale set by referencing its ID, regardless of repo or org.`,
+ SilenceUsage: true,
+ RunE: func(_ *cobra.Command, args []string) error {
+ if needsInit {
+ return errNeedsInitError
+ }
+
+ if len(args) == 0 {
+ return fmt.Errorf("requires a scale set ID")
+ }
+
+ if len(args) > 1 {
+ return fmt.Errorf("too many arguments")
+ }
+
+ deleteScaleSetReq := apiClientScaleSets.NewDeleteScaleSetParams()
+ deleteScaleSetReq.ScalesetID = args[0]
+ if err := apiCli.Scalesets.DeleteScaleSet(deleteScaleSetReq, authToken); err != nil {
+ return err
+ }
+ return nil
+ },
+}
+
+var scaleSetAddCmd = &cobra.Command{
+ Use: "add",
+ Aliases: []string{"create"},
+ Short: "Add scale set",
+ Long: `Add a new scale set.`,
+ SilenceUsage: true,
+ RunE: func(cmd *cobra.Command, _ []string) error {
+ if needsInit {
+ return errNeedsInitError
+ }
+
+ newScaleSetParams := params.CreateScaleSetParams{
+ RunnerPrefix: params.RunnerPrefix{
+ Prefix: scalesetRunnerPrefix,
+ },
+ ProviderName: scalesetProvider,
+ Name: scalesetName,
+ MaxRunners: scalesetMaxRunners,
+ MinIdleRunners: scalesetMinIdleRunners,
+ Image: scalesetImage,
+ Flavor: scalesetFlavor,
+ OSType: commonParams.OSType(scalesetOSType),
+ OSArch: commonParams.OSArch(scalesetOSArch),
+ Enabled: scalesetEnabled,
+ RunnerBootstrapTimeout: scalesetRunnerBootstrapTimeout,
+ GitHubRunnerGroup: scalesetGitHubRunnerGroup,
+ }
+
+ if cmd.Flags().Changed("extra-specs") {
+ data, err := asRawMessage([]byte(scalesetExtraSpecs))
+ if err != nil {
+ return err
+ }
+ newScaleSetParams.ExtraSpecs = data
+ }
+
+ if scalesetExtraSpecsFile != "" {
+ data, err := extraSpecsFromFile(scalesetExtraSpecsFile)
+ if err != nil {
+ return err
+ }
+ newScaleSetParams.ExtraSpecs = data
+ }
+
+ if err := newScaleSetParams.Validate(); err != nil {
+ return err
+ }
+
+ var err error
+ var response scalesetPayloadGetter
+ if cmd.Flags().Changed("repo") {
+ scalesetRepository, err = resolveRepository(scalesetRepository, endpointName)
+ if err != nil {
+ return err
+ }
+ newRepoScaleSetReq := apiClientRepos.NewCreateRepoScaleSetParams()
+ newRepoScaleSetReq.RepoID = scalesetRepository
+ newRepoScaleSetReq.Body = newScaleSetParams
+ response, err = apiCli.Repositories.CreateRepoScaleSet(newRepoScaleSetReq, authToken)
+ } else if cmd.Flags().Changed("org") {
+ scalesetOrganization, err = resolveOrganization(scalesetOrganization, endpointName)
+ if err != nil {
+ return err
+ }
+ newOrgScaleSetReq := apiClientOrgs.NewCreateOrgScaleSetParams()
+ newOrgScaleSetReq.OrgID = scalesetOrganization
+ newOrgScaleSetReq.Body = newScaleSetParams
+ response, err = apiCli.Organizations.CreateOrgScaleSet(newOrgScaleSetReq, authToken)
+ } else if cmd.Flags().Changed("enterprise") {
+ scalesetEnterprise, err = resolveEnterprise(scalesetEnterprise, endpointName)
+ if err != nil {
+ return err
+ }
+ newEnterpriseScaleSetReq := apiClientEnterprises.NewCreateEnterpriseScaleSetParams()
+ newEnterpriseScaleSetReq.EnterpriseID = scalesetEnterprise
+ newEnterpriseScaleSetReq.Body = newScaleSetParams
+ response, err = apiCli.Enterprises.CreateEnterpriseScaleSet(newEnterpriseScaleSetReq, authToken)
+ } else {
+ cmd.Help() //nolint
+ os.Exit(0)
+ }
+
+ if err != nil {
+ return err
+ }
+
+ formatOneScaleSet(response.GetPayload())
+ return nil
+ },
+}
+
+var scaleSetUpdateCmd = &cobra.Command{
+ Use: "update",
+ Short: "Update one scale set",
+ Long: `Updates scale set characteristics.
+
+This command updates the scale set characteristics. Runners created before the update
+will not be recreated. If they no longer suit your needs, you will need to
+explicitly remove them using the runner delete command.
+ `,
+ SilenceUsage: true,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ if needsInit {
+ return errNeedsInitError
+ }
+
+ if len(args) == 0 {
+ return fmt.Errorf("command requires a scale set ID")
+ }
+
+ if len(args) > 1 {
+ return fmt.Errorf("too many arguments")
+ }
+
+ updateScaleSetReq := apiClientScaleSets.NewUpdateScaleSetParams()
+ scaleSetUpdateParams := params.UpdateScaleSetParams{}
+
+ if cmd.Flags().Changed("image") {
+ scaleSetUpdateParams.Image = scalesetImage
+ }
+
+ if cmd.Flags().Changed("name") {
+ scaleSetUpdateParams.Name = scalesetName
+ }
+
+ if cmd.Flags().Changed("flavor") {
+ scaleSetUpdateParams.Flavor = scalesetFlavor
+ }
+
+ if cmd.Flags().Changed("os-type") {
+ scaleSetUpdateParams.OSType = commonParams.OSType(scalesetOSType)
+ }
+
+ if cmd.Flags().Changed("os-arch") {
+ scaleSetUpdateParams.OSArch = commonParams.OSArch(scalesetOSArch)
+ }
+
+ if cmd.Flags().Changed("max-runners") {
+ scaleSetUpdateParams.MaxRunners = &scalesetMaxRunners
+ }
+
+ if cmd.Flags().Changed("min-idle-runners") {
+ scaleSetUpdateParams.MinIdleRunners = &scalesetMinIdleRunners
+ }
+
+ if cmd.Flags().Changed("runner-prefix") {
+ scaleSetUpdateParams.RunnerPrefix = params.RunnerPrefix{
+ Prefix: scalesetRunnerPrefix,
+ }
+ }
+
+ if cmd.Flags().Changed("runner-group") {
+ scaleSetUpdateParams.GitHubRunnerGroup = &scalesetGitHubRunnerGroup
+ }
+
+ if cmd.Flags().Changed("enabled") {
+ scaleSetUpdateParams.Enabled = &scalesetEnabled
+ }
+
+ if cmd.Flags().Changed("runner-bootstrap-timeout") {
+ scaleSetUpdateParams.RunnerBootstrapTimeout = &scalesetRunnerBootstrapTimeout
+ }
+
+ if cmd.Flags().Changed("extra-specs") {
+ data, err := asRawMessage([]byte(scalesetExtraSpecs))
+ if err != nil {
+ return err
+ }
+ scaleSetUpdateParams.ExtraSpecs = data
+ }
+
+ if scalesetExtraSpecsFile != "" {
+ data, err := extraSpecsFromFile(scalesetExtraSpecsFile)
+ if err != nil {
+ return err
+ }
+ scaleSetUpdateParams.ExtraSpecs = data
+ }
+
+ updateScaleSetReq.ScalesetID = args[0]
+ updateScaleSetReq.Body = scaleSetUpdateParams
+ response, err := apiCli.Scalesets.UpdateScaleSet(updateScaleSetReq, authToken)
+ if err != nil {
+ return err
+ }
+
+ formatOneScaleSet(response.Payload)
+ return nil
+ },
+}
+
+func init() {
+ scalesetListCmd.Flags().StringVarP(&scalesetRepository, "repo", "r", "", "List all scale sets within this repository.")
+ scalesetListCmd.Flags().StringVarP(&scalesetOrganization, "org", "o", "", "List all scale sets within this organization.")
+ scalesetListCmd.Flags().StringVarP(&scalesetEnterprise, "enterprise", "e", "", "List all scale sets within this enterprise.")
+ scalesetListCmd.MarkFlagsMutuallyExclusive("repo", "org", "enterprise")
+	scalesetListCmd.Flags().StringVar(&endpointName, "endpoint", "", "When referencing an entity by name, specify the endpoint if multiple entities with the same name exist.")
+
+ scaleSetUpdateCmd.Flags().StringVar(&scalesetImage, "image", "", "The provider-specific image name to use for runners in this scale set.")
+ scaleSetUpdateCmd.Flags().StringVar(&scalesetFlavor, "flavor", "", "The flavor to use for the runners in this scale set.")
+	scaleSetUpdateCmd.Flags().StringVar(&scalesetName, "name", "", "The new name of the scale set.")
+ scaleSetUpdateCmd.Flags().StringVar(&scalesetOSType, "os-type", "linux", "Operating system type (windows, linux, etc).")
+ scaleSetUpdateCmd.Flags().StringVar(&scalesetOSArch, "os-arch", "amd64", "Operating system architecture (amd64, arm, etc).")
+ scaleSetUpdateCmd.Flags().StringVar(&scalesetRunnerPrefix, "runner-prefix", "", "The name prefix to use for runners in this scale set.")
+	scaleSetUpdateCmd.Flags().UintVar(&scalesetMaxRunners, "max-runners", 5, "The maximum number of runners this scale set will create.")
+	scaleSetUpdateCmd.Flags().UintVar(&scalesetMinIdleRunners, "min-idle-runners", 1, "The minimum number of idle self-hosted runners this scale set will attempt to maintain.")
+ scaleSetUpdateCmd.Flags().StringVar(&scalesetGitHubRunnerGroup, "runner-group", "", "The GitHub runner group in which all runners of this scale set will be added.")
+ scaleSetUpdateCmd.Flags().BoolVar(&scalesetEnabled, "enabled", false, "Enable this scale set.")
+	scaleSetUpdateCmd.Flags().UintVar(&scalesetRunnerBootstrapTimeout, "runner-bootstrap-timeout", 20, "Duration in minutes after which a runner is considered failed if it does not join GitHub.")
+ scaleSetUpdateCmd.Flags().StringVar(&scalesetExtraSpecsFile, "extra-specs-file", "", "A file containing a valid json which will be passed to the IaaS provider managing the scale set.")
+ scaleSetUpdateCmd.Flags().StringVar(&scalesetExtraSpecs, "extra-specs", "", "A valid json which will be passed to the IaaS provider managing the scale set.")
+ scaleSetUpdateCmd.MarkFlagsMutuallyExclusive("extra-specs-file", "extra-specs")
+
+ scaleSetAddCmd.Flags().StringVar(&scalesetProvider, "provider-name", "", "The name of the provider where runners will be created.")
+ scaleSetAddCmd.Flags().StringVar(&scalesetImage, "image", "", "The provider-specific image name to use for runners in this scale set.")
+ scaleSetAddCmd.Flags().StringVar(&scalesetName, "name", "", "The name of the scale set. This option is mandatory.")
+	scaleSetAddCmd.Flags().StringVar(&scalesetFlavor, "flavor", "", "The flavor to use for runners in this scale set.")
+ scaleSetAddCmd.Flags().StringVar(&scalesetRunnerPrefix, "runner-prefix", "", "The name prefix to use for runners in this scale set.")
+ scaleSetAddCmd.Flags().StringVar(&scalesetOSType, "os-type", "linux", "Operating system type (windows, linux, etc).")
+ scaleSetAddCmd.Flags().StringVar(&scalesetOSArch, "os-arch", "amd64", "Operating system architecture (amd64, arm, etc).")
+ scaleSetAddCmd.Flags().StringVar(&scalesetExtraSpecsFile, "extra-specs-file", "", "A file containing a valid json which will be passed to the IaaS provider managing the scale set.")
+ scaleSetAddCmd.Flags().StringVar(&scalesetExtraSpecs, "extra-specs", "", "A valid json which will be passed to the IaaS provider managing the scale set.")
+ scaleSetAddCmd.Flags().StringVar(&scalesetGitHubRunnerGroup, "runner-group", "", "The GitHub runner group in which all runners of this scale set will be added.")
+	scaleSetAddCmd.Flags().UintVar(&scalesetMaxRunners, "max-runners", 5, "The maximum number of runners this scale set will create.")
+	scaleSetAddCmd.Flags().UintVar(&scalesetRunnerBootstrapTimeout, "runner-bootstrap-timeout", 20, "Duration in minutes after which a runner is considered failed if it does not join GitHub.")
+	scaleSetAddCmd.Flags().UintVar(&scalesetMinIdleRunners, "min-idle-runners", 1, "The minimum number of idle self-hosted runners this scale set will attempt to maintain.")
+ scaleSetAddCmd.Flags().BoolVar(&scalesetEnabled, "enabled", false, "Enable this scale set.")
+	scaleSetAddCmd.Flags().StringVar(&endpointName, "endpoint", "", "When referencing an entity by name, specify the endpoint if multiple entities with the same name exist.")
+ scaleSetAddCmd.MarkFlagRequired("provider-name") //nolint
+ scaleSetAddCmd.MarkFlagRequired("name") //nolint
+ scaleSetAddCmd.MarkFlagRequired("image") //nolint
+ scaleSetAddCmd.MarkFlagRequired("flavor") //nolint
+
+ scaleSetAddCmd.Flags().StringVarP(&scalesetRepository, "repo", "r", "", "Add the new scale set within this repository.")
+ scaleSetAddCmd.Flags().StringVarP(&scalesetOrganization, "org", "o", "", "Add the new scale set within this organization.")
+ scaleSetAddCmd.Flags().StringVarP(&scalesetEnterprise, "enterprise", "e", "", "Add the new scale set within this enterprise.")
+ scaleSetAddCmd.MarkFlagsMutuallyExclusive("repo", "org", "enterprise")
+ scaleSetAddCmd.MarkFlagsMutuallyExclusive("extra-specs-file", "extra-specs")
+
+ scalesetCmd.AddCommand(
+ scalesetListCmd,
+ scaleSetShowCmd,
+ scaleSetDeleteCmd,
+ scaleSetUpdateCmd,
+ scaleSetAddCmd,
+ )
+
+ rootCmd.AddCommand(scalesetCmd)
+}
+
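+// formatScaleSets renders a list of scale sets as JSON or as a table,
+// depending on the selected output format.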
+func formatScaleSets(scaleSets []params.ScaleSet) {
+ if outputFormat == common.OutputFormatJSON {
+ printAsJSON(scaleSets)
+ return
+ }
+ t := table.NewWriter()
+ header := table.Row{"ID", "Scale Set Name", "Image", "Flavor", "Belongs to", "Level", "Runner Group", "Enabled", "Runner Prefix", "Provider"}
+ t.AppendHeader(header)
+
+ for _, scaleSet := range scaleSets {
+ var belongsTo string
+ var level string
+
+ switch {
+ case scaleSet.RepoID != "" && scaleSet.RepoName != "":
+ belongsTo = scaleSet.RepoName
+ level = entityTypeRepo
+ case scaleSet.OrgID != "" && scaleSet.OrgName != "":
+ belongsTo = scaleSet.OrgName
+ level = entityTypeOrg
+ case scaleSet.EnterpriseID != "" && scaleSet.EnterpriseName != "":
+ belongsTo = scaleSet.EnterpriseName
+ level = entityTypeEnterprise
+ }
+ t.AppendRow(table.Row{scaleSet.ID, scaleSet.Name, scaleSet.Image, scaleSet.Flavor, belongsTo, level, scaleSet.GitHubRunnerGroup, scaleSet.Enabled, scaleSet.GetRunnerPrefix(), scaleSet.ProviderName})
+ t.AppendSeparator()
+ }
+ fmt.Println(t.Render())
+}
+
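+// formatOneScaleSet renders a detailed, field-by-field view of a single scale
+// set, including any instances it currently manages.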
+func formatOneScaleSet(scaleSet params.ScaleSet) {
+ if outputFormat == common.OutputFormatJSON {
+ printAsJSON(scaleSet)
+ return
+ }
+ t := table.NewWriter()
+ rowConfigAutoMerge := table.RowConfig{AutoMerge: true}
+
+ header := table.Row{"Field", "Value"}
+
+ var belongsTo string
+ var level string
+
+ switch {
+ case scaleSet.RepoID != "" && scaleSet.RepoName != "":
+ belongsTo = scaleSet.RepoName
+ level = entityTypeRepo
+ case scaleSet.OrgID != "" && scaleSet.OrgName != "":
+ belongsTo = scaleSet.OrgName
+ level = entityTypeOrg
+ case scaleSet.EnterpriseID != "" && scaleSet.EnterpriseName != "":
+ belongsTo = scaleSet.EnterpriseName
+ level = entityTypeEnterprise
+ }
+
+ t.AppendHeader(header)
+ t.AppendRow(table.Row{"ID", scaleSet.ID})
+ t.AppendRow(table.Row{"Scale Set ID", scaleSet.ScaleSetID})
+	t.AppendRow(table.Row{"Name", scaleSet.Name})
+ t.AppendRow(table.Row{"Provider Name", scaleSet.ProviderName})
+ t.AppendRow(table.Row{"Image", scaleSet.Image})
+ t.AppendRow(table.Row{"Flavor", scaleSet.Flavor})
+ t.AppendRow(table.Row{"OS Type", scaleSet.OSType})
+ t.AppendRow(table.Row{"OS Architecture", scaleSet.OSArch})
+ t.AppendRow(table.Row{"Max Runners", scaleSet.MaxRunners})
+ t.AppendRow(table.Row{"Min Idle Runners", scaleSet.MinIdleRunners})
+ t.AppendRow(table.Row{"Runner Bootstrap Timeout", scaleSet.RunnerBootstrapTimeout})
+ t.AppendRow(table.Row{"Belongs to", belongsTo})
+ t.AppendRow(table.Row{"Level", level})
+ t.AppendRow(table.Row{"Enabled", scaleSet.Enabled})
+ t.AppendRow(table.Row{"Runner Prefix", scaleSet.GetRunnerPrefix()})
+ t.AppendRow(table.Row{"Extra specs", string(scaleSet.ExtraSpecs)})
+ t.AppendRow(table.Row{"GitHub Runner Group", scaleSet.GitHubRunnerGroup})
+
+ if len(scaleSet.Instances) > 0 {
+ for _, instance := range scaleSet.Instances {
+ t.AppendRow(table.Row{"Instances", fmt.Sprintf("%s (%s)", instance.Name, instance.ID)}, rowConfigAutoMerge)
+ }
+ }
+
+ t.SetColumnConfigs([]table.ColumnConfig{
+ {Number: 1, AutoMerge: true},
+ {Number: 2, AutoMerge: false, WidthMax: 100},
+ })
+ fmt.Println(t.Render())
+}
diff --git a/cmd/garm-cli/cmd/util.go b/cmd/garm-cli/cmd/util.go
new file mode 100644
index 00000000..26f57abb
--- /dev/null
+++ b/cmd/garm-cli/cmd/util.go
@@ -0,0 +1,108 @@
+package cmd
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/google/uuid"
+
+ apiClientEnterprises "github.com/cloudbase/garm/client/enterprises"
+ apiClientOrgs "github.com/cloudbase/garm/client/organizations"
+ apiClientRepos "github.com/cloudbase/garm/client/repositories"
+)
+
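+// resolveRepository resolves a repository friendly name (owner/name) or UUID
+// to the repository ID. The endpoint narrows the lookup when multiple
+// repositories with the same name exist across endpoints.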
+func resolveRepository(nameOrID, endpoint string) (string, error) {
+ if nameOrID == "" {
+ return "", fmt.Errorf("missing repository name or ID")
+ }
+ entityID, err := uuid.Parse(nameOrID)
+ if err == nil {
+ return entityID.String(), nil
+ }
+
+ parts := strings.SplitN(nameOrID, "/", 2)
+ if len(parts) < 2 {
+ // format of friendly name is invalid for a repository.
+ // Return the string as is.
+ return nameOrID, nil
+ }
+
+ listReposReq := apiClientRepos.NewListReposParams()
+ listReposReq.Owner = &parts[0]
+ listReposReq.Name = &parts[1]
+ if endpoint != "" {
+ listReposReq.Endpoint = &endpoint
+ }
+ response, err := apiCli.Repositories.ListRepos(listReposReq, authToken)
+ if err != nil {
+ return "", err
+ }
+ if len(response.Payload) == 0 {
+ return "", fmt.Errorf("repository %s was not found", nameOrID)
+ }
+
+ if len(response.Payload) > 1 {
+ return "", fmt.Errorf("multiple repositories with the name %s exist, please use the repository ID or specify the --endpoint parameter", nameOrID)
+ }
+ return response.Payload[0].ID, nil
+}
+
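+// resolveOrganization resolves an organization name or UUID to the
+// organization ID, optionally scoped to a single endpoint.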
+func resolveOrganization(nameOrID, endpoint string) (string, error) {
+ if nameOrID == "" {
+ return "", fmt.Errorf("missing organization name or ID")
+ }
+ entityID, err := uuid.Parse(nameOrID)
+ if err == nil {
+ return entityID.String(), nil
+ }
+
+ listOrgsReq := apiClientOrgs.NewListOrgsParams()
+ listOrgsReq.Name = &nameOrID
+ if endpoint != "" {
+ listOrgsReq.Endpoint = &endpoint
+ }
+ response, err := apiCli.Organizations.ListOrgs(listOrgsReq, authToken)
+ if err != nil {
+ return "", err
+ }
+
+ if len(response.Payload) == 0 {
+ return "", fmt.Errorf("organization %s was not found", nameOrID)
+ }
+
+ if len(response.Payload) > 1 {
+ return "", fmt.Errorf("multiple organizations with the name %s exist, please use the organization ID or specify the --endpoint parameter", nameOrID)
+ }
+
+ return response.Payload[0].ID, nil
+}
+
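+// resolveEnterprise resolves an enterprise name or UUID to the enterprise ID,
+// optionally scoped to a single endpoint.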
+func resolveEnterprise(nameOrID, endpoint string) (string, error) {
+ if nameOrID == "" {
+ return "", fmt.Errorf("missing enterprise name or ID")
+ }
+ entityID, err := uuid.Parse(nameOrID)
+ if err == nil {
+ return entityID.String(), nil
+ }
+
+ listEnterprisesReq := apiClientEnterprises.NewListEnterprisesParams()
+	listEnterprisesReq.Name = &nameOrID
+ if endpoint != "" {
+ listEnterprisesReq.Endpoint = &endpoint
+ }
+ response, err := apiCli.Enterprises.ListEnterprises(listEnterprisesReq, authToken)
+ if err != nil {
+ return "", err
+ }
+
+ if len(response.Payload) == 0 {
+ return "", fmt.Errorf("enterprise %s was not found", nameOrID)
+ }
+
+ if len(response.Payload) > 1 {
+ return "", fmt.Errorf("multiple enterprises with the name %s exist, please use the enterprise ID or specify the --endpoint parameter", nameOrID)
+ }
+
+ return response.Payload[0].ID, nil
+}
diff --git a/cmd/garm-cli/cmd/version.go b/cmd/garm-cli/cmd/version.go
index 99253aed..ce51142f 100644
--- a/cmd/garm-cli/cmd/version.go
+++ b/cmd/garm-cli/cmd/version.go
@@ -18,6 +18,9 @@ import (
"fmt"
"github.com/spf13/cobra"
+
+ apiClientControllerInfo "github.com/cloudbase/garm/client/controller_info"
+ "github.com/cloudbase/garm/util/appdefaults"
)
// runnerCmd represents the runner command
@@ -25,8 +28,19 @@ var versionCmd = &cobra.Command{
Use: "version",
SilenceUsage: true,
Short: "Print version and exit",
- Run: func(cmd *cobra.Command, args []string) {
- fmt.Println(Version)
+ Run: func(_ *cobra.Command, _ []string) {
+ serverVersion := "v0.0.0-unknown"
+
+ if !needsInit {
+ showInfo := apiClientControllerInfo.NewControllerInfoParams()
+ response, err := apiCli.ControllerInfo.ControllerInfo(showInfo, authToken)
+ if err == nil {
+ serverVersion = response.Payload.Version
+ }
+ }
+
+ fmt.Printf("garm-cli: %s\n", appdefaults.GetVersion())
+ fmt.Printf("garm server: %s\n", serverVersion)
},
}
diff --git a/cmd/garm-cli/common/cobra.go b/cmd/garm-cli/common/cobra.go
new file mode 100644
index 00000000..399a4b92
--- /dev/null
+++ b/cmd/garm-cli/common/cobra.go
@@ -0,0 +1,44 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+package common
+
+import "fmt"
+
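+// OutputFormat selects how CLI results are rendered. It implements the
+// pflag.Value interface (String/Set/Type), so it can be bound directly to a
+// cobra flag; for example (flag name illustrative):
+//
+//	cmd.PersistentFlags().Var(&outputFormat, "format", "Output format (table, json)")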
+type OutputFormat string
+
+const (
+ OutputFormatTable OutputFormat = "table"
+ OutputFormatJSON OutputFormat = "json"
+)
+
+func (o *OutputFormat) String() string {
+ if o == nil {
+ return ""
+ }
+ return string(*o)
+}
+
+func (o *OutputFormat) Set(value string) error {
+ switch value {
+ case "table", "json":
+ *o = OutputFormat(value)
+ default:
+ return fmt.Errorf("allowed formats are: json, table")
+ }
+ return nil
+}
+
+func (o *OutputFormat) Type() string {
+ return "string"
+}
diff --git a/cmd/garm-cli/common/common.go b/cmd/garm-cli/common/common.go
index 8164b9a7..1f607cb4 100644
--- a/cmd/garm-cli/common/common.go
+++ b/cmd/garm-cli/common/common.go
@@ -15,13 +15,22 @@
package common
import (
+ "encoding/json"
"errors"
+ "fmt"
+ "os"
+ "runtime"
+ "sort"
+ "strings"
+ "time"
"github.com/manifoldco/promptui"
"github.com/nbutton23/zxcvbn-go"
+
+ "github.com/cloudbase/garm-provider-common/util"
)
-func PromptPassword(label string) (string, error) {
+func PromptPassword(label string, compareTo string) (string, error) {
if label == "" {
label = "Password"
}
@@ -30,6 +39,9 @@ func PromptPassword(label string) (string, error) {
if passwordStenght.Score < 4 {
return errors.New("password is too weak")
}
+ if compareTo != "" && compareTo != input {
+ return errors.New("passwords do not match")
+ }
return nil
}
@@ -39,14 +51,13 @@ func PromptPassword(label string) (string, error) {
Mask: '*',
}
result, err := prompt.Run()
-
if err != nil {
return "", err
}
return result, nil
}
-func PromptString(label string) (string, error) {
+func PromptString(label string, a ...interface{}) (string, error) {
validate := func(input string) error {
if len(input) == 0 {
return errors.New("empty input not allowed")
@@ -55,13 +66,260 @@ func PromptString(label string) (string, error) {
}
prompt := promptui.Prompt{
- Label: label,
+ Label: fmt.Sprintf(label, a...),
Validate: validate,
}
result, err := prompt.Run()
-
if err != nil {
return "", err
}
return result, nil
}
+
+func PrintWebsocketMessage(_ int, msg []byte) error {
+ fmt.Println(util.SanitizeLogEntry(string(msg)))
+ return nil
+}
+
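+// LogFormatter filters and pretty-prints JSON (slog) log records streamed
+// from the server over websockets.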
+type LogFormatter struct {
+ MinLevel string
+ AttributeFilters map[string]string
+ EnableColor bool
+}
+
+type LogRecord struct {
+ Time string `json:"time"`
+ Level string `json:"level"`
+ Msg string `json:"msg"`
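+	// Attrs holds any extra attributes. encoding/json does not support an
+	// ",inline" option, so this map is populated manually via a second
+	// unmarshal in FormatWebsocketMessage.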
+	Attrs map[string]interface{} `json:"-"`
+}
+
+// Color codes for different log levels
+const (
+ ColorReset = "\033[0m"
+ ColorRed = "\033[31m"
+ ColorYellow = "\033[33m"
+ ColorBlue = "\033[34m"
+ ColorMagenta = "\033[35m"
+ ColorCyan = "\033[36m"
+ ColorWhite = "\033[37m"
+ ColorGray = "\033[90m"
+)
+
+func (lf *LogFormatter) colorizeLevel(level string) string {
+ if !lf.EnableColor {
+ return level
+ }
+
+ levelUpper := strings.TrimSpace(strings.ToUpper(level))
+ switch levelUpper {
+ case "ERROR":
+ return ColorRed + level + ColorReset
+ case "WARN", "WARNING":
+ return ColorYellow + level + ColorReset
+ case "INFO":
+ return ColorBlue + level + ColorReset
+ case "DEBUG":
+ return ColorMagenta + level + ColorReset
+ default:
+ return level
+ }
+}
+
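+// shouldFilterLevel reports whether a record at the given level should be
+// dropped because it falls below MinLevel. Unknown levels are never filtered.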
+func (lf *LogFormatter) shouldFilterLevel(level string) bool {
+ if lf.MinLevel == "" {
+ return false
+ }
+
+ levelMap := map[string]int{
+ "DEBUG": 0,
+ "INFO": 1,
+ "WARN": 2,
+ "ERROR": 3,
+ }
+
+ minLevelNum, exists := levelMap[strings.ToUpper(lf.MinLevel)]
+ if !exists {
+ return false
+ }
+
+ currentLevelNum, exists := levelMap[strings.ToUpper(level)]
+ if !exists {
+ return false
+ }
+
+ return currentLevelNum < minLevelNum
+}
+
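+// matchesAttributeFilters reports whether the record matches at least one
+// configured filter (OR semantics). A "msg" filter matches as a substring of
+// the message; other filters compare attribute values for equality. With no
+// filters configured, every record matches.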
+func (lf *LogFormatter) matchesAttributeFilters(attrs map[string]interface{}, msg string) bool {
+ if len(lf.AttributeFilters) == 0 {
+ return true
+ }
+
+ for key, expectedValue := range lf.AttributeFilters {
+ // Special handling for message filtering
+ if key == "msg" {
+ if strings.Contains(msg, expectedValue) {
+ return true
+ }
+ }
+
+ // Regular attribute filtering
+ actualValue, exists := attrs[key]
+ if exists {
+ actualStr := fmt.Sprintf("%v", actualValue)
+ if actualStr == expectedValue {
+ return true
+ }
+ }
+ }
+
+ return false
+}
+
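+// FormatWebsocketMessage parses a JSON log record, applies level and attribute
+// filters, and prints a fixed-width, optionally colorized line. Messages that
+// are not valid JSON are printed as-is after sanitization.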
+func (lf *LogFormatter) FormatWebsocketMessage(_ int, msg []byte) error {
+ // Try to parse as JSON log record
+ var logRecord LogRecord
+ err := json.Unmarshal(msg, &logRecord)
+ if err != nil {
+ // If it's not JSON, print as-is (sanitized)
+ _, err = fmt.Println(util.SanitizeLogEntry(string(msg)))
+ return err
+ }
+
+ // Apply level filtering
+ if lf.shouldFilterLevel(logRecord.Level) {
+ return nil
+ }
+
+ // Parse additional attributes from the JSON
+ var fullRecord map[string]interface{}
+ if err := json.Unmarshal(msg, &fullRecord); err == nil {
+ // Remove standard fields and keep only attributes
+ delete(fullRecord, "time")
+ delete(fullRecord, "level")
+ delete(fullRecord, "msg")
+ logRecord.Attrs = fullRecord
+ }
+
+ // Apply attribute filtering
+ if !lf.matchesAttributeFilters(logRecord.Attrs, logRecord.Msg) {
+ return nil
+ }
+
+ // Format timestamp to fixed width
+ timeStr := logRecord.Time
+ if t, err := time.Parse(time.RFC3339Nano, logRecord.Time); err == nil {
+ timeStr = t.Format("2006-01-02 15:04:05.000")
+ }
+
+ // Format log level to fixed width (5 characters)
+ levelStr := lf.colorizeLevel(fmt.Sprintf("%-5s", strings.ToUpper(logRecord.Level)))
+
+ // Highlight message if it matches a msg filter
+ msgStr := logRecord.Msg
+ if msgFilter, hasMsgFilter := lf.AttributeFilters["msg"]; hasMsgFilter {
+ if strings.Contains(msgStr, msgFilter) && lf.EnableColor {
+ msgStr = ColorYellow + msgStr + ColorReset
+ }
+ }
+
+ output := fmt.Sprintf("%s [%s] %s", timeStr, levelStr, msgStr)
+
+ // Add attributes if any
+ if len(logRecord.Attrs) > 0 {
+ // Get sorted keys for consistent output
+ var keys []string
+ for k := range logRecord.Attrs {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+
+ var attrPairs []string
+ for _, k := range keys {
+ v := logRecord.Attrs[k]
+ attrStr := fmt.Sprintf("%s=%v", k, v)
+
+ // Highlight filtered attributes
+ if filterValue, isFiltered := lf.AttributeFilters[k]; isFiltered && fmt.Sprintf("%v", v) == filterValue {
+ if lf.EnableColor {
+ attrStr = ColorYellow + attrStr + ColorGray
+ }
+ } else if lf.EnableColor {
+ attrStr = ColorGray + attrStr
+ }
+
+ attrPairs = append(attrPairs, attrStr)
+ }
+ if len(attrPairs) > 0 {
+ if lf.EnableColor {
+ output += " " + strings.Join(attrPairs, " ") + ColorReset
+ } else {
+ output += " " + strings.Join(attrPairs, " ")
+ }
+ }
+ }
+
+ fmt.Println(output)
+ return nil
+}
+
+// supportsColor checks if the current terminal/environment supports ANSI colors.
+// This is best effort; there is no fully reliable way to detect terminal color
+// support. If your terminal does not support color but this function returns
+// true, set NO_COLOR=1 to disable it.
+func supportsColor() bool {
+ // Check NO_COLOR environment variable (universal standard)
+ if os.Getenv("NO_COLOR") != "" {
+ return false
+ }
+
+ // Check FORCE_COLOR environment variable
+ if os.Getenv("FORCE_COLOR") != "" {
+ return true
+ }
+
+ // On Windows, check for modern terminal support
+ if runtime.GOOS == "windows" {
+ // Check for Windows Terminal
+ if os.Getenv("WT_SESSION") != "" {
+ return true
+ }
+ // Check for ConEmu
+ if os.Getenv("ConEmuANSI") == "ON" {
+ return true
+ }
+ // Check for other modern terminals
+ term := os.Getenv("TERM")
+ if strings.Contains(term, "color") || term == "xterm-256color" || term == "screen-256color" {
+ return true
+ }
+ // Modern PowerShell and cmd.exe with VT processing
+ if os.Getenv("TERM_PROGRAM") != "" {
+ return true
+ }
+ // Default to false for older Windows cmd.exe
+ return false
+ }
+
+ // On Unix-like systems, check TERM
+ term := os.Getenv("TERM")
+ if term == "" || term == "dumb" {
+ return false
+ }
+
+ return true
+}
+
+func NewLogFormatter(minLevel string, attributeFilters map[string]string, color bool) *LogFormatter {
+ var enableColor bool
+ if color && supportsColor() {
+ enableColor = true
+ }
+
+ return &LogFormatter{
+ MinLevel: minLevel,
+ AttributeFilters: attributeFilters,
+ EnableColor: enableColor,
+ }
+}
diff --git a/cmd/garm-cli/config/config.go b/cmd/garm-cli/config/config.go
index 6f6b197c..cf1cf1d2 100644
--- a/cmd/garm-cli/config/config.go
+++ b/cmd/garm-cli/config/config.go
@@ -15,13 +15,13 @@
package config
import (
+ "errors"
"fmt"
"os"
"path/filepath"
"sync"
"github.com/BurntSushi/toml"
- "github.com/pkg/errors"
runnerErrors "github.com/cloudbase/garm-provider-common/errors"
)
@@ -34,11 +34,11 @@ const (
func getConfigFilePath() (string, error) {
configDir, err := getHomeDir()
if err != nil {
- return "", errors.Wrap(err, "fetching home folder")
+ return "", fmt.Errorf("error fetching home folder: %w", err)
}
if err := ensureHomeDir(configDir); err != nil {
- return "", errors.Wrap(err, "ensuring config dir")
+ return "", fmt.Errorf("error ensuring config dir: %w", err)
}
cfgFile := filepath.Join(configDir, DefaultConfigFileName)
@@ -48,7 +48,7 @@ func getConfigFilePath() (string, error) {
func LoadConfig() (*Config, error) {
cfgFile, err := getConfigFilePath()
if err != nil {
- return nil, errors.Wrap(err, "fetching config")
+ return nil, fmt.Errorf("error fetching config: %w", err)
}
if _, err := os.Stat(cfgFile); err != nil {
@@ -56,12 +56,12 @@ func LoadConfig() (*Config, error) {
// return empty config
return &Config{}, nil
}
- return nil, errors.Wrap(err, "accessing config file")
+ return nil, fmt.Errorf("error accessing config file: %w", err)
}
var config Config
if _, err := toml.DecodeFile(cfgFile, &config); err != nil {
- return nil, errors.Wrap(err, "decoding toml")
+ return nil, fmt.Errorf("error decoding toml: %w", err)
}
return &config, nil
@@ -157,17 +157,17 @@ func (c *Config) SaveConfig() error {
cfgFile, err := getConfigFilePath()
if err != nil {
if !errors.Is(err, os.ErrNotExist) {
- return errors.Wrap(err, "getting config")
+ return fmt.Errorf("error getting config: %w", err)
}
}
cfgHandle, err := os.Create(cfgFile)
if err != nil {
- return errors.Wrap(err, "getting file handle")
+ return fmt.Errorf("error getting file handle: %w", err)
}
encoder := toml.NewEncoder(cfgHandle)
if err := encoder.Encode(c); err != nil {
- return errors.Wrap(err, "saving config")
+ return fmt.Errorf("error saving config: %w", err)
}
return nil
diff --git a/cmd/garm-cli/config/home.go b/cmd/garm-cli/config/home.go
index b6043289..11821e9c 100644
--- a/cmd/garm-cli/config/home.go
+++ b/cmd/garm-cli/config/home.go
@@ -15,19 +15,19 @@
package config
import (
+ "errors"
+ "fmt"
"os"
-
- "github.com/pkg/errors"
)
func ensureHomeDir(folder string) error {
if _, err := os.Stat(folder); err != nil {
if !errors.Is(err, os.ErrNotExist) {
- return errors.Wrap(err, "checking home dir")
+ return fmt.Errorf("error checking home dir: %w", err)
}
if err := os.MkdirAll(folder, 0o710); err != nil {
- return errors.Wrapf(err, "creating %s", folder)
+ return fmt.Errorf("error creating %s: %w", folder, err)
}
}
diff --git a/cmd/garm-cli/config/home_nix.go b/cmd/garm-cli/config/home_nix.go
index 92c99bad..323f29d7 100644
--- a/cmd/garm-cli/config/home_nix.go
+++ b/cmd/garm-cli/config/home_nix.go
@@ -1,20 +1,31 @@
//go:build !windows
// +build !windows
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
package config
import (
+ "fmt"
"os"
"path/filepath"
-
- "github.com/pkg/errors"
)
func getHomeDir() (string, error) {
home, err := os.UserHomeDir()
-
if err != nil {
- return "", errors.Wrap(err, "fetching home dir")
+ return "", fmt.Errorf("error fetching home dir: %w", err)
}
return filepath.Join(home, ".local", "share", DefaultAppFolder), nil
diff --git a/cmd/garm-cli/config/home_windows.go b/cmd/garm-cli/config/home_windows.go
index d34379b4..c70fb645 100644
--- a/cmd/garm-cli/config/home_windows.go
+++ b/cmd/garm-cli/config/home_windows.go
@@ -1,6 +1,19 @@
//go:build windows && !linux
// +build windows,!linux
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
package config
import (
diff --git a/cmd/garm/main.go b/cmd/garm/main.go
index 04a45f0b..cba3a064 100644
--- a/cmd/garm/main.go
+++ b/cmd/garm/main.go
@@ -18,15 +18,20 @@ import (
"context"
"flag"
"fmt"
- "io"
"log"
+ "log/slog"
"net"
"net/http"
"os"
"os/signal"
+ "runtime"
"syscall"
"time"
+ "github.com/gorilla/handlers"
+ "github.com/gorilla/mux"
+ lumberjack "gopkg.in/natefinch/lumberjack.v2"
+
"github.com/cloudbase/garm-provider-common/util"
"github.com/cloudbase/garm/apiserver/controllers"
"github.com/cloudbase/garm/apiserver/routers"
@@ -34,15 +39,19 @@ import (
"github.com/cloudbase/garm/config"
"github.com/cloudbase/garm/database"
"github.com/cloudbase/garm/database/common"
+ "github.com/cloudbase/garm/database/watcher"
+ "github.com/cloudbase/garm/locking"
"github.com/cloudbase/garm/metrics"
- "github.com/cloudbase/garm/runner"
+ "github.com/cloudbase/garm/params"
+ "github.com/cloudbase/garm/runner" //nolint:typecheck
+ runnerMetrics "github.com/cloudbase/garm/runner/metrics"
+ "github.com/cloudbase/garm/runner/providers"
+ garmUtil "github.com/cloudbase/garm/util"
"github.com/cloudbase/garm/util/appdefaults"
"github.com/cloudbase/garm/websocket"
- lumberjack "gopkg.in/natefinch/lumberjack.v2"
-
- "github.com/gorilla/handlers"
- "github.com/gorilla/mux"
- "github.com/pkg/errors"
+ "github.com/cloudbase/garm/workers/cache"
+ "github.com/cloudbase/garm/workers/entity"
+ "github.com/cloudbase/garm/workers/provider"
)
var (
@@ -50,41 +59,26 @@ var (
version = flag.Bool("version", false, "prints version")
)
-var Version string
-
var signals = []os.Signal{
os.Interrupt,
syscall.SIGTERM,
}
-func maybeInitController(db common.Store) error {
- if _, err := db.ControllerInfo(); err == nil {
- return nil
+func maybeInitController(db common.Store) (params.ControllerInfo, error) {
+ if info, err := db.ControllerInfo(); err == nil {
+ return info, nil
}
- if _, err := db.InitController(); err != nil {
- return errors.Wrap(err, "initializing controller")
+ info, err := db.InitController()
+ if err != nil {
+ return params.ControllerInfo{}, fmt.Errorf("error initializing controller: %w", err)
}
- return nil
+ return info, nil
}
-func main() {
- flag.Parse()
- if *version {
- fmt.Println(Version)
- return
- }
- ctx, stop := signal.NotifyContext(context.Background(), signals...)
- defer stop()
- fmt.Println(ctx)
-
- cfg, err := config.NewConfig(*conf)
- if err != nil {
- log.Fatalf("Fetching config: %+v", err)
- }
-
- logWriter, err := util.GetLoggingWriter(cfg.Default.LogFile)
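+// setupLogging configures slog with a handler that writes to the configured
+// log destination in the configured format and, when the log streamer is
+// enabled, a second JSON handler that mirrors records to websocket clients.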
+func setupLogging(ctx context.Context, logCfg config.Logging, hub *websocket.Hub) {
+ logWriter, err := util.GetLoggingWriter(logCfg.LogFile)
if err != nil {
log.Fatalf("fetching log writer: %+v", err)
}
@@ -102,38 +96,179 @@ func main() {
// we got a SIGHUP. Rotate log file.
if logger, ok := logWriter.(*lumberjack.Logger); ok {
if err := logger.Rotate(); err != nil {
- log.Printf("failed to rotate log file: %v", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to rotate log file")
}
}
}
}
}()
- var writers []io.Writer = []io.Writer{
- logWriter,
+ var logLevel slog.Level
+ switch logCfg.LogLevel {
+ case config.LevelDebug:
+ logLevel = slog.LevelDebug
+ case config.LevelInfo:
+ logLevel = slog.LevelInfo
+ case config.LevelWarn:
+ logLevel = slog.LevelWarn
+ case config.LevelError:
+ logLevel = slog.LevelError
+ default:
+ logLevel = slog.LevelInfo
}
+
+ // logger options
+ opts := slog.HandlerOptions{
+ AddSource: logCfg.LogSource,
+ Level: logLevel,
+ }
+
+ var fileHan slog.Handler
+ switch logCfg.LogFormat {
+ case config.FormatJSON:
+ fileHan = slog.NewJSONHandler(logWriter, &opts)
+ default:
+ fileHan = slog.NewTextHandler(logWriter, &opts)
+ }
+
+ handlers := []slog.Handler{
+ fileHan,
+ }
+
+ if hub != nil {
+ wsHan := slog.NewJSONHandler(hub, &opts)
+ handlers = append(handlers, wsHan)
+ }
+
+ wrapped := &garmUtil.SlogMultiHandler{
+ Handlers: handlers,
+ }
+ slog.SetDefault(slog.New(wrapped))
+}
+
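+// maybeUpdateURLsFromConfig backfills the controller metadata, callback and
+// webhook URLs from the config file, but only for values not already set in
+// the database.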
+func maybeUpdateURLsFromConfig(cfg config.Config, store common.Store) error {
+ info, err := store.ControllerInfo()
+ if err != nil {
+ return fmt.Errorf("error fetching controller info: %w", err)
+ }
+
+ var updateParams params.UpdateControllerParams
+
+ if info.MetadataURL == "" && cfg.Default.MetadataURL != "" {
+ updateParams.MetadataURL = &cfg.Default.MetadataURL
+ }
+
+ if info.CallbackURL == "" && cfg.Default.CallbackURL != "" {
+ updateParams.CallbackURL = &cfg.Default.CallbackURL
+ }
+
+ if info.WebhookURL == "" && cfg.Default.WebhookURL != "" {
+ updateParams.WebhookURL = &cfg.Default.WebhookURL
+ }
+
+ if updateParams.MetadataURL == nil && updateParams.CallbackURL == nil && updateParams.WebhookURL == nil {
+ // nothing to update
+ return nil
+ }
+
+ _, err = store.UpdateController(updateParams)
+ if err != nil {
+ return fmt.Errorf("error updating controller info: %w", err)
+ }
+ return nil
+}
+
+//gocyclo:ignore
+func main() {
+ flag.Parse()
+ if *version {
+ fmt.Println(appdefaults.GetVersion())
+ return
+ }
+ ctx, stop := signal.NotifyContext(context.Background(), signals...)
+ defer stop()
+ watcher.InitWatcher(ctx)
+
+ ctx = auth.GetAdminContext(ctx)
+
+ cfg, err := config.NewConfig(*conf)
+ if err != nil {
+ log.Fatalf("Fetching config: %+v", err) //nolint:gocritic
+ }
+
+ logCfg := cfg.GetLoggingConfig()
var hub *websocket.Hub
- if cfg.Default.EnableLogStreamer {
+ if logCfg.EnableLogStreamer != nil && *logCfg.EnableLogStreamer {
hub = websocket.NewHub(ctx)
if err := hub.Start(); err != nil {
log.Fatal(err)
}
defer hub.Stop() //nolint
- writers = append(writers, hub)
}
+ setupLogging(ctx, logCfg, hub)
- multiWriter := io.MultiWriter(writers...)
- log.SetOutput(multiWriter)
-
+ // Migrate credentials to the new format. This field will be read
+ // by the DB migration logic.
+ cfg.Database.MigrateCredentials = cfg.Github
db, err := database.NewDatabase(ctx, cfg.Database)
if err != nil {
log.Fatal(err)
}
- if err := maybeInitController(db); err != nil {
+ controllerInfo, err := maybeInitController(db)
+ if err != nil {
log.Fatal(err)
}
+ // Local locker for now. Will be configurable in the future,
+ // as we add scale-out capability to GARM.
+ lock, err := locking.NewLocalLocker(ctx, db)
+ if err != nil {
+ log.Fatalf("failed to create locker: %q", err)
+ }
+
+ if err := locking.RegisterLocker(lock); err != nil {
+ log.Fatalf("failed to register locker: %q", err)
+ }
+
+ if err := maybeUpdateURLsFromConfig(*cfg, db); err != nil {
+ log.Fatal(err)
+ }
+
+	cacheWorker := cache.NewWorker(ctx, db)
+ if err := cacheWorker.Start(); err != nil {
+ log.Fatalf("failed to start cache worker: %+v", err)
+ }
+
+ providers, err := providers.LoadProvidersFromConfig(ctx, *cfg, controllerInfo.ControllerID.String())
+ if err != nil {
+ log.Fatalf("loading providers: %+v", err)
+ }
+
+ entityController, err := entity.NewController(ctx, db, providers)
+ if err != nil {
+ log.Fatalf("failed to create entity controller: %+v", err)
+ }
+ if err := entityController.Start(); err != nil {
+ log.Fatalf("failed to start entity controller: %+v", err)
+ }
+
+ instanceTokenGetter, err := auth.NewInstanceTokenGetter(cfg.JWTAuth.Secret)
+ if err != nil {
+ log.Fatalf("failed to create instance token getter: %+v", err)
+ }
+
+ providerWorker, err := provider.NewWorker(ctx, db, providers, instanceTokenGetter)
+ if err != nil {
+ log.Fatalf("failed to create provider worker: %+v", err)
+ }
+ if err := providerWorker.Start(); err != nil {
+ log.Fatalf("failed to start provider worker: %+v", err)
+ }
+
runner, err := runner.NewRunner(ctx, *cfg, db)
if err != nil {
log.Fatalf("failed to create controller: %+v", err)
@@ -145,7 +280,7 @@ func main() {
}
authenticator := auth.NewAuthenticator(cfg.JWTAuth, db)
- controller, err := controllers.NewAPIController(runner, authenticator, hub)
+ controller, err := controllers.NewAPIController(runner, authenticator, hub, cfg.APIServer)
if err != nil {
log.Fatalf("failed to create controller: %+v", err)
}
@@ -165,24 +300,39 @@ func main() {
log.Fatal(err)
}
+ urlsRequiredMiddleware, err := auth.NewUrlsRequiredMiddleware(db)
+ if err != nil {
+ log.Fatal(err)
+ }
+
metricsMiddleware, err := auth.NewMetricsMiddleware(cfg.JWTAuth)
if err != nil {
log.Fatal(err)
}
- router := routers.NewAPIRouter(controller, multiWriter, jwtMiddleware, initMiddleware, instanceMiddleware)
+ router := routers.NewAPIRouter(controller, jwtMiddleware, initMiddleware, urlsRequiredMiddleware, instanceMiddleware, cfg.Default.EnableWebhookManagement)
+ // Add WebUI routes
+ router = routers.WithWebUI(router, cfg.APIServer)
+
+ // start the metrics collector
if cfg.Metrics.Enable {
- log.Printf("registering prometheus metrics collectors")
- if err := metrics.RegisterCollectors(runner); err != nil {
+ slog.InfoContext(ctx, "setting up metric routes")
+ router = routers.WithMetricsRouter(router, cfg.Metrics.DisableAuth, metricsMiddleware)
+
+ slog.InfoContext(ctx, "register metrics")
+ if err := metrics.RegisterMetrics(); err != nil {
log.Fatal(err)
}
- log.Printf("setting up metric routes")
- router = routers.WithMetricsRouter(router, cfg.Metrics.DisableAuth, metricsMiddleware)
+
+ slog.InfoContext(ctx, "start metrics collection")
+ runnerMetrics.CollectObjectMetric(ctx, runner, cfg.Metrics.Duration())
}
if cfg.Default.DebugServer {
- log.Printf("setting up debug routes")
+ runtime.SetBlockProfileRate(1)
+ runtime.SetMutexProfileFraction(1)
+ slog.InfoContext(ctx, "setting up debug routes")
router = routers.WithDebugServer(router)
}
@@ -193,6 +343,8 @@ func main() {
methodsOk := handlers.AllowedMethods([]string{"GET", "HEAD", "POST", "PUT", "OPTIONS", "DELETE"})
headersOk := handlers.AllowedHeaders([]string{"X-Requested-With", "Content-Type", "Authorization"})
+ // nolint:golangci-lint,gosec
+ // G112: Potential Slowloris Attack because ReadHeaderTimeout is not configured in the http.Server
srv := &http.Server{
Addr: cfg.APIServer.BindAddress(),
// Pass our instance of gorilla/mux in.
@@ -207,25 +359,41 @@ func main() {
go func() {
if cfg.APIServer.UseTLS {
if err := srv.ServeTLS(listener, cfg.APIServer.TLSConfig.CRT, cfg.APIServer.TLSConfig.Key); err != nil {
- log.Printf("Listening: %+v", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "Listening")
}
} else {
if err := srv.Serve(listener); err != http.ErrServerClosed {
- log.Printf("Listening: %+v", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "Listening")
}
}
}()
<-ctx.Done()
+
+ slog.InfoContext(ctx, "shutting down http server")
shutdownCtx, shutdownCancel := context.WithTimeout(context.Background(), 60*time.Second)
defer shutdownCancel()
if err := srv.Shutdown(shutdownCtx); err != nil {
- log.Printf("graceful api server shutdown failed: %+v", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "graceful api server shutdown failed")
}
- log.Printf("waiting for runner to stop")
+	slog.InfoContext(ctx, "shutting down cache worker")
+	if err := cacheWorker.Stop(); err != nil {
+		slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to stop cache worker")
+	}
+
+ slog.InfoContext(ctx, "shutting down entity controller")
+ if err := entityController.Stop(); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to stop entity controller")
+ }
+
+ slog.InfoContext(ctx, "shutting down provider worker")
+ if err := providerWorker.Stop(); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to stop provider worker")
+ }
+
+	slog.InfoContext(ctx, "waiting for runner to stop")
if err := runner.Wait(); err != nil {
- log.Printf("failed to shutdown workers: %+v", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to shutdown workers")
os.Exit(1)
}
}
diff --git a/config/config.go b/config/config.go
index a65c4668..31a16ae2 100644
--- a/config/config.go
+++ b/config/config.go
@@ -15,40 +15,78 @@
package config
import (
+ "context"
"crypto/tls"
"crypto/x509"
+ "encoding/pem"
"fmt"
- "log"
+ "log/slog"
"net"
+ "net/http"
"net/url"
"os"
"path/filepath"
"time"
"github.com/BurntSushi/toml"
+ "github.com/bradleyfalzon/ghinstallation/v2"
+ zxcvbn "github.com/nbutton23/zxcvbn-go"
+ "golang.org/x/oauth2"
+
"github.com/cloudbase/garm/params"
"github.com/cloudbase/garm/util/appdefaults"
- zxcvbn "github.com/nbutton23/zxcvbn-go"
- "github.com/pkg/errors"
)
-type DBBackendType string
+type (
+ DBBackendType string
+ LogLevel string
+ LogFormat string
+ GithubAuthType string
+)
const (
// MySQLBackend represents the MySQL DB backend
MySQLBackend DBBackendType = "mysql"
// SQLiteBackend represents the SQLite3 DB backend
SQLiteBackend DBBackendType = "sqlite3"
+	// EnvironmentVariablePrefix is the prefix for all GARM environment
+	// variables; variables with this prefix cannot be overwritten via the
+	// external provider.
+ EnvironmentVariablePrefix = "GARM"
+)
+
+const (
+ // LevelDebug is the debug log level
+ LevelDebug LogLevel = "debug"
+ // LevelInfo is the info log level
+ LevelInfo LogLevel = "info"
+ // LevelWarn is the warn log level
+ LevelWarn LogLevel = "warn"
+ // LevelError is the error log level
+ LevelError LogLevel = "error"
+)
+
+const (
+ // FormatText is the text log format
+ FormatText LogFormat = "text"
+ // FormatJSON is the json log format
+ FormatJSON LogFormat = "json"
+)
+
+const (
+ // GithubAuthTypePAT is the OAuth token based authentication
+ GithubAuthTypePAT GithubAuthType = "pat"
+ // GithubAuthTypeApp is the GitHub App based authentication
+ GithubAuthTypeApp GithubAuthType = "app"
)
// NewConfig returns a new Config
func NewConfig(cfgFile string) (*Config, error) {
var config Config
if _, err := toml.DecodeFile(cfgFile, &config); err != nil {
- return nil, errors.Wrap(err, "decoding toml")
+ return nil, fmt.Errorf("error decoding toml: %w", err)
}
if err := config.Validate(); err != nil {
- return nil, errors.Wrap(err, "validating config")
+ return nil, fmt.Errorf("error validating config: %w", err)
}
return &config, nil
}
@@ -61,38 +99,43 @@ type Config struct {
Providers []Provider `toml:"provider,omitempty" json:"provider,omitempty"`
Github []Github `toml:"github,omitempty"`
JWTAuth JWTAuth `toml:"jwt_auth" json:"jwt-auth"`
+ Logging Logging `toml:"logging" json:"logging"`
}
// Validate validates the config
func (c *Config) Validate() error {
if err := c.APIServer.Validate(); err != nil {
- return errors.Wrap(err, "validating APIServer config")
+ return fmt.Errorf("error validating apiserver config: %w", err)
}
if err := c.Database.Validate(); err != nil {
- return errors.Wrap(err, "validating database config")
+ return fmt.Errorf("error validating database config: %w", err)
}
if err := c.Default.Validate(); err != nil {
- return errors.Wrap(err, "validating default section")
+ return fmt.Errorf("error validating default config: %w", err)
}
for _, gh := range c.Github {
if err := gh.Validate(); err != nil {
- return errors.Wrap(err, "validating github config")
+ return fmt.Errorf("error validating github config: %w", err)
}
}
if err := c.JWTAuth.Validate(); err != nil {
- return errors.Wrap(err, "validating jwt config")
+ return fmt.Errorf("error validating jwt_auth config: %w", err)
+ }
+
+ if err := c.Logging.Validate(); err != nil {
+ return fmt.Errorf("error validating logging config: %w", err)
}
providerNames := map[string]int{}
for _, provider := range c.Providers {
if err := provider.Validate(); err != nil {
- return errors.Wrap(err, "validating provider")
+ return fmt.Errorf("error validating provider %s: %w", provider.Name, err)
}
- providerNames[provider.Name] += 1
+ providerNames[provider.Name]++
}
for name, count := range providerNames {
@@ -104,32 +147,133 @@ func (c *Config) Validate() error {
return nil
}
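+// GetLoggingConfig returns the logging section with defaults applied, falling
+// back to the deprecated fields in the Default section for backwards
+// compatibility.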
+func (c *Config) GetLoggingConfig() Logging {
+ logging := c.Logging
+ if logging.LogFormat == "" {
+ logging.LogFormat = FormatText
+ }
+
+ if logging.LogLevel == "" {
+ logging.LogLevel = LevelInfo
+ }
+
+ // maintain backwards compatibility
+ if logging.LogFile == "" && c.Default.LogFile != "" {
+ logging.LogFile = c.Default.LogFile
+ }
+ if logging.EnableLogStreamer == nil && c.Default.EnableLogStreamer != nil {
+ logging.EnableLogStreamer = c.Default.EnableLogStreamer
+ }
+
+ return logging
+}
+
+type Logging struct {
+ // LogFile is the location of the log file.
+ LogFile string `toml:"log_file,omitempty" json:"log-file"`
+ // EnableLogStreamer enables the log streamer over websockets.
+ EnableLogStreamer *bool `toml:"enable_log_streamer,omitempty" json:"enable-log-streamer,omitempty"`
+	// LogLevel is the log level.
+	LogLevel LogLevel `toml:"log_level" json:"log-level"`
+	// LogFormat is the log format.
+	LogFormat LogFormat `toml:"log_format" json:"log-format"`
+ // LogSource enables the log source.
+ LogSource bool `toml:"log_source" json:"log-source"`
+}
+
+func (l *Logging) Validate() error {
+ if l.LogLevel != LevelDebug && l.LogLevel != LevelInfo && l.LogLevel != LevelWarn && l.LogLevel != LevelError && l.LogLevel != "" {
+ return fmt.Errorf("invalid log level: %s", l.LogLevel)
+ }
+
+ if l.LogFormat != FormatText && l.LogFormat != FormatJSON && l.LogFormat != "" {
+ return fmt.Errorf("invalid log format: %s", l.LogFormat)
+ }
+
+ return nil
+}
+
type Default struct {
// CallbackURL is the URL where the instances can send back status reports.
CallbackURL string `toml:"callback_url" json:"callback-url"`
// MetadataURL is the URL where instances can fetch information they may need
// to set themselves up.
MetadataURL string `toml:"metadata_url" json:"metadata-url"`
+ // WebhookURL is the URL that will be installed as a webhook target in github.
+ WebhookURL string `toml:"webhook_url" json:"webhook-url"`
+ // EnableWebhookManagement enables the webhook management API.
+ EnableWebhookManagement bool `toml:"enable_webhook_management" json:"enable-webhook-management"`
+
// LogFile is the location of the log file.
LogFile string `toml:"log_file,omitempty" json:"log-file"`
- EnableLogStreamer bool `toml:"enable_log_streamer"`
+ EnableLogStreamer *bool `toml:"enable_log_streamer,omitempty" json:"enable-log-streamer,omitempty"`
DebugServer bool `toml:"debug_server" json:"debug-server"`
}
func (d *Default) Validate() error {
- if d.CallbackURL == "" {
- return fmt.Errorf("missing callback_url")
- }
- _, err := url.Parse(d.CallbackURL)
- if err != nil {
- return errors.Wrap(err, "validating callback_url")
+ if d.CallbackURL != "" {
+ _, err := url.ParseRequestURI(d.CallbackURL)
+ if err != nil {
+ return fmt.Errorf("invalid callback_url: %w", err)
+ }
}
- if d.MetadataURL == "" {
- return fmt.Errorf("missing metadata-url")
+ if d.MetadataURL != "" {
+ if _, err := url.ParseRequestURI(d.MetadataURL); err != nil {
+ return fmt.Errorf("invalid metadata_url: %w", err)
+ }
}
- if _, err := url.Parse(d.MetadataURL); err != nil {
- return errors.Wrap(err, "validating metadata_url")
+
+ if d.WebhookURL != "" {
+ if _, err := url.ParseRequestURI(d.WebhookURL); err != nil {
+ return fmt.Errorf("invalid webhook_url: %w", err)
+ }
+ }
+ return nil
+}
+
+type GithubPAT struct {
+ OAuth2Token string `toml:"oauth2_token" json:"oauth2-token"`
+}
+
+type GithubApp struct {
+ AppID int64 `toml:"app_id" json:"app-id"`
+ PrivateKeyPath string `toml:"private_key_path" json:"private-key-path"`
+ InstallationID int64 `toml:"installation_id" json:"installation-id"`
+}
+
+func (a *GithubApp) PrivateKeyBytes() ([]byte, error) {
+ keyBytes, err := os.ReadFile(a.PrivateKeyPath)
+ if err != nil {
+ return nil, fmt.Errorf("reading private_key_path: %w", err)
+ }
+ return keyBytes, nil
+}
+
+func (a *GithubApp) Validate() error {
+ if a.AppID == 0 {
+ return fmt.Errorf("missing app_id")
+ }
+ if a.PrivateKeyPath == "" {
+ return fmt.Errorf("missing private_key_path")
+ }
+ if a.InstallationID == 0 {
+ return fmt.Errorf("missing installation_id")
+ }
+
+ if _, err := os.Stat(a.PrivateKeyPath); err != nil {
+ return fmt.Errorf("error accessing private_key_path: %w", err)
+ }
+ // Read the private key as bytes
+ keyBytes, err := os.ReadFile(a.PrivateKeyPath)
+ if err != nil {
+ return fmt.Errorf("reading private_key_path: %w", err)
+ }
+	block, _ := pem.Decode(keyBytes)
+	if block == nil {
+		return fmt.Errorf("failed to decode PEM block from private_key_path")
+	}
+	// Parse the private key as PKCS#1
+	_, err = x509.ParsePKCS1PrivateKey(block.Bytes)
+	if err != nil {
+		return fmt.Errorf("parsing private_key_path: %w", err)
}
return nil
@@ -138,8 +282,11 @@ func (d *Default) Validate() error {
// Github hold configuration options specific to interacting with github.
// Currently that is just a OAuth2 personal token.
type Github struct {
- Name string `toml:"name" json:"name"`
- Description string `toml:"description" json:"description"`
+ Name string `toml:"name" json:"name"`
+ Description string `toml:"description" json:"description"`
+ // OAuth2Token is the personal access token used to authenticate with the
+ // github API. This is deprecated and will be removed in the future.
+ // Use the PAT section instead.
OAuth2Token string `toml:"oauth2_token" json:"oauth2-token"`
APIBaseURL string `toml:"api_base_url" json:"api-base-url"`
UploadBaseURL string `toml:"upload_base_url" json:"upload-base-url"`
@@ -147,7 +294,17 @@ type Github struct {
// CACertBundlePath is the path on disk to a CA certificate bundle that
// can validate the endpoints defined above. Leave empty if not using a
// self signed certificate.
- CACertBundlePath string `toml:"ca_cert_bundle" json:"ca-cert-bundle"`
+ CACertBundlePath string `toml:"ca_cert_bundle" json:"ca-cert-bundle"`
+ AuthType GithubAuthType `toml:"auth_type" json:"auth-type"`
+ PAT GithubPAT `toml:"pat" json:"pat"`
+ App GithubApp `toml:"app" json:"app"`
+}
+
+func (g *Github) GetAuthType() GithubAuthType {
+ if g.AuthType == "" {
+ return GithubAuthTypePAT
+ }
+ return g.AuthType
}
func (g *Github) APIEndpoint() string {
@@ -163,12 +320,12 @@ func (g *Github) CACertBundle() ([]byte, error) {
return nil, nil
}
if _, err := os.Stat(g.CACertBundlePath); err != nil {
- return nil, errors.Wrap(err, "accessing CA bundle")
+ return nil, fmt.Errorf("error accessing ca_cert_bundle: %w", err)
}
contents, err := os.ReadFile(g.CACertBundlePath)
if err != nil {
- return nil, errors.Wrap(err, "reading CA bundle")
+ return nil, fmt.Errorf("reading ca_cert_bundle: %w", err)
}
roots := x509.NewCertPool()
@@ -197,21 +354,107 @@ func (g *Github) BaseEndpoint() string {
}
func (g *Github) Validate() error {
- if g.OAuth2Token == "" {
- return fmt.Errorf("missing github oauth2 token")
+ if g.Name == "" {
+ return fmt.Errorf("missing credentials name")
+ }
+
+ if g.APIBaseURL != "" {
+ if _, err := url.ParseRequestURI(g.APIBaseURL); err != nil {
+ return fmt.Errorf("invalid api_base_url: %w", err)
+ }
+ }
+
+ if g.UploadBaseURL != "" {
+ if _, err := url.ParseRequestURI(g.UploadBaseURL); err != nil {
+ return fmt.Errorf("invalid upload_base_url: %w", err)
+ }
+ }
+
+ if g.BaseURL != "" {
+ if _, err := url.ParseRequestURI(g.BaseURL); err != nil {
+ return fmt.Errorf("invalid base_url: %w", err)
+ }
+ }
+
+ switch g.AuthType {
+ case GithubAuthTypeApp:
+ if err := g.App.Validate(); err != nil {
+ return fmt.Errorf("invalid github app config: %w", err)
+ }
+ default:
+ if g.OAuth2Token == "" && g.PAT.OAuth2Token == "" {
+ return fmt.Errorf("missing github oauth2 token")
+ }
+ if g.OAuth2Token != "" {
+ slog.Warn("the github.oauth2_token option is deprecated, please use the PAT section")
+ }
}
return nil
}
+func (g *Github) HTTPClient(ctx context.Context) (*http.Client, error) {
+ if err := g.Validate(); err != nil {
+ return nil, fmt.Errorf("invalid github config: %w", err)
+ }
+ var roots *x509.CertPool
+ caBundle, err := g.CACertBundle()
+ if err != nil {
+ return nil, fmt.Errorf("fetching CA cert bundle: %w", err)
+ }
+ if caBundle != nil {
+ roots = x509.NewCertPool()
+ ok := roots.AppendCertsFromPEM(caBundle)
+ if !ok {
+ return nil, fmt.Errorf("failed to parse CA cert")
+ }
+ }
+ // nolint:golangci-lint,gosec,godox
+ // TODO: set TLS MinVersion
+ httpTransport := &http.Transport{
+ TLSClientConfig: &tls.Config{
+ RootCAs: roots,
+ },
+ }
+
+ var tc *http.Client
+ switch g.AuthType {
+ case GithubAuthTypeApp:
+ itr, err := ghinstallation.NewKeyFromFile(httpTransport, g.App.AppID, g.App.InstallationID, g.App.PrivateKeyPath)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create github app installation transport: %w", err)
+ }
+
+ tc = &http.Client{Transport: itr}
+ default:
+ httpClient := &http.Client{Transport: httpTransport}
+ ctx = context.WithValue(ctx, oauth2.HTTPClient, httpClient)
+
+ token := g.PAT.OAuth2Token
+ if token == "" {
+ token = g.OAuth2Token
+ }
+
+ ts := oauth2.StaticTokenSource(
+ &oauth2.Token{AccessToken: token},
+ )
+ tc = oauth2.NewClient(ctx, ts)
+ }
+
+ return tc, nil
+}
+
// Provider holds access information for a particular provider.
// A provider offers compute resources on which we spin up self hosted runners.
type Provider struct {
Name string `toml:"name" json:"name"`
ProviderType params.ProviderType `toml:"provider_type" json:"provider-type"`
Description string `toml:"description" json:"description"`
- LXD LXD `toml:"lxd" json:"lxd"`
- External External `toml:"external" json:"external"`
+ // DisableJITConfig explicitly disables JIT configuration and forces runner registration
+ // tokens to be used. This may be needed if a provider has not yet been updated to
+ // support JIT configuration.
+ DisableJITConfig bool `toml:"disable_jit_config" json:"disable-jit-config"`
+ External External `toml:"external" json:"external"`
}
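
With LXD moved out of tree, every provider entry is now external. A sketch of one with JIT configuration explicitly disabled; the name and paths are placeholders:

```go
package config

import "github.com/cloudbase/garm/params"

var sampleProvider = Provider{
	Name:         "openstack",
	ProviderType: params.ExternalProvider,
	Description:  "external OpenStack provider",
	// Fall back to runner registration tokens until the provider
	// binary supports JIT runner configuration.
	DisableJITConfig: true,
	External: External{
		ProviderDir: "/opt/garm/providers.d/openstack",
	},
}
```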
func (p *Provider) Validate() error {
@@ -220,13 +463,9 @@ func (p *Provider) Validate() error {
}
switch p.ProviderType {
- case params.LXDProvider:
- if err := p.LXD.Validate(); err != nil {
- return errors.Wrap(err, "validating LXD provider info")
- }
case params.ExternalProvider:
if err := p.External.Validate(); err != nil {
- return errors.Wrap(err, "validating external provider info")
+ return fmt.Errorf("invalid external provider config: %w", err)
}
default:
return fmt.Errorf("unknown provider type: %s", p.ProviderType)
@@ -246,24 +485,29 @@ type Database struct {
// Don't lose or change this. It will invalidate all encrypted data
// in the DB. This field must be set and must be exactly 32 characters.
Passphrase string `toml:"passphrase"`
+
+ // MigrateCredentials is a list of github credentials that need to be migrated
+ // from the config file to the database. This field will be removed once GARM
+ // reaches version 0.2.x. It's only meant to be used for the migration process.
+ MigrateCredentials []Github `toml:"-"`
}
// GormParams returns the database type and connection URI
func (d *Database) GormParams() (dbType DBBackendType, uri string, err error) {
if err := d.Validate(); err != nil {
- return "", "", errors.Wrap(err, "validating database config")
+ return "", "", fmt.Errorf("error validating database config: %w", err)
}
dbType = d.DbBackend
switch dbType {
case MySQLBackend:
uri, err = d.MySQL.ConnectionString()
if err != nil {
- return "", "", errors.Wrap(err, "fetching mysql connection string")
+ return "", "", fmt.Errorf("error fetching mysql connection string: %w", err)
}
case SQLiteBackend:
uri, err = d.SQLite.ConnectionString()
if err != nil {
- return "", "", errors.Wrap(err, "fetching sqlite3 connection string")
+ return "", "", fmt.Errorf("error fetching sqlite3 connection string: %w", err)
}
default:
return "", "", fmt.Errorf("invalid database backend: %s", dbType)
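
A sketch of how a consumer could open the database from the returned parameters. The GORM driver imports are illustrative; the actual driver selection is not part of this change:

```go
package database

import (
	"fmt"

	"gorm.io/driver/mysql"
	"gorm.io/driver/sqlite" // illustrative driver choice
	"gorm.io/gorm"

	"github.com/cloudbase/garm/config"
)

func openDB(cfg config.Database) (*gorm.DB, error) {
	dbType, uri, err := cfg.GormParams()
	if err != nil {
		return nil, fmt.Errorf("getting DB params: %w", err)
	}
	switch dbType {
	case config.MySQLBackend:
		return gorm.Open(mysql.Open(uri), &gorm.Config{})
	case config.SQLiteBackend:
		return gorm.Open(sqlite.Open(uri), &gorm.Config{})
	default:
		return nil, fmt.Errorf("unsupported backend: %s", dbType)
	}
}
```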
@@ -289,11 +533,11 @@ func (d *Database) Validate() error {
switch d.DbBackend {
case MySQLBackend:
if err := d.MySQL.Validate(); err != nil {
- return errors.Wrap(err, "validating mysql config")
+ return fmt.Errorf("validating mysql config: %w", err)
}
case SQLiteBackend:
if err := d.SQLite.Validate(); err != nil {
- return errors.Wrap(err, "validating sqlite3 config")
+ return fmt.Errorf("validating sqlite3 config: %w", err)
}
default:
return fmt.Errorf("invalid database backend: %s", d.DbBackend)
@@ -303,7 +547,8 @@ func (d *Database) Validate() error {
// SQLite is the config entry for the sqlite3 section
type SQLite struct {
- DBFile string `toml:"db_file" json:"db-file"`
+ DBFile string `toml:"db_file" json:"db-file"`
+ BusyTimeoutSeconds int `toml:"busy_timeout_seconds" json:"busy-timeout-seconds"`
}
func (s *SQLite) Validate() error {
@@ -317,13 +562,18 @@ func (s *SQLite) Validate() error {
parent := filepath.Dir(s.DBFile)
if _, err := os.Stat(parent); err != nil {
- return errors.Wrapf(err, "accessing db_file parent dir: %s", parent)
+ return fmt.Errorf("parent directory of db_file does not exist: %w", err)
}
return nil
}
func (s *SQLite) ConnectionString() (string, error) {
- return fmt.Sprintf("%s?_journal_mode=WAL&_foreign_keys=ON", s.DBFile), nil
+ connectionString := fmt.Sprintf("%s?_journal_mode=WAL&_foreign_keys=ON", s.DBFile)
+ if s.BusyTimeoutSeconds > 0 {
+ timeout := s.BusyTimeoutSeconds * 1000
+ connectionString = fmt.Sprintf("%s&_busy_timeout=%d", connectionString, timeout)
+ }
+ return connectionString, nil
}
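
For illustration, the DSNs this produces (the db file path is a placeholder). Note that SQLite's _busy_timeout is expressed in milliseconds, hence the multiplication by 1000 above:

```go
package config

import "fmt"

func exampleSQLiteDSN() {
	cfg := SQLite{DBFile: "/etc/garm/garm.db"} // placeholder path
	uri, _ := cfg.ConnectionString()
	fmt.Println(uri) // /etc/garm/garm.db?_journal_mode=WAL&_foreign_keys=ON

	cfg.BusyTimeoutSeconds = 5
	uri, _ = cfg.ConnectionString()
	fmt.Println(uri) // ...&_foreign_keys=ON&_busy_timeout=5000
}
```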
// MySQL is the config entry for the mysql section
@@ -377,8 +627,54 @@ func (t *TLSConfig) Validate() error {
}
type Metrics struct {
+ // DisableAuth defines if the API endpoint will be protected by
+ // JWT authentication
DisableAuth bool `toml:"disable_auth" json:"disable-auth"`
- Enable bool `toml:"enable" json:"enable"`
+ // Enable defines whether the API endpoint for metrics collection will
+ // be enabled
+ Enable bool `toml:"enable" json:"enable"`
+ // Period defines the interval at which internal metrics are updated
+ // and propagated to the /metrics endpoint
+ Period time.Duration `toml:"period" json:"period"`
+}
+
+// ParseDuration parses the configured period and returns a zero duration along
+// with an error if the value is invalid.
+func (m *Metrics) ParseDuration() (time.Duration, error) {
+ duration, err := time.ParseDuration(fmt.Sprint(m.Period))
+ if err != nil {
+ return 0, err
+ }
+ return duration, nil
+}
+
+// Duration returns the configured duration or the default duration if no value
+// is configured or the configured value is invalid.
+func (m *Metrics) Duration() time.Duration {
+ duration, err := m.ParseDuration()
+ if err != nil {
+ slog.With(slog.Any("error", err)).Error(fmt.Sprintf("defined duration %s is invalid", m.Period))
+ }
+ if duration == 0 {
+ slog.Debug(fmt.Sprintf("using default duration %s for metrics update interval", appdefaults.DefaultMetricsUpdateInterval))
+ return appdefaults.DefaultMetricsUpdateInterval
+ }
+ return duration
+}
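
A short sketch of the fallback behavior, assuming the config package; the default comes from appdefaults.DefaultMetricsUpdateInterval:

```go
package config

import (
	"fmt"
	"time"
)

func exampleMetricsDuration() {
	m := Metrics{Enable: true, Period: 30 * time.Second}
	fmt.Println(m.Duration()) // 30s

	// A zero (unset) period falls back to
	// appdefaults.DefaultMetricsUpdateInterval.
	m.Period = 0
	fmt.Println(m.Duration())
}
```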
+
+// WebUI holds configuration for the web UI
+type WebUI struct {
+ EnableWebUI bool `toml:"enable" json:"enable"`
+}
+
+// Validate validates the WebUI config
+func (w *WebUI) Validate() error {
+ return nil
+}
+
+// GetWebappPath returns the URL path under which the web UI is served
+func (w *WebUI) GetWebappPath() string {
+ return "/ui/"
}
// APIServer holds configuration for the API server
@@ -389,6 +685,7 @@ type APIServer struct {
UseTLS bool `toml:"use_tls" json:"use-tls"`
TLSConfig TLSConfig `toml:"tls" json:"tls"`
CORSOrigins []string `toml:"cors_origins" json:"cors-origins"`
+ WebUI WebUI `toml:"webui" json:"webui"`
}
// BindAddress returns a host:port string.
@@ -400,7 +697,7 @@ func (a *APIServer) BindAddress() string {
func (a *APIServer) Validate() error {
if a.UseTLS {
if err := a.TLSConfig.Validate(); err != nil {
- return errors.Wrap(err, "TLS validation failed")
+ return fmt.Errorf("invalid tls config: %w", err)
}
}
if a.Port > 65535 || a.Port < 1 {
@@ -414,6 +711,11 @@ func (a *APIServer) Validate() error {
// when we try to bind to it.
return fmt.Errorf("invalid IP address")
}
+
+ if err := a.WebUI.Validate(); err != nil {
+ return fmt.Errorf("invalid webui config: %w", err)
+ }
+
return nil
}
@@ -430,9 +732,10 @@ func (d *timeToLive) ParseDuration() (time.Duration, error) {
func (d *timeToLive) Duration() time.Duration {
duration, err := d.ParseDuration()
if err != nil {
- log.Printf("failed to parse duration: %s", err)
+ slog.With(slog.Any("error", err)).Error("failed to parse duration")
return appdefaults.DefaultJWTTTL
}
+ // nolint:golangci-lint,godox
// TODO(gabriel-samfira): should we have a minimum TTL?
if duration < appdefaults.DefaultJWTTTL {
return appdefaults.DefaultJWTTTL
@@ -444,7 +747,7 @@ func (d *timeToLive) Duration() time.Duration {
func (d *timeToLive) UnmarshalText(text []byte) error {
_, err := time.ParseDuration(string(text))
if err != nil {
- return errors.Wrap(err, "parsing time_to_live")
+ return fmt.Errorf("invalid duration: %w", err)
}
*d = timeToLive(text)
@@ -460,7 +763,7 @@ type JWTAuth struct {
// Validate validates the JWTAuth config
func (j *JWTAuth) Validate() error {
if _, err := j.TimeToLive.ParseDuration(); err != nil {
- return errors.Wrap(err, "parsing duration")
+ return fmt.Errorf("invalid time_to_live: %w", err)
}
if j.Secret == "" {
diff --git a/config/config_test.go b/config/config_test.go
index e7f8489f..bbf9e299 100644
--- a/config/config_test.go
+++ b/config/config_test.go
@@ -15,17 +15,21 @@
package config
import (
+ "context"
"os"
"path/filepath"
"testing"
"time"
- "github.com/cloudbase/garm/params"
- "github.com/cloudbase/garm/util/appdefaults"
+ "github.com/bradleyfalzon/ghinstallation/v2"
"github.com/stretchr/testify/require"
+ "golang.org/x/oauth2"
+
+ "github.com/cloudbase/garm/util/appdefaults"
)
var (
+ // nolint: golangci-lint,gosec
EncryptionPassphrase = "bocyasicgatEtenOubwonIbsudNutDom"
WeakEncryptionPassphrase = "1234567890abcdefghijklmnopqrstuv"
)
@@ -76,15 +80,7 @@ func getDefaultDatabaseConfig(dir string) Database {
}
func getDefaultProvidersConfig() []Provider {
- lxdConfig := getDefaultLXDConfig()
- return []Provider{
- {
- Name: "test_lxd",
- ProviderType: params.LXDProvider,
- Description: "test LXD provider",
- LXD: lxdConfig,
- },
- }
+ return []Provider{}
}
func getDefaultGithubConfig() []Github {
@@ -147,20 +143,20 @@ func TestDefaultSectionConfig(t *testing.T) {
errString: "",
},
{
- name: "CallbackURL cannot be empty",
+ name: "CallbackURL must be valid if set",
cfg: Default{
- CallbackURL: "",
+ CallbackURL: "bogus_url",
MetadataURL: cfg.MetadataURL,
},
- errString: "missing callback_url",
+ errString: "invalid callback_url",
},
{
- name: "MetadataURL cannot be empty",
+ name: "MetadataURL must be valid if set",
cfg: Default{
CallbackURL: cfg.CallbackURL,
- MetadataURL: "",
+ MetadataURL: "bogus_url",
},
- errString: "missing metadata-url",
+ errString: "invalid metadata_url",
},
}
@@ -238,7 +234,7 @@ func TestValidateAPIServerConfig(t *testing.T) {
TLSConfig: TLSConfig{},
UseTLS: true,
},
- errString: "TLS validation failed:*",
+ errString: "invalid tls config: missing crt or key",
},
{
name: "Skip TLS config validation if UseTLS is false",
@@ -393,6 +389,12 @@ func TestGormParams(t *testing.T) {
require.Equal(t, SQLiteBackend, dbType)
require.Equal(t, filepath.Join(dir, "garm.db?_journal_mode=WAL&_foreign_keys=ON"), uri)
+ cfg.SQLite.BusyTimeoutSeconds = 5
+ dbType, uri, err = cfg.GormParams()
+ require.Nil(t, err)
+ require.Equal(t, SQLiteBackend, dbType)
+ require.Equal(t, filepath.Join(dir, "garm.db?_journal_mode=WAL&_foreign_keys=ON&_busy_timeout=5000"), uri)
+
cfg.DbBackend = MySQLBackend
cfg.MySQL = getMySQLDefaultConfig()
cfg.SQLite = SQLite{}
@@ -401,7 +403,6 @@ func TestGormParams(t *testing.T) {
require.Nil(t, err)
require.Equal(t, MySQLBackend, dbType)
require.Equal(t, "test:test@tcp(127.0.0.1)/garm?charset=utf8&parseTime=True&loc=Local&timeout=5s", uri)
-
}
func TestSQLiteConfig(t *testing.T) {
@@ -442,7 +443,7 @@ func TestSQLiteConfig(t *testing.T) {
cfg: SQLite{
DBFile: "/i/dont/exist/test.db",
},
- errString: "accessing db_file parent dir:.*no such file or directory",
+ errString: "parent directory of db_file does not exist: stat.*",
},
}
@@ -497,7 +498,7 @@ func TestJWTAuthConfig(t *testing.T) {
Secret: cfg.Secret,
TimeToLive: "bogus",
},
- errString: "parsing duration: time: invalid duration*",
+ errString: "invalid time_to_live: time: invalid duration*",
},
}
@@ -516,7 +517,6 @@ func TestJWTAuthConfig(t *testing.T) {
func TestTimeToLiveDuration(t *testing.T) {
cfg := JWTAuth{
- Secret: EncryptionPassphrase,
TimeToLive: "48h",
}
@@ -565,3 +565,374 @@ func TestNewConfigInvalidConfig(t *testing.T) {
require.NotNil(t, err)
require.Regexp(t, "validating config", err.Error())
}
+
+func TestGithubConfig(t *testing.T) {
+ cfg := getDefaultGithubConfig()
+
+ tests := []struct {
+ name string
+ cfg Github
+ errString string
+ }{
+ {
+ name: "Config is valid",
+ cfg: cfg[0],
+ errString: "",
+ },
+ {
+ name: "BaseURL is invalid",
+ cfg: Github{
+ Name: "dummy_creds",
+ Description: "dummy github credentials",
+ BaseURL: "bogus",
+ AuthType: GithubAuthTypePAT,
+ PAT: GithubPAT{
+ OAuth2Token: "bogus",
+ },
+ },
+ errString: "invalid base_url: parse.*",
+ },
+ {
+ name: "APIBaseURL is invalid",
+ cfg: Github{
+ Name: "dummy_creds",
+ Description: "dummy github credentials",
+ APIBaseURL: "bogus",
+ AuthType: GithubAuthTypePAT,
+ PAT: GithubPAT{
+ OAuth2Token: "bogus",
+ },
+ },
+ errString: "invalid api_base_url: parse.*",
+ },
+ {
+ name: "UploadBaseURL is invalid",
+ cfg: Github{
+ Name: "dummy_creds",
+ Description: "dummy github credentials",
+ UploadBaseURL: "bogus",
+ AuthType: GithubAuthTypePAT,
+ PAT: GithubPAT{
+ OAuth2Token: "bogus",
+ },
+ },
+ errString: "invalid upload_base_url: parse.*",
+ },
+ {
+ name: "BaseURL is set and is valid",
+ cfg: Github{
+ Name: "dummy_creds",
+ Description: "dummy github credentials",
+ BaseURL: "https://github.example.com/",
+ AuthType: GithubAuthTypePAT,
+ PAT: GithubPAT{
+ OAuth2Token: "bogus",
+ },
+ },
+ },
+ {
+ name: "APIBaseURL is set and is valid",
+ cfg: Github{
+ Name: "dummy_creds",
+ Description: "dummy github credentials",
+ APIBaseURL: "https://github.example.com/api/v3",
+ AuthType: GithubAuthTypePAT,
+ PAT: GithubPAT{
+ OAuth2Token: "bogus",
+ },
+ },
+ },
+ {
+ name: "UploadBaseURL is set and is valid",
+ cfg: Github{
+ Name: "dummy_creds",
+ Description: "dummy github credentials",
+ UploadBaseURL: "https://github.example.com/uploads",
+ AuthType: GithubAuthTypePAT,
+ PAT: GithubPAT{
+ OAuth2Token: "bogus",
+ },
+ },
+ },
+ {
+ name: "OAuth2Token is empty",
+ cfg: Github{
+ Name: "dummy_creds",
+ Description: "dummy github credentials",
+ OAuth2Token: "",
+ },
+ errString: "missing github oauth2 token",
+ },
+ {
+ name: "Name is empty",
+ cfg: Github{
+ Name: "",
+ Description: "dummy github credentials",
+ OAuth2Token: "bogus",
+ },
+ errString: "missing credentials name",
+ },
+ {
+ name: "OAuth token is set in the PAT section",
+ cfg: Github{
+ Name: "dummy_creds",
+ Description: "dummy github credentials",
+ OAuth2Token: "",
+ AuthType: GithubAuthTypePAT,
+ PAT: GithubPAT{
+ OAuth2Token: "bogus",
+ },
+ },
+ },
+ {
+ name: "OAuth token is empty in the PAT section",
+ cfg: Github{
+ Name: "dummy_creds",
+ Description: "dummy github credentials",
+ OAuth2Token: "",
+ AuthType: GithubAuthTypePAT,
+ PAT: GithubPAT{
+ OAuth2Token: "",
+ },
+ },
+ errString: "missing github oauth2 token",
+ },
+ {
+ name: "Valid App section",
+ cfg: Github{
+ Name: "dummy_creds",
+ Description: "dummy github credentials",
+ OAuth2Token: "",
+ AuthType: GithubAuthTypeApp,
+ App: GithubApp{
+ AppID: 1,
+ InstallationID: 99,
+ PrivateKeyPath: "../testdata/certs/srv-key.pem",
+ },
+ },
+ },
+ {
+ name: "AppID is missing",
+ cfg: Github{
+ Name: "dummy_creds",
+ Description: "dummy github credentials",
+ OAuth2Token: "",
+ AuthType: GithubAuthTypeApp,
+ App: GithubApp{
+ AppID: 0,
+ InstallationID: 99,
+ PrivateKeyPath: "../testdata/certs/srv-key.pem",
+ },
+ },
+ errString: "missing app_id",
+ },
+ {
+ name: "InstallationID is missing",
+ cfg: Github{
+ Name: "dummy_creds",
+ Description: "dummy github credentials",
+ OAuth2Token: "",
+ AuthType: GithubAuthTypeApp,
+ App: GithubApp{
+ AppID: 1,
+ InstallationID: 0,
+ PrivateKeyPath: "../testdata/certs/srv-key.pem",
+ },
+ },
+ errString: "missing installation_id",
+ },
+ {
+ name: "PrivateKeyPath is missing",
+ cfg: Github{
+ Name: "dummy_creds",
+ Description: "dummy github credentials",
+ OAuth2Token: "",
+ AuthType: GithubAuthTypeApp,
+ App: GithubApp{
+ AppID: 1,
+ InstallationID: 99,
+ PrivateKeyPath: "",
+ },
+ },
+ errString: "missing private_key_path",
+ },
+ {
+ name: "PrivateKeyPath is invalid",
+ cfg: Github{
+ Name: "dummy_creds",
+ Description: "dummy github credentials",
+ OAuth2Token: "",
+ AuthType: GithubAuthTypeApp,
+ App: GithubApp{
+ AppID: 1,
+ InstallationID: 99,
+ PrivateKeyPath: "/i/dont/exist",
+ },
+ },
+ errString: "invalid github app config: error accessing private_key_path: stat /i/dont/exist: no such file or directory",
+ },
+ {
+ name: "PrivateKeyPath is not a valid RSA private key",
+ cfg: Github{
+ Name: "dummy_creds",
+ Description: "dummy github credentials",
+ OAuth2Token: "",
+ AuthType: GithubAuthTypeApp,
+ App: GithubApp{
+ AppID: 1,
+ InstallationID: 99,
+ PrivateKeyPath: "../testdata/certs/srv-pub.pem",
+ },
+ },
+ errString: "invalid github app config: parsing private_key_path: asn1: structure error:.*",
+ },
+ }
+
+ for _, tc := range tests {
+ t.Run(tc.name, func(t *testing.T) {
+ err := tc.cfg.Validate()
+ if tc.errString == "" {
+ require.Nil(t, err)
+ } else {
+ require.NotNil(t, err)
+ require.Regexp(t, tc.errString, err.Error())
+ }
+ })
+ }
+}
+
+func TestGithubAPIEndpoint(t *testing.T) {
+ cfg := getDefaultGithubConfig()
+
+ require.Equal(t, "https://api.github.com/", cfg[0].APIEndpoint())
+}
+
+func TestGithubAPIEndpointIsSet(t *testing.T) {
+ cfg := getDefaultGithubConfig()
+ cfg[0].APIBaseURL = "https://github.example.com/api/v3"
+
+ require.Equal(t, "https://github.example.com/api/v3", cfg[0].APIEndpoint())
+}
+
+func TestUploadEndpoint(t *testing.T) {
+ cfg := getDefaultGithubConfig()
+
+ require.Equal(t, "https://uploads.github.com/", cfg[0].UploadEndpoint())
+}
+
+func TestUploadEndpointIsSet(t *testing.T) {
+ cfg := getDefaultGithubConfig()
+ cfg[0].UploadBaseURL = "https://github.example.com/uploads"
+
+ require.Equal(t, "https://github.example.com/uploads", cfg[0].UploadEndpoint())
+}
+
+func TestGithubBaseURL(t *testing.T) {
+ cfg := getDefaultGithubConfig()
+
+ require.Equal(t, "https://github.com", cfg[0].BaseEndpoint())
+}
+
+func TestGithubBaseURLIsSet(t *testing.T) {
+ cfg := getDefaultGithubConfig()
+ cfg[0].BaseURL = "https://github.example.com"
+
+ require.Equal(t, "https://github.example.com", cfg[0].BaseEndpoint())
+}
+
+func TestCACertBundle(t *testing.T) {
+ cfg := Github{
+ Name: "dummy_creds",
+ Description: "dummy github credentials",
+ OAuth2Token: "bogus",
+ CACertBundlePath: "../testdata/certs/srv-pub.pem",
+ }
+
+ cert, err := cfg.CACertBundle()
+ require.Nil(t, err)
+ require.NotNil(t, cert)
+}
+
+func TestCACertBundleInvalidPath(t *testing.T) {
+ cfg := Github{
+ Name: "dummy_creds",
+ Description: "dummy github credentials",
+ OAuth2Token: "bogus",
+ CACertBundlePath: "/i/dont/exist",
+ }
+
+ cert, err := cfg.CACertBundle()
+ require.NotNil(t, err)
+ require.EqualError(t, err, "error accessing ca_cert_bundle: stat /i/dont/exist: no such file or directory")
+ require.Nil(t, cert)
+}
+
+func TestCACertBundleInvalidFile(t *testing.T) {
+ cfg := Github{
+ Name: "dummy_creds",
+ Description: "dummy github credentials",
+ OAuth2Token: "bogus",
+ CACertBundlePath: "../testdata/config.toml",
+ }
+
+ cert, err := cfg.CACertBundle()
+ require.NotNil(t, err)
+ require.EqualError(t, err, "failed to parse CA cert bundle")
+ require.Nil(t, cert)
+}
+
+func TestGithubHTTPClientDeprecatedPAT(t *testing.T) {
+ cfg := Github{
+ Name: "dummy_creds",
+ Description: "dummy github credentials",
+ OAuth2Token: "bogus",
+ }
+
+ client, err := cfg.HTTPClient(context.Background())
+ require.Nil(t, err)
+ require.NotNil(t, client)
+
+ transport, ok := client.Transport.(*oauth2.Transport)
+ require.True(t, ok)
+ require.NotNil(t, transport)
+}
+
+func TestGithubHTTPClientPAT(t *testing.T) {
+ cfg := Github{
+ Name: "dummy_creds",
+ Description: "dummy github credentials",
+ AuthType: GithubAuthTypePAT,
+ PAT: GithubPAT{
+ OAuth2Token: "bogus",
+ },
+ }
+
+ client, err := cfg.HTTPClient(context.Background())
+ require.Nil(t, err)
+ require.NotNil(t, client)
+
+ transport, ok := client.Transport.(*oauth2.Transport)
+ require.True(t, ok)
+ require.NotNil(t, transport)
+}
+
+func TestGithubHTTPClientApp(t *testing.T) {
+ cfg := Github{
+ Name: "dummy_creds",
+ Description: "dummy github credentials",
+ AuthType: GithubAuthTypeApp,
+ App: GithubApp{
+ AppID: 1,
+ InstallationID: 99,
+ PrivateKeyPath: "../testdata/certs/srv-key.pem",
+ },
+ }
+
+ client, err := cfg.HTTPClient(context.Background())
+ require.Nil(t, err)
+ require.NotNil(t, client)
+
+ transport, ok := client.Transport.(*ghinstallation.Transport)
+ require.True(t, ok)
+ require.NotNil(t, transport)
+}
diff --git a/config/external.go b/config/external.go
index 5bd9e273..ca98bdfb 100644
--- a/config/external.go
+++ b/config/external.go
@@ -18,10 +18,9 @@ import (
"fmt"
"os"
"path/filepath"
+ "strings"
"github.com/cloudbase/garm-provider-common/util/exec"
-
- "github.com/pkg/errors"
)
// External represents the config for an external provider.
@@ -30,6 +29,10 @@ import (
// whatever programming language you wish, while still remaining compatible
// with garm.
type External struct {
+ // InterfaceVersion is the version of the interface that the external
+ // provider implements. This is used to ensure compatibility between
+ // the external provider and garm.
+ InterfaceVersion string `toml:"interface_version" json:"interface-version"`
// ConfigFile is the path on disk to a file which will be passed to
// the external binary as an environment variable: GARM_PROVIDER_CONFIG
// You can use this file for any configuration you need to do for the
@@ -42,6 +45,25 @@ type External struct {
// the provider. If specified, it will take precedence over the "garm-external-provider"
// executable in the ProviderDir.
ProviderExecutable string `toml:"provider_executable" json:"provider-executable"`
+ // EnvironmentVariables is a list of environment variable names (or name
+ // prefixes) whose values will be read from GARM's own environment and
+ // passed to the external binary. Variables reserved for GARM itself are
+ // never forwarded.
+ EnvironmentVariables []string `toml:"environment_variables" json:"environment-variables"`
+}
+
+func (e *External) GetEnvironmentVariables() []string {
+ envVars := []string{}
+
+ for _, configuredPrefix := range e.EnvironmentVariables {
+ // collect every KEY=VALUE pair from the process environment whose
+ // name matches the configured prefix, skipping variables reserved
+ // for GARM itself (EnvironmentVariablePrefix)
+ for _, k := range os.Environ() {
+ variable := strings.SplitN(k, "=", 2)
+ if strings.HasPrefix(variable[0], configuredPrefix) &&
+ !strings.HasPrefix(variable[0], EnvironmentVariablePrefix) {
+ envVars = append(envVars, k)
+ }
+ }
+ }
+ return envVars
}
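
For illustration, how the prefix matching behaves at runtime. The variable names are examples, and EnvironmentVariablePrefix is assumed to cover GARM's own variables, as the tests below exercise:

```go
package config

import (
	"fmt"
	"os"
)

func exampleProviderEnv() {
	os.Setenv("PROVIDER_LOG_LEVEL", "debug")
	os.Setenv("INFRA_REGION", "us-east-1")
	os.Setenv("GARM_POOL_ID", "ignored") // GARM_* is never forwarded

	ext := External{
		EnvironmentVariables: []string{"PROVIDER_", "INFRA_REGION"},
	}
	// Prints PROVIDER_LOG_LEVEL=debug and INFRA_REGION=us-east-1
	// as KEY=VALUE pairs; ordering is not guaranteed.
	fmt.Println(ext.GetEnvironmentVariables())
}
```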
func (e *External) ExecutablePath() (string, error) {
@@ -68,10 +90,10 @@ func (e *External) Validate() error {
execPath, err := e.ExecutablePath()
if err != nil {
- return errors.Wrap(err, "fetching executable path")
+ return fmt.Errorf("failed to get executable path: %w", err)
}
if _, err := os.Stat(execPath); err != nil {
- return errors.Wrap(err, "checking provider executable")
+ return fmt.Errorf("failed to access external provider binary %s", execPath)
}
if !exec.IsExecutable(execPath) {
return fmt.Errorf("external provider binary %s is not executable", execPath)
diff --git a/config/external_test.go b/config/external_test.go
index 1da36d33..68ca3636 100644
--- a/config/external_test.go
+++ b/config/external_test.go
@@ -18,6 +18,7 @@ import (
"fmt"
"os"
"path/filepath"
+ "slices"
"testing"
"github.com/stretchr/testify/require"
@@ -30,7 +31,8 @@ func getDefaultExternalConfig(t *testing.T) External {
}
t.Cleanup(func() { os.RemoveAll(dir) })
- err = os.WriteFile(filepath.Join(dir, "garm-external-provider"), []byte{}, 0755)
+ // nolint:golangci-lint,gosec
+ err = os.WriteFile(filepath.Join(dir, "garm-external-provider"), []byte{}, 0o755)
if err != nil {
t.Fatalf("failed to write file: %s", err)
}
@@ -76,7 +78,7 @@ func TestExternal(t *testing.T) {
ConfigFile: "",
ProviderDir: "../test",
},
- errString: "fetching executable path: executable path must be an absolute path",
+ errString: "failed to get executable path: executable path must be an absolute path",
},
{
name: "Provider executable path must be absolute",
@@ -84,7 +86,7 @@ func TestExternal(t *testing.T) {
ConfigFile: "",
ProviderExecutable: "../test",
},
- errString: "fetching executable path: executable path must be an absolute path",
+ errString: "failed to get executable path: executable path must be an absolute path",
},
{
name: "Provider executable not found",
@@ -92,7 +94,7 @@ func TestExternal(t *testing.T) {
ConfigFile: "",
ProviderDir: "/tmp",
},
- errString: "checking provider executable: stat /tmp/garm-external-provider: no such file or directory",
+ errString: "failed to access external provider binary /tmp/garm-external-provider",
},
}
@@ -121,3 +123,101 @@ func TestProviderExecutableIsExecutable(t *testing.T) {
require.NotNil(t, err)
require.EqualError(t, err, fmt.Sprintf("external provider binary %s is not executable", execPath))
}
+
+func TestExternalEnvironmentVariables(t *testing.T) {
+ cfg := getDefaultExternalConfig(t)
+
+ tests := []struct {
+ name string
+ cfg External
+ expectedEnvironmentVariables []string
+ environmentVariables map[string]string
+ }{
+ {
+ name: "Provider with no additional environment variables",
+ cfg: cfg,
+ expectedEnvironmentVariables: []string{},
+ environmentVariables: map[string]string{
+ "PATH": "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
+ "PROVIDER_LOG_LEVEL": "debug",
+ "PROVIDER_TIMEOUT": "30",
+ "PROVIDER_RETRY_COUNT": "3",
+ "INFRA_REGION": "us-east-1",
+ },
+ },
+ {
+ name: "Provider with additional environment variables",
+ cfg: External{
+ ConfigFile: "",
+ ProviderDir: "../test",
+ EnvironmentVariables: []string{
+ "PROVIDER_",
+ "INFRA_REGION",
+ },
+ },
+ environmentVariables: map[string]string{
+ "PATH": "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
+ "PROVIDER_LOG_LEVEL": "debug",
+ "PROVIDER_TIMEOUT": "30",
+ "PROVIDER_RETRY_COUNT": "3",
+ "INFRA_REGION": "us-east-1",
+ "GARM_POOL_ID": "f3b21376-e189-43ae-a1bd-7a3ffee57a58",
+ },
+ expectedEnvironmentVariables: []string{
+ "PROVIDER_LOG_LEVEL=debug",
+ "PROVIDER_TIMEOUT=30",
+ "PROVIDER_RETRY_COUNT=3",
+ "INFRA_REGION=us-east-1",
+ },
+ },
+ {
+ name: "GARM variables are getting ignored",
+ cfg: External{
+ ConfigFile: "",
+ ProviderDir: "../test",
+ EnvironmentVariables: []string{
+ "PROVIDER_",
+ "INFRA_REGION",
+ "GARM_SERVER",
+ },
+ },
+ environmentVariables: map[string]string{
+ "PATH": "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
+ "PROVIDER_LOG_LEVEL": "debug",
+ "PROVIDER_TIMEOUT": "30",
+ "PROVIDER_RETRY_COUNT": "3",
+ "INFRA_REGION": "us-east-1",
+ "GARM_POOL_ID": "f3b21376-e189-43ae-a1bd-7a3ffee57a58",
+ "GARM_SERVER_SHUTDOWN": "true",
+ "GARM_SERVER_INSECURE": "true",
+ },
+ expectedEnvironmentVariables: []string{
+ "PROVIDER_LOG_LEVEL=debug",
+ "PROVIDER_TIMEOUT=30",
+ "PROVIDER_RETRY_COUNT=3",
+ "INFRA_REGION=us-east-1",
+ },
+ },
+ }
+
+ for _, tc := range tests {
+ t.Run(tc.name, func(t *testing.T) {
+ // set environment variables
+ for k, v := range tc.environmentVariables {
+ err := os.Setenv(k, v)
+ if err != nil {
+ t.Fatalf("failed to set environment variable: %s", err)
+ }
+ }
+
+ envVars := tc.cfg.GetEnvironmentVariables()
+
+ // sort slices to make them comparable
+ slices.Sort(envVars)
+ slices.Sort(tc.expectedEnvironmentVariables)
+
+ // compare slices
+ require.Equal(t, tc.expectedEnvironmentVariables, envVars)
+ })
+ }
+}
diff --git a/config/lxd.go b/config/lxd.go
deleted file mode 100644
index 8b8b1f7e..00000000
--- a/config/lxd.go
+++ /dev/null
@@ -1,163 +0,0 @@
-// Copyright 2022 Cloudbase Solutions SRL
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package config
-
-import (
- "fmt"
- "net/url"
- "os"
-
- "github.com/pkg/errors"
-)
-
-type LXDRemoteProtocol string
-type LXDImageType string
-
-func (l LXDImageType) String() string {
- return string(l)
-}
-
-const (
- SimpleStreams LXDRemoteProtocol = "simplestreams"
- LXDImageVirtualMachine LXDImageType = "virtual-machine"
- LXDImageContainer LXDImageType = "container"
-)
-
-// LXDImageRemote holds information about a remote server from which LXD can fetch
-// OS images. Typically this will be a simplestreams server.
-type LXDImageRemote struct {
- Address string `toml:"addr" json:"addr"`
- Public bool `toml:"public" json:"public"`
- Protocol LXDRemoteProtocol `toml:"protocol" json:"protocol"`
- InsecureSkipVerify bool `toml:"skip_verify" json:"skip-verify"`
-}
-
-func (l *LXDImageRemote) Validate() error {
- if l.Protocol != SimpleStreams {
- // Only supports simplestreams for now.
- return fmt.Errorf("invalid remote protocol %s. Supported protocols: %s", l.Protocol, SimpleStreams)
- }
- if l.Address == "" {
- return fmt.Errorf("missing address")
- }
-
- url, err := url.ParseRequestURI(l.Address)
- if err != nil {
- return errors.Wrap(err, "validating address")
- }
-
- if url.Scheme != "http" && url.Scheme != "https" {
- return fmt.Errorf("address must be http or https")
- }
-
- return nil
-}
-
-// LXD holds connection information for an LXD cluster.
-type LXD struct {
- // UnixSocket is the path on disk to the LXD unix socket. If defined,
- // this is prefered over connecting via HTTPs.
- UnixSocket string `toml:"unix_socket_path" json:"unix-socket-path"`
-
- // Project name is the name of the project in which this runner will create
- // instances. If this option is not set, the default project will be used.
- // The project used here, must have all required profiles created by you
- // beforehand. For LXD, the "flavor" used in the runner definition for a pool
- // equates to a profile in the desired project.
- ProjectName string `toml:"project_name" json:"project-name"`
-
- // IncludeDefaultProfile specifies whether or not this provider will always add
- // the "default" profile to any newly created instance.
- IncludeDefaultProfile bool `toml:"include_default_profile" json:"include-default-profile"`
-
- // URL holds the URL of the remote LXD server.
- // example: https://10.10.10.1:8443/
- URL string `toml:"url" json:"url"`
- // ClientCertificate is the x509 client certificate path used for authentication.
- ClientCertificate string `toml:"client_certificate" json:"client_certificate"`
- // ClientKey is the key used for client certificate authentication.
- ClientKey string `toml:"client_key" json:"client-key"`
- // TLS certificate of the remote server. If not specified, the system CA is used.
- TLSServerCert string `toml:"tls_server_certificate" json:"tls-server-certificate"`
- // TLSCA is the TLS CA certificate when running LXD in PKI mode.
- TLSCA string `toml:"tls_ca" json:"tls-ca"`
-
- // ImageRemotes is a map to a set of remote image repositories we can use to
- // download images.
- ImageRemotes map[string]LXDImageRemote `toml:"image_remotes" json:"image-remotes"`
-
- // SecureBoot enables secure boot for VMs spun up using this provider.
- SecureBoot bool `toml:"secure_boot" json:"secure-boot"`
-
- // InstanceType allows you to choose between a virtual machine and a container
- InstanceType LXDImageType `toml:"instance_type" json:"instance-type"`
-}
-
-func (l *LXD) GetInstanceType() LXDImageType {
- switch l.InstanceType {
- case LXDImageVirtualMachine, LXDImageContainer:
- return l.InstanceType
- default:
- return LXDImageVirtualMachine
- }
-}
-
-func (l *LXD) Validate() error {
- if l.UnixSocket != "" {
- if _, err := os.Stat(l.UnixSocket); err != nil {
- return fmt.Errorf("could not access unix socket %s: %q", l.UnixSocket, err)
- }
-
- return nil
- }
-
- if l.URL == "" {
- return fmt.Errorf("unix_socket or address must be specified")
- }
-
- url, err := url.ParseRequestURI(l.URL)
- if err != nil {
- return fmt.Errorf("invalid LXD URL")
- }
-
- if url.Scheme != "https" {
- return fmt.Errorf("address must be https")
- }
-
- if l.ClientCertificate == "" || l.ClientKey == "" {
- return fmt.Errorf("client_certificate and client_key are mandatory")
- }
-
- if _, err := os.Stat(l.ClientCertificate); err != nil {
- return fmt.Errorf("failed to access client certificate %s: %q", l.ClientCertificate, err)
- }
-
- if _, err := os.Stat(l.ClientKey); err != nil {
- return fmt.Errorf("failed to access client key %s: %q", l.ClientKey, err)
- }
-
- if l.TLSServerCert != "" {
- if _, err := os.Stat(l.TLSServerCert); err != nil {
- return fmt.Errorf("failed to access tls_server_certificate %s: %q", l.TLSServerCert, err)
- }
- }
-
- for name, val := range l.ImageRemotes {
- if err := val.Validate(); err != nil {
- return fmt.Errorf("remote %s is invalid: %s", name, err)
- }
- }
- return nil
-}
diff --git a/config/lxd_test.go b/config/lxd_test.go
deleted file mode 100644
index 1bba515d..00000000
--- a/config/lxd_test.go
+++ /dev/null
@@ -1,175 +0,0 @@
-// Copyright 2022 Cloudbase Solutions SRL
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package config
-
-import (
- "testing"
-
- "github.com/stretchr/testify/require"
-)
-
-func getDefaultLXDImageRemoteConfig() LXDImageRemote {
- return LXDImageRemote{
- Address: "https://cloud-images.ubuntu.com/releases",
- Public: true,
- Protocol: SimpleStreams,
- InsecureSkipVerify: false,
- }
-}
-
-func getDefaultLXDConfig() LXD {
- remote := getDefaultLXDImageRemoteConfig()
- return LXD{
- URL: "https://example.com:8443",
- ProjectName: "default",
- IncludeDefaultProfile: false,
- ClientCertificate: "../testdata/lxd/certs/client.crt",
- ClientKey: "../testdata/lxd/certs/client.key",
- TLSServerCert: "../testdata/lxd/certs/servercert.crt",
- ImageRemotes: map[string]LXDImageRemote{
- "default": remote,
- },
- SecureBoot: false,
- }
-}
-
-func TestLXDRemote(t *testing.T) {
- cfg := getDefaultLXDImageRemoteConfig()
-
- err := cfg.Validate()
- require.Nil(t, err)
-}
-
-func TestLXDRemoteEmptyAddress(t *testing.T) {
- cfg := getDefaultLXDImageRemoteConfig()
-
- cfg.Address = ""
-
- err := cfg.Validate()
- require.NotNil(t, err)
- require.EqualError(t, err, "missing address")
-}
-
-func TestLXDRemoteInvalidAddress(t *testing.T) {
- cfg := getDefaultLXDImageRemoteConfig()
-
- cfg.Address = "bogus address"
- err := cfg.Validate()
- require.NotNil(t, err)
- require.EqualError(t, err, "validating address: parse \"bogus address\": invalid URI for request")
-}
-
-func TestLXDRemoteIvalidAddressScheme(t *testing.T) {
- cfg := getDefaultLXDImageRemoteConfig()
-
- cfg.Address = "ftp://whatever"
- err := cfg.Validate()
- require.NotNil(t, err)
- require.EqualError(t, err, "address must be http or https")
-}
-
-func TestLXDConfig(t *testing.T) {
- cfg := getDefaultLXDConfig()
- err := cfg.Validate()
- require.Nil(t, err)
-}
-
-func TestLXDWithInvalidUnixSocket(t *testing.T) {
- cfg := getDefaultLXDConfig()
-
- cfg.UnixSocket = "bogus unix socket"
- err := cfg.Validate()
- require.NotNil(t, err)
- require.EqualError(t, err, "could not access unix socket bogus unix socket: \"stat bogus unix socket: no such file or directory\"")
-}
-
-func TestMissingUnixSocketAndMissingURL(t *testing.T) {
- cfg := getDefaultLXDConfig()
-
- cfg.URL = ""
- cfg.UnixSocket = ""
-
- err := cfg.Validate()
- require.NotNil(t, err)
- require.EqualError(t, err, "unix_socket or address must be specified")
-}
-
-func TestInvalidLXDURL(t *testing.T) {
- cfg := getDefaultLXDConfig()
- cfg.URL = "bogus"
-
- err := cfg.Validate()
- require.NotNil(t, err)
- require.EqualError(t, err, "invalid LXD URL")
-}
-
-func TestLXDURLIsHTTPS(t *testing.T) {
- cfg := getDefaultLXDConfig()
- cfg.URL = "http://example.com"
-
- err := cfg.Validate()
- require.NotNil(t, err)
- require.EqualError(t, err, "address must be https")
-}
-
-func TestMissingClientCertOrKey(t *testing.T) {
- cfg := getDefaultLXDConfig()
- cfg.ClientKey = ""
- err := cfg.Validate()
- require.NotNil(t, err)
- require.EqualError(t, err, "client_certificate and client_key are mandatory")
-
- cfg = getDefaultLXDConfig()
- cfg.ClientCertificate = ""
- err = cfg.Validate()
- require.NotNil(t, err)
- require.EqualError(t, err, "client_certificate and client_key are mandatory")
-}
-
-func TestLXDIvalidCertOrKeyPaths(t *testing.T) {
- cfg := getDefaultLXDConfig()
- cfg.ClientCertificate = "/i/am/not/here"
- err := cfg.Validate()
- require.NotNil(t, err)
- require.EqualError(t, err, "failed to access client certificate /i/am/not/here: \"stat /i/am/not/here: no such file or directory\"")
-
- cfg.ClientCertificate = "../testdata/lxd/certs/client.crt"
- cfg.ClientKey = "/me/neither"
-
- err = cfg.Validate()
- require.NotNil(t, err)
- require.EqualError(t, err, "failed to access client key /me/neither: \"stat /me/neither: no such file or directory\"")
-}
-
-func TestLXDInvalidServerCertPath(t *testing.T) {
- cfg := getDefaultLXDConfig()
- cfg.TLSServerCert = "/not/a/valid/server/cert/path"
-
- err := cfg.Validate()
- require.NotNil(t, err)
- require.EqualError(t, err, "failed to access tls_server_certificate /not/a/valid/server/cert/path: \"stat /not/a/valid/server/cert/path: no such file or directory\"")
-}
-
-func TestInvalidLXDImageRemotes(t *testing.T) {
- cfg := getDefaultLXDConfig()
-
- cfg.ImageRemotes["default"] = LXDImageRemote{
- Protocol: LXDRemoteProtocol("bogus"),
- }
-
- err := cfg.Validate()
- require.NotNil(t, err)
- require.EqualError(t, err, "remote default is invalid: invalid remote protocol bogus. Supported protocols: simplestreams")
-}
diff --git a/contrib/providers.d/azure/cloudconfig/install_runner.tpl b/contrib/providers.d/azure/cloudconfig/install_runner.tpl
deleted file mode 100644
index 910d8eac..00000000
--- a/contrib/providers.d/azure/cloudconfig/install_runner.tpl
+++ /dev/null
@@ -1,81 +0,0 @@
-#!/bin/bash
-
-set -e
-set -o pipefail
-
-METADATA_URL="GARM_METADATA_URL"
-CALLBACK_URL="GARM_CALLBACK_URL"
-BEARER_TOKEN="GARM_CALLBACK_TOKEN"
-DOWNLOAD_URL="GH_DOWNLOAD_URL"
-DOWNLOAD_TOKEN="GH_TEMP_DOWNLOAD_TOKEN"
-FILENAME="GH_FILENAME"
-TARGET_URL="GH_TARGET_URL"
-RUNNER_NAME="GH_RUNNER_NAME"
-RUNNER_LABELS="GH_RUNNER_LABELS"
-TEMP_TOKEN=""
-
-
-if [ -z "$METADATA_URL" ];then
- echo "no token is available and METADATA_URL is not set"
- exit 1
-fi
-
-function call() {
- PAYLOAD="$1"
- curl --fail -s -X POST -d "${PAYLOAD}" -H 'Accept: application/json' -H "Authorization: Bearer ${BEARER_TOKEN}" "${CALLBACK_URL}" || echo "failed to call home: exit code ($?)"
-}
-
-function sendStatus() {
- MSG="$1"
- call "{\"status\": \"installing\", \"message\": \"$MSG\"}"
-}
-
-function success() {
- MSG="$1"
- ID=$2
- call "{\"status\": \"idle\", \"message\": \"$MSG\", \"agent_id\": $ID}"
-}
-
-function fail() {
- MSG="$1"
- call "{\"status\": \"failed\", \"message\": \"$MSG\"}"
- exit 1
-}
-
-if [ ! -z "$DOWNLOAD_TOKEN" ]; then
- TEMP_TOKEN="Authorization: Bearer $DOWNLOAD_TOKEN"
-fi
-
-sendStatus "downloading tools from ${DOWNLOAD_URL}"
-curl --fail -L -H "${TEMP_TOKEN}" -o "/home/runner/${FILENAME}" "${DOWNLOAD_URL}" || fail "failed to download tools"
-
-mkdir -p /home/runner/actions-runner || fail "failed to create actions-runner folder"
-
-sendStatus "extracting runner"
-tar xf "/home/runner/${FILENAME}" -C /home/runner/actions-runner/ || fail "failed to extract runner"
-chown runner:runner -R /home/runner/actions-runner/ || fail "failed to change owner"
-
-sendStatus "installing dependencies"
-cd /home/runner/actions-runner
-sudo ./bin/installdependencies.sh || fail "failed to install dependencies"
-
-sendStatus "fetching runner registration token"
-GITHUB_TOKEN=$(curl --fail -s -X GET -H 'Accept: application/json' -H "Authorization: Bearer ${BEARER_TOKEN}" "${METADATA_URL}" || fail "failed to get runner registration token")
-
-sendStatus "configuring runner"
-sudo -u runner -- ./config.sh --unattended --url "${TARGET_URL}" --token "${GITHUB_TOKEN}" --name "${RUNNER_NAME}" --labels "${RUNNER_LABELS}" --ephemeral || fail "failed to configure runner"
-
-sendStatus "installing runner service"
-./svc.sh install runner || fail "failed to install service"
-
-sendStatus "starting service"
-./svc.sh start || fail "failed to start service"
-
-set +e
-AGENT_ID=$(grep "agentId" /home/runner/actions-runner/.runner | tr -d -c 0-9)
-if [ $? -ne 0 ];then
- fail "failed to get agent ID"
-fi
-set -e
-
-success "runner successfully installed" $AGENT_ID
\ No newline at end of file
diff --git a/contrib/providers.d/azure/cloudconfig/userdata.tpl b/contrib/providers.d/azure/cloudconfig/userdata.tpl
deleted file mode 100644
index 10ef2b51..00000000
--- a/contrib/providers.d/azure/cloudconfig/userdata.tpl
+++ /dev/null
@@ -1,31 +0,0 @@
-#cloud-config
-package_upgrade: true
-packages:
- - curl
- - tar
-system_info:
- default_user:
- name: runner
- home: /home/runner
- shell: /bin/bash
- groups:
- - sudo
- - adm
- - cdrom
- - dialout
- - dip
- - video
- - plugdev
- - netdev
- - docker
- - lxd
- sudo: ALL=(ALL) NOPASSWD:ALL
-runcmd:
- - /install_runner.sh
- - rm -f /install_runner.sh
-write_files:
- - encoding: b64
- content: RUNNER_INSTALL_B64
- owner: root:root
- path: /install_runner.sh
- permissions: "755"
diff --git a/contrib/providers.d/azure/config.sh b/contrib/providers.d/azure/config.sh
deleted file mode 100644
index f99f42ac..00000000
--- a/contrib/providers.d/azure/config.sh
+++ /dev/null
@@ -1,8 +0,0 @@
-# Azure service principal credentials
-export AZURE_SUBSCRIPTION_ID=""
-export AZURE_TENANT_ID=""
-export AZURE_CLIENT_ID=""
-export AZURE_CLIENT_SECRET=""
-
-# GARM config
-export LOCATION="westeurope"
diff --git a/contrib/providers.d/azure/garm-external-provider b/contrib/providers.d/azure/garm-external-provider
deleted file mode 100755
index 7974f40f..00000000
--- a/contrib/providers.d/azure/garm-external-provider
+++ /dev/null
@@ -1,370 +0,0 @@
-#!/bin/bash
-
-set -e
-set -o pipefail
-
-if [ ! -t 0 ]
-then
- INPUT=$(cat -)
-fi
-MYPATH=$(realpath ${BASH_SOURCE[0]})
-MYDIR=$(dirname "${MYPATH}")
-TEMPLATES="$MYDIR/cloudconfig"
-
-# Defaults
-LOCATION=${LOCATION:"westeurope"}
-
-# END Defaults
-
-if [ -z "$GARM_PROVIDER_CONFIG_FILE" ]
-then
- echo "no config file specified in env"
- exit 1
-fi
-
-source "$GARM_PROVIDER_CONFIG_FILE"
-
-declare -A GARM_TO_GH_ARCH_MAP
-GARM_TO_GH_ARCH_MAP["amd64"]="x64"
-GARM_TO_GH_ARCH_MAP["arm"]="arm"
-GARM_TO_GH_ARCH_MAP["arm64"]="arm64"
-
-declare -A AZURE_OS_TO_GH_OS_MAP
-AZURE_OS_TO_GH_OS_MAP["Linux"]="linux"
-AZURE_OS_TO_GH_OS_MAP["Windows"]="win"
-
-# https://docs.microsoft.com/en-us/azure/virtual-machines/states-billing#power-states-and-billing
-declare -A AZURE_POWER_STATE_MAP
-AZURE_POWER_STATE_MAP["VM starting"]="pending_create"
-AZURE_POWER_STATE_MAP["VM running"]="running"
-AZURE_POWER_STATE_MAP["VM stopping"]="stopped"
-AZURE_POWER_STATE_MAP["VM stopped"]="stopped"
-AZURE_POWER_STATE_MAP["VM deallocating"]="stopped"
-AZURE_POWER_STATE_MAP["VM deallocated"]="stopped"
-
-# https://docs.microsoft.com/en-us/azure/virtual-machines/states-billing#provisioning-states
-declare -A AZURE_PROVISION_STATE_MAP
-AZURE_PROVISION_STATE_MAP["Creating"]="pending_create"
-AZURE_PROVISION_STATE_MAP["Updating"]="pending_create"
-AZURE_PROVISION_STATE_MAP["Migrating"]="pending_create"
-AZURE_PROVISION_STATE_MAP["Failed"]="error"
-AZURE_PROVISION_STATE_MAP["Succeeded"]="running"
-AZURE_PROVISION_STATE_MAP["Deleting"]="pending_delete"
-
-function checkValNotNull() {
- if [ -z "$1" -o "$1" == "null" ]; then
- echo "failed to fetch value $2"
- return 1
- fi
- return 0
-}
-
-function requestedArch() {
- ARCH=$(echo "$INPUT" | jq -c -r '.arch')
- checkValNotNull "${ARCH}" "arch" || return $?
- echo "${ARCH}"
-}
-
-function downloadURL() {
- [ -z "$1" -o -z "$2" ] && return 1
- GH_OS="${AZURE_OS_TO_GH_OS_MAP[$1]}"
- GH_ARCH="${GARM_TO_GH_ARCH_MAP[$2]}"
- URL=$(echo "$INPUT" | jq -c -r --arg OS "$GH_OS" --arg ARCH "$GH_ARCH" '(.tools[] | select( .os == $OS and .architecture == $ARCH)).download_url')
- checkValNotNull "${URL}" "download URL" || return $?
- echo "${URL}"
-}
-
-function tempDownloadToken() {
- [ -z "$1" -o -z "$2" ] && return 1
- GH_ARCH="${GARM_TO_GH_ARCH_MAP[$2]}"
- TOKEN=$(echo "$INPUT" | jq -c -r --arg OS "$1" --arg ARCH "$GH_ARCH" '(.tools[] | select( .os == $OS and .architecture == $ARCH)).temp_download_token')
- echo "${TOKEN}"
-}
-
-function runnerTokenURL() {
- METADATA_URL=$(echo "$INPUT" | jq -c -r '."metadata-url"')
- checkValNotNull "${METADATA_URL}" "metadata-url" || return $?
- echo "${METADATA_URL}/runner-registration-token/"
-}
-
-function downloadFilename() {
- [ -z "$1" -o -z "$2" ] && return 1
- GH_OS="${AZURE_OS_TO_GH_OS_MAP[$1]}"
- GH_ARCH="${GARM_TO_GH_ARCH_MAP[$2]}"
- FN=$(echo "$INPUT" | jq -c -r --arg OS "$GH_OS" --arg ARCH "$GH_ARCH" '(.tools[] | select( .os == $OS and .architecture == $ARCH)).filename')
- checkValNotNull "${FN}" "download filename" || return $?
- echo "${FN}"
-}
-
-function poolID() {
- POOL_ID=$(echo "$INPUT" | jq -c -r '.pool_id')
- checkValNotNull "${POOL_ID}" "pool_id" || return $?
- echo "${POOL_ID}"
-}
-
-function vmSize() {
- VM_SIZE=$(echo "$INPUT" | jq -c -r '.flavor')
- checkValNotNull "${VM_SIZE}" "flavor" || return $?
- echo "${VM_SIZE}"
-}
-
-function imageUrn() {
- IMG=$(echo "$INPUT" | jq -c -r '.image')
- checkValNotNull "${IMG}" "image" || return $?
- echo "${IMG}"
-}
-
-function getOSImageDetails() {
- IMAGE=$(echo "$INPUT" | jq -r -c '.image')
- IMAGE_DETAILS=$(az vm image show --urn "$IMAGE" -o json --only-show-errors)
- echo "$IMAGE_DETAILS"
-}
-
-function repoURL() {
- REPO=$(echo "$INPUT" | jq -c -r '.repo_url')
- checkValNotNull "${REPO}" "repo_url" || return $?
- echo "${REPO}"
-}
-
-function callbackURL() {
- CB_URL=$(echo "$INPUT" | jq -c -r '."callback-url"')
- checkValNotNull "${CB_URL}" "callback-url" || return $?
- echo "${CB_URL}"
-}
-
-function callbackToken() {
- CB_TK=$(echo "$INPUT" | jq -c -r '."instance-token"')
- checkValNotNull "${CB_TK}" "instance-token" || return $?
- echo "${CB_TK}"
-}
-
-function instanceName() {
- NAME=$(echo "$INPUT" | jq -c -r '.name')
- checkValNotNull "${NAME}" "name" || return $?
- echo "${NAME}"
-}
-
-function labels() {
- LBL=$(echo "$INPUT" | jq -c -r '.labels | join(",")')
- checkValNotNull "${LBL}" "labels" || return $?
- echo "${LBL}"
-}
-
-function vmStatus() {
- [ -z "$1" -o -z "$2" ] && return 1
-
- RG_DETAILS=$(az group show -n "$1" -o json --only-show-errors)
- RG_STATE=$(echo "$RG_DETAILS" | jq -r '.properties.provisioningState')
- STATUS="${AZURE_PROVISION_STATE_MAP[$RG_STATE]}"
- if [[ "$STATUS" != "running" ]]; then
- echo "$STATUS"
- return 0
- fi
- VM_DETAILS=$(az vm show -g "$1" -n "$2" --show-details -o json --only-show-errors)
- VM_STATE=$(echo "$VM_DETAILS" | jq -r '.provisioningState')
- STATUS="${AZURE_PROVISION_STATE_MAP[$VM_STATE]}"
- if [[ "$STATUS" != "running" ]]; then
- echo "$STATUS"
- return 0
- fi
- VM_POWER_STATE=$(echo "$VM_DETAILS" | jq -r '.powerState')
- VM_STATUS="${AZURE_POWER_STATE_MAP[$VM_POWER_STATE]}"
- if [[ -z "${VM_STATUS}" ]]; then
- echo "unknown"
- return 0
- fi
- echo "${VM_STATUS}"
-}
-
-function getCloudConfig() {
- IMAGE_DETAILS=$(getOSImageDetails)
-
- OS_TYPE=$(echo "${IMAGE_DETAILS}" | jq -c -r '.osDiskImage.operatingSystem')
- checkValNotNull "${OS_TYPE}" "operatingSystem" || return $?
-
- ARCH=$(requestedArch)
- DW_URL=$(downloadURL "${OS_TYPE}" "${ARCH}")
- DW_TOKEN=$(tempDownloadToken "${OS_TYPE}" "${ARCH}")
- DW_FILENAME=$(downloadFilename "${OS_TYPE}" "${ARCH}")
- LABELS=$(labels)
-
- TMP_SCRIPT=$(mktemp)
- TMP_CC=$(mktemp)
-
- INSTALL_TPL=$(cat "${TEMPLATES}/install_runner.tpl")
- CC_TPL=$(cat "${TEMPLATES}/userdata.tpl")
- echo "$INSTALL_TPL" | sed -e "s|GARM_CALLBACK_URL|$(callbackURL)|g" \
- -e "s|GARM_CALLBACK_TOKEN|$(callbackToken)|g" \
- -e "s|GH_DOWNLOAD_URL|${DW_URL}|g" \
- -e "s|GH_FILENAME|${DW_FILENAME}|g" \
- -e "s|GH_TARGET_URL|$(repoURL)|g" \
- -e "s|GARM_METADATA_URL|$(runnerTokenURL)|g" \
- -e "s|GH_RUNNER_NAME|$(instanceName)|g" \
- -e "s|GH_TEMP_DOWNLOAD_TOKEN|${DW_TOKEN}|g" \
- -e "s|GH_RUNNER_LABELS|${LABELS}|g" > ${TMP_SCRIPT}
-
- AS_B64=$(base64 -w0 ${TMP_SCRIPT})
- echo "${CC_TPL}" | sed "s|RUNNER_INSTALL_B64|${AS_B64}|g" > ${TMP_CC}
- echo "${TMP_CC}"
-}
-
-function CreateInstance() {
- if [ -z "$INPUT" ]; then
- echo "expected build params in stdin"
- exit 1
- fi
-
- CC_FILE=$(getCloudConfig)
- VM_SIZE=$(vmSize)
- INSTANCE_NAME=$(instanceName)
- IMAGE_URN=$(imageUrn)
- IMAGE_DETAILS=$(getOSImageDetails)
-
- OS_TYPE=$(echo "${IMAGE_DETAILS}" | jq -c -r '.osDiskImage.operatingSystem' | tr '[:upper:]' '[:lower:]')
- checkValNotNull "${OS_TYPE}" "os_type" || return $?
- OS_NAME=$(echo "${IMAGE_URN}" | cut -d ':' -f2)
- OS_VERSION=$(echo "${IMAGE_URN}" | cut -d ':' -f3)
- ARCH="amd64"
-
- TAGS="garm_controller_id=${GARM_CONTROLLER_ID} garm_pool_id=${GARM_POOL_ID} os_type=${OS_TYPE} os_name=${OS_NAME} os_version=${OS_VERSION} os_arch=${ARCH}"
-
- set +e
-
- az group create -n $INSTANCE_NAME -l $LOCATION --tags $TAGS --only-show-errors -o none
- az vm create -g $INSTANCE_NAME -n $INSTANCE_NAME -l $LOCATION --size $VM_SIZE --image $IMAGE_URN --tags $TAGS --nsg-rule none --public-ip-address "" --user-data "${CC_FILE}" -o none --only-show-errors
- if [[ $? -ne 0 ]]; then
- az group delete -n $INSTANCE_NAME --no-wait --y -o none --only-show-errors
- echo "Failed to create Azure VM"
- exit 1
- fi
- rm -f "${CC_FILE}"
-
- set -e
-
- STATUS=$(vmStatus $INSTANCE_NAME $INSTANCE_NAME)
- FAULT_VAL=""
-
- jq -rnc \
- --arg PROVIDER_ID "${INSTANCE_NAME}" \
- --arg NAME "${INSTANCE_NAME}" \
- --arg OS_TYPE "${OS_TYPE}" \
- --arg OS_NAME "${OS_NAME}" \
- --arg OS_VERSION "${OS_VERSION}" \
- --arg ARCH "${ARCH}" \
- --arg STATUS "${STATUS}" \
- --arg POOL_ID "${GARM_POOL_ID}" \
- --arg FAULT "${FAULT_VAL}" \
- '{"provider_id": $PROVIDER_ID, "name": $NAME, "os_type": $OS_TYPE, "os_name": $OS_NAME, "os_version": $OS_VERSION, "os_arch": $ARCH, "status": $STATUS, "pool_id": $POOL_ID, "provider_fault": $FAULT}'
-}
-
-function DeleteInstance() {
- local instance_id="${GARM_INSTANCE_ID}"
- if [ -z "${instance_id}" ]; then
- echo "missing instance ID in env"
- return 1
- fi
-
- set +e
- rg_info=$(az group show -n "${instance_id}" -o json --only-show-errors 2>&1)
- if [ $? -ne 0 ]; then
- CODE=$?
- set -e
- if echo "${rg_info}" | grep -q "ResourceGroupNotFound"; then
- return 0
- fi
- return $CODE
- fi
- set -e
- az group delete -n "${instance_id}" --no-wait --y --only-show-errors
-}
-
-function StartInstance() {
- local instance_id="${GARM_INSTANCE_ID}"
- if [ -z "${instance_id}" ]; then
- echo "missing instance ID in env"
- return 1
- fi
-
- az vm start -g "${instance_id}" -n "${instance_id}" -o none --only-show-errors
-}
-
-function StopServer() {
- local instance_id="${GARM_INSTANCE_ID}"
- if [ -z "${instance_id}" ]; then
- echo "missing instance ID in env"
- return 1
- fi
-
- az vm deallocate -g "${instance_id}" -n "${instance_id}" -o none --only-show-errors
-}
-
-function GetInstance() {
- local instance_id="${GARM_INSTANCE_ID}"
- info=$(az vm show -d -n $instance_id -g $instance_id -o json --only-show-errors 2>&1)
- echo $info | jq -r '
- {
- provider_id: .name,
- name: .name,
- os_type: .tags.os_type,
- os_name: .tags.os_name,
- os_version: .tags.os_version,
- os_arch: .tags.os_arch,
- pool_id: .tags.garm_pool_id,
- status: {"VM starting": "pending_create", "VM running": "running", "VM stopping": "stopped", "VM stopped": "stopped", "VM deallocating": "stopped", "VM deallocated": "stopped"}[.powerState]
- }'
-}
-
-function ListInstances() {
- INSTANCES=$(az vm list --query "[?tags.garm_pool_id == '${GARM_POOL_ID}']" -o json --only-show-errors 2>&1)
- echo $info | jq -r '[
- .[] | {
- provider_id: .name,
- name: .name,
- os_type: .tags.os_type,
- os_name: .tags.os_name,
- os_version: .tags.os_version,
- os_arch: .tags.os_arch,
- pool_id: .tags.garm_pool_id,
- status: {"Creating": "pending_create", "Migrating": "pending_create", "Failed": "error", "Succeeded": "running", "Deleting": "pending_delete"}[.provisioningState]
- }]'
-}
-
-# Login to Azure
-checkValNotNull "${AZURE_SUBSCRIPTION_ID}" "AZURE_SUBSCRIPTION_ID"
-checkValNotNull "${AZURE_TENANT_ID}" "AZURE_TENANT_ID"
-checkValNotNull "${AZURE_CLIENT_ID}" "AZURE_CLIENT_ID"
-checkValNotNull "${AZURE_CLIENT_SECRET}" "AZURE_CLIENT_SECRET"
-
-export AZURE_CONFIG_DIR="${MYDIR}/.azure"
-
-az login --service-principal -u $AZURE_CLIENT_ID -p $AZURE_CLIENT_SECRET --tenant $AZURE_TENANT_ID -o none --only-show-errors
-az account set -s $AZURE_SUBSCRIPTION_ID -o none --only-show-errors
-
-case "$GARM_COMMAND" in
- "CreateInstance")
- CreateInstance
- ;;
- "DeleteInstance")
- DeleteInstance
- ;;
- "GetInstance")
- GetInstance
- ;;
- "ListInstances")
- ListInstances
- ;;
- "StartInstance")
- StartInstance
- ;;
- "StopInstance")
- StopServer
- ;;
- "RemoveAllInstances")
- echo "RemoveAllInstances not implemented"
- exit 1
- ;;
- *)
- echo "Invalid GARM provider command: \"$GARM_COMMAND\""
- exit 1
- ;;
-esac
diff --git a/contrib/providers.d/openstack/README.md b/contrib/providers.d/openstack/README.md
deleted file mode 100644
index 4995e543..00000000
--- a/contrib/providers.d/openstack/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
-# OpenStack external provider for garm
-
-This is an example external provider, written for OpenStack. It is a simple bash script that implements the external provider interface, in order to supply ```garm``` with compute instances. This is just an example, complete with a sample config file.
-
-Not all functions are implemented, just the bare minimum to get it to work with the current feature set of ```garm```. It is not meant for production, as it needs a lot more error checking, retries, and potentially more flexibility to be of any use in a real environment.
-
-Images that are used with garm require the following properties set on the image:
-
- * os_type (one of: windows, linux)
- * os_distro
- * os_version
- * architecture (one of: x86_64, armv7l, mips64, mips64el, mips, mipsel)
diff --git a/contrib/providers.d/openstack/cloudconfig/install_runner.tpl b/contrib/providers.d/openstack/cloudconfig/install_runner.tpl
deleted file mode 100644
index 910d8eac..00000000
--- a/contrib/providers.d/openstack/cloudconfig/install_runner.tpl
+++ /dev/null
@@ -1,81 +0,0 @@
-#!/bin/bash
-
-set -e
-set -o pipefail
-
-METADATA_URL="GARM_METADATA_URL"
-CALLBACK_URL="GARM_CALLBACK_URL"
-BEARER_TOKEN="GARM_CALLBACK_TOKEN"
-DOWNLOAD_URL="GH_DOWNLOAD_URL"
-DOWNLOAD_TOKEN="GH_TEMP_DOWNLOAD_TOKEN"
-FILENAME="GH_FILENAME"
-TARGET_URL="GH_TARGET_URL"
-RUNNER_NAME="GH_RUNNER_NAME"
-RUNNER_LABELS="GH_RUNNER_LABELS"
-TEMP_TOKEN=""
-
-
-if [ -z "$METADATA_URL" ];then
- echo "no token is available and METADATA_URL is not set"
- exit 1
-fi
-
-function call() {
- PAYLOAD="$1"
- curl --fail -s -X POST -d "${PAYLOAD}" -H 'Accept: application/json' -H "Authorization: Bearer ${BEARER_TOKEN}" "${CALLBACK_URL}" || echo "failed to call home: exit code ($?)"
-}
-
-function sendStatus() {
- MSG="$1"
- call "{\"status\": \"installing\", \"message\": \"$MSG\"}"
-}
-
-function success() {
- MSG="$1"
- ID=$2
- call "{\"status\": \"idle\", \"message\": \"$MSG\", \"agent_id\": $ID}"
-}
-
-function fail() {
- MSG="$1"
- call "{\"status\": \"failed\", \"message\": \"$MSG\"}"
- exit 1
-}
-
-if [ ! -z "$DOWNLOAD_TOKEN" ]; then
- TEMP_TOKEN="Authorization: Bearer $DOWNLOAD_TOKEN"
-fi
-
-sendStatus "downloading tools from ${DOWNLOAD_URL}"
-curl --fail -L -H "${TEMP_TOKEN}" -o "/home/runner/${FILENAME}" "${DOWNLOAD_URL}" || fail "failed to download tools"
-
-mkdir -p /home/runner/actions-runner || fail "failed to create actions-runner folder"
-
-sendStatus "extracting runner"
-tar xf "/home/runner/${FILENAME}" -C /home/runner/actions-runner/ || fail "failed to extract runner"
-chown runner:runner -R /home/runner/actions-runner/ || fail "failed to change owner"
-
-sendStatus "installing dependencies"
-cd /home/runner/actions-runner
-sudo ./bin/installdependencies.sh || fail "failed to install dependencies"
-
-sendStatus "fetching runner registration token"
-GITHUB_TOKEN=$(curl --fail -s -X GET -H 'Accept: application/json' -H "Authorization: Bearer ${BEARER_TOKEN}" "${METADATA_URL}" || fail "failed to get runner registration token")
-
-sendStatus "configuring runner"
-sudo -u runner -- ./config.sh --unattended --url "${TARGET_URL}" --token "${GITHUB_TOKEN}" --name "${RUNNER_NAME}" --labels "${RUNNER_LABELS}" --ephemeral || fail "failed to configure runner"
-
-sendStatus "installing runner service"
-./svc.sh install runner || fail "failed to install service"
-
-sendStatus "starting service"
-./svc.sh start || fail "failed to start service"
-
-set +e
-AGENT_ID=$(grep "agentId" /home/runner/actions-runner/.runner | tr -d -c 0-9)
-if [ $? -ne 0 ];then
- fail "failed to get agent ID"
-fi
-set -e
-
-success "runner successfully installed" $AGENT_ID
\ No newline at end of file
diff --git a/contrib/providers.d/openstack/cloudconfig/userdata.tpl b/contrib/providers.d/openstack/cloudconfig/userdata.tpl
deleted file mode 100644
index 10ef2b51..00000000
--- a/contrib/providers.d/openstack/cloudconfig/userdata.tpl
+++ /dev/null
@@ -1,31 +0,0 @@
-#cloud-config
-package_upgrade: true
-packages:
- - curl
- - tar
-system_info:
- default_user:
- name: runner
- home: /home/runner
- shell: /bin/bash
- groups:
- - sudo
- - adm
- - cdrom
- - dialout
- - dip
- - video
- - plugdev
- - netdev
- - docker
- - lxd
- sudo: ALL=(ALL) NOPASSWD:ALL
-runcmd:
- - /install_runner.sh
- - rm -f /install_runner.sh
-write_files:
- - encoding: b64
- content: RUNNER_INSTALL_B64
- owner: root:root
- path: /install_runner.sh
- permissions: "755"
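
The RUNNER_INSTALL_B64 placeholder in this template is filled by the provider with the base64-encoded install script (see getCloudConfig in the provider below). The same substitution as a Go sketch:

package main

import (
	"encoding/base64"
	"strings"
)

// renderUserdata embeds the rendered install script into the cloud-config
// template, replacing the RUNNER_INSTALL_B64 placeholder; this mirrors what
// the shell provider does with base64 -w0 and sed.
func renderUserdata(userdataTpl string, installScript []byte) string {
	encoded := base64.StdEncoding.EncodeToString(installScript)
	return strings.ReplaceAll(userdataTpl, "RUNNER_INSTALL_B64", encoded)
}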
diff --git a/contrib/providers.d/openstack/garm-external-provider b/contrib/providers.d/openstack/garm-external-provider
deleted file mode 100755
index f2602f57..00000000
--- a/contrib/providers.d/openstack/garm-external-provider
+++ /dev/null
@@ -1,445 +0,0 @@
-#!/bin/bash
-
-set -e
-set -o pipefail
-
-if [ ! -t 0 ]
-then
- INPUT=$(cat -)
-fi
-MYPATH=$(realpath ${BASH_SOURCE[0]})
-MYDIR=$(dirname "${MYPATH}")
-TEMPLATES="$MYDIR/cloudconfig"
-
-# Defaults
-# set this variable to 0 in the provider config to disable.
-BOOT_FROM_VOLUME=${BOOT_FROM_VOLUME:-1}
-
-# END Defaults
-
-if [ -z "$GARM_PROVIDER_CONFIG_FILE" ]
-then
- echo "no config file specified in env"
- exit 1
-fi
-
-source "$GARM_PROVIDER_CONFIG_FILE"
-
-declare -A OS_TO_GH_ARCH_MAP
-OS_TO_GH_ARCH_MAP["x86_64"]="x64"
-OS_TO_GH_ARCH_MAP["armv7l"]="arm64"
-OS_TO_GH_ARCH_MAP["mips64"]="arm64"
-OS_TO_GH_ARCH_MAP["mips64el"]="arm64"
-OS_TO_GH_ARCH_MAP["mips"]="arm"
-OS_TO_GH_ARCH_MAP["mipsel"]="arm"
-
-declare -A OS_TO_GARM_ARCH_MAP
-OS_TO_GARM_ARCH_MAP["x86_64"]="amd64"
-OS_TO_GARM_ARCH_MAP["armv7l"]="arm64"
-OS_TO_GARM_ARCH_MAP["mips64"]="arm64"
-OS_TO_GARM_ARCH_MAP["mips64el"]="arm64"
-OS_TO_GARM_ARCH_MAP["mips"]="arm"
-OS_TO_GARM_ARCH_MAP["mipsel"]="arm"
-
-declare -A GARM_TO_GH_ARCH_MAP
-GARM_TO_GH_ARCH_MAP["amd64"]="x64"
-GARM_TO_GH_ARCH_MAP["arm"]="arm"
-GARM_TO_GH_ARCH_MAP["arm64"]="arm64"
-
-declare -A STATUS_MAP
-STATUS_MAP["ACTIVE"]="running"
-STATUS_MAP["SHUTOFF"]="stopped"
-STATUS_MAP["BUILD"]="pending_create"
-STATUS_MAP["ERROR"]="error"
-STATUS_MAP["DELETING"]="pending_delete"
-
-function checkValNotNull() {
- if [ -z "$1" -o "$1" == "null" ];then
- echo "failed to fetch value $2"
- return 1
- fi
- return 0
-}
-
-function getOSImageDetails() {
- IMAGE_ID=$(echo "$INPUT" | jq -r -c '.image')
- OS_IMAGE=$(openstack image show "$IMAGE_ID" -f json)
- echo "$OS_IMAGE"
-}
-
-function getOpenStackNetworkID() {
- if [ -z "$OPENSTACK_PRIVATE_NETWORK" ]
- then
- echo "no network specified in config"
- return 1
- fi
-
- NET_ID=$(openstack network show ${OPENSTACK_PRIVATE_NETWORK} -f value -c id)
- if [ -z "$NET_ID" ];then
- echo "failed to find network $OPENSTACK_PRIVATE_NETWORK"
- fi
- echo ${NET_ID}
-}
-
-function getVolumeSizeFromFlavor() {
- local flavor="$1"
-
- FLAVOR_DETAILS=$(openstack flavor show "${flavor}" -f json)
- DISK_SIZE=$(echo "$FLAVOR_DETAILS" | jq -c -r '.disk')
- if [ -z "$DISK_SIZE" ];then
- echo "failed to get disk size from flavor"
- return 1
- fi
-
- echo ${DISK_SIZE}
-}
-
-function waitForVolume() {
- local volumeName=$1
- set +e
- status=$(openstack volume show "${volumeName}" -f json | jq -r -c '.status')
- if [ $? -ne 0 ];then
- CODE=$?
- set -e
- return $CODE
- fi
- set -e
- while [ "${status}" != "available" -a "${status}" != "error" ];do
- status=$(openstack volume show "${volumeName}" -f json | jq -r -c '.status')
- done
-}
-
-function createVolumeFromImage() {
- local image="$1"
- local disk_size="$2"
- local instance_name="$3"
- if [ -z ${image} -o -z ${disk_size} -o -z "${instance_name}" ];then
- echo "missing image, disk size or instance name in function call"
- return 1
- fi
- # Instance names contain a UUID. It should be safe to create a volume with the same name and
- # expect it to be unique.
- set +e
- VOLUME_INFO=$(openstack volume create -f json --image "${image}" --size "${disk_size}" "${instance_name}")
- if [ $? -ne 0 ]; then
- CODE=$?
- openstack volume delete "${instance_name}" || true
- set -e
- return $CODE
- fi
- waitForVolume "${instance_name}"
- echo "${VOLUME_INFO}"
-}
-
-function requestedArch() {
- ARCH=$(echo "$INPUT" | jq -c -r '.arch')
- checkValNotNull "${ARCH}" "arch" || return $?
- echo "${ARCH}"
-}
-
-function downloadURL() {
- [ -z "$1" -o -z "$2" ] && return 1
- GH_ARCH="${GARM_TO_GH_ARCH_MAP[$2]}"
- URL=$(echo "$INPUT" | jq -c -r --arg OS "$1" --arg ARCH "$GH_ARCH" '(.tools[] | select( .os == $OS and .architecture == $ARCH)).download_url')
- checkValNotNull "${URL}" "download URL" || return $?
- echo "${URL}"
-}
-
-function tempDownloadToken() {
- [ -z "$1" -o -z "$2" ] && return 1
- GH_ARCH="${GARM_TO_GH_ARCH_MAP[$2]}"
- TOKEN=$(echo "$INPUT" | jq -c -r --arg OS "$1" --arg ARCH "$GH_ARCH" '(.tools[] | select( .os == $OS and .architecture == $ARCH)).temp_download_token')
- echo "${TOKEN}"
-}
-
-function runnerTokenURL() {
- METADATA_URL=$(echo "$INPUT" | jq -c -r '."metadata-url"')
- checkValNotNull "${METADATA_URL}" "metadata-url" || return $?
- echo "${METADATA_URL}/runner-registration-token/"
-}
-
-function downloadFilename() {
- [ -z "$1" -o -z "$2" ] && return 1
- GH_ARCH="${GARM_TO_GH_ARCH_MAP[$2]}"
- FN=$(echo "$INPUT" | jq -c -r --arg OS "$1" --arg ARCH "$GH_ARCH" '(.tools[] | select( .os == $OS and .architecture == $ARCH)).filename')
- checkValNotNull "${FN}" "download filename" || return $?
- echo "${FN}"
-}
-
-function poolID() {
- POOL_ID=$(echo "$INPUT" | jq -c -r '.pool_id')
- checkValNotNull "${POOL_ID}" "pool_id" || return $?
- echo "${POOL_ID}"
-}
-
-function flavor() {
- FLAVOR=$(echo "$INPUT" | jq -c -r '.flavor')
- checkValNotNull "${FLAVOR}" "flavor" || return $?
- echo "${FLAVOR}"
-}
-
-function image() {
- IMG=$(echo "$INPUT" | jq -c -r '.image')
- checkValNotNull "${IMG}" "image" || return $?
- echo "${IMG}"
-}
-
-function repoURL() {
- REPO=$(echo "$INPUT" | jq -c -r '.repo_url')
- checkValNotNull "${REPO}" "repo_url" || return $?
- echo "${REPO}"
-}
-
-function callbackURL() {
- CB_URL=$(echo "$INPUT" | jq -c -r '."callback-url"')
- checkValNotNull "${CB_URL}" "callback-url" || return $?
- echo "${CB_URL}"
-}
-
-function callbackToken() {
- CB_TK=$(echo "$INPUT" | jq -c -r '."instance-token"')
- checkValNotNull "${CB_TK}" "instance-token" || return $?
- echo "${CB_TK}"
-}
-
-function instanceName() {
- NAME=$(echo "$INPUT" | jq -c -r '.name')
- checkValNotNull "${NAME}" "name" || return $?
- echo "${NAME}"
-}
-
-function labels() {
- LBL=$(echo "$INPUT" | jq -c -r '.labels | join(",")')
- checkValNotNull "${LBL}" "labels" || return $?
- echo "${LBL}"
-}
-
-function getCloudConfig() {
- IMAGE_DETAILS=$(getOSImageDetails)
-
- OS_TYPE=$(echo "${IMAGE_DETAILS}" | jq -c -r '.properties.os_type')
- checkValNotNull "${OS_TYPE}" "os_type" || return $?
-
- ARCH=$(requestedArch)
- DW_URL=$(downloadURL "${OS_TYPE}" "${ARCH}")
- DW_TOKEN=$(tempDownloadToken "${OS_TYPE}" "${ARCH}")
- DW_FILENAME=$(downloadFilename "${OS_TYPE}" "${ARCH}")
- LABELS=$(labels)
-
- TMP_SCRIPT=$(mktemp)
- TMP_CC=$(mktemp)
-
- INSTALL_TPL=$(cat "${TEMPLATES}/install_runner.tpl")
- CC_TPL=$(cat "${TEMPLATES}/userdata.tpl")
- echo "$INSTALL_TPL" | sed -e "s|GARM_CALLBACK_URL|$(callbackURL)|g" \
- -e "s|GARM_CALLBACK_TOKEN|$(callbackToken)|g" \
- -e "s|GH_DOWNLOAD_URL|${DW_URL}|g" \
- -e "s|GH_FILENAME|${DW_FILENAME}|g" \
- -e "s|GH_TARGET_URL|$(repoURL)|g" \
- -e "s|GARM_METADATA_URL|$(runnerTokenURL)|g" \
- -e "s|GH_RUNNER_NAME|$(instanceName)|g" \
- -e "s|GH_TEMP_DOWNLOAD_TOKEN|${DW_TOKEN}|g" \
- -e "s|GH_RUNNER_LABELS|${LABELS}|g" > ${TMP_SCRIPT}
-
- AS_B64=$(base64 -w0 ${TMP_SCRIPT})
- echo "${CC_TPL}" | sed "s|RUNNER_INSTALL_B64|${AS_B64}|g" > ${TMP_CC}
- echo "${TMP_CC}"
-}
-
-function waitForServer() {
- local srv_id="$1"
-
- srv_info=$(openstack server show -f json "${srv_id}")
- [ $? -ne 0 ] && return $?
-
- status=$(echo "${srv_info}" | jq -r -c '.status')
-
- while [ "${status}" != "ERROR" -a "${status}" != "ACTIVE" ];do
- sleep 0.5
- srv_info=$(openstack server show -f json "${srv_id}")
- [ $? -ne 0 ] && return $?
- status=$(echo "${srv_info}" | jq -r -c '.status')
- done
- echo "${srv_info}"
-}
-
-function CreateInstance() {
- if [ -z "$INPUT" ];then
- echo "expected build params in stdin"
- exit 1
- fi
-
- CC_FILE=$(getCloudConfig)
- FLAVOR=$(flavor)
- IMAGE=$(image)
- INSTANCE_NAME=$(instanceName)
- NET=$(getOpenStackNetworkID)
- IMAGE_DETAILS=$(getOSImageDetails)
-
- OS_TYPE=$(echo "${IMAGE_DETAILS}" | jq -c -r '.properties.os_type')
- checkValNotNull "${OS_TYPE}" "os_type" || return $?
- DISTRO=$(echo "${IMAGE_DETAILS}" | jq -c -r '.properties.os_distro')
- checkValNotNull "${DISTRO}" "os_distro" || return $?
- VERSION=$(echo "${IMAGE_DETAILS}" | jq -c -r '.properties.os_version')
- checkValNotNull "${VERSION}" "os_version" || return $?
- ARCH=$(echo "${IMAGE_DETAILS}" | jq -c -r '.properties.architecture')
- checkValNotNull "${ARCH}" "architecture" || return $?
- GH_ARCH=${OS_TO_GH_ARCH_MAP[${ARCH}]}
-
- if [ -z "${GH_ARCH}" ];then
- GH_ARCH=${ARCH}
- fi
-
- SOURCE_ARGS=""
-
- if [ "${BOOT_FROM_VOLUME}" -eq 1 ];then
- VOL_SIZE=$(getVolumeSizeFromFlavor "${FLAVOR}")
- VOL_INFO=$(createVolumeFromImage "${IMAGE}" "${VOL_SIZE}" "${INSTANCE_NAME}")
- if [ $? -ne 0 ];then
- openstack volume delete "${INSTANCE_NAME}" || true
- fi
- SOURCE_ARGS="--volume ${INSTANCE_NAME}"
- else
- SOURCE_ARGS="--image ${IMAGE}"
- fi
-
- set +e
-
- TAGS="--tag garm-controller-id=${GARM_CONTROLLER_ID} --tag garm-pool-id=${GARM_POOL_ID}"
- PROPERTIES="--property os_type=${OS_TYPE} --property os_name=${DISTRO} --property os_version=${VERSION} --property os_arch=${GH_ARCH} --property pool_id=${GARM_POOL_ID}"
- SRV_DETAILS=$(openstack server create --os-compute-api-version 2.52 ${SOURCE_ARGS} ${TAGS} ${PROPERTIES} --flavor "${FLAVOR}" --user-data="${CC_FILE}" --network="${NET}" "${INSTANCE_NAME}")
- if [ $? -ne 0 ];then
- openstack volume delete "${INSTANCE_NAME}" || true
- exit 1
- fi
- SRV_DETAILS=$(waitForServer "${INSTANCE_NAME}")
- if [ $? -ne 0 ];then
- CODE=$?
- # cleanup
- rm -f "${CC_FILE}" || true
- openstack server delete "${INSTANCE_NAME}" || true
- openstack volume delete "${INSTANCE_NAME}" || true
- set -e
- FAULT=$(echo "${SRV_DETAILS}"| jq -rc '.fault')
- echo "Failed to create server: ${FAULT}"
- exit $CODE
- fi
- set -e
- rm -f "${CC_FILE}" || true
-
- SRV_ID=$(echo "${SRV_DETAILS}" | jq -r -c '.id')
- STATUS=$(echo "${SRV_DETAILS}" | jq -r -c '.status')
- FAULT=$(echo "${SRV_DETAILS}" | jq -r -c '.fault')
- FAULT_VAL=""
- if [ ! -z "${FAULT}" -a "${FAULT}" != "null" ];then
- FAULT_VAL=$(echo "${FAULT}" | base64 -w0)
- fi
-
- jq -rnc \
- --arg PROVIDER_ID ${SRV_ID} \
- --arg NAME "${INSTANCE_NAME}" \
- --arg OS_TYPE "${OS_TYPE}" \
- --arg OS_NAME "${DISTRO}" \
- --arg OS_VERSION "${VERSION}" \
- --arg ARCH "${GH_ARCH}" \
- --arg STATUS "${STATUS_MAP[${STATUS}]}" \
- --arg POOL_ID "${GARM_POOL_ID}" \
- --arg FAULT "${FAULT_VAL}" \
- '{"provider_id": $PROVIDER_ID, "name": $NAME, "os_type": $OS_TYPE, "os_name": $OS_NAME, "os_version": $OS_VERSION, "os_arch": $ARCH, "status": $STATUS, "pool_id": $POOL_ID, "provider_fault": $FAULT}'
-}
-
-function DeleteInstance() {
- local instance_id="${GARM_INSTANCE_ID}"
- if [ -z "${instance_id}" ];then
- echo "missing instance ID in env"
- return 1
- fi
-
- set +e
- instance_info=$(openstack server show "${instance_id}" -f json 2>&1)
- if [ $? -ne 0 ];then
- CODE=$?
- set -e
- if [ "${instance_info}" == "No server with a name or ID of*" ];then
- return 0
- fi
- return $CODE
- fi
- set -e
- VOLUMES=$(echo "${instance_info}" | jq -r -c '.volumes_attached[] | .id')
-
- openstack server delete "${instance_id}"
- for vol in "$VOLUMES";do
- waitForVolume "${vol}"
- openstack volume delete $vol || true
- done
-}
-
-function StartInstance() {
- local instance_id="${GARM_INSTANCE_ID}"
- if [ -z "${instance_id}" ];then
- echo "missing instance ID in env"
- return 1
- fi
-
- openstack server start "${instance_id}"
-}
-
-function StopServer() {
- local instance_id="${GARM_INSTANCE_ID}"
- if [ -z "${instance_id}" ];then
- echo "missing instance ID in env"
- return 1
- fi
-
- openstack server stop "${instance_id}"
-}
-
-function ListInstances() {
- INSTANCES=$(openstack server list --os-compute-api-version 2.52 --tags garm-pool-id=${GARM_POOL_ID} --long -f json)
- echo ${INSTANCES} | jq -r '[
- .[] | .Properties * {
- provider_id: .ID,
- name: .Name,
- status: {"ACTIVE": "running", "SHUTOFF": "stopped", "BUILD": "pending_create", "ERROR": "error", "DELETING": "pending_delete"}[.Status]
- }]'
-}
-
-function GetInstance() {
- INSTANCE=$(openstack server show --os-compute-api-version 2.52 ${GARM_INSTANCE_ID} -f json)
- echo ${INSTANCES} | jq -r '.properties * {
- provider_id: .id,
- name: .name,
- status: {"ACTIVE": "running", "SHUTOFF": "stopped", "BUILD": "pending_create", "ERROR": "error", "DELETING": "pending_delete"}[.status]
- }'
-}
-
-case "$GARM_COMMAND" in
- "CreateInstance")
- CreateInstance
- ;;
- "DeleteInstance")
- DeleteInstance
- ;;
- "GetInstance")
- GetInstance
- ;;
- "ListInstances")
- ListInstances
- ;;
- "StartInstance")
- StartInstance
- ;;
- "StopInstance")
- StopServer
- ;;
- "RemoveAllInstances")
- echo "RemoveAllInstances not implemented"
- exit 1
- ;;
- *)
- echo "Invalid GARM provider command: \"$GARM_COMMAND\""
- exit 1
- ;;
-esac
-
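
As the deleted script shows, an external provider is any executable: GARM selects the operation through the GARM_COMMAND environment variable, passes IDs through GARM_POOL_ID/GARM_INSTANCE_ID, and feeds bootstrap parameters as JSON on stdin. A minimal Go skeleton of the same dispatch, with the struct fields taken from the jq lookups above and the actual provisioning logic elided:

package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// bootstrapParams mirrors the fields the shell script extracts with jq.
type bootstrapParams struct {
	Name        string   `json:"name"`
	PoolID      string   `json:"pool_id"`
	Flavor      string   `json:"flavor"`
	Image       string   `json:"image"`
	Arch        string   `json:"arch"`
	Labels      []string `json:"labels"`
	CallbackURL string   `json:"callback-url"`
}

func main() {
	switch cmd := os.Getenv("GARM_COMMAND"); cmd {
	case "CreateInstance":
		var p bootstrapParams
		if err := json.NewDecoder(os.Stdin).Decode(&p); err != nil {
			fmt.Fprintln(os.Stderr, "expected build params in stdin:", err)
			os.Exit(1)
		}
		// ... boot the instance, then print its JSON description to stdout ...
	case "DeleteInstance", "GetInstance", "StartInstance", "StopInstance":
		_ = os.Getenv("GARM_INSTANCE_ID")
		// ... handle per-instance operations ...
	case "ListInstances":
		_ = os.Getenv("GARM_POOL_ID")
		// ... list instances tagged with the pool ID ...
	default:
		fmt.Fprintf(os.Stderr, "Invalid GARM provider command: %q\n", cmd)
		os.Exit(1)
	}
}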
diff --git a/contrib/providers.d/openstack/keystonerc b/contrib/providers.d/openstack/keystonerc
deleted file mode 100644
index 1b702dd7..00000000
--- a/contrib/providers.d/openstack/keystonerc
+++ /dev/null
@@ -1,16 +0,0 @@
-# OpenStack client config
-export OS_REGION_NAME=RegionOne
-export OS_AUTH_VERSION=3
-export OS_AUTH_URL=http://10.0.8.36:5000/v3
-export OS_PROJECT_DOMAIN_NAME=admin_domain
-export OS_USERNAME=admin
-export OS_AUTH_TYPE=password
-export OS_USER_DOMAIN_NAME=admin_domain
-export OS_PROJECT_NAME=admin
-export OS_PASSWORD=Iegeehahth4suSie
-export OS_IDENTITY_API_VERSION=3
-
-
-# GARM config
-export OPENSTACK_PRIVATE_NETWORK="int_net"
-export BOOT_FROM_VOLUME=1
diff --git a/database/common/common.go b/database/common/common.go
deleted file mode 100644
index dcc05882..00000000
--- a/database/common/common.go
+++ /dev/null
@@ -1,142 +0,0 @@
-// Copyright 2022 Cloudbase Solutions SRL
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package common
-
-import (
- "context"
-
- "github.com/cloudbase/garm/params"
-)
-
-type RepoStore interface {
- CreateRepository(ctx context.Context, owner, name, credentialsName, webhookSecret string) (params.Repository, error)
- GetRepository(ctx context.Context, owner, name string) (params.Repository, error)
- GetRepositoryByID(ctx context.Context, repoID string) (params.Repository, error)
- ListRepositories(ctx context.Context) ([]params.Repository, error)
- DeleteRepository(ctx context.Context, repoID string) error
- UpdateRepository(ctx context.Context, repoID string, param params.UpdateEntityParams) (params.Repository, error)
-
- CreateRepositoryPool(ctx context.Context, repoId string, param params.CreatePoolParams) (params.Pool, error)
-
- GetRepositoryPool(ctx context.Context, repoID, poolID string) (params.Pool, error)
- DeleteRepositoryPool(ctx context.Context, repoID, poolID string) error
- UpdateRepositoryPool(ctx context.Context, repoID, poolID string, param params.UpdatePoolParams) (params.Pool, error)
- FindRepositoryPoolByTags(ctx context.Context, repoID string, tags []string) (params.Pool, error)
-
- ListRepoPools(ctx context.Context, repoID string) ([]params.Pool, error)
- ListRepoInstances(ctx context.Context, repoID string) ([]params.Instance, error)
-}
-
-type OrgStore interface {
- CreateOrganization(ctx context.Context, name, credentialsName, webhookSecret string) (params.Organization, error)
- GetOrganization(ctx context.Context, name string) (params.Organization, error)
- GetOrganizationByID(ctx context.Context, orgID string) (params.Organization, error)
- ListOrganizations(ctx context.Context) ([]params.Organization, error)
- DeleteOrganization(ctx context.Context, orgID string) error
- UpdateOrganization(ctx context.Context, orgID string, param params.UpdateEntityParams) (params.Organization, error)
-
- CreateOrganizationPool(ctx context.Context, orgId string, param params.CreatePoolParams) (params.Pool, error)
- GetOrganizationPool(ctx context.Context, orgID, poolID string) (params.Pool, error)
- DeleteOrganizationPool(ctx context.Context, orgID, poolID string) error
- UpdateOrganizationPool(ctx context.Context, orgID, poolID string, param params.UpdatePoolParams) (params.Pool, error)
-
- FindOrganizationPoolByTags(ctx context.Context, orgID string, tags []string) (params.Pool, error)
- ListOrgPools(ctx context.Context, orgID string) ([]params.Pool, error)
- ListOrgInstances(ctx context.Context, orgID string) ([]params.Instance, error)
-}
-
-type EnterpriseStore interface {
- CreateEnterprise(ctx context.Context, name, credentialsName, webhookSecret string) (params.Enterprise, error)
- GetEnterprise(ctx context.Context, name string) (params.Enterprise, error)
- GetEnterpriseByID(ctx context.Context, enterpriseID string) (params.Enterprise, error)
- ListEnterprises(ctx context.Context) ([]params.Enterprise, error)
- DeleteEnterprise(ctx context.Context, enterpriseID string) error
- UpdateEnterprise(ctx context.Context, enterpriseID string, param params.UpdateEntityParams) (params.Enterprise, error)
-
- CreateEnterprisePool(ctx context.Context, enterpriseID string, param params.CreatePoolParams) (params.Pool, error)
- GetEnterprisePool(ctx context.Context, enterpriseID, poolID string) (params.Pool, error)
- DeleteEnterprisePool(ctx context.Context, enterpriseID, poolID string) error
- UpdateEnterprisePool(ctx context.Context, enterpriseID, poolID string, param params.UpdatePoolParams) (params.Pool, error)
-
- FindEnterprisePoolByTags(ctx context.Context, enterpriseID string, tags []string) (params.Pool, error)
- ListEnterprisePools(ctx context.Context, enterpriseID string) ([]params.Pool, error)
- ListEnterpriseInstances(ctx context.Context, enterpriseID string) ([]params.Instance, error)
-}
-
-type PoolStore interface {
- // Probably a bad idea without some kind of filter or at least pagination
- // TODO: add filter/pagination
- ListAllPools(ctx context.Context) ([]params.Pool, error)
- GetPoolByID(ctx context.Context, poolID string) (params.Pool, error)
- DeletePoolByID(ctx context.Context, poolID string) error
-
- ListPoolInstances(ctx context.Context, poolID string) ([]params.Instance, error)
-
- PoolInstanceCount(ctx context.Context, poolID string) (int64, error)
- GetPoolInstanceByName(ctx context.Context, poolID string, instanceName string) (params.Instance, error)
- FindPoolsMatchingAllTags(ctx context.Context, entityType params.PoolType, entityID string, tags []string) ([]params.Pool, error)
-}
-
-type UserStore interface {
- GetUser(ctx context.Context, user string) (params.User, error)
- GetUserByID(ctx context.Context, userID string) (params.User, error)
-
- CreateUser(ctx context.Context, user params.NewUserParams) (params.User, error)
- UpdateUser(ctx context.Context, user string, param params.UpdateUserParams) (params.User, error)
- HasAdminUser(ctx context.Context) bool
-}
-
-type InstanceStore interface {
- CreateInstance(ctx context.Context, poolID string, param params.CreateInstanceParams) (params.Instance, error)
- DeleteInstance(ctx context.Context, poolID string, instanceName string) error
- UpdateInstance(ctx context.Context, instanceID string, param params.UpdateInstanceParams) (params.Instance, error)
-
- // Probably a bad idea without some kind of filter or at least pagination
- // TODO: add filter/pagination
- ListAllInstances(ctx context.Context) ([]params.Instance, error)
-
- GetInstanceByName(ctx context.Context, instanceName string) (params.Instance, error)
- AddInstanceEvent(ctx context.Context, instanceID string, event params.EventType, eventLevel params.EventLevel, eventMessage string) error
- ListInstanceEvents(ctx context.Context, instanceID string, eventType params.EventType, eventLevel params.EventLevel) ([]params.StatusMessage, error)
-}
-
-type JobsStore interface {
- CreateOrUpdateJob(ctx context.Context, job params.Job) (params.Job, error)
- ListEntityJobsByStatus(ctx context.Context, entityType params.PoolType, entityID string, status params.JobStatus) ([]params.Job, error)
- ListJobsByStatus(ctx context.Context, status params.JobStatus) ([]params.Job, error)
- ListAllJobs(ctx context.Context) ([]params.Job, error)
-
- GetJobByID(ctx context.Context, jobID int64) (params.Job, error)
- DeleteJob(ctx context.Context, jobID int64) error
- UnlockJob(ctx context.Context, jobID int64, entityID string) error
- LockJob(ctx context.Context, jobID int64, entityID string) error
- BreakLockJobIsQueued(ctx context.Context, jobID int64) error
-
- DeleteCompletedJobs(ctx context.Context) error
-}
-
-//go:generate mockery --name=Store
-type Store interface {
- RepoStore
- OrgStore
- EnterpriseStore
- PoolStore
- UserStore
- InstanceStore
- JobsStore
-
- ControllerInfo() (params.ControllerInfo, error)
- InitController() (params.ControllerInfo, error)
-}
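
Before its removal, this file composed the per-entity stores into the single Store interface that the rest of the code (and the generated mock below) depends on. Consumers only ever needed the aggregate; a sketch:

package runner

import (
	"context"

	dbCommon "github.com/cloudbase/garm/database/common"
)

// poolCounter is a sketch of a consumer that depends only on the composed
// Store interface, which is what lets tests substitute the generated mock.
type poolCounter struct {
	store dbCommon.Store
}

func (p *poolCounter) count(ctx context.Context) (int, error) {
	pools, err := p.store.ListAllPools(ctx)
	if err != nil {
		return 0, err
	}
	return len(pools), nil
}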
diff --git a/database/common/errors.go b/database/common/errors.go
new file mode 100644
index 00000000..5e6a5087
--- /dev/null
+++ b/database/common/errors.go
@@ -0,0 +1,29 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package common
+
+import "fmt"
+
+var (
+ ErrProducerClosed = fmt.Errorf("producer is closed")
+ ErrProducerTimeoutErr = fmt.Errorf("producer timeout error")
+ ErrProducerAlreadyRegistered = fmt.Errorf("producer already registered")
+ ErrConsumerAlreadyRegistered = fmt.Errorf("consumer already registered")
+ ErrWatcherAlreadyStarted = fmt.Errorf("watcher already started")
+ ErrWatcherNotInitialized = fmt.Errorf("watcher not initialized")
+ ErrInvalidOperationType = fmt.Errorf("invalid operation")
+ ErrInvalidEntityType = fmt.Errorf("invalid entity type")
+ ErrNoFiltersProvided = fmt.Errorf("no filters provided")
+)
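
These are sentinel errors, so callers are meant to match them with errors.Is, which keeps working even after wrapping. A sketch (the surrounding function is illustrative, not part of this change):

package main

import (
	"errors"
	"fmt"

	dbCommon "github.com/cloudbase/garm/database/common"
)

// handlePublishError shows the intended matching style for the sentinels.
func handlePublishError(err error) error {
	switch {
	case err == nil:
		return nil
	case errors.Is(err, dbCommon.ErrProducerClosed):
		// The producer went away; nothing left to do.
		return nil
	case errors.Is(err, dbCommon.ErrProducerTimeoutErr):
		return fmt.Errorf("publish timed out, retry later: %w", err)
	default:
		return err
	}
}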
diff --git a/database/common/mocks/Store.go b/database/common/mocks/Store.go
index da21f1a7..024a1271 100644
--- a/database/common/mocks/Store.go
+++ b/database/common/mocks/Store.go
@@ -1,4 +1,4 @@
-// Code generated by mockery v0.0.0-dev. DO NOT EDIT.
+// Code generated by mockery. DO NOT EDIT.
package mocks
@@ -14,13 +14,25 @@ type Store struct {
mock.Mock
}
-// AddInstanceEvent provides a mock function with given fields: ctx, instanceID, event, eventLevel, eventMessage
-func (_m *Store) AddInstanceEvent(ctx context.Context, instanceID string, event params.EventType, eventLevel params.EventLevel, eventMessage string) error {
- ret := _m.Called(ctx, instanceID, event, eventLevel, eventMessage)
+type Store_Expecter struct {
+ mock *mock.Mock
+}
+
+func (_m *Store) EXPECT() *Store_Expecter {
+ return &Store_Expecter{mock: &_m.Mock}
+}
+
+// AddEntityEvent provides a mock function with given fields: ctx, entity, event, eventLevel, statusMessage, maxEvents
+func (_m *Store) AddEntityEvent(ctx context.Context, entity params.ForgeEntity, event params.EventType, eventLevel params.EventLevel, statusMessage string, maxEvents int) error {
+ ret := _m.Called(ctx, entity, event, eventLevel, statusMessage, maxEvents)
+
+ if len(ret) == 0 {
+ panic("no return value specified for AddEntityEvent")
+ }
var r0 error
- if rf, ok := ret.Get(0).(func(context.Context, string, params.EventType, params.EventLevel, string) error); ok {
- r0 = rf(ctx, instanceID, event, eventLevel, eventMessage)
+ if rf, ok := ret.Get(0).(func(context.Context, params.ForgeEntity, params.EventType, params.EventLevel, string, int) error); ok {
+ r0 = rf(ctx, entity, event, eventLevel, statusMessage, maxEvents)
} else {
r0 = ret.Error(0)
}
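
The regenerated mock adds the expecter pattern: Store_Expecter returns typed helpers, so tests no longer spell method names as strings, and the generated methods now panic when a call has no Return configured, surfacing incomplete expectations immediately. A sketch of the new style in a test (argument values are placeholders):

package runner_test

import (
	"context"
	"testing"

	"github.com/stretchr/testify/mock"

	"github.com/cloudbase/garm/database/common/mocks"
	"github.com/cloudbase/garm/params"
)

func TestRecordsEntityEvent(t *testing.T) {
	store := &mocks.Store{}
	// Typed expectation: a misspelled method name or wrong arity is now a
	// compile-time error rather than a runtime mock-resolution failure.
	store.EXPECT().
		AddEntityEvent(mock.Anything, mock.Anything, mock.Anything, mock.Anything, "creating pool", 30).
		Return(nil)

	var entity params.ForgeEntity
	var event params.EventType
	var level params.EventLevel
	if err := store.AddEntityEvent(context.Background(), entity, event, level, "creating pool", 30); err != nil {
		t.Fatal(err)
	}
	store.AssertExpectations(t)
}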
@@ -28,10 +40,97 @@ func (_m *Store) AddInstanceEvent(ctx context.Context, instanceID string, event
return r0
}
+// Store_AddEntityEvent_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AddEntityEvent'
+type Store_AddEntityEvent_Call struct {
+ *mock.Call
+}
+
+// AddEntityEvent is a helper method to define mock.On call
+// - ctx context.Context
+// - entity params.ForgeEntity
+// - event params.EventType
+// - eventLevel params.EventLevel
+// - statusMessage string
+// - maxEvents int
+func (_e *Store_Expecter) AddEntityEvent(ctx interface{}, entity interface{}, event interface{}, eventLevel interface{}, statusMessage interface{}, maxEvents interface{}) *Store_AddEntityEvent_Call {
+ return &Store_AddEntityEvent_Call{Call: _e.mock.On("AddEntityEvent", ctx, entity, event, eventLevel, statusMessage, maxEvents)}
+}
+
+func (_c *Store_AddEntityEvent_Call) Run(run func(ctx context.Context, entity params.ForgeEntity, event params.EventType, eventLevel params.EventLevel, statusMessage string, maxEvents int)) *Store_AddEntityEvent_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(params.ForgeEntity), args[2].(params.EventType), args[3].(params.EventLevel), args[4].(string), args[5].(int))
+ })
+ return _c
+}
+
+func (_c *Store_AddEntityEvent_Call) Return(_a0 error) *Store_AddEntityEvent_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *Store_AddEntityEvent_Call) RunAndReturn(run func(context.Context, params.ForgeEntity, params.EventType, params.EventLevel, string, int) error) *Store_AddEntityEvent_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// AddInstanceEvent provides a mock function with given fields: ctx, instanceNameOrID, event, eventLevel, eventMessage
+func (_m *Store) AddInstanceEvent(ctx context.Context, instanceNameOrID string, event params.EventType, eventLevel params.EventLevel, eventMessage string) error {
+ ret := _m.Called(ctx, instanceNameOrID, event, eventLevel, eventMessage)
+
+ if len(ret) == 0 {
+ panic("no return value specified for AddInstanceEvent")
+ }
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func(context.Context, string, params.EventType, params.EventLevel, string) error); ok {
+ r0 = rf(ctx, instanceNameOrID, event, eventLevel, eventMessage)
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+// Store_AddInstanceEvent_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AddInstanceEvent'
+type Store_AddInstanceEvent_Call struct {
+ *mock.Call
+}
+
+// AddInstanceEvent is a helper method to define mock.On call
+// - ctx context.Context
+// - instanceNameOrID string
+// - event params.EventType
+// - eventLevel params.EventLevel
+// - eventMessage string
+func (_e *Store_Expecter) AddInstanceEvent(ctx interface{}, instanceNameOrID interface{}, event interface{}, eventLevel interface{}, eventMessage interface{}) *Store_AddInstanceEvent_Call {
+ return &Store_AddInstanceEvent_Call{Call: _e.mock.On("AddInstanceEvent", ctx, instanceNameOrID, event, eventLevel, eventMessage)}
+}
+
+func (_c *Store_AddInstanceEvent_Call) Run(run func(ctx context.Context, instanceNameOrID string, event params.EventType, eventLevel params.EventLevel, eventMessage string)) *Store_AddInstanceEvent_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string), args[2].(params.EventType), args[3].(params.EventLevel), args[4].(string))
+ })
+ return _c
+}
+
+func (_c *Store_AddInstanceEvent_Call) Return(_a0 error) *Store_AddInstanceEvent_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *Store_AddInstanceEvent_Call) RunAndReturn(run func(context.Context, string, params.EventType, params.EventLevel, string) error) *Store_AddInstanceEvent_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// BreakLockJobIsQueued provides a mock function with given fields: ctx, jobID
func (_m *Store) BreakLockJobIsQueued(ctx context.Context, jobID int64) error {
ret := _m.Called(ctx, jobID)
+ if len(ret) == 0 {
+ panic("no return value specified for BreakLockJobIsQueued")
+ }
+
var r0 error
if rf, ok := ret.Get(0).(func(context.Context, int64) error); ok {
r0 = rf(ctx, jobID)
@@ -42,10 +141,43 @@ func (_m *Store) BreakLockJobIsQueued(ctx context.Context, jobID int64) error {
return r0
}
-// ControllerInfo provides a mock function with given fields:
+// Store_BreakLockJobIsQueued_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BreakLockJobIsQueued'
+type Store_BreakLockJobIsQueued_Call struct {
+ *mock.Call
+}
+
+// BreakLockJobIsQueued is a helper method to define mock.On call
+// - ctx context.Context
+// - jobID int64
+func (_e *Store_Expecter) BreakLockJobIsQueued(ctx interface{}, jobID interface{}) *Store_BreakLockJobIsQueued_Call {
+ return &Store_BreakLockJobIsQueued_Call{Call: _e.mock.On("BreakLockJobIsQueued", ctx, jobID)}
+}
+
+func (_c *Store_BreakLockJobIsQueued_Call) Run(run func(ctx context.Context, jobID int64)) *Store_BreakLockJobIsQueued_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(int64))
+ })
+ return _c
+}
+
+func (_c *Store_BreakLockJobIsQueued_Call) Return(_a0 error) *Store_BreakLockJobIsQueued_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *Store_BreakLockJobIsQueued_Call) RunAndReturn(run func(context.Context, int64) error) *Store_BreakLockJobIsQueued_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// ControllerInfo provides a mock function with no fields
func (_m *Store) ControllerInfo() (params.ControllerInfo, error) {
ret := _m.Called()
+ if len(ret) == 0 {
+ panic("no return value specified for ControllerInfo")
+ }
+
var r0 params.ControllerInfo
var r1 error
if rf, ok := ret.Get(0).(func() (params.ControllerInfo, error)); ok {
@@ -66,23 +198,54 @@ func (_m *Store) ControllerInfo() (params.ControllerInfo, error) {
return r0, r1
}
-// CreateEnterprise provides a mock function with given fields: ctx, name, credentialsName, webhookSecret
-func (_m *Store) CreateEnterprise(ctx context.Context, name string, credentialsName string, webhookSecret string) (params.Enterprise, error) {
- ret := _m.Called(ctx, name, credentialsName, webhookSecret)
+// Store_ControllerInfo_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ControllerInfo'
+type Store_ControllerInfo_Call struct {
+ *mock.Call
+}
+
+// ControllerInfo is a helper method to define mock.On call
+func (_e *Store_Expecter) ControllerInfo() *Store_ControllerInfo_Call {
+ return &Store_ControllerInfo_Call{Call: _e.mock.On("ControllerInfo")}
+}
+
+func (_c *Store_ControllerInfo_Call) Run(run func()) *Store_ControllerInfo_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run()
+ })
+ return _c
+}
+
+func (_c *Store_ControllerInfo_Call) Return(_a0 params.ControllerInfo, _a1 error) *Store_ControllerInfo_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_ControllerInfo_Call) RunAndReturn(run func() (params.ControllerInfo, error)) *Store_ControllerInfo_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// CreateEnterprise provides a mock function with given fields: ctx, name, credentialsName, webhookSecret, poolBalancerType
+func (_m *Store) CreateEnterprise(ctx context.Context, name string, credentialsName params.ForgeCredentials, webhookSecret string, poolBalancerType params.PoolBalancerType) (params.Enterprise, error) {
+ ret := _m.Called(ctx, name, credentialsName, webhookSecret, poolBalancerType)
+
+ if len(ret) == 0 {
+ panic("no return value specified for CreateEnterprise")
+ }
var r0 params.Enterprise
var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, string, string, string) (params.Enterprise, error)); ok {
- return rf(ctx, name, credentialsName, webhookSecret)
+ if rf, ok := ret.Get(0).(func(context.Context, string, params.ForgeCredentials, string, params.PoolBalancerType) (params.Enterprise, error)); ok {
+ return rf(ctx, name, credentialsName, webhookSecret, poolBalancerType)
}
- if rf, ok := ret.Get(0).(func(context.Context, string, string, string) params.Enterprise); ok {
- r0 = rf(ctx, name, credentialsName, webhookSecret)
+ if rf, ok := ret.Get(0).(func(context.Context, string, params.ForgeCredentials, string, params.PoolBalancerType) params.Enterprise); ok {
+ r0 = rf(ctx, name, credentialsName, webhookSecret, poolBalancerType)
} else {
r0 = ret.Get(0).(params.Enterprise)
}
- if rf, ok := ret.Get(1).(func(context.Context, string, string, string) error); ok {
- r1 = rf(ctx, name, credentialsName, webhookSecret)
+ if rf, ok := ret.Get(1).(func(context.Context, string, params.ForgeCredentials, string, params.PoolBalancerType) error); ok {
+ r1 = rf(ctx, name, credentialsName, webhookSecret, poolBalancerType)
} else {
r1 = ret.Error(1)
}
@@ -90,23 +253,59 @@ func (_m *Store) CreateEnterprise(ctx context.Context, name string, credentialsN
return r0, r1
}
-// CreateEnterprisePool provides a mock function with given fields: ctx, enterpriseID, param
-func (_m *Store) CreateEnterprisePool(ctx context.Context, enterpriseID string, param params.CreatePoolParams) (params.Pool, error) {
- ret := _m.Called(ctx, enterpriseID, param)
+// Store_CreateEnterprise_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateEnterprise'
+type Store_CreateEnterprise_Call struct {
+ *mock.Call
+}
+
+// CreateEnterprise is a helper method to define mock.On call
+// - ctx context.Context
+// - name string
+// - credentialsName params.ForgeCredentials
+// - webhookSecret string
+// - poolBalancerType params.PoolBalancerType
+func (_e *Store_Expecter) CreateEnterprise(ctx interface{}, name interface{}, credentialsName interface{}, webhookSecret interface{}, poolBalancerType interface{}) *Store_CreateEnterprise_Call {
+ return &Store_CreateEnterprise_Call{Call: _e.mock.On("CreateEnterprise", ctx, name, credentialsName, webhookSecret, poolBalancerType)}
+}
+
+func (_c *Store_CreateEnterprise_Call) Run(run func(ctx context.Context, name string, credentialsName params.ForgeCredentials, webhookSecret string, poolBalancerType params.PoolBalancerType)) *Store_CreateEnterprise_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string), args[2].(params.ForgeCredentials), args[3].(string), args[4].(params.PoolBalancerType))
+ })
+ return _c
+}
+
+func (_c *Store_CreateEnterprise_Call) Return(_a0 params.Enterprise, _a1 error) *Store_CreateEnterprise_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_CreateEnterprise_Call) RunAndReturn(run func(context.Context, string, params.ForgeCredentials, string, params.PoolBalancerType) (params.Enterprise, error)) *Store_CreateEnterprise_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// CreateEntityPool provides a mock function with given fields: ctx, entity, param
+func (_m *Store) CreateEntityPool(ctx context.Context, entity params.ForgeEntity, param params.CreatePoolParams) (params.Pool, error) {
+ ret := _m.Called(ctx, entity, param)
+
+ if len(ret) == 0 {
+ panic("no return value specified for CreateEntityPool")
+ }
var r0 params.Pool
var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, string, params.CreatePoolParams) (params.Pool, error)); ok {
- return rf(ctx, enterpriseID, param)
+ if rf, ok := ret.Get(0).(func(context.Context, params.ForgeEntity, params.CreatePoolParams) (params.Pool, error)); ok {
+ return rf(ctx, entity, param)
}
- if rf, ok := ret.Get(0).(func(context.Context, string, params.CreatePoolParams) params.Pool); ok {
- r0 = rf(ctx, enterpriseID, param)
+ if rf, ok := ret.Get(0).(func(context.Context, params.ForgeEntity, params.CreatePoolParams) params.Pool); ok {
+ r0 = rf(ctx, entity, param)
} else {
r0 = ret.Get(0).(params.Pool)
}
- if rf, ok := ret.Get(1).(func(context.Context, string, params.CreatePoolParams) error); ok {
- r1 = rf(ctx, enterpriseID, param)
+ if rf, ok := ret.Get(1).(func(context.Context, params.ForgeEntity, params.CreatePoolParams) error); ok {
+ r1 = rf(ctx, entity, param)
} else {
r1 = ret.Error(1)
}
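
RunAndReturn, generated for every method, is useful when the stubbed result should depend on the inputs rather than be a fixed value; for example (imports as in the previous sketch, plus errors; the ProviderName field is assumed from the params package):

	store := &mocks.Store{}
	store.EXPECT().
		CreateEntityPool(mock.Anything, mock.Anything, mock.Anything).
		RunAndReturn(func(_ context.Context, _ params.ForgeEntity, param params.CreatePoolParams) (params.Pool, error) {
			// Derive the stubbed pool from the request instead of canning it.
			if param.ProviderName == "" {
				return params.Pool{}, errors.New("provider name is required")
			}
			return params.Pool{}, nil
		})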
@@ -114,10 +313,330 @@ func (_m *Store) CreateEnterprisePool(ctx context.Context, enterpriseID string,
return r0, r1
}
+// Store_CreateEntityPool_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateEntityPool'
+type Store_CreateEntityPool_Call struct {
+ *mock.Call
+}
+
+// CreateEntityPool is a helper method to define mock.On call
+// - ctx context.Context
+// - entity params.ForgeEntity
+// - param params.CreatePoolParams
+func (_e *Store_Expecter) CreateEntityPool(ctx interface{}, entity interface{}, param interface{}) *Store_CreateEntityPool_Call {
+ return &Store_CreateEntityPool_Call{Call: _e.mock.On("CreateEntityPool", ctx, entity, param)}
+}
+
+func (_c *Store_CreateEntityPool_Call) Run(run func(ctx context.Context, entity params.ForgeEntity, param params.CreatePoolParams)) *Store_CreateEntityPool_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(params.ForgeEntity), args[2].(params.CreatePoolParams))
+ })
+ return _c
+}
+
+func (_c *Store_CreateEntityPool_Call) Return(_a0 params.Pool, _a1 error) *Store_CreateEntityPool_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_CreateEntityPool_Call) RunAndReturn(run func(context.Context, params.ForgeEntity, params.CreatePoolParams) (params.Pool, error)) *Store_CreateEntityPool_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// CreateEntityScaleSet provides a mock function with given fields: _a0, entity, param
+func (_m *Store) CreateEntityScaleSet(_a0 context.Context, entity params.ForgeEntity, param params.CreateScaleSetParams) (params.ScaleSet, error) {
+ ret := _m.Called(_a0, entity, param)
+
+ if len(ret) == 0 {
+ panic("no return value specified for CreateEntityScaleSet")
+ }
+
+ var r0 params.ScaleSet
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, params.ForgeEntity, params.CreateScaleSetParams) (params.ScaleSet, error)); ok {
+ return rf(_a0, entity, param)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, params.ForgeEntity, params.CreateScaleSetParams) params.ScaleSet); ok {
+ r0 = rf(_a0, entity, param)
+ } else {
+ r0 = ret.Get(0).(params.ScaleSet)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, params.ForgeEntity, params.CreateScaleSetParams) error); ok {
+ r1 = rf(_a0, entity, param)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// Store_CreateEntityScaleSet_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateEntityScaleSet'
+type Store_CreateEntityScaleSet_Call struct {
+ *mock.Call
+}
+
+// CreateEntityScaleSet is a helper method to define mock.On call
+// - _a0 context.Context
+// - entity params.ForgeEntity
+// - param params.CreateScaleSetParams
+func (_e *Store_Expecter) CreateEntityScaleSet(_a0 interface{}, entity interface{}, param interface{}) *Store_CreateEntityScaleSet_Call {
+ return &Store_CreateEntityScaleSet_Call{Call: _e.mock.On("CreateEntityScaleSet", _a0, entity, param)}
+}
+
+func (_c *Store_CreateEntityScaleSet_Call) Run(run func(_a0 context.Context, entity params.ForgeEntity, param params.CreateScaleSetParams)) *Store_CreateEntityScaleSet_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(params.ForgeEntity), args[2].(params.CreateScaleSetParams))
+ })
+ return _c
+}
+
+func (_c *Store_CreateEntityScaleSet_Call) Return(scaleSet params.ScaleSet, err error) *Store_CreateEntityScaleSet_Call {
+ _c.Call.Return(scaleSet, err)
+ return _c
+}
+
+func (_c *Store_CreateEntityScaleSet_Call) RunAndReturn(run func(context.Context, params.ForgeEntity, params.CreateScaleSetParams) (params.ScaleSet, error)) *Store_CreateEntityScaleSet_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// CreateGiteaCredentials provides a mock function with given fields: ctx, param
+func (_m *Store) CreateGiteaCredentials(ctx context.Context, param params.CreateGiteaCredentialsParams) (params.ForgeCredentials, error) {
+ ret := _m.Called(ctx, param)
+
+ if len(ret) == 0 {
+ panic("no return value specified for CreateGiteaCredentials")
+ }
+
+ var r0 params.ForgeCredentials
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, params.CreateGiteaCredentialsParams) (params.ForgeCredentials, error)); ok {
+ return rf(ctx, param)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, params.CreateGiteaCredentialsParams) params.ForgeCredentials); ok {
+ r0 = rf(ctx, param)
+ } else {
+ r0 = ret.Get(0).(params.ForgeCredentials)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, params.CreateGiteaCredentialsParams) error); ok {
+ r1 = rf(ctx, param)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// Store_CreateGiteaCredentials_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateGiteaCredentials'
+type Store_CreateGiteaCredentials_Call struct {
+ *mock.Call
+}
+
+// CreateGiteaCredentials is a helper method to define mock.On call
+// - ctx context.Context
+// - param params.CreateGiteaCredentialsParams
+func (_e *Store_Expecter) CreateGiteaCredentials(ctx interface{}, param interface{}) *Store_CreateGiteaCredentials_Call {
+ return &Store_CreateGiteaCredentials_Call{Call: _e.mock.On("CreateGiteaCredentials", ctx, param)}
+}
+
+func (_c *Store_CreateGiteaCredentials_Call) Run(run func(ctx context.Context, param params.CreateGiteaCredentialsParams)) *Store_CreateGiteaCredentials_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(params.CreateGiteaCredentialsParams))
+ })
+ return _c
+}
+
+func (_c *Store_CreateGiteaCredentials_Call) Return(gtCreds params.ForgeCredentials, err error) *Store_CreateGiteaCredentials_Call {
+ _c.Call.Return(gtCreds, err)
+ return _c
+}
+
+func (_c *Store_CreateGiteaCredentials_Call) RunAndReturn(run func(context.Context, params.CreateGiteaCredentialsParams) (params.ForgeCredentials, error)) *Store_CreateGiteaCredentials_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// CreateGiteaEndpoint provides a mock function with given fields: _a0, param
+func (_m *Store) CreateGiteaEndpoint(_a0 context.Context, param params.CreateGiteaEndpointParams) (params.ForgeEndpoint, error) {
+ ret := _m.Called(_a0, param)
+
+ if len(ret) == 0 {
+ panic("no return value specified for CreateGiteaEndpoint")
+ }
+
+ var r0 params.ForgeEndpoint
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, params.CreateGiteaEndpointParams) (params.ForgeEndpoint, error)); ok {
+ return rf(_a0, param)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, params.CreateGiteaEndpointParams) params.ForgeEndpoint); ok {
+ r0 = rf(_a0, param)
+ } else {
+ r0 = ret.Get(0).(params.ForgeEndpoint)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, params.CreateGiteaEndpointParams) error); ok {
+ r1 = rf(_a0, param)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// Store_CreateGiteaEndpoint_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateGiteaEndpoint'
+type Store_CreateGiteaEndpoint_Call struct {
+ *mock.Call
+}
+
+// CreateGiteaEndpoint is a helper method to define mock.On call
+// - _a0 context.Context
+// - param params.CreateGiteaEndpointParams
+func (_e *Store_Expecter) CreateGiteaEndpoint(_a0 interface{}, param interface{}) *Store_CreateGiteaEndpoint_Call {
+ return &Store_CreateGiteaEndpoint_Call{Call: _e.mock.On("CreateGiteaEndpoint", _a0, param)}
+}
+
+func (_c *Store_CreateGiteaEndpoint_Call) Run(run func(_a0 context.Context, param params.CreateGiteaEndpointParams)) *Store_CreateGiteaEndpoint_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(params.CreateGiteaEndpointParams))
+ })
+ return _c
+}
+
+func (_c *Store_CreateGiteaEndpoint_Call) Return(ghEndpoint params.ForgeEndpoint, err error) *Store_CreateGiteaEndpoint_Call {
+ _c.Call.Return(ghEndpoint, err)
+ return _c
+}
+
+func (_c *Store_CreateGiteaEndpoint_Call) RunAndReturn(run func(context.Context, params.CreateGiteaEndpointParams) (params.ForgeEndpoint, error)) *Store_CreateGiteaEndpoint_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// CreateGithubCredentials provides a mock function with given fields: ctx, param
+func (_m *Store) CreateGithubCredentials(ctx context.Context, param params.CreateGithubCredentialsParams) (params.ForgeCredentials, error) {
+ ret := _m.Called(ctx, param)
+
+ if len(ret) == 0 {
+ panic("no return value specified for CreateGithubCredentials")
+ }
+
+ var r0 params.ForgeCredentials
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, params.CreateGithubCredentialsParams) (params.ForgeCredentials, error)); ok {
+ return rf(ctx, param)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, params.CreateGithubCredentialsParams) params.ForgeCredentials); ok {
+ r0 = rf(ctx, param)
+ } else {
+ r0 = ret.Get(0).(params.ForgeCredentials)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, params.CreateGithubCredentialsParams) error); ok {
+ r1 = rf(ctx, param)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// Store_CreateGithubCredentials_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateGithubCredentials'
+type Store_CreateGithubCredentials_Call struct {
+ *mock.Call
+}
+
+// CreateGithubCredentials is a helper method to define mock.On call
+// - ctx context.Context
+// - param params.CreateGithubCredentialsParams
+func (_e *Store_Expecter) CreateGithubCredentials(ctx interface{}, param interface{}) *Store_CreateGithubCredentials_Call {
+ return &Store_CreateGithubCredentials_Call{Call: _e.mock.On("CreateGithubCredentials", ctx, param)}
+}
+
+func (_c *Store_CreateGithubCredentials_Call) Run(run func(ctx context.Context, param params.CreateGithubCredentialsParams)) *Store_CreateGithubCredentials_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(params.CreateGithubCredentialsParams))
+ })
+ return _c
+}
+
+func (_c *Store_CreateGithubCredentials_Call) Return(_a0 params.ForgeCredentials, _a1 error) *Store_CreateGithubCredentials_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_CreateGithubCredentials_Call) RunAndReturn(run func(context.Context, params.CreateGithubCredentialsParams) (params.ForgeCredentials, error)) *Store_CreateGithubCredentials_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// CreateGithubEndpoint provides a mock function with given fields: ctx, param
+func (_m *Store) CreateGithubEndpoint(ctx context.Context, param params.CreateGithubEndpointParams) (params.ForgeEndpoint, error) {
+ ret := _m.Called(ctx, param)
+
+ if len(ret) == 0 {
+ panic("no return value specified for CreateGithubEndpoint")
+ }
+
+ var r0 params.ForgeEndpoint
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, params.CreateGithubEndpointParams) (params.ForgeEndpoint, error)); ok {
+ return rf(ctx, param)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, params.CreateGithubEndpointParams) params.ForgeEndpoint); ok {
+ r0 = rf(ctx, param)
+ } else {
+ r0 = ret.Get(0).(params.ForgeEndpoint)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, params.CreateGithubEndpointParams) error); ok {
+ r1 = rf(ctx, param)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// Store_CreateGithubEndpoint_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateGithubEndpoint'
+type Store_CreateGithubEndpoint_Call struct {
+ *mock.Call
+}
+
+// CreateGithubEndpoint is a helper method to define mock.On call
+// - ctx context.Context
+// - param params.CreateGithubEndpointParams
+func (_e *Store_Expecter) CreateGithubEndpoint(ctx interface{}, param interface{}) *Store_CreateGithubEndpoint_Call {
+ return &Store_CreateGithubEndpoint_Call{Call: _e.mock.On("CreateGithubEndpoint", ctx, param)}
+}
+
+func (_c *Store_CreateGithubEndpoint_Call) Run(run func(ctx context.Context, param params.CreateGithubEndpointParams)) *Store_CreateGithubEndpoint_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(params.CreateGithubEndpointParams))
+ })
+ return _c
+}
+
+func (_c *Store_CreateGithubEndpoint_Call) Return(_a0 params.ForgeEndpoint, _a1 error) *Store_CreateGithubEndpoint_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_CreateGithubEndpoint_Call) RunAndReturn(run func(context.Context, params.CreateGithubEndpointParams) (params.ForgeEndpoint, error)) *Store_CreateGithubEndpoint_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// CreateInstance provides a mock function with given fields: ctx, poolID, param
func (_m *Store) CreateInstance(ctx context.Context, poolID string, param params.CreateInstanceParams) (params.Instance, error) {
ret := _m.Called(ctx, poolID, param)
+ if len(ret) == 0 {
+ panic("no return value specified for CreateInstance")
+ }
+
var r0 params.Instance
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, string, params.CreateInstanceParams) (params.Instance, error)); ok {
@@ -138,10 +657,44 @@ func (_m *Store) CreateInstance(ctx context.Context, poolID string, param params
return r0, r1
}
+// Store_CreateInstance_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateInstance'
+type Store_CreateInstance_Call struct {
+ *mock.Call
+}
+
+// CreateInstance is a helper method to define mock.On call
+// - ctx context.Context
+// - poolID string
+// - param params.CreateInstanceParams
+func (_e *Store_Expecter) CreateInstance(ctx interface{}, poolID interface{}, param interface{}) *Store_CreateInstance_Call {
+ return &Store_CreateInstance_Call{Call: _e.mock.On("CreateInstance", ctx, poolID, param)}
+}
+
+func (_c *Store_CreateInstance_Call) Run(run func(ctx context.Context, poolID string, param params.CreateInstanceParams)) *Store_CreateInstance_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string), args[2].(params.CreateInstanceParams))
+ })
+ return _c
+}
+
+func (_c *Store_CreateInstance_Call) Return(_a0 params.Instance, _a1 error) *Store_CreateInstance_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_CreateInstance_Call) RunAndReturn(run func(context.Context, string, params.CreateInstanceParams) (params.Instance, error)) *Store_CreateInstance_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// CreateOrUpdateJob provides a mock function with given fields: ctx, job
func (_m *Store) CreateOrUpdateJob(ctx context.Context, job params.Job) (params.Job, error) {
ret := _m.Called(ctx, job)
+ if len(ret) == 0 {
+ panic("no return value specified for CreateOrUpdateJob")
+ }
+
var r0 params.Job
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, params.Job) (params.Job, error)); ok {
@@ -162,23 +715,56 @@ func (_m *Store) CreateOrUpdateJob(ctx context.Context, job params.Job) (params.
return r0, r1
}
-// CreateOrganization provides a mock function with given fields: ctx, name, credentialsName, webhookSecret
-func (_m *Store) CreateOrganization(ctx context.Context, name string, credentialsName string, webhookSecret string) (params.Organization, error) {
- ret := _m.Called(ctx, name, credentialsName, webhookSecret)
+// Store_CreateOrUpdateJob_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateOrUpdateJob'
+type Store_CreateOrUpdateJob_Call struct {
+ *mock.Call
+}
+
+// CreateOrUpdateJob is a helper method to define mock.On call
+// - ctx context.Context
+// - job params.Job
+func (_e *Store_Expecter) CreateOrUpdateJob(ctx interface{}, job interface{}) *Store_CreateOrUpdateJob_Call {
+ return &Store_CreateOrUpdateJob_Call{Call: _e.mock.On("CreateOrUpdateJob", ctx, job)}
+}
+
+func (_c *Store_CreateOrUpdateJob_Call) Run(run func(ctx context.Context, job params.Job)) *Store_CreateOrUpdateJob_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(params.Job))
+ })
+ return _c
+}
+
+func (_c *Store_CreateOrUpdateJob_Call) Return(_a0 params.Job, _a1 error) *Store_CreateOrUpdateJob_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_CreateOrUpdateJob_Call) RunAndReturn(run func(context.Context, params.Job) (params.Job, error)) *Store_CreateOrUpdateJob_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// CreateOrganization provides a mock function with given fields: ctx, name, credentials, webhookSecret, poolBalancerType
+func (_m *Store) CreateOrganization(ctx context.Context, name string, credentials params.ForgeCredentials, webhookSecret string, poolBalancerType params.PoolBalancerType) (params.Organization, error) {
+ ret := _m.Called(ctx, name, credentials, webhookSecret, poolBalancerType)
+
+ if len(ret) == 0 {
+ panic("no return value specified for CreateOrganization")
+ }
var r0 params.Organization
var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, string, string, string) (params.Organization, error)); ok {
- return rf(ctx, name, credentialsName, webhookSecret)
+ if rf, ok := ret.Get(0).(func(context.Context, string, params.ForgeCredentials, string, params.PoolBalancerType) (params.Organization, error)); ok {
+ return rf(ctx, name, credentials, webhookSecret, poolBalancerType)
}
- if rf, ok := ret.Get(0).(func(context.Context, string, string, string) params.Organization); ok {
- r0 = rf(ctx, name, credentialsName, webhookSecret)
+ if rf, ok := ret.Get(0).(func(context.Context, string, params.ForgeCredentials, string, params.PoolBalancerType) params.Organization); ok {
+ r0 = rf(ctx, name, credentials, webhookSecret, poolBalancerType)
} else {
r0 = ret.Get(0).(params.Organization)
}
- if rf, ok := ret.Get(1).(func(context.Context, string, string, string) error); ok {
- r1 = rf(ctx, name, credentialsName, webhookSecret)
+ if rf, ok := ret.Get(1).(func(context.Context, string, params.ForgeCredentials, string, params.PoolBalancerType) error); ok {
+ r1 = rf(ctx, name, credentials, webhookSecret, poolBalancerType)
} else {
r1 = ret.Error(1)
}
@@ -186,47 +772,59 @@ func (_m *Store) CreateOrganization(ctx context.Context, name string, credential
return r0, r1
}
-// CreateOrganizationPool provides a mock function with given fields: ctx, orgId, param
-func (_m *Store) CreateOrganizationPool(ctx context.Context, orgId string, param params.CreatePoolParams) (params.Pool, error) {
- ret := _m.Called(ctx, orgId, param)
-
- var r0 params.Pool
- var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, string, params.CreatePoolParams) (params.Pool, error)); ok {
- return rf(ctx, orgId, param)
- }
- if rf, ok := ret.Get(0).(func(context.Context, string, params.CreatePoolParams) params.Pool); ok {
- r0 = rf(ctx, orgId, param)
- } else {
- r0 = ret.Get(0).(params.Pool)
- }
-
- if rf, ok := ret.Get(1).(func(context.Context, string, params.CreatePoolParams) error); ok {
- r1 = rf(ctx, orgId, param)
- } else {
- r1 = ret.Error(1)
- }
-
- return r0, r1
+// Store_CreateOrganization_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateOrganization'
+type Store_CreateOrganization_Call struct {
+ *mock.Call
}
-// CreateRepository provides a mock function with given fields: ctx, owner, name, credentialsName, webhookSecret
-func (_m *Store) CreateRepository(ctx context.Context, owner string, name string, credentialsName string, webhookSecret string) (params.Repository, error) {
- ret := _m.Called(ctx, owner, name, credentialsName, webhookSecret)
+// CreateOrganization is a helper method to define mock.On call
+// - ctx context.Context
+// - name string
+// - credentials params.ForgeCredentials
+// - webhookSecret string
+// - poolBalancerType params.PoolBalancerType
+func (_e *Store_Expecter) CreateOrganization(ctx interface{}, name interface{}, credentials interface{}, webhookSecret interface{}, poolBalancerType interface{}) *Store_CreateOrganization_Call {
+ return &Store_CreateOrganization_Call{Call: _e.mock.On("CreateOrganization", ctx, name, credentials, webhookSecret, poolBalancerType)}
+}
+
+func (_c *Store_CreateOrganization_Call) Run(run func(ctx context.Context, name string, credentials params.ForgeCredentials, webhookSecret string, poolBalancerType params.PoolBalancerType)) *Store_CreateOrganization_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string), args[2].(params.ForgeCredentials), args[3].(string), args[4].(params.PoolBalancerType))
+ })
+ return _c
+}
+
+func (_c *Store_CreateOrganization_Call) Return(org params.Organization, err error) *Store_CreateOrganization_Call {
+ _c.Call.Return(org, err)
+ return _c
+}
+
+func (_c *Store_CreateOrganization_Call) RunAndReturn(run func(context.Context, string, params.ForgeCredentials, string, params.PoolBalancerType) (params.Organization, error)) *Store_CreateOrganization_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
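// Editor's note (not mockery output): a minimal sketch of how the new typed
// Expecter API is used in a test. It assumes mockery also generated the usual
// NewStore(t) constructor and EXPECT() accessor for this mock, and that the
// params.PoolBalancerTypeRoundRobin constant exists; adjust names to the real
// package layout.
//
//	store := mocks.NewStore(t)
//	store.EXPECT().
//		CreateOrganization(mock.Anything, "acme", mock.Anything, "secret", params.PoolBalancerTypeRoundRobin).
//		Return(params.Organization{Name: "acme"}, nil)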
+// CreateRepository provides a mock function with given fields: ctx, owner, name, credentials, webhookSecret, poolBalancerType
+func (_m *Store) CreateRepository(ctx context.Context, owner string, name string, credentials params.ForgeCredentials, webhookSecret string, poolBalancerType params.PoolBalancerType) (params.Repository, error) {
+ ret := _m.Called(ctx, owner, name, credentials, webhookSecret, poolBalancerType)
+
+ if len(ret) == 0 {
+ panic("no return value specified for CreateRepository")
+ }
 
var r0 params.Repository
var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, string, string, string, string) (params.Repository, error)); ok {
- return rf(ctx, owner, name, credentialsName, webhookSecret)
+ if rf, ok := ret.Get(0).(func(context.Context, string, string, params.ForgeCredentials, string, params.PoolBalancerType) (params.Repository, error)); ok {
+ return rf(ctx, owner, name, credentials, webhookSecret, poolBalancerType)
}
- if rf, ok := ret.Get(0).(func(context.Context, string, string, string, string) params.Repository); ok {
- r0 = rf(ctx, owner, name, credentialsName, webhookSecret)
+ if rf, ok := ret.Get(0).(func(context.Context, string, string, params.ForgeCredentials, string, params.PoolBalancerType) params.Repository); ok {
+ r0 = rf(ctx, owner, name, credentials, webhookSecret, poolBalancerType)
} else {
r0 = ret.Get(0).(params.Repository)
}
- if rf, ok := ret.Get(1).(func(context.Context, string, string, string, string) error); ok {
- r1 = rf(ctx, owner, name, credentialsName, webhookSecret)
+ if rf, ok := ret.Get(1).(func(context.Context, string, string, params.ForgeCredentials, string, params.PoolBalancerType) error); ok {
+ r1 = rf(ctx, owner, name, credentials, webhookSecret, poolBalancerType)
} else {
r1 = ret.Error(1)
}
@@ -234,23 +832,60 @@ func (_m *Store) CreateRepository(ctx context.Context, owner string, name string
return r0, r1
}
-// CreateRepositoryPool provides a mock function with given fields: ctx, repoId, param
-func (_m *Store) CreateRepositoryPool(ctx context.Context, repoId string, param params.CreatePoolParams) (params.Pool, error) {
- ret := _m.Called(ctx, repoId, param)
+// Store_CreateRepository_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateRepository'
+type Store_CreateRepository_Call struct {
+ *mock.Call
+}
- var r0 params.Pool
+// CreateRepository is a helper method to define mock.On call
+// - ctx context.Context
+// - owner string
+// - name string
+// - credentials params.ForgeCredentials
+// - webhookSecret string
+// - poolBalancerType params.PoolBalancerType
+func (_e *Store_Expecter) CreateRepository(ctx interface{}, owner interface{}, name interface{}, credentials interface{}, webhookSecret interface{}, poolBalancerType interface{}) *Store_CreateRepository_Call {
+ return &Store_CreateRepository_Call{Call: _e.mock.On("CreateRepository", ctx, owner, name, credentials, webhookSecret, poolBalancerType)}
+}
+
+func (_c *Store_CreateRepository_Call) Run(run func(ctx context.Context, owner string, name string, credentials params.ForgeCredentials, webhookSecret string, poolBalancerType params.PoolBalancerType)) *Store_CreateRepository_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string), args[2].(string), args[3].(params.ForgeCredentials), args[4].(string), args[5].(params.PoolBalancerType))
+ })
+ return _c
+}
+
+func (_c *Store_CreateRepository_Call) Return(param params.Repository, err error) *Store_CreateRepository_Call {
+ _c.Call.Return(param, err)
+ return _c
+}
+
+func (_c *Store_CreateRepository_Call) RunAndReturn(run func(context.Context, string, string, params.ForgeCredentials, string, params.PoolBalancerType) (params.Repository, error)) *Store_CreateRepository_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
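// Editor's note (hypothetical usage, not part of the generated file): Run
// registers a callback that receives the typed arguments, which is handy for
// asserting on fields of params.ForgeCredentials without a custom matcher.
// The require import and the t variable are assumed test fixtures.
//
//	store.EXPECT().
//		CreateRepository(mock.Anything, "acme", "repo", mock.Anything, mock.Anything, mock.Anything).
//		Run(func(_ context.Context, owner, name string, _ params.ForgeCredentials, _ string, _ params.PoolBalancerType) {
//			require.Equal(t, "acme", owner)
//		}).
//		Return(params.Repository{Owner: "acme", Name: "repo"}, nil)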
+// CreateScaleSetInstance provides a mock function with given fields: _a0, scaleSetID, param
+func (_m *Store) CreateScaleSetInstance(_a0 context.Context, scaleSetID uint, param params.CreateInstanceParams) (params.Instance, error) {
+ ret := _m.Called(_a0, scaleSetID, param)
+
+ if len(ret) == 0 {
+ panic("no return value specified for CreateScaleSetInstance")
+ }
+
+ var r0 params.Instance
var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, string, params.CreatePoolParams) (params.Pool, error)); ok {
- return rf(ctx, repoId, param)
+ if rf, ok := ret.Get(0).(func(context.Context, uint, params.CreateInstanceParams) (params.Instance, error)); ok {
+ return rf(_a0, scaleSetID, param)
}
- if rf, ok := ret.Get(0).(func(context.Context, string, params.CreatePoolParams) params.Pool); ok {
- r0 = rf(ctx, repoId, param)
+ if rf, ok := ret.Get(0).(func(context.Context, uint, params.CreateInstanceParams) params.Instance); ok {
+ r0 = rf(_a0, scaleSetID, param)
} else {
- r0 = ret.Get(0).(params.Pool)
+ r0 = ret.Get(0).(params.Instance)
}
- if rf, ok := ret.Get(1).(func(context.Context, string, params.CreatePoolParams) error); ok {
- r1 = rf(ctx, repoId, param)
+ if rf, ok := ret.Get(1).(func(context.Context, uint, params.CreateInstanceParams) error); ok {
+ r1 = rf(_a0, scaleSetID, param)
} else {
r1 = ret.Error(1)
}
@@ -258,10 +893,44 @@ func (_m *Store) CreateRepositoryPool(ctx context.Context, repoId string, param
return r0, r1
}
+// Store_CreateScaleSetInstance_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateScaleSetInstance'
+type Store_CreateScaleSetInstance_Call struct {
+ *mock.Call
+}
+
+// CreateScaleSetInstance is a helper method to define mock.On call
+// - _a0 context.Context
+// - scaleSetID uint
+// - param params.CreateInstanceParams
+func (_e *Store_Expecter) CreateScaleSetInstance(_a0 interface{}, scaleSetID interface{}, param interface{}) *Store_CreateScaleSetInstance_Call {
+ return &Store_CreateScaleSetInstance_Call{Call: _e.mock.On("CreateScaleSetInstance", _a0, scaleSetID, param)}
+}
+
+func (_c *Store_CreateScaleSetInstance_Call) Run(run func(_a0 context.Context, scaleSetID uint, param params.CreateInstanceParams)) *Store_CreateScaleSetInstance_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(uint), args[2].(params.CreateInstanceParams))
+ })
+ return _c
+}
+
+func (_c *Store_CreateScaleSetInstance_Call) Return(instance params.Instance, err error) *Store_CreateScaleSetInstance_Call {
+ _c.Call.Return(instance, err)
+ return _c
+}
+
+func (_c *Store_CreateScaleSetInstance_Call) RunAndReturn(run func(context.Context, uint, params.CreateInstanceParams) (params.Instance, error)) *Store_CreateScaleSetInstance_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
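// Editor's note (illustrative only): RunAndReturn computes the return values
// from the actual arguments, so one expectation can echo back whatever
// instance name the code under test asked for. The Name fields used on
// params.CreateInstanceParams and params.Instance are assumptions.
//
//	store.EXPECT().
//		CreateScaleSetInstance(mock.Anything, mock.Anything, mock.Anything).
//		RunAndReturn(func(_ context.Context, _ uint, p params.CreateInstanceParams) (params.Instance, error) {
//			return params.Instance{Name: p.Name}, nil
//		})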
// CreateUser provides a mock function with given fields: ctx, user
func (_m *Store) CreateUser(ctx context.Context, user params.NewUserParams) (params.User, error) {
ret := _m.Called(ctx, user)
+ if len(ret) == 0 {
+ panic("no return value specified for CreateUser")
+ }
+
var r0 params.User
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, params.NewUserParams) (params.User, error)); ok {
@@ -282,10 +951,43 @@ func (_m *Store) CreateUser(ctx context.Context, user params.NewUserParams) (par
return r0, r1
}
+// Store_CreateUser_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateUser'
+type Store_CreateUser_Call struct {
+ *mock.Call
+}
+
+// CreateUser is a helper method to define mock.On call
+// - ctx context.Context
+// - user params.NewUserParams
+func (_e *Store_Expecter) CreateUser(ctx interface{}, user interface{}) *Store_CreateUser_Call {
+ return &Store_CreateUser_Call{Call: _e.mock.On("CreateUser", ctx, user)}
+}
+
+func (_c *Store_CreateUser_Call) Run(run func(ctx context.Context, user params.NewUserParams)) *Store_CreateUser_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(params.NewUserParams))
+ })
+ return _c
+}
+
+func (_c *Store_CreateUser_Call) Return(_a0 params.User, _a1 error) *Store_CreateUser_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_CreateUser_Call) RunAndReturn(run func(context.Context, params.NewUserParams) (params.User, error)) *Store_CreateUser_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
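// Editor's note (hypothetical sketch): each helper accepts interface{} per
// argument, so testify matchers still combine with literal values. Here
// mock.MatchedBy narrows CreateUser to admin users only; the IsAdmin field on
// params.NewUserParams is an assumption.
//
//	store.EXPECT().
//		CreateUser(mock.Anything, mock.MatchedBy(func(u params.NewUserParams) bool {
//			return u.IsAdmin
//		})).
//		Return(params.User{}, nil)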
// DeleteCompletedJobs provides a mock function with given fields: ctx
func (_m *Store) DeleteCompletedJobs(ctx context.Context) error {
ret := _m.Called(ctx)
+ if len(ret) == 0 {
+ panic("no return value specified for DeleteCompletedJobs")
+ }
+
var r0 error
if rf, ok := ret.Get(0).(func(context.Context) error); ok {
r0 = rf(ctx)
@@ -296,10 +998,42 @@ func (_m *Store) DeleteCompletedJobs(ctx context.Context) error {
return r0
}
+// Store_DeleteCompletedJobs_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteCompletedJobs'
+type Store_DeleteCompletedJobs_Call struct {
+ *mock.Call
+}
+
+// DeleteCompletedJobs is a helper method to define mock.On call
+// - ctx context.Context
+func (_e *Store_Expecter) DeleteCompletedJobs(ctx interface{}) *Store_DeleteCompletedJobs_Call {
+ return &Store_DeleteCompletedJobs_Call{Call: _e.mock.On("DeleteCompletedJobs", ctx)}
+}
+
+func (_c *Store_DeleteCompletedJobs_Call) Run(run func(ctx context.Context)) *Store_DeleteCompletedJobs_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context))
+ })
+ return _c
+}
+
+func (_c *Store_DeleteCompletedJobs_Call) Return(_a0 error) *Store_DeleteCompletedJobs_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *Store_DeleteCompletedJobs_Call) RunAndReturn(run func(context.Context) error) *Store_DeleteCompletedJobs_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
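// Editor's note: the new "if len(ret) == 0" guard makes expectations without
// declared return values fail fast with a named panic instead of an index
// error deep inside mock.Arguments. In practice every expectation now spells
// out its returns, even a lone nil error:
//
//	store.EXPECT().DeleteCompletedJobs(mock.Anything).Return(nil)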
// DeleteEnterprise provides a mock function with given fields: ctx, enterpriseID
func (_m *Store) DeleteEnterprise(ctx context.Context, enterpriseID string) error {
ret := _m.Called(ctx, enterpriseID)
+ if len(ret) == 0 {
+ panic("no return value specified for DeleteEnterprise")
+ }
+
var r0 error
if rf, ok := ret.Get(0).(func(context.Context, string) error); ok {
r0 = rf(ctx, enterpriseID)
@@ -310,13 +1044,46 @@ func (_m *Store) DeleteEnterprise(ctx context.Context, enterpriseID string) erro
return r0
}
-// DeleteEnterprisePool provides a mock function with given fields: ctx, enterpriseID, poolID
-func (_m *Store) DeleteEnterprisePool(ctx context.Context, enterpriseID string, poolID string) error {
- ret := _m.Called(ctx, enterpriseID, poolID)
+// Store_DeleteEnterprise_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteEnterprise'
+type Store_DeleteEnterprise_Call struct {
+ *mock.Call
+}
+
+// DeleteEnterprise is a helper method to define mock.On call
+// - ctx context.Context
+// - enterpriseID string
+func (_e *Store_Expecter) DeleteEnterprise(ctx interface{}, enterpriseID interface{}) *Store_DeleteEnterprise_Call {
+ return &Store_DeleteEnterprise_Call{Call: _e.mock.On("DeleteEnterprise", ctx, enterpriseID)}
+}
+
+func (_c *Store_DeleteEnterprise_Call) Run(run func(ctx context.Context, enterpriseID string)) *Store_DeleteEnterprise_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string))
+ })
+ return _c
+}
+
+func (_c *Store_DeleteEnterprise_Call) Return(_a0 error) *Store_DeleteEnterprise_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *Store_DeleteEnterprise_Call) RunAndReturn(run func(context.Context, string) error) *Store_DeleteEnterprise_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// DeleteEntityPool provides a mock function with given fields: ctx, entity, poolID
+func (_m *Store) DeleteEntityPool(ctx context.Context, entity params.ForgeEntity, poolID string) error {
+ ret := _m.Called(ctx, entity, poolID)
+
+ if len(ret) == 0 {
+ panic("no return value specified for DeleteEntityPool")
+ }
 
var r0 error
- if rf, ok := ret.Get(0).(func(context.Context, string, string) error); ok {
- r0 = rf(ctx, enterpriseID, poolID)
+ if rf, ok := ret.Get(0).(func(context.Context, params.ForgeEntity, string) error); ok {
+ r0 = rf(ctx, entity, poolID)
} else {
r0 = ret.Error(0)
}
@@ -324,13 +1091,47 @@ func (_m *Store) DeleteEnterprisePool(ctx context.Context, enterpriseID string,
return r0
}
-// DeleteInstance provides a mock function with given fields: ctx, poolID, instanceName
-func (_m *Store) DeleteInstance(ctx context.Context, poolID string, instanceName string) error {
- ret := _m.Called(ctx, poolID, instanceName)
+// Store_DeleteEntityPool_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteEntityPool'
+type Store_DeleteEntityPool_Call struct {
+ *mock.Call
+}
+
+// DeleteEntityPool is a helper method to define mock.On call
+// - ctx context.Context
+// - entity params.ForgeEntity
+// - poolID string
+func (_e *Store_Expecter) DeleteEntityPool(ctx interface{}, entity interface{}, poolID interface{}) *Store_DeleteEntityPool_Call {
+ return &Store_DeleteEntityPool_Call{Call: _e.mock.On("DeleteEntityPool", ctx, entity, poolID)}
+}
+
+func (_c *Store_DeleteEntityPool_Call) Run(run func(ctx context.Context, entity params.ForgeEntity, poolID string)) *Store_DeleteEntityPool_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(params.ForgeEntity), args[2].(string))
+ })
+ return _c
+}
+
+func (_c *Store_DeleteEntityPool_Call) Return(_a0 error) *Store_DeleteEntityPool_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *Store_DeleteEntityPool_Call) RunAndReturn(run func(context.Context, params.ForgeEntity, string) error) *Store_DeleteEntityPool_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
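// Editor's note: the per-entity helpers (DeleteRepositoryPool,
// DeleteOrganizationPool, DeleteEnterprisePool) are folded into a single
// DeleteEntityPool keyed on params.ForgeEntity, so one expectation covers all
// forge entity kinds. A sketch, with the ForgeEntity field names assumed:
//
//	entity := params.ForgeEntity{ID: "org-id", EntityType: params.ForgeEntityTypeOrganization}
//	store.EXPECT().DeleteEntityPool(mock.Anything, entity, "pool-id").Return(nil)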
+// DeleteGiteaCredentials provides a mock function with given fields: ctx, id
+func (_m *Store) DeleteGiteaCredentials(ctx context.Context, id uint) error {
+ ret := _m.Called(ctx, id)
+
+ if len(ret) == 0 {
+ panic("no return value specified for DeleteGiteaCredentials")
+ }
 
var r0 error
- if rf, ok := ret.Get(0).(func(context.Context, string, string) error); ok {
- r0 = rf(ctx, poolID, instanceName)
+ if rf, ok := ret.Get(0).(func(context.Context, uint) error); ok {
+ r0 = rf(ctx, id)
} else {
r0 = ret.Error(0)
}
@@ -338,10 +1139,279 @@ func (_m *Store) DeleteInstance(ctx context.Context, poolID string, instanceName
return r0
}
+// Store_DeleteGiteaCredentials_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteGiteaCredentials'
+type Store_DeleteGiteaCredentials_Call struct {
+ *mock.Call
+}
+
+// DeleteGiteaCredentials is a helper method to define mock.On call
+// - ctx context.Context
+// - id uint
+func (_e *Store_Expecter) DeleteGiteaCredentials(ctx interface{}, id interface{}) *Store_DeleteGiteaCredentials_Call {
+ return &Store_DeleteGiteaCredentials_Call{Call: _e.mock.On("DeleteGiteaCredentials", ctx, id)}
+}
+
+func (_c *Store_DeleteGiteaCredentials_Call) Run(run func(ctx context.Context, id uint)) *Store_DeleteGiteaCredentials_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(uint))
+ })
+ return _c
+}
+
+func (_c *Store_DeleteGiteaCredentials_Call) Return(err error) *Store_DeleteGiteaCredentials_Call {
+ _c.Call.Return(err)
+ return _c
+}
+
+func (_c *Store_DeleteGiteaCredentials_Call) RunAndReturn(run func(context.Context, uint) error) *Store_DeleteGiteaCredentials_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// DeleteGiteaEndpoint provides a mock function with given fields: _a0, name
+func (_m *Store) DeleteGiteaEndpoint(_a0 context.Context, name string) error {
+ ret := _m.Called(_a0, name)
+
+ if len(ret) == 0 {
+ panic("no return value specified for DeleteGiteaEndpoint")
+ }
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func(context.Context, string) error); ok {
+ r0 = rf(_a0, name)
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+// Store_DeleteGiteaEndpoint_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteGiteaEndpoint'
+type Store_DeleteGiteaEndpoint_Call struct {
+ *mock.Call
+}
+
+// DeleteGiteaEndpoint is a helper method to define mock.On call
+// - _a0 context.Context
+// - name string
+func (_e *Store_Expecter) DeleteGiteaEndpoint(_a0 interface{}, name interface{}) *Store_DeleteGiteaEndpoint_Call {
+ return &Store_DeleteGiteaEndpoint_Call{Call: _e.mock.On("DeleteGiteaEndpoint", _a0, name)}
+}
+
+func (_c *Store_DeleteGiteaEndpoint_Call) Run(run func(_a0 context.Context, name string)) *Store_DeleteGiteaEndpoint_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string))
+ })
+ return _c
+}
+
+func (_c *Store_DeleteGiteaEndpoint_Call) Return(err error) *Store_DeleteGiteaEndpoint_Call {
+ _c.Call.Return(err)
+ return _c
+}
+
+func (_c *Store_DeleteGiteaEndpoint_Call) RunAndReturn(run func(context.Context, string) error) *Store_DeleteGiteaEndpoint_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// DeleteGithubCredentials provides a mock function with given fields: ctx, id
+func (_m *Store) DeleteGithubCredentials(ctx context.Context, id uint) error {
+ ret := _m.Called(ctx, id)
+
+ if len(ret) == 0 {
+ panic("no return value specified for DeleteGithubCredentials")
+ }
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func(context.Context, uint) error); ok {
+ r0 = rf(ctx, id)
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+// Store_DeleteGithubCredentials_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteGithubCredentials'
+type Store_DeleteGithubCredentials_Call struct {
+ *mock.Call
+}
+
+// DeleteGithubCredentials is a helper method to define mock.On call
+// - ctx context.Context
+// - id uint
+func (_e *Store_Expecter) DeleteGithubCredentials(ctx interface{}, id interface{}) *Store_DeleteGithubCredentials_Call {
+ return &Store_DeleteGithubCredentials_Call{Call: _e.mock.On("DeleteGithubCredentials", ctx, id)}
+}
+
+func (_c *Store_DeleteGithubCredentials_Call) Run(run func(ctx context.Context, id uint)) *Store_DeleteGithubCredentials_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(uint))
+ })
+ return _c
+}
+
+func (_c *Store_DeleteGithubCredentials_Call) Return(_a0 error) *Store_DeleteGithubCredentials_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *Store_DeleteGithubCredentials_Call) RunAndReturn(run func(context.Context, uint) error) *Store_DeleteGithubCredentials_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// DeleteGithubEndpoint provides a mock function with given fields: ctx, name
+func (_m *Store) DeleteGithubEndpoint(ctx context.Context, name string) error {
+ ret := _m.Called(ctx, name)
+
+ if len(ret) == 0 {
+ panic("no return value specified for DeleteGithubEndpoint")
+ }
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func(context.Context, string) error); ok {
+ r0 = rf(ctx, name)
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+// Store_DeleteGithubEndpoint_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteGithubEndpoint'
+type Store_DeleteGithubEndpoint_Call struct {
+ *mock.Call
+}
+
+// DeleteGithubEndpoint is a helper method to define mock.On call
+// - ctx context.Context
+// - name string
+func (_e *Store_Expecter) DeleteGithubEndpoint(ctx interface{}, name interface{}) *Store_DeleteGithubEndpoint_Call {
+ return &Store_DeleteGithubEndpoint_Call{Call: _e.mock.On("DeleteGithubEndpoint", ctx, name)}
+}
+
+func (_c *Store_DeleteGithubEndpoint_Call) Run(run func(ctx context.Context, name string)) *Store_DeleteGithubEndpoint_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string))
+ })
+ return _c
+}
+
+func (_c *Store_DeleteGithubEndpoint_Call) Return(_a0 error) *Store_DeleteGithubEndpoint_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *Store_DeleteGithubEndpoint_Call) RunAndReturn(run func(context.Context, string) error) *Store_DeleteGithubEndpoint_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// DeleteInstance provides a mock function with given fields: ctx, poolID, instanceNameOrID
+func (_m *Store) DeleteInstance(ctx context.Context, poolID string, instanceNameOrID string) error {
+ ret := _m.Called(ctx, poolID, instanceNameOrID)
+
+ if len(ret) == 0 {
+ panic("no return value specified for DeleteInstance")
+ }
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func(context.Context, string, string) error); ok {
+ r0 = rf(ctx, poolID, instanceNameOrID)
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+// Store_DeleteInstance_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteInstance'
+type Store_DeleteInstance_Call struct {
+ *mock.Call
+}
+
+// DeleteInstance is a helper method to define mock.On call
+// - ctx context.Context
+// - poolID string
+// - instanceNameOrID string
+func (_e *Store_Expecter) DeleteInstance(ctx interface{}, poolID interface{}, instanceNameOrID interface{}) *Store_DeleteInstance_Call {
+ return &Store_DeleteInstance_Call{Call: _e.mock.On("DeleteInstance", ctx, poolID, instanceNameOrID)}
+}
+
+func (_c *Store_DeleteInstance_Call) Run(run func(ctx context.Context, poolID string, instanceNameOrID string)) *Store_DeleteInstance_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string), args[2].(string))
+ })
+ return _c
+}
+
+func (_c *Store_DeleteInstance_Call) Return(_a0 error) *Store_DeleteInstance_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *Store_DeleteInstance_Call) RunAndReturn(run func(context.Context, string, string) error) *Store_DeleteInstance_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
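// Editor's note: the second argument is now named instanceNameOrID to reflect
// that the store accepts either identifier; the mock's argument matching is
// unchanged. A hypothetical expectation tolerating two deletions from one pool:
//
//	store.EXPECT().DeleteInstance(mock.Anything, "pool-id", mock.Anything).Return(nil).Times(2)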
+// DeleteInstanceByName provides a mock function with given fields: ctx, instanceName
+func (_m *Store) DeleteInstanceByName(ctx context.Context, instanceName string) error {
+ ret := _m.Called(ctx, instanceName)
+
+ if len(ret) == 0 {
+ panic("no return value specified for DeleteInstanceByName")
+ }
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func(context.Context, string) error); ok {
+ r0 = rf(ctx, instanceName)
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+// Store_DeleteInstanceByName_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteInstanceByName'
+type Store_DeleteInstanceByName_Call struct {
+ *mock.Call
+}
+
+// DeleteInstanceByName is a helper method to define mock.On call
+// - ctx context.Context
+// - instanceName string
+func (_e *Store_Expecter) DeleteInstanceByName(ctx interface{}, instanceName interface{}) *Store_DeleteInstanceByName_Call {
+ return &Store_DeleteInstanceByName_Call{Call: _e.mock.On("DeleteInstanceByName", ctx, instanceName)}
+}
+
+func (_c *Store_DeleteInstanceByName_Call) Run(run func(ctx context.Context, instanceName string)) *Store_DeleteInstanceByName_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string))
+ })
+ return _c
+}
+
+func (_c *Store_DeleteInstanceByName_Call) Return(_a0 error) *Store_DeleteInstanceByName_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *Store_DeleteInstanceByName_Call) RunAndReturn(run func(context.Context, string) error) *Store_DeleteInstanceByName_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// DeleteJob provides a mock function with given fields: ctx, jobID
func (_m *Store) DeleteJob(ctx context.Context, jobID int64) error {
ret := _m.Called(ctx, jobID)
+ if len(ret) == 0 {
+ panic("no return value specified for DeleteJob")
+ }
+
var r0 error
if rf, ok := ret.Get(0).(func(context.Context, int64) error); ok {
r0 = rf(ctx, jobID)
@@ -352,10 +1422,43 @@ func (_m *Store) DeleteJob(ctx context.Context, jobID int64) error {
return r0
}
+// Store_DeleteJob_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteJob'
+type Store_DeleteJob_Call struct {
+ *mock.Call
+}
+
+// DeleteJob is a helper method to define mock.On call
+// - ctx context.Context
+// - jobID int64
+func (_e *Store_Expecter) DeleteJob(ctx interface{}, jobID interface{}) *Store_DeleteJob_Call {
+ return &Store_DeleteJob_Call{Call: _e.mock.On("DeleteJob", ctx, jobID)}
+}
+
+func (_c *Store_DeleteJob_Call) Run(run func(ctx context.Context, jobID int64)) *Store_DeleteJob_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(int64))
+ })
+ return _c
+}
+
+func (_c *Store_DeleteJob_Call) Return(_a0 error) *Store_DeleteJob_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *Store_DeleteJob_Call) RunAndReturn(run func(context.Context, int64) error) *Store_DeleteJob_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// DeleteOrganization provides a mock function with given fields: ctx, orgID
func (_m *Store) DeleteOrganization(ctx context.Context, orgID string) error {
ret := _m.Called(ctx, orgID)
+ if len(ret) == 0 {
+ panic("no return value specified for DeleteOrganization")
+ }
+
var r0 error
if rf, ok := ret.Get(0).(func(context.Context, string) error); ok {
r0 = rf(ctx, orgID)
@@ -366,24 +1469,43 @@ func (_m *Store) DeleteOrganization(ctx context.Context, orgID string) error {
return r0
}
-// DeleteOrganizationPool provides a mock function with given fields: ctx, orgID, poolID
-func (_m *Store) DeleteOrganizationPool(ctx context.Context, orgID string, poolID string) error {
- ret := _m.Called(ctx, orgID, poolID)
+// Store_DeleteOrganization_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteOrganization'
+type Store_DeleteOrganization_Call struct {
+ *mock.Call
+}
- var r0 error
- if rf, ok := ret.Get(0).(func(context.Context, string, string) error); ok {
- r0 = rf(ctx, orgID, poolID)
- } else {
- r0 = ret.Error(0)
- }
+// DeleteOrganization is a helper method to define mock.On call
+// - ctx context.Context
+// - orgID string
+func (_e *Store_Expecter) DeleteOrganization(ctx interface{}, orgID interface{}) *Store_DeleteOrganization_Call {
+ return &Store_DeleteOrganization_Call{Call: _e.mock.On("DeleteOrganization", ctx, orgID)}
+}
- return r0
+func (_c *Store_DeleteOrganization_Call) Run(run func(ctx context.Context, orgID string)) *Store_DeleteOrganization_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string))
+ })
+ return _c
+}
+
+func (_c *Store_DeleteOrganization_Call) Return(_a0 error) *Store_DeleteOrganization_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *Store_DeleteOrganization_Call) RunAndReturn(run func(context.Context, string) error) *Store_DeleteOrganization_Call {
+ _c.Call.Return(run)
+ return _c
}
// DeletePoolByID provides a mock function with given fields: ctx, poolID
func (_m *Store) DeletePoolByID(ctx context.Context, poolID string) error {
ret := _m.Called(ctx, poolID)
+ if len(ret) == 0 {
+ panic("no return value specified for DeletePoolByID")
+ }
+
var r0 error
if rf, ok := ret.Get(0).(func(context.Context, string) error); ok {
r0 = rf(ctx, poolID)
@@ -394,10 +1516,43 @@ func (_m *Store) DeletePoolByID(ctx context.Context, poolID string) error {
return r0
}
+// Store_DeletePoolByID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeletePoolByID'
+type Store_DeletePoolByID_Call struct {
+ *mock.Call
+}
+
+// DeletePoolByID is a helper method to define mock.On call
+// - ctx context.Context
+// - poolID string
+func (_e *Store_Expecter) DeletePoolByID(ctx interface{}, poolID interface{}) *Store_DeletePoolByID_Call {
+ return &Store_DeletePoolByID_Call{Call: _e.mock.On("DeletePoolByID", ctx, poolID)}
+}
+
+func (_c *Store_DeletePoolByID_Call) Run(run func(ctx context.Context, poolID string)) *Store_DeletePoolByID_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string))
+ })
+ return _c
+}
+
+func (_c *Store_DeletePoolByID_Call) Return(_a0 error) *Store_DeletePoolByID_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *Store_DeletePoolByID_Call) RunAndReturn(run func(context.Context, string) error) *Store_DeletePoolByID_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// DeleteRepository provides a mock function with given fields: ctx, repoID
func (_m *Store) DeleteRepository(ctx context.Context, repoID string) error {
ret := _m.Called(ctx, repoID)
+ if len(ret) == 0 {
+ panic("no return value specified for DeleteRepository")
+ }
+
var r0 error
if rf, ok := ret.Get(0).(func(context.Context, string) error); ok {
r0 = rf(ctx, repoID)
@@ -408,13 +1563,46 @@ func (_m *Store) DeleteRepository(ctx context.Context, repoID string) error {
return r0
}
-// DeleteRepositoryPool provides a mock function with given fields: ctx, repoID, poolID
-func (_m *Store) DeleteRepositoryPool(ctx context.Context, repoID string, poolID string) error {
- ret := _m.Called(ctx, repoID, poolID)
+// Store_DeleteRepository_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteRepository'
+type Store_DeleteRepository_Call struct {
+ *mock.Call
+}
+
+// DeleteRepository is a helper method to define mock.On call
+// - ctx context.Context
+// - repoID string
+func (_e *Store_Expecter) DeleteRepository(ctx interface{}, repoID interface{}) *Store_DeleteRepository_Call {
+ return &Store_DeleteRepository_Call{Call: _e.mock.On("DeleteRepository", ctx, repoID)}
+}
+
+func (_c *Store_DeleteRepository_Call) Run(run func(ctx context.Context, repoID string)) *Store_DeleteRepository_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string))
+ })
+ return _c
+}
+
+func (_c *Store_DeleteRepository_Call) Return(_a0 error) *Store_DeleteRepository_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *Store_DeleteRepository_Call) RunAndReturn(run func(context.Context, string) error) *Store_DeleteRepository_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// DeleteScaleSetByID provides a mock function with given fields: ctx, scaleSetID
+func (_m *Store) DeleteScaleSetByID(ctx context.Context, scaleSetID uint) error {
+ ret := _m.Called(ctx, scaleSetID)
+
+ if len(ret) == 0 {
+ panic("no return value specified for DeleteScaleSetByID")
+ }
 
var r0 error
- if rf, ok := ret.Get(0).(func(context.Context, string, string) error); ok {
- r0 = rf(ctx, repoID, poolID)
+ if rf, ok := ret.Get(0).(func(context.Context, uint) error); ok {
+ r0 = rf(ctx, scaleSetID)
} else {
r0 = ret.Error(0)
}
@@ -422,64 +1610,49 @@ func (_m *Store) DeleteRepositoryPool(ctx context.Context, repoID string, poolID
return r0
}
-// FindEnterprisePoolByTags provides a mock function with given fields: ctx, enterpriseID, tags
-func (_m *Store) FindEnterprisePoolByTags(ctx context.Context, enterpriseID string, tags []string) (params.Pool, error) {
- ret := _m.Called(ctx, enterpriseID, tags)
-
- var r0 params.Pool
- var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, string, []string) (params.Pool, error)); ok {
- return rf(ctx, enterpriseID, tags)
- }
- if rf, ok := ret.Get(0).(func(context.Context, string, []string) params.Pool); ok {
- r0 = rf(ctx, enterpriseID, tags)
- } else {
- r0 = ret.Get(0).(params.Pool)
- }
-
- if rf, ok := ret.Get(1).(func(context.Context, string, []string) error); ok {
- r1 = rf(ctx, enterpriseID, tags)
- } else {
- r1 = ret.Error(1)
- }
-
- return r0, r1
+// Store_DeleteScaleSetByID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteScaleSetByID'
+type Store_DeleteScaleSetByID_Call struct {
+ *mock.Call
}
-// FindOrganizationPoolByTags provides a mock function with given fields: ctx, orgID, tags
-func (_m *Store) FindOrganizationPoolByTags(ctx context.Context, orgID string, tags []string) (params.Pool, error) {
- ret := _m.Called(ctx, orgID, tags)
+// DeleteScaleSetByID is a helper method to define mock.On call
+// - ctx context.Context
+// - scaleSetID uint
+func (_e *Store_Expecter) DeleteScaleSetByID(ctx interface{}, scaleSetID interface{}) *Store_DeleteScaleSetByID_Call {
+ return &Store_DeleteScaleSetByID_Call{Call: _e.mock.On("DeleteScaleSetByID", ctx, scaleSetID)}
+}
- var r0 params.Pool
- var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, string, []string) (params.Pool, error)); ok {
- return rf(ctx, orgID, tags)
- }
- if rf, ok := ret.Get(0).(func(context.Context, string, []string) params.Pool); ok {
- r0 = rf(ctx, orgID, tags)
- } else {
- r0 = ret.Get(0).(params.Pool)
- }
+func (_c *Store_DeleteScaleSetByID_Call) Run(run func(ctx context.Context, scaleSetID uint)) *Store_DeleteScaleSetByID_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(uint))
+ })
+ return _c
+}
- if rf, ok := ret.Get(1).(func(context.Context, string, []string) error); ok {
- r1 = rf(ctx, orgID, tags)
- } else {
- r1 = ret.Error(1)
- }
+func (_c *Store_DeleteScaleSetByID_Call) Return(err error) *Store_DeleteScaleSetByID_Call {
+ _c.Call.Return(err)
+ return _c
+}
- return r0, r1
+func (_c *Store_DeleteScaleSetByID_Call) RunAndReturn(run func(context.Context, uint) error) *Store_DeleteScaleSetByID_Call {
+ _c.Call.Return(run)
+ return _c
}
// FindPoolsMatchingAllTags provides a mock function with given fields: ctx, entityType, entityID, tags
-func (_m *Store) FindPoolsMatchingAllTags(ctx context.Context, entityType params.PoolType, entityID string, tags []string) ([]params.Pool, error) {
+func (_m *Store) FindPoolsMatchingAllTags(ctx context.Context, entityType params.ForgeEntityType, entityID string, tags []string) ([]params.Pool, error) {
ret := _m.Called(ctx, entityType, entityID, tags)
+ if len(ret) == 0 {
+ panic("no return value specified for FindPoolsMatchingAllTags")
+ }
+
var r0 []params.Pool
var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, params.PoolType, string, []string) ([]params.Pool, error)); ok {
+ if rf, ok := ret.Get(0).(func(context.Context, params.ForgeEntityType, string, []string) ([]params.Pool, error)); ok {
return rf(ctx, entityType, entityID, tags)
}
- if rf, ok := ret.Get(0).(func(context.Context, params.PoolType, string, []string) []params.Pool); ok {
+ if rf, ok := ret.Get(0).(func(context.Context, params.ForgeEntityType, string, []string) []params.Pool); ok {
r0 = rf(ctx, entityType, entityID, tags)
} else {
if ret.Get(0) != nil {
@@ -487,7 +1660,7 @@ func (_m *Store) FindPoolsMatchingAllTags(ctx context.Context, entityType params
}
}
- if rf, ok := ret.Get(1).(func(context.Context, params.PoolType, string, []string) error); ok {
+ if rf, ok := ret.Get(1).(func(context.Context, params.ForgeEntityType, string, []string) error); ok {
r1 = rf(ctx, entityType, entityID, tags)
} else {
r1 = ret.Error(1)
@@ -496,23 +1669,58 @@ func (_m *Store) FindPoolsMatchingAllTags(ctx context.Context, entityType params
return r0, r1
}
-// FindRepositoryPoolByTags provides a mock function with given fields: ctx, repoID, tags
-func (_m *Store) FindRepositoryPoolByTags(ctx context.Context, repoID string, tags []string) (params.Pool, error) {
- ret := _m.Called(ctx, repoID, tags)
+// Store_FindPoolsMatchingAllTags_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'FindPoolsMatchingAllTags'
+type Store_FindPoolsMatchingAllTags_Call struct {
+ *mock.Call
+}
- var r0 params.Pool
+// FindPoolsMatchingAllTags is a helper method to define mock.On call
+// - ctx context.Context
+// - entityType params.ForgeEntityType
+// - entityID string
+// - tags []string
+func (_e *Store_Expecter) FindPoolsMatchingAllTags(ctx interface{}, entityType interface{}, entityID interface{}, tags interface{}) *Store_FindPoolsMatchingAllTags_Call {
+ return &Store_FindPoolsMatchingAllTags_Call{Call: _e.mock.On("FindPoolsMatchingAllTags", ctx, entityType, entityID, tags)}
+}
+
+func (_c *Store_FindPoolsMatchingAllTags_Call) Run(run func(ctx context.Context, entityType params.ForgeEntityType, entityID string, tags []string)) *Store_FindPoolsMatchingAllTags_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(params.ForgeEntityType), args[2].(string), args[3].([]string))
+ })
+ return _c
+}
+
+func (_c *Store_FindPoolsMatchingAllTags_Call) Return(_a0 []params.Pool, _a1 error) *Store_FindPoolsMatchingAllTags_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_FindPoolsMatchingAllTags_Call) RunAndReturn(run func(context.Context, params.ForgeEntityType, string, []string) ([]params.Pool, error)) *Store_FindPoolsMatchingAllTags_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
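// Editor's note: FindPoolsMatchingAllTags now takes params.ForgeEntityType
// (renamed from params.PoolType). A sketch matching any tag set for a
// repository entity; ForgeEntityTypeRepository is assumed to mirror the old
// PoolType constant naming.
//
//	store.EXPECT().
//		FindPoolsMatchingAllTags(mock.Anything, params.ForgeEntityTypeRepository, "repo-id", mock.Anything).
//		Return([]params.Pool{}, nil).
//		Once()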
+// GetAdminUser provides a mock function with given fields: ctx
+func (_m *Store) GetAdminUser(ctx context.Context) (params.User, error) {
+ ret := _m.Called(ctx)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetAdminUser")
+ }
+
+ var r0 params.User
var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, string, []string) (params.Pool, error)); ok {
- return rf(ctx, repoID, tags)
+ if rf, ok := ret.Get(0).(func(context.Context) (params.User, error)); ok {
+ return rf(ctx)
}
- if rf, ok := ret.Get(0).(func(context.Context, string, []string) params.Pool); ok {
- r0 = rf(ctx, repoID, tags)
+ if rf, ok := ret.Get(0).(func(context.Context) params.User); ok {
+ r0 = rf(ctx)
} else {
- r0 = ret.Get(0).(params.Pool)
+ r0 = ret.Get(0).(params.User)
}
- if rf, ok := ret.Get(1).(func(context.Context, string, []string) error); ok {
- r1 = rf(ctx, repoID, tags)
+ if rf, ok := ret.Get(1).(func(context.Context) error); ok {
+ r1 = rf(ctx)
} else {
r1 = ret.Error(1)
}
@@ -520,23 +1728,55 @@ func (_m *Store) FindRepositoryPoolByTags(ctx context.Context, repoID string, ta
return r0, r1
}
-// GetEnterprise provides a mock function with given fields: ctx, name
-func (_m *Store) GetEnterprise(ctx context.Context, name string) (params.Enterprise, error) {
- ret := _m.Called(ctx, name)
+// Store_GetAdminUser_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetAdminUser'
+type Store_GetAdminUser_Call struct {
+ *mock.Call
+}
+
+// GetAdminUser is a helper method to define mock.On call
+// - ctx context.Context
+func (_e *Store_Expecter) GetAdminUser(ctx interface{}) *Store_GetAdminUser_Call {
+ return &Store_GetAdminUser_Call{Call: _e.mock.On("GetAdminUser", ctx)}
+}
+
+func (_c *Store_GetAdminUser_Call) Run(run func(ctx context.Context)) *Store_GetAdminUser_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context))
+ })
+ return _c
+}
+
+func (_c *Store_GetAdminUser_Call) Return(_a0 params.User, _a1 error) *Store_GetAdminUser_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_GetAdminUser_Call) RunAndReturn(run func(context.Context) (params.User, error)) *Store_GetAdminUser_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GetEnterprise provides a mock function with given fields: ctx, name, endpointName
+func (_m *Store) GetEnterprise(ctx context.Context, name string, endpointName string) (params.Enterprise, error) {
+ ret := _m.Called(ctx, name, endpointName)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetEnterprise")
+ }
 
var r0 params.Enterprise
var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, string) (params.Enterprise, error)); ok {
- return rf(ctx, name)
+ if rf, ok := ret.Get(0).(func(context.Context, string, string) (params.Enterprise, error)); ok {
+ return rf(ctx, name, endpointName)
}
- if rf, ok := ret.Get(0).(func(context.Context, string) params.Enterprise); ok {
- r0 = rf(ctx, name)
+ if rf, ok := ret.Get(0).(func(context.Context, string, string) params.Enterprise); ok {
+ r0 = rf(ctx, name, endpointName)
} else {
r0 = ret.Get(0).(params.Enterprise)
}
- if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
- r1 = rf(ctx, name)
+ if rf, ok := ret.Get(1).(func(context.Context, string, string) error); ok {
+ r1 = rf(ctx, name, endpointName)
} else {
r1 = ret.Error(1)
}
@@ -544,10 +1784,44 @@ func (_m *Store) GetEnterprise(ctx context.Context, name string) (params.Enterpr
return r0, r1
}
+// Store_GetEnterprise_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetEnterprise'
+type Store_GetEnterprise_Call struct {
+ *mock.Call
+}
+
+// GetEnterprise is a helper method to define mock.On call
+// - ctx context.Context
+// - name string
+// - endpointName string
+func (_e *Store_Expecter) GetEnterprise(ctx interface{}, name interface{}, endpointName interface{}) *Store_GetEnterprise_Call {
+ return &Store_GetEnterprise_Call{Call: _e.mock.On("GetEnterprise", ctx, name, endpointName)}
+}
+
+func (_c *Store_GetEnterprise_Call) Run(run func(ctx context.Context, name string, endpointName string)) *Store_GetEnterprise_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string), args[2].(string))
+ })
+ return _c
+}
+
+func (_c *Store_GetEnterprise_Call) Return(_a0 params.Enterprise, _a1 error) *Store_GetEnterprise_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_GetEnterprise_Call) RunAndReturn(run func(context.Context, string, string) (params.Enterprise, error)) *Store_GetEnterprise_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// GetEnterpriseByID provides a mock function with given fields: ctx, enterpriseID
func (_m *Store) GetEnterpriseByID(ctx context.Context, enterpriseID string) (params.Enterprise, error) {
ret := _m.Called(ctx, enterpriseID)
+ if len(ret) == 0 {
+ panic("no return value specified for GetEnterpriseByID")
+ }
+
var r0 params.Enterprise
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, string) (params.Enterprise, error)); ok {
@@ -568,23 +1842,56 @@ func (_m *Store) GetEnterpriseByID(ctx context.Context, enterpriseID string) (pa
return r0, r1
}
-// GetEnterprisePool provides a mock function with given fields: ctx, enterpriseID, poolID
-func (_m *Store) GetEnterprisePool(ctx context.Context, enterpriseID string, poolID string) (params.Pool, error) {
- ret := _m.Called(ctx, enterpriseID, poolID)
+// Store_GetEnterpriseByID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetEnterpriseByID'
+type Store_GetEnterpriseByID_Call struct {
+ *mock.Call
+}
+
+// GetEnterpriseByID is a helper method to define mock.On call
+// - ctx context.Context
+// - enterpriseID string
+func (_e *Store_Expecter) GetEnterpriseByID(ctx interface{}, enterpriseID interface{}) *Store_GetEnterpriseByID_Call {
+ return &Store_GetEnterpriseByID_Call{Call: _e.mock.On("GetEnterpriseByID", ctx, enterpriseID)}
+}
+
+func (_c *Store_GetEnterpriseByID_Call) Run(run func(ctx context.Context, enterpriseID string)) *Store_GetEnterpriseByID_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string))
+ })
+ return _c
+}
+
+func (_c *Store_GetEnterpriseByID_Call) Return(_a0 params.Enterprise, _a1 error) *Store_GetEnterpriseByID_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_GetEnterpriseByID_Call) RunAndReturn(run func(context.Context, string) (params.Enterprise, error)) *Store_GetEnterpriseByID_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GetEntityPool provides a mock function with given fields: ctx, entity, poolID
+func (_m *Store) GetEntityPool(ctx context.Context, entity params.ForgeEntity, poolID string) (params.Pool, error) {
+ ret := _m.Called(ctx, entity, poolID)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetEntityPool")
+ }
 
var r0 params.Pool
var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, string, string) (params.Pool, error)); ok {
- return rf(ctx, enterpriseID, poolID)
+ if rf, ok := ret.Get(0).(func(context.Context, params.ForgeEntity, string) (params.Pool, error)); ok {
+ return rf(ctx, entity, poolID)
}
- if rf, ok := ret.Get(0).(func(context.Context, string, string) params.Pool); ok {
- r0 = rf(ctx, enterpriseID, poolID)
+ if rf, ok := ret.Get(0).(func(context.Context, params.ForgeEntity, string) params.Pool); ok {
+ r0 = rf(ctx, entity, poolID)
} else {
r0 = ret.Get(0).(params.Pool)
}
- if rf, ok := ret.Get(1).(func(context.Context, string, string) error); ok {
- r1 = rf(ctx, enterpriseID, poolID)
+ if rf, ok := ret.Get(1).(func(context.Context, params.ForgeEntity, string) error); ok {
+ r1 = rf(ctx, entity, poolID)
} else {
r1 = ret.Error(1)
}
@@ -592,23 +1899,461 @@ func (_m *Store) GetEnterprisePool(ctx context.Context, enterpriseID string, poo
return r0, r1
}
-// GetInstanceByName provides a mock function with given fields: ctx, instanceName
-func (_m *Store) GetInstanceByName(ctx context.Context, instanceName string) (params.Instance, error) {
- ret := _m.Called(ctx, instanceName)
+// Store_GetEntityPool_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetEntityPool'
+type Store_GetEntityPool_Call struct {
+ *mock.Call
+}
+
+// GetEntityPool is a helper method to define mock.On call
+// - ctx context.Context
+// - entity params.ForgeEntity
+// - poolID string
+func (_e *Store_Expecter) GetEntityPool(ctx interface{}, entity interface{}, poolID interface{}) *Store_GetEntityPool_Call {
+ return &Store_GetEntityPool_Call{Call: _e.mock.On("GetEntityPool", ctx, entity, poolID)}
+}
+
+func (_c *Store_GetEntityPool_Call) Run(run func(ctx context.Context, entity params.ForgeEntity, poolID string)) *Store_GetEntityPool_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(params.ForgeEntity), args[2].(string))
+ })
+ return _c
+}
+
+func (_c *Store_GetEntityPool_Call) Return(_a0 params.Pool, _a1 error) *Store_GetEntityPool_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_GetEntityPool_Call) RunAndReturn(run func(context.Context, params.ForgeEntity, string) (params.Pool, error)) *Store_GetEntityPool_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GetForgeEntity provides a mock function with given fields: _a0, entityType, entityID
+func (_m *Store) GetForgeEntity(_a0 context.Context, entityType params.ForgeEntityType, entityID string) (params.ForgeEntity, error) {
+ ret := _m.Called(_a0, entityType, entityID)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetForgeEntity")
+ }
+
+ var r0 params.ForgeEntity
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, params.ForgeEntityType, string) (params.ForgeEntity, error)); ok {
+ return rf(_a0, entityType, entityID)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, params.ForgeEntityType, string) params.ForgeEntity); ok {
+ r0 = rf(_a0, entityType, entityID)
+ } else {
+ r0 = ret.Get(0).(params.ForgeEntity)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, params.ForgeEntityType, string) error); ok {
+ r1 = rf(_a0, entityType, entityID)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// Store_GetForgeEntity_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetForgeEntity'
+type Store_GetForgeEntity_Call struct {
+ *mock.Call
+}
+
+// GetForgeEntity is a helper method to define mock.On call
+// - _a0 context.Context
+// - entityType params.ForgeEntityType
+// - entityID string
+func (_e *Store_Expecter) GetForgeEntity(_a0 interface{}, entityType interface{}, entityID interface{}) *Store_GetForgeEntity_Call {
+ return &Store_GetForgeEntity_Call{Call: _e.mock.On("GetForgeEntity", _a0, entityType, entityID)}
+}
+
+func (_c *Store_GetForgeEntity_Call) Run(run func(_a0 context.Context, entityType params.ForgeEntityType, entityID string)) *Store_GetForgeEntity_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(params.ForgeEntityType), args[2].(string))
+ })
+ return _c
+}
+
+func (_c *Store_GetForgeEntity_Call) Return(_a0 params.ForgeEntity, _a1 error) *Store_GetForgeEntity_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_GetForgeEntity_Call) RunAndReturn(run func(context.Context, params.ForgeEntityType, string) (params.ForgeEntity, error)) *Store_GetForgeEntity_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GetGiteaCredentials provides a mock function with given fields: ctx, id, detailed
+func (_m *Store) GetGiteaCredentials(ctx context.Context, id uint, detailed bool) (params.ForgeCredentials, error) {
+ ret := _m.Called(ctx, id, detailed)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetGiteaCredentials")
+ }
+
+ var r0 params.ForgeCredentials
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, uint, bool) (params.ForgeCredentials, error)); ok {
+ return rf(ctx, id, detailed)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, uint, bool) params.ForgeCredentials); ok {
+ r0 = rf(ctx, id, detailed)
+ } else {
+ r0 = ret.Get(0).(params.ForgeCredentials)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, uint, bool) error); ok {
+ r1 = rf(ctx, id, detailed)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// Store_GetGiteaCredentials_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetGiteaCredentials'
+type Store_GetGiteaCredentials_Call struct {
+ *mock.Call
+}
+
+// GetGiteaCredentials is a helper method to define mock.On call
+// - ctx context.Context
+// - id uint
+// - detailed bool
+func (_e *Store_Expecter) GetGiteaCredentials(ctx interface{}, id interface{}, detailed interface{}) *Store_GetGiteaCredentials_Call {
+ return &Store_GetGiteaCredentials_Call{Call: _e.mock.On("GetGiteaCredentials", ctx, id, detailed)}
+}
+
+func (_c *Store_GetGiteaCredentials_Call) Run(run func(ctx context.Context, id uint, detailed bool)) *Store_GetGiteaCredentials_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(uint), args[2].(bool))
+ })
+ return _c
+}
+
+func (_c *Store_GetGiteaCredentials_Call) Return(_a0 params.ForgeCredentials, _a1 error) *Store_GetGiteaCredentials_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_GetGiteaCredentials_Call) RunAndReturn(run func(context.Context, uint, bool) (params.ForgeCredentials, error)) *Store_GetGiteaCredentials_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GetGiteaCredentialsByName provides a mock function with given fields: ctx, name, detailed
+func (_m *Store) GetGiteaCredentialsByName(ctx context.Context, name string, detailed bool) (params.ForgeCredentials, error) {
+ ret := _m.Called(ctx, name, detailed)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetGiteaCredentialsByName")
+ }
+
+ var r0 params.ForgeCredentials
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, string, bool) (params.ForgeCredentials, error)); ok {
+ return rf(ctx, name, detailed)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, string, bool) params.ForgeCredentials); ok {
+ r0 = rf(ctx, name, detailed)
+ } else {
+ r0 = ret.Get(0).(params.ForgeCredentials)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, string, bool) error); ok {
+ r1 = rf(ctx, name, detailed)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// Store_GetGiteaCredentialsByName_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetGiteaCredentialsByName'
+type Store_GetGiteaCredentialsByName_Call struct {
+ *mock.Call
+}
+
+// GetGiteaCredentialsByName is a helper method to define mock.On call
+// - ctx context.Context
+// - name string
+// - detailed bool
+func (_e *Store_Expecter) GetGiteaCredentialsByName(ctx interface{}, name interface{}, detailed interface{}) *Store_GetGiteaCredentialsByName_Call {
+ return &Store_GetGiteaCredentialsByName_Call{Call: _e.mock.On("GetGiteaCredentialsByName", ctx, name, detailed)}
+}
+
+func (_c *Store_GetGiteaCredentialsByName_Call) Run(run func(ctx context.Context, name string, detailed bool)) *Store_GetGiteaCredentialsByName_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string), args[2].(bool))
+ })
+ return _c
+}
+
+func (_c *Store_GetGiteaCredentialsByName_Call) Return(_a0 params.ForgeCredentials, _a1 error) *Store_GetGiteaCredentialsByName_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_GetGiteaCredentialsByName_Call) RunAndReturn(run func(context.Context, string, bool) (params.ForgeCredentials, error)) *Store_GetGiteaCredentialsByName_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GetGiteaEndpoint provides a mock function with given fields: _a0, name
+func (_m *Store) GetGiteaEndpoint(_a0 context.Context, name string) (params.ForgeEndpoint, error) {
+ ret := _m.Called(_a0, name)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetGiteaEndpoint")
+ }
+
+ var r0 params.ForgeEndpoint
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, string) (params.ForgeEndpoint, error)); ok {
+ return rf(_a0, name)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, string) params.ForgeEndpoint); ok {
+ r0 = rf(_a0, name)
+ } else {
+ r0 = ret.Get(0).(params.ForgeEndpoint)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
+ r1 = rf(_a0, name)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// Store_GetGiteaEndpoint_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetGiteaEndpoint'
+type Store_GetGiteaEndpoint_Call struct {
+ *mock.Call
+}
+
+// GetGiteaEndpoint is a helper method to define mock.On call
+// - _a0 context.Context
+// - name string
+func (_e *Store_Expecter) GetGiteaEndpoint(_a0 interface{}, name interface{}) *Store_GetGiteaEndpoint_Call {
+ return &Store_GetGiteaEndpoint_Call{Call: _e.mock.On("GetGiteaEndpoint", _a0, name)}
+}
+
+func (_c *Store_GetGiteaEndpoint_Call) Run(run func(_a0 context.Context, name string)) *Store_GetGiteaEndpoint_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string))
+ })
+ return _c
+}
+
+func (_c *Store_GetGiteaEndpoint_Call) Return(_a0 params.ForgeEndpoint, _a1 error) *Store_GetGiteaEndpoint_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_GetGiteaEndpoint_Call) RunAndReturn(run func(context.Context, string) (params.ForgeEndpoint, error)) *Store_GetGiteaEndpoint_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GetGithubCredentials provides a mock function with given fields: ctx, id, detailed
+func (_m *Store) GetGithubCredentials(ctx context.Context, id uint, detailed bool) (params.ForgeCredentials, error) {
+ ret := _m.Called(ctx, id, detailed)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetGithubCredentials")
+ }
+
+ var r0 params.ForgeCredentials
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, uint, bool) (params.ForgeCredentials, error)); ok {
+ return rf(ctx, id, detailed)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, uint, bool) params.ForgeCredentials); ok {
+ r0 = rf(ctx, id, detailed)
+ } else {
+ r0 = ret.Get(0).(params.ForgeCredentials)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, uint, bool) error); ok {
+ r1 = rf(ctx, id, detailed)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// Store_GetGithubCredentials_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetGithubCredentials'
+type Store_GetGithubCredentials_Call struct {
+ *mock.Call
+}
+
+// GetGithubCredentials is a helper method to define mock.On call
+// - ctx context.Context
+// - id uint
+// - detailed bool
+func (_e *Store_Expecter) GetGithubCredentials(ctx interface{}, id interface{}, detailed interface{}) *Store_GetGithubCredentials_Call {
+ return &Store_GetGithubCredentials_Call{Call: _e.mock.On("GetGithubCredentials", ctx, id, detailed)}
+}
+
+func (_c *Store_GetGithubCredentials_Call) Run(run func(ctx context.Context, id uint, detailed bool)) *Store_GetGithubCredentials_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(uint), args[2].(bool))
+ })
+ return _c
+}
+
+func (_c *Store_GetGithubCredentials_Call) Return(_a0 params.ForgeCredentials, _a1 error) *Store_GetGithubCredentials_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_GetGithubCredentials_Call) RunAndReturn(run func(context.Context, uint, bool) (params.ForgeCredentials, error)) *Store_GetGithubCredentials_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GetGithubCredentialsByName provides a mock function with given fields: ctx, name, detailed
+func (_m *Store) GetGithubCredentialsByName(ctx context.Context, name string, detailed bool) (params.ForgeCredentials, error) {
+ ret := _m.Called(ctx, name, detailed)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetGithubCredentialsByName")
+ }
+
+ var r0 params.ForgeCredentials
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, string, bool) (params.ForgeCredentials, error)); ok {
+ return rf(ctx, name, detailed)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, string, bool) params.ForgeCredentials); ok {
+ r0 = rf(ctx, name, detailed)
+ } else {
+ r0 = ret.Get(0).(params.ForgeCredentials)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, string, bool) error); ok {
+ r1 = rf(ctx, name, detailed)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// Store_GetGithubCredentialsByName_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetGithubCredentialsByName'
+type Store_GetGithubCredentialsByName_Call struct {
+ *mock.Call
+}
+
+// GetGithubCredentialsByName is a helper method to define mock.On call
+// - ctx context.Context
+// - name string
+// - detailed bool
+func (_e *Store_Expecter) GetGithubCredentialsByName(ctx interface{}, name interface{}, detailed interface{}) *Store_GetGithubCredentialsByName_Call {
+ return &Store_GetGithubCredentialsByName_Call{Call: _e.mock.On("GetGithubCredentialsByName", ctx, name, detailed)}
+}
+
+func (_c *Store_GetGithubCredentialsByName_Call) Run(run func(ctx context.Context, name string, detailed bool)) *Store_GetGithubCredentialsByName_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string), args[2].(bool))
+ })
+ return _c
+}
+
+func (_c *Store_GetGithubCredentialsByName_Call) Return(_a0 params.ForgeCredentials, _a1 error) *Store_GetGithubCredentialsByName_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_GetGithubCredentialsByName_Call) RunAndReturn(run func(context.Context, string, bool) (params.ForgeCredentials, error)) *Store_GetGithubCredentialsByName_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
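Editor's note: the cascade of type assertions also keeps the classic string-based testify style working, including handing `.Return` a function value — the generated body detects the func type and calls it rather than treating it as a literal result. A sketch (the `Name` field is an assumption):

    s.On("GetGithubCredentialsByName", mock.Anything, "gh-creds", true).
        Return(func(_ context.Context, name string, _ bool) (params.ForgeCredentials, error) {
            return params.ForgeCredentials{Name: name}, nil
        })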
+// GetGithubEndpoint provides a mock function with given fields: ctx, name
+func (_m *Store) GetGithubEndpoint(ctx context.Context, name string) (params.ForgeEndpoint, error) {
+ ret := _m.Called(ctx, name)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetGithubEndpoint")
+ }
+
+ var r0 params.ForgeEndpoint
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, string) (params.ForgeEndpoint, error)); ok {
+ return rf(ctx, name)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, string) params.ForgeEndpoint); ok {
+ r0 = rf(ctx, name)
+ } else {
+ r0 = ret.Get(0).(params.ForgeEndpoint)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
+ r1 = rf(ctx, name)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// Store_GetGithubEndpoint_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetGithubEndpoint'
+type Store_GetGithubEndpoint_Call struct {
+ *mock.Call
+}
+
+// GetGithubEndpoint is a helper method to define mock.On call
+// - ctx context.Context
+// - name string
+func (_e *Store_Expecter) GetGithubEndpoint(ctx interface{}, name interface{}) *Store_GetGithubEndpoint_Call {
+ return &Store_GetGithubEndpoint_Call{Call: _e.mock.On("GetGithubEndpoint", ctx, name)}
+}
+
+func (_c *Store_GetGithubEndpoint_Call) Run(run func(ctx context.Context, name string)) *Store_GetGithubEndpoint_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string))
+ })
+ return _c
+}
+
+func (_c *Store_GetGithubEndpoint_Call) Return(_a0 params.ForgeEndpoint, _a1 error) *Store_GetGithubEndpoint_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_GetGithubEndpoint_Call) RunAndReturn(run func(context.Context, string) (params.ForgeEndpoint, error)) *Store_GetGithubEndpoint_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GetInstance provides a mock function with given fields: ctx, instanceNameOrID
+func (_m *Store) GetInstance(ctx context.Context, instanceNameOrID string) (params.Instance, error) {
+ ret := _m.Called(ctx, instanceNameOrID)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetInstance")
+ }
var r0 params.Instance
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, string) (params.Instance, error)); ok {
- return rf(ctx, instanceName)
+ return rf(ctx, instanceNameOrID)
}
if rf, ok := ret.Get(0).(func(context.Context, string) params.Instance); ok {
- r0 = rf(ctx, instanceName)
+ r0 = rf(ctx, instanceNameOrID)
} else {
r0 = ret.Get(0).(params.Instance)
}
if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
- r1 = rf(ctx, instanceName)
+ r1 = rf(ctx, instanceNameOrID)
} else {
r1 = ret.Error(1)
}
@@ -616,10 +2361,43 @@ func (_m *Store) GetInstanceByName(ctx context.Context, instanceName string) (pa
return r0, r1
}
+// Store_GetInstance_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetInstance'
+type Store_GetInstance_Call struct {
+ *mock.Call
+}
+
+// GetInstance is a helper method to define mock.On call
+// - ctx context.Context
+// - instanceNameOrID string
+func (_e *Store_Expecter) GetInstance(ctx interface{}, instanceNameOrID interface{}) *Store_GetInstance_Call {
+ return &Store_GetInstance_Call{Call: _e.mock.On("GetInstance", ctx, instanceNameOrID)}
+}
+
+func (_c *Store_GetInstance_Call) Run(run func(ctx context.Context, instanceNameOrID string)) *Store_GetInstance_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string))
+ })
+ return _c
+}
+
+func (_c *Store_GetInstance_Call) Return(_a0 params.Instance, _a1 error) *Store_GetInstance_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_GetInstance_Call) RunAndReturn(run func(context.Context, string) (params.Instance, error)) *Store_GetInstance_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
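Editor's note: `Run` hooks in before the canned return and is the idiomatic way to capture what the code under test actually asked for — with the renamed `instanceNameOrID` parameter, that is exactly what a test would want to assert on. Sketch:

    var lookedUp string
    s.EXPECT().
        GetInstance(mock.Anything, mock.Anything).
        Run(func(_ context.Context, instanceNameOrID string) {
            lookedUp = instanceNameOrID
        }).
        Return(params.Instance{}, nil)

    // ...exercise the code under test, then assert on lookedUp.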
// GetJobByID provides a mock function with given fields: ctx, jobID
func (_m *Store) GetJobByID(ctx context.Context, jobID int64) (params.Job, error) {
ret := _m.Called(ctx, jobID)
+ if len(ret) == 0 {
+ panic("no return value specified for GetJobByID")
+ }
+
var r0 params.Job
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, int64) (params.Job, error)); ok {
@@ -640,23 +2418,56 @@ func (_m *Store) GetJobByID(ctx context.Context, jobID int64) (params.Job, error
return r0, r1
}
-// GetOrganization provides a mock function with given fields: ctx, name
-func (_m *Store) GetOrganization(ctx context.Context, name string) (params.Organization, error) {
- ret := _m.Called(ctx, name)
+// Store_GetJobByID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetJobByID'
+type Store_GetJobByID_Call struct {
+ *mock.Call
+}
+
+// GetJobByID is a helper method to define mock.On call
+// - ctx context.Context
+// - jobID int64
+func (_e *Store_Expecter) GetJobByID(ctx interface{}, jobID interface{}) *Store_GetJobByID_Call {
+ return &Store_GetJobByID_Call{Call: _e.mock.On("GetJobByID", ctx, jobID)}
+}
+
+func (_c *Store_GetJobByID_Call) Run(run func(ctx context.Context, jobID int64)) *Store_GetJobByID_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(int64))
+ })
+ return _c
+}
+
+func (_c *Store_GetJobByID_Call) Return(_a0 params.Job, _a1 error) *Store_GetJobByID_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_GetJobByID_Call) RunAndReturn(run func(context.Context, int64) (params.Job, error)) *Store_GetJobByID_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GetOrganization provides a mock function with given fields: ctx, name, endpointName
+func (_m *Store) GetOrganization(ctx context.Context, name string, endpointName string) (params.Organization, error) {
+ ret := _m.Called(ctx, name, endpointName)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetOrganization")
+ }
var r0 params.Organization
var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, string) (params.Organization, error)); ok {
- return rf(ctx, name)
+ if rf, ok := ret.Get(0).(func(context.Context, string, string) (params.Organization, error)); ok {
+ return rf(ctx, name, endpointName)
}
- if rf, ok := ret.Get(0).(func(context.Context, string) params.Organization); ok {
- r0 = rf(ctx, name)
+ if rf, ok := ret.Get(0).(func(context.Context, string, string) params.Organization); ok {
+ r0 = rf(ctx, name, endpointName)
} else {
r0 = ret.Get(0).(params.Organization)
}
- if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
- r1 = rf(ctx, name)
+ if rf, ok := ret.Get(1).(func(context.Context, string, string) error); ok {
+ r1 = rf(ctx, name, endpointName)
} else {
r1 = ret.Error(1)
}
@@ -664,10 +2475,44 @@ func (_m *Store) GetOrganization(ctx context.Context, name string) (params.Organ
return r0, r1
}
+// Store_GetOrganization_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetOrganization'
+type Store_GetOrganization_Call struct {
+ *mock.Call
+}
+
+// GetOrganization is a helper method to define mock.On call
+// - ctx context.Context
+// - name string
+// - endpointName string
+func (_e *Store_Expecter) GetOrganization(ctx interface{}, name interface{}, endpointName interface{}) *Store_GetOrganization_Call {
+ return &Store_GetOrganization_Call{Call: _e.mock.On("GetOrganization", ctx, name, endpointName)}
+}
+
+func (_c *Store_GetOrganization_Call) Run(run func(ctx context.Context, name string, endpointName string)) *Store_GetOrganization_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string), args[2].(string))
+ })
+ return _c
+}
+
+func (_c *Store_GetOrganization_Call) Return(_a0 params.Organization, _a1 error) *Store_GetOrganization_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_GetOrganization_Call) RunAndReturn(run func(context.Context, string, string) (params.Organization, error)) *Store_GetOrganization_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// GetOrganizationByID provides a mock function with given fields: ctx, orgID
func (_m *Store) GetOrganizationByID(ctx context.Context, orgID string) (params.Organization, error) {
ret := _m.Called(ctx, orgID)
+ if len(ret) == 0 {
+ panic("no return value specified for GetOrganizationByID")
+ }
+
var r0 params.Organization
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, string) (params.Organization, error)); ok {
@@ -688,34 +2533,43 @@ func (_m *Store) GetOrganizationByID(ctx context.Context, orgID string) (params.
return r0, r1
}
-// GetOrganizationPool provides a mock function with given fields: ctx, orgID, poolID
-func (_m *Store) GetOrganizationPool(ctx context.Context, orgID string, poolID string) (params.Pool, error) {
- ret := _m.Called(ctx, orgID, poolID)
+// Store_GetOrganizationByID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetOrganizationByID'
+type Store_GetOrganizationByID_Call struct {
+ *mock.Call
+}
- var r0 params.Pool
- var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, string, string) (params.Pool, error)); ok {
- return rf(ctx, orgID, poolID)
- }
- if rf, ok := ret.Get(0).(func(context.Context, string, string) params.Pool); ok {
- r0 = rf(ctx, orgID, poolID)
- } else {
- r0 = ret.Get(0).(params.Pool)
- }
+// GetOrganizationByID is a helper method to define mock.On call
+// - ctx context.Context
+// - orgID string
+func (_e *Store_Expecter) GetOrganizationByID(ctx interface{}, orgID interface{}) *Store_GetOrganizationByID_Call {
+ return &Store_GetOrganizationByID_Call{Call: _e.mock.On("GetOrganizationByID", ctx, orgID)}
+}
- if rf, ok := ret.Get(1).(func(context.Context, string, string) error); ok {
- r1 = rf(ctx, orgID, poolID)
- } else {
- r1 = ret.Error(1)
- }
+func (_c *Store_GetOrganizationByID_Call) Run(run func(ctx context.Context, orgID string)) *Store_GetOrganizationByID_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string))
+ })
+ return _c
+}
- return r0, r1
+func (_c *Store_GetOrganizationByID_Call) Return(_a0 params.Organization, _a1 error) *Store_GetOrganizationByID_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_GetOrganizationByID_Call) RunAndReturn(run func(context.Context, string) (params.Organization, error)) *Store_GetOrganizationByID_Call {
+ _c.Call.Return(run)
+ return _c
}
// GetPoolByID provides a mock function with given fields: ctx, poolID
func (_m *Store) GetPoolByID(ctx context.Context, poolID string) (params.Pool, error) {
ret := _m.Called(ctx, poolID)
+ if len(ret) == 0 {
+ panic("no return value specified for GetPoolByID")
+ }
+
var r0 params.Pool
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, string) (params.Pool, error)); ok {
@@ -736,23 +2590,56 @@ func (_m *Store) GetPoolByID(ctx context.Context, poolID string) (params.Pool, e
return r0, r1
}
-// GetPoolInstanceByName provides a mock function with given fields: ctx, poolID, instanceName
-func (_m *Store) GetPoolInstanceByName(ctx context.Context, poolID string, instanceName string) (params.Instance, error) {
- ret := _m.Called(ctx, poolID, instanceName)
+// Store_GetPoolByID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetPoolByID'
+type Store_GetPoolByID_Call struct {
+ *mock.Call
+}
- var r0 params.Instance
+// GetPoolByID is a helper method to define mock.On call
+// - ctx context.Context
+// - poolID string
+func (_e *Store_Expecter) GetPoolByID(ctx interface{}, poolID interface{}) *Store_GetPoolByID_Call {
+ return &Store_GetPoolByID_Call{Call: _e.mock.On("GetPoolByID", ctx, poolID)}
+}
+
+func (_c *Store_GetPoolByID_Call) Run(run func(ctx context.Context, poolID string)) *Store_GetPoolByID_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string))
+ })
+ return _c
+}
+
+func (_c *Store_GetPoolByID_Call) Return(_a0 params.Pool, _a1 error) *Store_GetPoolByID_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_GetPoolByID_Call) RunAndReturn(run func(context.Context, string) (params.Pool, error)) *Store_GetPoolByID_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GetRepository provides a mock function with given fields: ctx, owner, name, endpointName
+func (_m *Store) GetRepository(ctx context.Context, owner string, name string, endpointName string) (params.Repository, error) {
+ ret := _m.Called(ctx, owner, name, endpointName)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetRepository")
+ }
+
+ var r0 params.Repository
var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, string, string) (params.Instance, error)); ok {
- return rf(ctx, poolID, instanceName)
+ if rf, ok := ret.Get(0).(func(context.Context, string, string, string) (params.Repository, error)); ok {
+ return rf(ctx, owner, name, endpointName)
}
- if rf, ok := ret.Get(0).(func(context.Context, string, string) params.Instance); ok {
- r0 = rf(ctx, poolID, instanceName)
+ if rf, ok := ret.Get(0).(func(context.Context, string, string, string) params.Repository); ok {
+ r0 = rf(ctx, owner, name, endpointName)
} else {
- r0 = ret.Get(0).(params.Instance)
+ r0 = ret.Get(0).(params.Repository)
}
- if rf, ok := ret.Get(1).(func(context.Context, string, string) error); ok {
- r1 = rf(ctx, poolID, instanceName)
+ if rf, ok := ret.Get(1).(func(context.Context, string, string, string) error); ok {
+ r1 = rf(ctx, owner, name, endpointName)
} else {
r1 = ret.Error(1)
}
@@ -760,34 +2647,45 @@ func (_m *Store) GetPoolInstanceByName(ctx context.Context, poolID string, insta
return r0, r1
}
-// GetRepository provides a mock function with given fields: ctx, owner, name
-func (_m *Store) GetRepository(ctx context.Context, owner string, name string) (params.Repository, error) {
- ret := _m.Called(ctx, owner, name)
+// Store_GetRepository_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetRepository'
+type Store_GetRepository_Call struct {
+ *mock.Call
+}
- var r0 params.Repository
- var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, string, string) (params.Repository, error)); ok {
- return rf(ctx, owner, name)
- }
- if rf, ok := ret.Get(0).(func(context.Context, string, string) params.Repository); ok {
- r0 = rf(ctx, owner, name)
- } else {
- r0 = ret.Get(0).(params.Repository)
- }
+// GetRepository is a helper method to define mock.On call
+// - ctx context.Context
+// - owner string
+// - name string
+// - endpointName string
+func (_e *Store_Expecter) GetRepository(ctx interface{}, owner interface{}, name interface{}, endpointName interface{}) *Store_GetRepository_Call {
+ return &Store_GetRepository_Call{Call: _e.mock.On("GetRepository", ctx, owner, name, endpointName)}
+}
- if rf, ok := ret.Get(1).(func(context.Context, string, string) error); ok {
- r1 = rf(ctx, owner, name)
- } else {
- r1 = ret.Error(1)
- }
+func (_c *Store_GetRepository_Call) Run(run func(ctx context.Context, owner string, name string, endpointName string)) *Store_GetRepository_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string), args[2].(string), args[3].(string))
+ })
+ return _c
+}
- return r0, r1
+func (_c *Store_GetRepository_Call) Return(_a0 params.Repository, _a1 error) *Store_GetRepository_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_GetRepository_Call) RunAndReturn(run func(context.Context, string, string, string) (params.Repository, error)) *Store_GetRepository_Call {
+ _c.Call.Return(run)
+ return _c
}
// GetRepositoryByID provides a mock function with given fields: ctx, repoID
func (_m *Store) GetRepositoryByID(ctx context.Context, repoID string) (params.Repository, error) {
ret := _m.Called(ctx, repoID)
+ if len(ret) == 0 {
+ panic("no return value specified for GetRepositoryByID")
+ }
+
var r0 params.Repository
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, string) (params.Repository, error)); ok {
@@ -808,23 +2706,56 @@ func (_m *Store) GetRepositoryByID(ctx context.Context, repoID string) (params.R
return r0, r1
}
-// GetRepositoryPool provides a mock function with given fields: ctx, repoID, poolID
-func (_m *Store) GetRepositoryPool(ctx context.Context, repoID string, poolID string) (params.Pool, error) {
- ret := _m.Called(ctx, repoID, poolID)
+// Store_GetRepositoryByID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetRepositoryByID'
+type Store_GetRepositoryByID_Call struct {
+ *mock.Call
+}
- var r0 params.Pool
+// GetRepositoryByID is a helper method to define mock.On call
+// - ctx context.Context
+// - repoID string
+func (_e *Store_Expecter) GetRepositoryByID(ctx interface{}, repoID interface{}) *Store_GetRepositoryByID_Call {
+ return &Store_GetRepositoryByID_Call{Call: _e.mock.On("GetRepositoryByID", ctx, repoID)}
+}
+
+func (_c *Store_GetRepositoryByID_Call) Run(run func(ctx context.Context, repoID string)) *Store_GetRepositoryByID_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string))
+ })
+ return _c
+}
+
+func (_c *Store_GetRepositoryByID_Call) Return(_a0 params.Repository, _a1 error) *Store_GetRepositoryByID_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_GetRepositoryByID_Call) RunAndReturn(run func(context.Context, string) (params.Repository, error)) *Store_GetRepositoryByID_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GetScaleSetByID provides a mock function with given fields: ctx, scaleSet
+func (_m *Store) GetScaleSetByID(ctx context.Context, scaleSet uint) (params.ScaleSet, error) {
+ ret := _m.Called(ctx, scaleSet)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetScaleSetByID")
+ }
+
+ var r0 params.ScaleSet
var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, string, string) (params.Pool, error)); ok {
- return rf(ctx, repoID, poolID)
+ if rf, ok := ret.Get(0).(func(context.Context, uint) (params.ScaleSet, error)); ok {
+ return rf(ctx, scaleSet)
}
- if rf, ok := ret.Get(0).(func(context.Context, string, string) params.Pool); ok {
- r0 = rf(ctx, repoID, poolID)
+ if rf, ok := ret.Get(0).(func(context.Context, uint) params.ScaleSet); ok {
+ r0 = rf(ctx, scaleSet)
} else {
- r0 = ret.Get(0).(params.Pool)
+ r0 = ret.Get(0).(params.ScaleSet)
}
- if rf, ok := ret.Get(1).(func(context.Context, string, string) error); ok {
- r1 = rf(ctx, repoID, poolID)
+ if rf, ok := ret.Get(1).(func(context.Context, uint) error); ok {
+ r1 = rf(ctx, scaleSet)
} else {
r1 = ret.Error(1)
}
@@ -832,10 +2763,43 @@ func (_m *Store) GetRepositoryPool(ctx context.Context, repoID string, poolID st
return r0, r1
}
+// Store_GetScaleSetByID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetScaleSetByID'
+type Store_GetScaleSetByID_Call struct {
+ *mock.Call
+}
+
+// GetScaleSetByID is a helper method to define mock.On call
+// - ctx context.Context
+// - scaleSet uint
+func (_e *Store_Expecter) GetScaleSetByID(ctx interface{}, scaleSet interface{}) *Store_GetScaleSetByID_Call {
+ return &Store_GetScaleSetByID_Call{Call: _e.mock.On("GetScaleSetByID", ctx, scaleSet)}
+}
+
+func (_c *Store_GetScaleSetByID_Call) Run(run func(ctx context.Context, scaleSet uint)) *Store_GetScaleSetByID_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(uint))
+ })
+ return _c
+}
+
+func (_c *Store_GetScaleSetByID_Call) Return(_a0 params.ScaleSet, _a1 error) *Store_GetScaleSetByID_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_GetScaleSetByID_Call) RunAndReturn(run func(context.Context, uint) (params.ScaleSet, error)) *Store_GetScaleSetByID_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// GetUser provides a mock function with given fields: ctx, user
func (_m *Store) GetUser(ctx context.Context, user string) (params.User, error) {
ret := _m.Called(ctx, user)
+ if len(ret) == 0 {
+ panic("no return value specified for GetUser")
+ }
+
var r0 params.User
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, string) (params.User, error)); ok {
@@ -856,10 +2820,43 @@ func (_m *Store) GetUser(ctx context.Context, user string) (params.User, error)
return r0, r1
}
+// Store_GetUser_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetUser'
+type Store_GetUser_Call struct {
+ *mock.Call
+}
+
+// GetUser is a helper method to define mock.On call
+// - ctx context.Context
+// - user string
+func (_e *Store_Expecter) GetUser(ctx interface{}, user interface{}) *Store_GetUser_Call {
+ return &Store_GetUser_Call{Call: _e.mock.On("GetUser", ctx, user)}
+}
+
+func (_c *Store_GetUser_Call) Run(run func(ctx context.Context, user string)) *Store_GetUser_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string))
+ })
+ return _c
+}
+
+func (_c *Store_GetUser_Call) Return(_a0 params.User, _a1 error) *Store_GetUser_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_GetUser_Call) RunAndReturn(run func(context.Context, string) (params.User, error)) *Store_GetUser_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// GetUserByID provides a mock function with given fields: ctx, userID
func (_m *Store) GetUserByID(ctx context.Context, userID string) (params.User, error) {
ret := _m.Called(ctx, userID)
+ if len(ret) == 0 {
+ panic("no return value specified for GetUserByID")
+ }
+
var r0 params.User
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, string) (params.User, error)); ok {
@@ -880,10 +2877,43 @@ func (_m *Store) GetUserByID(ctx context.Context, userID string) (params.User, e
return r0, r1
}
+// Store_GetUserByID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetUserByID'
+type Store_GetUserByID_Call struct {
+ *mock.Call
+}
+
+// GetUserByID is a helper method to define mock.On call
+// - ctx context.Context
+// - userID string
+func (_e *Store_Expecter) GetUserByID(ctx interface{}, userID interface{}) *Store_GetUserByID_Call {
+ return &Store_GetUserByID_Call{Call: _e.mock.On("GetUserByID", ctx, userID)}
+}
+
+func (_c *Store_GetUserByID_Call) Run(run func(ctx context.Context, userID string)) *Store_GetUserByID_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string))
+ })
+ return _c
+}
+
+func (_c *Store_GetUserByID_Call) Return(_a0 params.User, _a1 error) *Store_GetUserByID_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_GetUserByID_Call) RunAndReturn(run func(context.Context, string) (params.User, error)) *Store_GetUserByID_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// HasAdminUser provides a mock function with given fields: ctx
func (_m *Store) HasAdminUser(ctx context.Context) bool {
ret := _m.Called(ctx)
+ if len(ret) == 0 {
+ panic("no return value specified for HasAdminUser")
+ }
+
var r0 bool
if rf, ok := ret.Get(0).(func(context.Context) bool); ok {
r0 = rf(ctx)
@@ -894,10 +2924,42 @@ func (_m *Store) HasAdminUser(ctx context.Context) bool {
return r0
}
-// InitController provides a mock function with given fields:
+// Store_HasAdminUser_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'HasAdminUser'
+type Store_HasAdminUser_Call struct {
+ *mock.Call
+}
+
+// HasAdminUser is a helper method to define mock.On call
+// - ctx context.Context
+func (_e *Store_Expecter) HasAdminUser(ctx interface{}) *Store_HasAdminUser_Call {
+ return &Store_HasAdminUser_Call{Call: _e.mock.On("HasAdminUser", ctx)}
+}
+
+func (_c *Store_HasAdminUser_Call) Run(run func(ctx context.Context)) *Store_HasAdminUser_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context))
+ })
+ return _c
+}
+
+func (_c *Store_HasAdminUser_Call) Return(_a0 bool) *Store_HasAdminUser_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *Store_HasAdminUser_Call) RunAndReturn(run func(context.Context) bool) *Store_HasAdminUser_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
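Editor's note: `HasAdminUser` returns a bare bool, so there is no `Error(1)` fallback; with the new `len(ret) == 0` guard, calling the mock without configuring a return now panics loudly instead of yielding a silent false. Every stub therefore needs an explicit value:

    s.EXPECT().HasAdminUser(mock.Anything).Return(true).Once()

    if !s.HasAdminUser(context.Background()) {
        t.Fatal("expected an admin user")
    }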
+// InitController provides a mock function with no fields
func (_m *Store) InitController() (params.ControllerInfo, error) {
ret := _m.Called()
+ if len(ret) == 0 {
+ panic("no return value specified for InitController")
+ }
+
var r0 params.ControllerInfo
var r1 error
if rf, ok := ret.Get(0).(func() (params.ControllerInfo, error)); ok {
@@ -918,10 +2980,41 @@ func (_m *Store) InitController() (params.ControllerInfo, error) {
return r0, r1
}
+// Store_InitController_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'InitController'
+type Store_InitController_Call struct {
+ *mock.Call
+}
+
+// InitController is a helper method to define mock.On call
+func (_e *Store_Expecter) InitController() *Store_InitController_Call {
+ return &Store_InitController_Call{Call: _e.mock.On("InitController")}
+}
+
+func (_c *Store_InitController_Call) Run(run func()) *Store_InitController_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run()
+ })
+ return _c
+}
+
+func (_c *Store_InitController_Call) Return(_a0 params.ControllerInfo, _a1 error) *Store_InitController_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_InitController_Call) RunAndReturn(run func() (params.ControllerInfo, error)) *Store_InitController_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
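Editor's note: `InitController` takes no arguments, so its expecter takes no matchers either; a zero-value `params.ControllerInfo` keeps this sketch independent of the struct's fields:

    s.EXPECT().
        InitController().
        Return(params.ControllerInfo{}, nil).
        Once()

    if _, err := s.InitController(); err != nil {
        t.Fatalf("InitController: %v", err)
    }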
// ListAllInstances provides a mock function with given fields: ctx
func (_m *Store) ListAllInstances(ctx context.Context) ([]params.Instance, error) {
ret := _m.Called(ctx)
+ if len(ret) == 0 {
+ panic("no return value specified for ListAllInstances")
+ }
+
var r0 []params.Instance
var r1 error
if rf, ok := ret.Get(0).(func(context.Context) ([]params.Instance, error)); ok {
@@ -944,10 +3037,42 @@ func (_m *Store) ListAllInstances(ctx context.Context) ([]params.Instance, error
return r0, r1
}
+// Store_ListAllInstances_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListAllInstances'
+type Store_ListAllInstances_Call struct {
+ *mock.Call
+}
+
+// ListAllInstances is a helper method to define mock.On call
+// - ctx context.Context
+func (_e *Store_Expecter) ListAllInstances(ctx interface{}) *Store_ListAllInstances_Call {
+ return &Store_ListAllInstances_Call{Call: _e.mock.On("ListAllInstances", ctx)}
+}
+
+func (_c *Store_ListAllInstances_Call) Run(run func(ctx context.Context)) *Store_ListAllInstances_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context))
+ })
+ return _c
+}
+
+func (_c *Store_ListAllInstances_Call) Return(_a0 []params.Instance, _a1 error) *Store_ListAllInstances_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_ListAllInstances_Call) RunAndReturn(run func(context.Context) ([]params.Instance, error)) *Store_ListAllInstances_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
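Editor's note: slice-returning methods such as `ListAllInstances` add a `ret.Get(0) != nil` guard before the type assertion; that is what lets a raw `s.On(...).Return(nil, nil)` stand in for "no results" without panicking on a nil interface. Both spellings below are valid:

    // Typed empty slice via the expecter:
    s.EXPECT().ListAllInstances(mock.Anything).Return([]params.Instance{}, nil).Once()

    // Untyped nil via the raw API; the nil guard leaves r0 as a nil slice:
    s.On("ListAllInstances", mock.Anything).Return(nil, nil).Once()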
// ListAllJobs provides a mock function with given fields: ctx
func (_m *Store) ListAllJobs(ctx context.Context) ([]params.Job, error) {
ret := _m.Called(ctx)
+ if len(ret) == 0 {
+ panic("no return value specified for ListAllJobs")
+ }
+
var r0 []params.Job
var r1 error
if rf, ok := ret.Get(0).(func(context.Context) ([]params.Job, error)); ok {
@@ -970,10 +3095,42 @@ func (_m *Store) ListAllJobs(ctx context.Context) ([]params.Job, error) {
return r0, r1
}
+// Store_ListAllJobs_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListAllJobs'
+type Store_ListAllJobs_Call struct {
+ *mock.Call
+}
+
+// ListAllJobs is a helper method to define mock.On call
+// - ctx context.Context
+func (_e *Store_Expecter) ListAllJobs(ctx interface{}) *Store_ListAllJobs_Call {
+ return &Store_ListAllJobs_Call{Call: _e.mock.On("ListAllJobs", ctx)}
+}
+
+func (_c *Store_ListAllJobs_Call) Run(run func(ctx context.Context)) *Store_ListAllJobs_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context))
+ })
+ return _c
+}
+
+func (_c *Store_ListAllJobs_Call) Return(_a0 []params.Job, _a1 error) *Store_ListAllJobs_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_ListAllJobs_Call) RunAndReturn(run func(context.Context) ([]params.Job, error)) *Store_ListAllJobs_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// ListAllPools provides a mock function with given fields: ctx
func (_m *Store) ListAllPools(ctx context.Context) ([]params.Pool, error) {
ret := _m.Called(ctx)
+ if len(ret) == 0 {
+ panic("no return value specified for ListAllPools")
+ }
+
var r0 []params.Pool
var r1 error
if rf, ok := ret.Get(0).(func(context.Context) ([]params.Pool, error)); ok {
@@ -996,72 +3153,52 @@ func (_m *Store) ListAllPools(ctx context.Context) ([]params.Pool, error) {
return r0, r1
}
-// ListEnterpriseInstances provides a mock function with given fields: ctx, enterpriseID
-func (_m *Store) ListEnterpriseInstances(ctx context.Context, enterpriseID string) ([]params.Instance, error) {
- ret := _m.Called(ctx, enterpriseID)
-
- var r0 []params.Instance
- var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, string) ([]params.Instance, error)); ok {
- return rf(ctx, enterpriseID)
- }
- if rf, ok := ret.Get(0).(func(context.Context, string) []params.Instance); ok {
- r0 = rf(ctx, enterpriseID)
- } else {
- if ret.Get(0) != nil {
- r0 = ret.Get(0).([]params.Instance)
- }
- }
-
- if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
- r1 = rf(ctx, enterpriseID)
- } else {
- r1 = ret.Error(1)
- }
-
- return r0, r1
+// Store_ListAllPools_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListAllPools'
+type Store_ListAllPools_Call struct {
+ *mock.Call
}
-// ListEnterprisePools provides a mock function with given fields: ctx, enterpriseID
-func (_m *Store) ListEnterprisePools(ctx context.Context, enterpriseID string) ([]params.Pool, error) {
- ret := _m.Called(ctx, enterpriseID)
-
- var r0 []params.Pool
- var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, string) ([]params.Pool, error)); ok {
- return rf(ctx, enterpriseID)
- }
- if rf, ok := ret.Get(0).(func(context.Context, string) []params.Pool); ok {
- r0 = rf(ctx, enterpriseID)
- } else {
- if ret.Get(0) != nil {
- r0 = ret.Get(0).([]params.Pool)
- }
- }
-
- if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
- r1 = rf(ctx, enterpriseID)
- } else {
- r1 = ret.Error(1)
- }
-
- return r0, r1
+// ListAllPools is a helper method to define mock.On call
+// - ctx context.Context
+func (_e *Store_Expecter) ListAllPools(ctx interface{}) *Store_ListAllPools_Call {
+ return &Store_ListAllPools_Call{Call: _e.mock.On("ListAllPools", ctx)}
}
-// ListEnterprises provides a mock function with given fields: ctx
-func (_m *Store) ListEnterprises(ctx context.Context) ([]params.Enterprise, error) {
+func (_c *Store_ListAllPools_Call) Run(run func(ctx context.Context)) *Store_ListAllPools_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context))
+ })
+ return _c
+}
+
+func (_c *Store_ListAllPools_Call) Return(_a0 []params.Pool, _a1 error) *Store_ListAllPools_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_ListAllPools_Call) RunAndReturn(run func(context.Context) ([]params.Pool, error)) *Store_ListAllPools_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// ListAllScaleSets provides a mock function with given fields: ctx
+func (_m *Store) ListAllScaleSets(ctx context.Context) ([]params.ScaleSet, error) {
ret := _m.Called(ctx)
- var r0 []params.Enterprise
+ if len(ret) == 0 {
+ panic("no return value specified for ListAllScaleSets")
+ }
+
+ var r0 []params.ScaleSet
var r1 error
- if rf, ok := ret.Get(0).(func(context.Context) ([]params.Enterprise, error)); ok {
+ if rf, ok := ret.Get(0).(func(context.Context) ([]params.ScaleSet, error)); ok {
return rf(ctx)
}
- if rf, ok := ret.Get(0).(func(context.Context) []params.Enterprise); ok {
+ if rf, ok := ret.Get(0).(func(context.Context) []params.ScaleSet); ok {
r0 = rf(ctx)
} else {
if ret.Get(0) != nil {
- r0 = ret.Get(0).([]params.Enterprise)
+ r0 = ret.Get(0).([]params.ScaleSet)
}
}
@@ -1074,16 +3211,166 @@ func (_m *Store) ListEnterprises(ctx context.Context) ([]params.Enterprise, erro
return r0, r1
}
+// Store_ListAllScaleSets_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListAllScaleSets'
+type Store_ListAllScaleSets_Call struct {
+ *mock.Call
+}
+
+// ListAllScaleSets is a helper method to define mock.On call
+// - ctx context.Context
+func (_e *Store_Expecter) ListAllScaleSets(ctx interface{}) *Store_ListAllScaleSets_Call {
+ return &Store_ListAllScaleSets_Call{Call: _e.mock.On("ListAllScaleSets", ctx)}
+}
+
+func (_c *Store_ListAllScaleSets_Call) Run(run func(ctx context.Context)) *Store_ListAllScaleSets_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context))
+ })
+ return _c
+}
+
+func (_c *Store_ListAllScaleSets_Call) Return(_a0 []params.ScaleSet, _a1 error) *Store_ListAllScaleSets_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_ListAllScaleSets_Call) RunAndReturn(run func(context.Context) ([]params.ScaleSet, error)) *Store_ListAllScaleSets_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// ListEnterprises provides a mock function with given fields: ctx, filter
+func (_m *Store) ListEnterprises(ctx context.Context, filter params.EnterpriseFilter) ([]params.Enterprise, error) {
+ ret := _m.Called(ctx, filter)
+
+ if len(ret) == 0 {
+ panic("no return value specified for ListEnterprises")
+ }
+
+ var r0 []params.Enterprise
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, params.EnterpriseFilter) ([]params.Enterprise, error)); ok {
+ return rf(ctx, filter)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, params.EnterpriseFilter) []params.Enterprise); ok {
+ r0 = rf(ctx, filter)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]params.Enterprise)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, params.EnterpriseFilter) error); ok {
+ r1 = rf(ctx, filter)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// Store_ListEnterprises_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListEnterprises'
+type Store_ListEnterprises_Call struct {
+ *mock.Call
+}
+
+// ListEnterprises is a helper method to define mock.On call
+// - ctx context.Context
+// - filter params.EnterpriseFilter
+func (_e *Store_Expecter) ListEnterprises(ctx interface{}, filter interface{}) *Store_ListEnterprises_Call {
+ return &Store_ListEnterprises_Call{Call: _e.mock.On("ListEnterprises", ctx, filter)}
+}
+
+func (_c *Store_ListEnterprises_Call) Run(run func(ctx context.Context, filter params.EnterpriseFilter)) *Store_ListEnterprises_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(params.EnterpriseFilter))
+ })
+ return _c
+}
+
+func (_c *Store_ListEnterprises_Call) Return(_a0 []params.Enterprise, _a1 error) *Store_ListEnterprises_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_ListEnterprises_Call) RunAndReturn(run func(context.Context, params.EnterpriseFilter) ([]params.Enterprise, error)) *Store_ListEnterprises_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
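Editor's note: `ListEnterprises` (like `ListOrganizations` further down) gained a filter argument in this change. `mock.MatchedBy` hands the predicate the strongly typed value, so a test can inspect the filter without this sketch assuming anything about `params.EnterpriseFilter`'s fields:

    s.EXPECT().
        ListEnterprises(mock.Anything, mock.MatchedBy(func(f params.EnterpriseFilter) bool {
            return true // a real test would inspect f's fields here
        })).
        Return([]params.Enterprise{}, nil)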
+// ListEntityInstances provides a mock function with given fields: ctx, entity
+func (_m *Store) ListEntityInstances(ctx context.Context, entity params.ForgeEntity) ([]params.Instance, error) {
+ ret := _m.Called(ctx, entity)
+
+ if len(ret) == 0 {
+ panic("no return value specified for ListEntityInstances")
+ }
+
+ var r0 []params.Instance
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, params.ForgeEntity) ([]params.Instance, error)); ok {
+ return rf(ctx, entity)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, params.ForgeEntity) []params.Instance); ok {
+ r0 = rf(ctx, entity)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]params.Instance)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, params.ForgeEntity) error); ok {
+ r1 = rf(ctx, entity)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// Store_ListEntityInstances_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListEntityInstances'
+type Store_ListEntityInstances_Call struct {
+ *mock.Call
+}
+
+// ListEntityInstances is a helper method to define mock.On call
+// - ctx context.Context
+// - entity params.ForgeEntity
+func (_e *Store_Expecter) ListEntityInstances(ctx interface{}, entity interface{}) *Store_ListEntityInstances_Call {
+ return &Store_ListEntityInstances_Call{Call: _e.mock.On("ListEntityInstances", ctx, entity)}
+}
+
+func (_c *Store_ListEntityInstances_Call) Run(run func(ctx context.Context, entity params.ForgeEntity)) *Store_ListEntityInstances_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(params.ForgeEntity))
+ })
+ return _c
+}
+
+func (_c *Store_ListEntityInstances_Call) Return(_a0 []params.Instance, _a1 error) *Store_ListEntityInstances_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_ListEntityInstances_Call) RunAndReturn(run func(context.Context, params.ForgeEntity) ([]params.Instance, error)) *Store_ListEntityInstances_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// ListEntityJobsByStatus provides a mock function with given fields: ctx, entityType, entityID, status
-func (_m *Store) ListEntityJobsByStatus(ctx context.Context, entityType params.PoolType, entityID string, status params.JobStatus) ([]params.Job, error) {
+func (_m *Store) ListEntityJobsByStatus(ctx context.Context, entityType params.ForgeEntityType, entityID string, status params.JobStatus) ([]params.Job, error) {
ret := _m.Called(ctx, entityType, entityID, status)
+ if len(ret) == 0 {
+ panic("no return value specified for ListEntityJobsByStatus")
+ }
+
var r0 []params.Job
var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, params.PoolType, string, params.JobStatus) ([]params.Job, error)); ok {
+ if rf, ok := ret.Get(0).(func(context.Context, params.ForgeEntityType, string, params.JobStatus) ([]params.Job, error)); ok {
return rf(ctx, entityType, entityID, status)
}
- if rf, ok := ret.Get(0).(func(context.Context, params.PoolType, string, params.JobStatus) []params.Job); ok {
+ if rf, ok := ret.Get(0).(func(context.Context, params.ForgeEntityType, string, params.JobStatus) []params.Job); ok {
r0 = rf(ctx, entityType, entityID, status)
} else {
if ret.Get(0) != nil {
@@ -1091,7 +3378,7 @@ func (_m *Store) ListEntityJobsByStatus(ctx context.Context, entityType params.P
}
}
- if rf, ok := ret.Get(1).(func(context.Context, params.PoolType, string, params.JobStatus) error); ok {
+ if rf, ok := ret.Get(1).(func(context.Context, params.ForgeEntityType, string, params.JobStatus) error); ok {
r1 = rf(ctx, entityType, entityID, status)
} else {
r1 = ret.Error(1)
@@ -1100,25 +3387,60 @@ func (_m *Store) ListEntityJobsByStatus(ctx context.Context, entityType params.P
return r0, r1
}
-// ListInstanceEvents provides a mock function with given fields: ctx, instanceID, eventType, eventLevel
-func (_m *Store) ListInstanceEvents(ctx context.Context, instanceID string, eventType params.EventType, eventLevel params.EventLevel) ([]params.StatusMessage, error) {
- ret := _m.Called(ctx, instanceID, eventType, eventLevel)
+// Store_ListEntityJobsByStatus_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListEntityJobsByStatus'
+type Store_ListEntityJobsByStatus_Call struct {
+ *mock.Call
+}
- var r0 []params.StatusMessage
- var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, string, params.EventType, params.EventLevel) ([]params.StatusMessage, error)); ok {
- return rf(ctx, instanceID, eventType, eventLevel)
+// ListEntityJobsByStatus is a helper method to define mock.On call
+// - ctx context.Context
+// - entityType params.ForgeEntityType
+// - entityID string
+// - status params.JobStatus
+func (_e *Store_Expecter) ListEntityJobsByStatus(ctx interface{}, entityType interface{}, entityID interface{}, status interface{}) *Store_ListEntityJobsByStatus_Call {
+ return &Store_ListEntityJobsByStatus_Call{Call: _e.mock.On("ListEntityJobsByStatus", ctx, entityType, entityID, status)}
+}
+
+func (_c *Store_ListEntityJobsByStatus_Call) Run(run func(ctx context.Context, entityType params.ForgeEntityType, entityID string, status params.JobStatus)) *Store_ListEntityJobsByStatus_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(params.ForgeEntityType), args[2].(string), args[3].(params.JobStatus))
+ })
+ return _c
+}
+
+func (_c *Store_ListEntityJobsByStatus_Call) Return(_a0 []params.Job, _a1 error) *Store_ListEntityJobsByStatus_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_ListEntityJobsByStatus_Call) RunAndReturn(run func(context.Context, params.ForgeEntityType, string, params.JobStatus) ([]params.Job, error)) *Store_ListEntityJobsByStatus_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
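Editor's note: this is where the `params.PoolType` → `params.ForgeEntityType` rename lands in the mock. Tests that matched with `mock.Anything` keep compiling; where the concrete type matters, `mock.AnythingOfType` pins it without this sketch having to assume any constant names:

    s.EXPECT().
        ListEntityJobsByStatus(
            mock.Anything,
            mock.AnythingOfType("params.ForgeEntityType"),
            "entity-id",
            mock.AnythingOfType("params.JobStatus"),
        ).
        Return([]params.Job{}, nil)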
+// ListEntityPools provides a mock function with given fields: ctx, entity
+func (_m *Store) ListEntityPools(ctx context.Context, entity params.ForgeEntity) ([]params.Pool, error) {
+ ret := _m.Called(ctx, entity)
+
+ if len(ret) == 0 {
+ panic("no return value specified for ListEntityPools")
}
- if rf, ok := ret.Get(0).(func(context.Context, string, params.EventType, params.EventLevel) []params.StatusMessage); ok {
- r0 = rf(ctx, instanceID, eventType, eventLevel)
+
+ var r0 []params.Pool
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, params.ForgeEntity) ([]params.Pool, error)); ok {
+ return rf(ctx, entity)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, params.ForgeEntity) []params.Pool); ok {
+ r0 = rf(ctx, entity)
} else {
if ret.Get(0) != nil {
- r0 = ret.Get(0).([]params.StatusMessage)
+ r0 = ret.Get(0).([]params.Pool)
}
}
- if rf, ok := ret.Get(1).(func(context.Context, string, params.EventType, params.EventLevel) error); ok {
- r1 = rf(ctx, instanceID, eventType, eventLevel)
+ if rf, ok := ret.Get(1).(func(context.Context, params.ForgeEntity) error); ok {
+ r1 = rf(ctx, entity)
} else {
r1 = ret.Error(1)
}
@@ -1126,10 +3448,334 @@ func (_m *Store) ListInstanceEvents(ctx context.Context, instanceID string, even
return r0, r1
}
+// Store_ListEntityPools_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListEntityPools'
+type Store_ListEntityPools_Call struct {
+ *mock.Call
+}
+
+// ListEntityPools is a helper method to define mock.On call
+// - ctx context.Context
+// - entity params.ForgeEntity
+func (_e *Store_Expecter) ListEntityPools(ctx interface{}, entity interface{}) *Store_ListEntityPools_Call {
+ return &Store_ListEntityPools_Call{Call: _e.mock.On("ListEntityPools", ctx, entity)}
+}
+
+func (_c *Store_ListEntityPools_Call) Run(run func(ctx context.Context, entity params.ForgeEntity)) *Store_ListEntityPools_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(params.ForgeEntity))
+ })
+ return _c
+}
+
+func (_c *Store_ListEntityPools_Call) Return(_a0 []params.Pool, _a1 error) *Store_ListEntityPools_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_ListEntityPools_Call) RunAndReturn(run func(context.Context, params.ForgeEntity) ([]params.Pool, error)) *Store_ListEntityPools_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// ListEntityScaleSets provides a mock function with given fields: _a0, entity
+func (_m *Store) ListEntityScaleSets(_a0 context.Context, entity params.ForgeEntity) ([]params.ScaleSet, error) {
+ ret := _m.Called(_a0, entity)
+
+ if len(ret) == 0 {
+ panic("no return value specified for ListEntityScaleSets")
+ }
+
+ var r0 []params.ScaleSet
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, params.ForgeEntity) ([]params.ScaleSet, error)); ok {
+ return rf(_a0, entity)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, params.ForgeEntity) []params.ScaleSet); ok {
+ r0 = rf(_a0, entity)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]params.ScaleSet)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, params.ForgeEntity) error); ok {
+ r1 = rf(_a0, entity)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// Store_ListEntityScaleSets_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListEntityScaleSets'
+type Store_ListEntityScaleSets_Call struct {
+ *mock.Call
+}
+
+// ListEntityScaleSets is a helper method to define mock.On call
+// - _a0 context.Context
+// - entity params.ForgeEntity
+func (_e *Store_Expecter) ListEntityScaleSets(_a0 interface{}, entity interface{}) *Store_ListEntityScaleSets_Call {
+ return &Store_ListEntityScaleSets_Call{Call: _e.mock.On("ListEntityScaleSets", _a0, entity)}
+}
+
+func (_c *Store_ListEntityScaleSets_Call) Run(run func(_a0 context.Context, entity params.ForgeEntity)) *Store_ListEntityScaleSets_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(params.ForgeEntity))
+ })
+ return _c
+}
+
+func (_c *Store_ListEntityScaleSets_Call) Return(_a0 []params.ScaleSet, _a1 error) *Store_ListEntityScaleSets_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_ListEntityScaleSets_Call) RunAndReturn(run func(context.Context, params.ForgeEntity) ([]params.ScaleSet, error)) *Store_ListEntityScaleSets_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// ListGiteaCredentials provides a mock function with given fields: ctx
+func (_m *Store) ListGiteaCredentials(ctx context.Context) ([]params.ForgeCredentials, error) {
+ ret := _m.Called(ctx)
+
+ if len(ret) == 0 {
+ panic("no return value specified for ListGiteaCredentials")
+ }
+
+ var r0 []params.ForgeCredentials
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context) ([]params.ForgeCredentials, error)); ok {
+ return rf(ctx)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context) []params.ForgeCredentials); ok {
+ r0 = rf(ctx)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]params.ForgeCredentials)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context) error); ok {
+ r1 = rf(ctx)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// Store_ListGiteaCredentials_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListGiteaCredentials'
+type Store_ListGiteaCredentials_Call struct {
+ *mock.Call
+}
+
+// ListGiteaCredentials is a helper method to define mock.On call
+// - ctx context.Context
+func (_e *Store_Expecter) ListGiteaCredentials(ctx interface{}) *Store_ListGiteaCredentials_Call {
+ return &Store_ListGiteaCredentials_Call{Call: _e.mock.On("ListGiteaCredentials", ctx)}
+}
+
+func (_c *Store_ListGiteaCredentials_Call) Run(run func(ctx context.Context)) *Store_ListGiteaCredentials_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context))
+ })
+ return _c
+}
+
+func (_c *Store_ListGiteaCredentials_Call) Return(_a0 []params.ForgeCredentials, _a1 error) *Store_ListGiteaCredentials_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_ListGiteaCredentials_Call) RunAndReturn(run func(context.Context) ([]params.ForgeCredentials, error)) *Store_ListGiteaCredentials_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
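Editor's note: since the assumed `NewStore(t)` constructor wires `AssertExpectations` into `t.Cleanup`, pairing a stub with `Once()` or `Times(n)` doubles as a call-count assertion with no explicit teardown:

    s.EXPECT().
        ListGiteaCredentials(mock.Anything).
        Return([]params.ForgeCredentials{}, nil).
        Times(2)

    // The test fails at cleanup unless ListGiteaCredentials ran exactly twice.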
+// ListGiteaEndpoints provides a mock function with given fields: _a0
+func (_m *Store) ListGiteaEndpoints(_a0 context.Context) ([]params.ForgeEndpoint, error) {
+ ret := _m.Called(_a0)
+
+ if len(ret) == 0 {
+ panic("no return value specified for ListGiteaEndpoints")
+ }
+
+ var r0 []params.ForgeEndpoint
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context) ([]params.ForgeEndpoint, error)); ok {
+ return rf(_a0)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context) []params.ForgeEndpoint); ok {
+ r0 = rf(_a0)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]params.ForgeEndpoint)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context) error); ok {
+ r1 = rf(_a0)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// Store_ListGiteaEndpoints_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListGiteaEndpoints'
+type Store_ListGiteaEndpoints_Call struct {
+ *mock.Call
+}
+
+// ListGiteaEndpoints is a helper method to define mock.On call
+// - _a0 context.Context
+func (_e *Store_Expecter) ListGiteaEndpoints(_a0 interface{}) *Store_ListGiteaEndpoints_Call {
+ return &Store_ListGiteaEndpoints_Call{Call: _e.mock.On("ListGiteaEndpoints", _a0)}
+}
+
+func (_c *Store_ListGiteaEndpoints_Call) Run(run func(_a0 context.Context)) *Store_ListGiteaEndpoints_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context))
+ })
+ return _c
+}
+
+func (_c *Store_ListGiteaEndpoints_Call) Return(_a0 []params.ForgeEndpoint, _a1 error) *Store_ListGiteaEndpoints_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_ListGiteaEndpoints_Call) RunAndReturn(run func(context.Context) ([]params.ForgeEndpoint, error)) *Store_ListGiteaEndpoints_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// ListGithubCredentials provides a mock function with given fields: ctx
+func (_m *Store) ListGithubCredentials(ctx context.Context) ([]params.ForgeCredentials, error) {
+ ret := _m.Called(ctx)
+
+ if len(ret) == 0 {
+ panic("no return value specified for ListGithubCredentials")
+ }
+
+ var r0 []params.ForgeCredentials
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context) ([]params.ForgeCredentials, error)); ok {
+ return rf(ctx)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context) []params.ForgeCredentials); ok {
+ r0 = rf(ctx)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]params.ForgeCredentials)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context) error); ok {
+ r1 = rf(ctx)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// Store_ListGithubCredentials_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListGithubCredentials'
+type Store_ListGithubCredentials_Call struct {
+ *mock.Call
+}
+
+// ListGithubCredentials is a helper method to define mock.On call
+// - ctx context.Context
+func (_e *Store_Expecter) ListGithubCredentials(ctx interface{}) *Store_ListGithubCredentials_Call {
+ return &Store_ListGithubCredentials_Call{Call: _e.mock.On("ListGithubCredentials", ctx)}
+}
+
+func (_c *Store_ListGithubCredentials_Call) Run(run func(ctx context.Context)) *Store_ListGithubCredentials_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context))
+ })
+ return _c
+}
+
+func (_c *Store_ListGithubCredentials_Call) Return(_a0 []params.ForgeCredentials, _a1 error) *Store_ListGithubCredentials_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_ListGithubCredentials_Call) RunAndReturn(run func(context.Context) ([]params.ForgeCredentials, error)) *Store_ListGithubCredentials_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// ListGithubEndpoints provides a mock function with given fields: ctx
+func (_m *Store) ListGithubEndpoints(ctx context.Context) ([]params.ForgeEndpoint, error) {
+ ret := _m.Called(ctx)
+
+ if len(ret) == 0 {
+ panic("no return value specified for ListGithubEndpoints")
+ }
+
+ var r0 []params.ForgeEndpoint
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context) ([]params.ForgeEndpoint, error)); ok {
+ return rf(ctx)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context) []params.ForgeEndpoint); ok {
+ r0 = rf(ctx)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]params.ForgeEndpoint)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context) error); ok {
+ r1 = rf(ctx)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// Store_ListGithubEndpoints_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListGithubEndpoints'
+type Store_ListGithubEndpoints_Call struct {
+ *mock.Call
+}
+
+// ListGithubEndpoints is a helper method to define mock.On call
+// - ctx context.Context
+func (_e *Store_Expecter) ListGithubEndpoints(ctx interface{}) *Store_ListGithubEndpoints_Call {
+ return &Store_ListGithubEndpoints_Call{Call: _e.mock.On("ListGithubEndpoints", ctx)}
+}
+
+func (_c *Store_ListGithubEndpoints_Call) Run(run func(ctx context.Context)) *Store_ListGithubEndpoints_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context))
+ })
+ return _c
+}
+
+func (_c *Store_ListGithubEndpoints_Call) Return(_a0 []params.ForgeEndpoint, _a1 error) *Store_ListGithubEndpoints_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_ListGithubEndpoints_Call) RunAndReturn(run func(context.Context) ([]params.ForgeEndpoint, error)) *Store_ListGithubEndpoints_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// ListJobsByStatus provides a mock function with given fields: ctx, status
func (_m *Store) ListJobsByStatus(ctx context.Context, status params.JobStatus) ([]params.Job, error) {
ret := _m.Called(ctx, status)
+ if len(ret) == 0 {
+ panic("no return value specified for ListJobsByStatus")
+ }
+
var r0 []params.Job
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, params.JobStatus) ([]params.Job, error)); ok {
@@ -1152,77 +3798,58 @@ func (_m *Store) ListJobsByStatus(ctx context.Context, status params.JobStatus)
return r0, r1
}
-// ListOrgInstances provides a mock function with given fields: ctx, orgID
-func (_m *Store) ListOrgInstances(ctx context.Context, orgID string) ([]params.Instance, error) {
- ret := _m.Called(ctx, orgID)
-
- var r0 []params.Instance
- var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, string) ([]params.Instance, error)); ok {
- return rf(ctx, orgID)
- }
- if rf, ok := ret.Get(0).(func(context.Context, string) []params.Instance); ok {
- r0 = rf(ctx, orgID)
- } else {
- if ret.Get(0) != nil {
- r0 = ret.Get(0).([]params.Instance)
- }
- }
-
- if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
- r1 = rf(ctx, orgID)
- } else {
- r1 = ret.Error(1)
- }
-
- return r0, r1
+// Store_ListJobsByStatus_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListJobsByStatus'
+type Store_ListJobsByStatus_Call struct {
+ *mock.Call
}
-// ListOrgPools provides a mock function with given fields: ctx, orgID
-func (_m *Store) ListOrgPools(ctx context.Context, orgID string) ([]params.Pool, error) {
- ret := _m.Called(ctx, orgID)
-
- var r0 []params.Pool
- var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, string) ([]params.Pool, error)); ok {
- return rf(ctx, orgID)
- }
- if rf, ok := ret.Get(0).(func(context.Context, string) []params.Pool); ok {
- r0 = rf(ctx, orgID)
- } else {
- if ret.Get(0) != nil {
- r0 = ret.Get(0).([]params.Pool)
- }
- }
-
- if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
- r1 = rf(ctx, orgID)
- } else {
- r1 = ret.Error(1)
- }
-
- return r0, r1
+// ListJobsByStatus is a helper method to define mock.On call
+// - ctx context.Context
+// - status params.JobStatus
+func (_e *Store_Expecter) ListJobsByStatus(ctx interface{}, status interface{}) *Store_ListJobsByStatus_Call {
+ return &Store_ListJobsByStatus_Call{Call: _e.mock.On("ListJobsByStatus", ctx, status)}
}
-// ListOrganizations provides a mock function with given fields: ctx
-func (_m *Store) ListOrganizations(ctx context.Context) ([]params.Organization, error) {
- ret := _m.Called(ctx)
+func (_c *Store_ListJobsByStatus_Call) Run(run func(ctx context.Context, status params.JobStatus)) *Store_ListJobsByStatus_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(params.JobStatus))
+ })
+ return _c
+}
+
+func (_c *Store_ListJobsByStatus_Call) Return(_a0 []params.Job, _a1 error) *Store_ListJobsByStatus_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_ListJobsByStatus_Call) RunAndReturn(run func(context.Context, params.JobStatus) ([]params.Job, error)) *Store_ListJobsByStatus_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// ListOrganizations provides a mock function with given fields: ctx, filter
+func (_m *Store) ListOrganizations(ctx context.Context, filter params.OrganizationFilter) ([]params.Organization, error) {
+ ret := _m.Called(ctx, filter)
+
+ if len(ret) == 0 {
+ panic("no return value specified for ListOrganizations")
+ }
var r0 []params.Organization
var r1 error
- if rf, ok := ret.Get(0).(func(context.Context) ([]params.Organization, error)); ok {
- return rf(ctx)
+ if rf, ok := ret.Get(0).(func(context.Context, params.OrganizationFilter) ([]params.Organization, error)); ok {
+ return rf(ctx, filter)
}
- if rf, ok := ret.Get(0).(func(context.Context) []params.Organization); ok {
- r0 = rf(ctx)
+ if rf, ok := ret.Get(0).(func(context.Context, params.OrganizationFilter) []params.Organization); ok {
+ r0 = rf(ctx, filter)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]params.Organization)
}
}
- if rf, ok := ret.Get(1).(func(context.Context) error); ok {
- r1 = rf(ctx)
+ if rf, ok := ret.Get(1).(func(context.Context, params.OrganizationFilter) error); ok {
+ r1 = rf(ctx, filter)
} else {
r1 = ret.Error(1)
}
@@ -1230,10 +3857,43 @@ func (_m *Store) ListOrganizations(ctx context.Context) ([]params.Organization,
return r0, r1
}
+// Store_ListOrganizations_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListOrganizations'
+type Store_ListOrganizations_Call struct {
+ *mock.Call
+}
+
+// ListOrganizations is a helper method to define mock.On call
+// - ctx context.Context
+// - filter params.OrganizationFilter
+func (_e *Store_Expecter) ListOrganizations(ctx interface{}, filter interface{}) *Store_ListOrganizations_Call {
+ return &Store_ListOrganizations_Call{Call: _e.mock.On("ListOrganizations", ctx, filter)}
+}
+
+func (_c *Store_ListOrganizations_Call) Run(run func(ctx context.Context, filter params.OrganizationFilter)) *Store_ListOrganizations_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(params.OrganizationFilter))
+ })
+ return _c
+}
+
+func (_c *Store_ListOrganizations_Call) Return(_a0 []params.Organization, _a1 error) *Store_ListOrganizations_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_ListOrganizations_Call) RunAndReturn(run func(context.Context, params.OrganizationFilter) ([]params.Organization, error)) *Store_ListOrganizations_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// ListPoolInstances provides a mock function with given fields: ctx, poolID
func (_m *Store) ListPoolInstances(ctx context.Context, poolID string) ([]params.Instance, error) {
ret := _m.Called(ctx, poolID)
+ if len(ret) == 0 {
+ panic("no return value specified for ListPoolInstances")
+ }
+
var r0 []params.Instance
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, string) ([]params.Instance, error)); ok {
@@ -1256,77 +3916,58 @@ func (_m *Store) ListPoolInstances(ctx context.Context, poolID string) ([]params
return r0, r1
}
-// ListRepoInstances provides a mock function with given fields: ctx, repoID
-func (_m *Store) ListRepoInstances(ctx context.Context, repoID string) ([]params.Instance, error) {
- ret := _m.Called(ctx, repoID)
-
- var r0 []params.Instance
- var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, string) ([]params.Instance, error)); ok {
- return rf(ctx, repoID)
- }
- if rf, ok := ret.Get(0).(func(context.Context, string) []params.Instance); ok {
- r0 = rf(ctx, repoID)
- } else {
- if ret.Get(0) != nil {
- r0 = ret.Get(0).([]params.Instance)
- }
- }
-
- if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
- r1 = rf(ctx, repoID)
- } else {
- r1 = ret.Error(1)
- }
-
- return r0, r1
+// Store_ListPoolInstances_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListPoolInstances'
+type Store_ListPoolInstances_Call struct {
+ *mock.Call
}
-// ListRepoPools provides a mock function with given fields: ctx, repoID
-func (_m *Store) ListRepoPools(ctx context.Context, repoID string) ([]params.Pool, error) {
- ret := _m.Called(ctx, repoID)
-
- var r0 []params.Pool
- var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, string) ([]params.Pool, error)); ok {
- return rf(ctx, repoID)
- }
- if rf, ok := ret.Get(0).(func(context.Context, string) []params.Pool); ok {
- r0 = rf(ctx, repoID)
- } else {
- if ret.Get(0) != nil {
- r0 = ret.Get(0).([]params.Pool)
- }
- }
-
- if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
- r1 = rf(ctx, repoID)
- } else {
- r1 = ret.Error(1)
- }
-
- return r0, r1
+// ListPoolInstances is a helper method to define mock.On call
+// - ctx context.Context
+// - poolID string
+func (_e *Store_Expecter) ListPoolInstances(ctx interface{}, poolID interface{}) *Store_ListPoolInstances_Call {
+ return &Store_ListPoolInstances_Call{Call: _e.mock.On("ListPoolInstances", ctx, poolID)}
}
-// ListRepositories provides a mock function with given fields: ctx
-func (_m *Store) ListRepositories(ctx context.Context) ([]params.Repository, error) {
- ret := _m.Called(ctx)
+func (_c *Store_ListPoolInstances_Call) Run(run func(ctx context.Context, poolID string)) *Store_ListPoolInstances_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string))
+ })
+ return _c
+}
+
+func (_c *Store_ListPoolInstances_Call) Return(_a0 []params.Instance, _a1 error) *Store_ListPoolInstances_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_ListPoolInstances_Call) RunAndReturn(run func(context.Context, string) ([]params.Instance, error)) *Store_ListPoolInstances_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// ListRepositories provides a mock function with given fields: ctx, filter
+func (_m *Store) ListRepositories(ctx context.Context, filter params.RepositoryFilter) ([]params.Repository, error) {
+ ret := _m.Called(ctx, filter)
+
+ if len(ret) == 0 {
+ panic("no return value specified for ListRepositories")
+ }
var r0 []params.Repository
var r1 error
- if rf, ok := ret.Get(0).(func(context.Context) ([]params.Repository, error)); ok {
- return rf(ctx)
+ if rf, ok := ret.Get(0).(func(context.Context, params.RepositoryFilter) ([]params.Repository, error)); ok {
+ return rf(ctx, filter)
}
- if rf, ok := ret.Get(0).(func(context.Context) []params.Repository); ok {
- r0 = rf(ctx)
+ if rf, ok := ret.Get(0).(func(context.Context, params.RepositoryFilter) []params.Repository); ok {
+ r0 = rf(ctx, filter)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]params.Repository)
}
}
- if rf, ok := ret.Get(1).(func(context.Context) error); ok {
- r1 = rf(ctx)
+ if rf, ok := ret.Get(1).(func(context.Context, params.RepositoryFilter) error); ok {
+ r1 = rf(ctx, filter)
} else {
r1 = ret.Error(1)
}
@@ -1334,10 +3975,102 @@ func (_m *Store) ListRepositories(ctx context.Context) ([]params.Repository, err
return r0, r1
}
+// Store_ListRepositories_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListRepositories'
+type Store_ListRepositories_Call struct {
+ *mock.Call
+}
+
+// ListRepositories is a helper method to define mock.On call
+// - ctx context.Context
+// - filter params.RepositoryFilter
+func (_e *Store_Expecter) ListRepositories(ctx interface{}, filter interface{}) *Store_ListRepositories_Call {
+ return &Store_ListRepositories_Call{Call: _e.mock.On("ListRepositories", ctx, filter)}
+}
+
+func (_c *Store_ListRepositories_Call) Run(run func(ctx context.Context, filter params.RepositoryFilter)) *Store_ListRepositories_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(params.RepositoryFilter))
+ })
+ return _c
+}
+
+func (_c *Store_ListRepositories_Call) Return(_a0 []params.Repository, _a1 error) *Store_ListRepositories_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_ListRepositories_Call) RunAndReturn(run func(context.Context, params.RepositoryFilter) ([]params.Repository, error)) *Store_ListRepositories_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// ListScaleSetInstances provides a mock function with given fields: _a0, scalesetID
+func (_m *Store) ListScaleSetInstances(_a0 context.Context, scalesetID uint) ([]params.Instance, error) {
+ ret := _m.Called(_a0, scalesetID)
+
+ if len(ret) == 0 {
+ panic("no return value specified for ListScaleSetInstances")
+ }
+
+ var r0 []params.Instance
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, uint) ([]params.Instance, error)); ok {
+ return rf(_a0, scalesetID)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, uint) []params.Instance); ok {
+ r0 = rf(_a0, scalesetID)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]params.Instance)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, uint) error); ok {
+ r1 = rf(_a0, scalesetID)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// Store_ListScaleSetInstances_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListScaleSetInstances'
+type Store_ListScaleSetInstances_Call struct {
+ *mock.Call
+}
+
+// ListScaleSetInstances is a helper method to define mock.On call
+// - _a0 context.Context
+// - scalesetID uint
+func (_e *Store_Expecter) ListScaleSetInstances(_a0 interface{}, scalesetID interface{}) *Store_ListScaleSetInstances_Call {
+ return &Store_ListScaleSetInstances_Call{Call: _e.mock.On("ListScaleSetInstances", _a0, scalesetID)}
+}
+
+func (_c *Store_ListScaleSetInstances_Call) Run(run func(_a0 context.Context, scalesetID uint)) *Store_ListScaleSetInstances_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(uint))
+ })
+ return _c
+}
+
+func (_c *Store_ListScaleSetInstances_Call) Return(_a0 []params.Instance, _a1 error) *Store_ListScaleSetInstances_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_ListScaleSetInstances_Call) RunAndReturn(run func(context.Context, uint) ([]params.Instance, error)) *Store_ListScaleSetInstances_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
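+// Illustrative use of the RunAndReturn helper above, computing the reply from
+// the arguments at call time (a sketch: it assumes the standard mockery
+// EXPECT() accessor is generated for this mock; the values are made up):
+//
+//     store.EXPECT().ListScaleSetInstances(mock.Anything, mock.Anything).
+//         RunAndReturn(func(_ context.Context, id uint) ([]params.Instance, error) {
+//             if id == 0 {
+//                 return nil, errors.New("invalid scale set id")
+//             }
+//             return []params.Instance{}, nil
+//         })
+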
// LockJob provides a mock function with given fields: ctx, jobID, entityID
func (_m *Store) LockJob(ctx context.Context, jobID int64, entityID string) error {
ret := _m.Called(ctx, jobID, entityID)
+ if len(ret) == 0 {
+ panic("no return value specified for LockJob")
+ }
+
var r0 error
if rf, ok := ret.Get(0).(func(context.Context, int64, string) error); ok {
r0 = rf(ctx, jobID, entityID)
@@ -1348,10 +4081,44 @@ func (_m *Store) LockJob(ctx context.Context, jobID int64, entityID string) erro
return r0
}
+// Store_LockJob_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LockJob'
+type Store_LockJob_Call struct {
+ *mock.Call
+}
+
+// LockJob is a helper method to define mock.On call
+// - ctx context.Context
+// - jobID int64
+// - entityID string
+func (_e *Store_Expecter) LockJob(ctx interface{}, jobID interface{}, entityID interface{}) *Store_LockJob_Call {
+ return &Store_LockJob_Call{Call: _e.mock.On("LockJob", ctx, jobID, entityID)}
+}
+
+func (_c *Store_LockJob_Call) Run(run func(ctx context.Context, jobID int64, entityID string)) *Store_LockJob_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(int64), args[2].(string))
+ })
+ return _c
+}
+
+func (_c *Store_LockJob_Call) Return(_a0 error) *Store_LockJob_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *Store_LockJob_Call) RunAndReturn(run func(context.Context, int64, string) error) *Store_LockJob_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// PoolInstanceCount provides a mock function with given fields: ctx, poolID
func (_m *Store) PoolInstanceCount(ctx context.Context, poolID string) (int64, error) {
ret := _m.Called(ctx, poolID)
+ if len(ret) == 0 {
+ panic("no return value specified for PoolInstanceCount")
+ }
+
var r0 int64
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, string) (int64, error)); ok {
@@ -1372,10 +4139,139 @@ func (_m *Store) PoolInstanceCount(ctx context.Context, poolID string) (int64, e
return r0, r1
}
+// Store_PoolInstanceCount_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'PoolInstanceCount'
+type Store_PoolInstanceCount_Call struct {
+ *mock.Call
+}
+
+// PoolInstanceCount is a helper method to define mock.On call
+// - ctx context.Context
+// - poolID string
+func (_e *Store_Expecter) PoolInstanceCount(ctx interface{}, poolID interface{}) *Store_PoolInstanceCount_Call {
+ return &Store_PoolInstanceCount_Call{Call: _e.mock.On("PoolInstanceCount", ctx, poolID)}
+}
+
+func (_c *Store_PoolInstanceCount_Call) Run(run func(ctx context.Context, poolID string)) *Store_PoolInstanceCount_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string))
+ })
+ return _c
+}
+
+func (_c *Store_PoolInstanceCount_Call) Return(_a0 int64, _a1 error) *Store_PoolInstanceCount_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_PoolInstanceCount_Call) RunAndReturn(run func(context.Context, string) (int64, error)) *Store_PoolInstanceCount_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// SetScaleSetDesiredRunnerCount provides a mock function with given fields: ctx, scaleSetID, desiredRunnerCount
+func (_m *Store) SetScaleSetDesiredRunnerCount(ctx context.Context, scaleSetID uint, desiredRunnerCount int) error {
+ ret := _m.Called(ctx, scaleSetID, desiredRunnerCount)
+
+ if len(ret) == 0 {
+ panic("no return value specified for SetScaleSetDesiredRunnerCount")
+ }
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func(context.Context, uint, int) error); ok {
+ r0 = rf(ctx, scaleSetID, desiredRunnerCount)
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+// Store_SetScaleSetDesiredRunnerCount_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SetScaleSetDesiredRunnerCount'
+type Store_SetScaleSetDesiredRunnerCount_Call struct {
+ *mock.Call
+}
+
+// SetScaleSetDesiredRunnerCount is a helper method to define mock.On call
+// - ctx context.Context
+// - scaleSetID uint
+// - desiredRunnerCount int
+func (_e *Store_Expecter) SetScaleSetDesiredRunnerCount(ctx interface{}, scaleSetID interface{}, desiredRunnerCount interface{}) *Store_SetScaleSetDesiredRunnerCount_Call {
+ return &Store_SetScaleSetDesiredRunnerCount_Call{Call: _e.mock.On("SetScaleSetDesiredRunnerCount", ctx, scaleSetID, desiredRunnerCount)}
+}
+
+func (_c *Store_SetScaleSetDesiredRunnerCount_Call) Run(run func(ctx context.Context, scaleSetID uint, desiredRunnerCount int)) *Store_SetScaleSetDesiredRunnerCount_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(uint), args[2].(int))
+ })
+ return _c
+}
+
+func (_c *Store_SetScaleSetDesiredRunnerCount_Call) Return(_a0 error) *Store_SetScaleSetDesiredRunnerCount_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *Store_SetScaleSetDesiredRunnerCount_Call) RunAndReturn(run func(context.Context, uint, int) error) *Store_SetScaleSetDesiredRunnerCount_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// SetScaleSetLastMessageID provides a mock function with given fields: ctx, scaleSetID, lastMessageID
+func (_m *Store) SetScaleSetLastMessageID(ctx context.Context, scaleSetID uint, lastMessageID int64) error {
+ ret := _m.Called(ctx, scaleSetID, lastMessageID)
+
+ if len(ret) == 0 {
+ panic("no return value specified for SetScaleSetLastMessageID")
+ }
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func(context.Context, uint, int64) error); ok {
+ r0 = rf(ctx, scaleSetID, lastMessageID)
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+// Store_SetScaleSetLastMessageID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SetScaleSetLastMessageID'
+type Store_SetScaleSetLastMessageID_Call struct {
+ *mock.Call
+}
+
+// SetScaleSetLastMessageID is a helper method to define mock.On call
+// - ctx context.Context
+// - scaleSetID uint
+// - lastMessageID int64
+func (_e *Store_Expecter) SetScaleSetLastMessageID(ctx interface{}, scaleSetID interface{}, lastMessageID interface{}) *Store_SetScaleSetLastMessageID_Call {
+ return &Store_SetScaleSetLastMessageID_Call{Call: _e.mock.On("SetScaleSetLastMessageID", ctx, scaleSetID, lastMessageID)}
+}
+
+func (_c *Store_SetScaleSetLastMessageID_Call) Run(run func(ctx context.Context, scaleSetID uint, lastMessageID int64)) *Store_SetScaleSetLastMessageID_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(uint), args[2].(int64))
+ })
+ return _c
+}
+
+func (_c *Store_SetScaleSetLastMessageID_Call) Return(_a0 error) *Store_SetScaleSetLastMessageID_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *Store_SetScaleSetLastMessageID_Call) RunAndReturn(run func(context.Context, uint, int64) error) *Store_SetScaleSetLastMessageID_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// UnlockJob provides a mock function with given fields: ctx, jobID, entityID
func (_m *Store) UnlockJob(ctx context.Context, jobID int64, entityID string) error {
ret := _m.Called(ctx, jobID, entityID)
+ if len(ret) == 0 {
+ panic("no return value specified for UnlockJob")
+ }
+
var r0 error
if rf, ok := ret.Get(0).(func(context.Context, int64, string) error); ok {
r0 = rf(ctx, jobID, entityID)
@@ -1386,10 +4282,100 @@ func (_m *Store) UnlockJob(ctx context.Context, jobID int64, entityID string) er
return r0
}
+// Store_UnlockJob_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UnlockJob'
+type Store_UnlockJob_Call struct {
+ *mock.Call
+}
+
+// UnlockJob is a helper method to define mock.On call
+// - ctx context.Context
+// - jobID int64
+// - entityID string
+func (_e *Store_Expecter) UnlockJob(ctx interface{}, jobID interface{}, entityID interface{}) *Store_UnlockJob_Call {
+ return &Store_UnlockJob_Call{Call: _e.mock.On("UnlockJob", ctx, jobID, entityID)}
+}
+
+func (_c *Store_UnlockJob_Call) Run(run func(ctx context.Context, jobID int64, entityID string)) *Store_UnlockJob_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(int64), args[2].(string))
+ })
+ return _c
+}
+
+func (_c *Store_UnlockJob_Call) Return(_a0 error) *Store_UnlockJob_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *Store_UnlockJob_Call) RunAndReturn(run func(context.Context, int64, string) error) *Store_UnlockJob_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// UpdateController provides a mock function with given fields: info
+func (_m *Store) UpdateController(info params.UpdateControllerParams) (params.ControllerInfo, error) {
+ ret := _m.Called(info)
+
+ if len(ret) == 0 {
+ panic("no return value specified for UpdateController")
+ }
+
+ var r0 params.ControllerInfo
+ var r1 error
+ if rf, ok := ret.Get(0).(func(params.UpdateControllerParams) (params.ControllerInfo, error)); ok {
+ return rf(info)
+ }
+ if rf, ok := ret.Get(0).(func(params.UpdateControllerParams) params.ControllerInfo); ok {
+ r0 = rf(info)
+ } else {
+ r0 = ret.Get(0).(params.ControllerInfo)
+ }
+
+ if rf, ok := ret.Get(1).(func(params.UpdateControllerParams) error); ok {
+ r1 = rf(info)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// Store_UpdateController_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateController'
+type Store_UpdateController_Call struct {
+ *mock.Call
+}
+
+// UpdateController is a helper method to define mock.On call
+// - info params.UpdateControllerParams
+func (_e *Store_Expecter) UpdateController(info interface{}) *Store_UpdateController_Call {
+ return &Store_UpdateController_Call{Call: _e.mock.On("UpdateController", info)}
+}
+
+func (_c *Store_UpdateController_Call) Run(run func(info params.UpdateControllerParams)) *Store_UpdateController_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(params.UpdateControllerParams))
+ })
+ return _c
+}
+
+func (_c *Store_UpdateController_Call) Return(_a0 params.ControllerInfo, _a1 error) *Store_UpdateController_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_UpdateController_Call) RunAndReturn(run func(params.UpdateControllerParams) (params.ControllerInfo, error)) *Store_UpdateController_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// UpdateEnterprise provides a mock function with given fields: ctx, enterpriseID, param
func (_m *Store) UpdateEnterprise(ctx context.Context, enterpriseID string, param params.UpdateEntityParams) (params.Enterprise, error) {
ret := _m.Called(ctx, enterpriseID, param)
+ if len(ret) == 0 {
+ panic("no return value specified for UpdateEnterprise")
+ }
+
var r0 params.Enterprise
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, string, params.UpdateEntityParams) (params.Enterprise, error)); ok {
@@ -1410,23 +4396,57 @@ func (_m *Store) UpdateEnterprise(ctx context.Context, enterpriseID string, para
return r0, r1
}
-// UpdateEnterprisePool provides a mock function with given fields: ctx, enterpriseID, poolID, param
-func (_m *Store) UpdateEnterprisePool(ctx context.Context, enterpriseID string, poolID string, param params.UpdatePoolParams) (params.Pool, error) {
- ret := _m.Called(ctx, enterpriseID, poolID, param)
+// Store_UpdateEnterprise_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateEnterprise'
+type Store_UpdateEnterprise_Call struct {
+ *mock.Call
+}
+
+// UpdateEnterprise is a helper method to define mock.On call
+// - ctx context.Context
+// - enterpriseID string
+// - param params.UpdateEntityParams
+func (_e *Store_Expecter) UpdateEnterprise(ctx interface{}, enterpriseID interface{}, param interface{}) *Store_UpdateEnterprise_Call {
+ return &Store_UpdateEnterprise_Call{Call: _e.mock.On("UpdateEnterprise", ctx, enterpriseID, param)}
+}
+
+func (_c *Store_UpdateEnterprise_Call) Run(run func(ctx context.Context, enterpriseID string, param params.UpdateEntityParams)) *Store_UpdateEnterprise_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string), args[2].(params.UpdateEntityParams))
+ })
+ return _c
+}
+
+func (_c *Store_UpdateEnterprise_Call) Return(_a0 params.Enterprise, _a1 error) *Store_UpdateEnterprise_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_UpdateEnterprise_Call) RunAndReturn(run func(context.Context, string, params.UpdateEntityParams) (params.Enterprise, error)) *Store_UpdateEnterprise_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// UpdateEntityPool provides a mock function with given fields: ctx, entity, poolID, param
+func (_m *Store) UpdateEntityPool(ctx context.Context, entity params.ForgeEntity, poolID string, param params.UpdatePoolParams) (params.Pool, error) {
+ ret := _m.Called(ctx, entity, poolID, param)
+
+ if len(ret) == 0 {
+ panic("no return value specified for UpdateEntityPool")
+ }
var r0 params.Pool
var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, string, string, params.UpdatePoolParams) (params.Pool, error)); ok {
- return rf(ctx, enterpriseID, poolID, param)
+ if rf, ok := ret.Get(0).(func(context.Context, params.ForgeEntity, string, params.UpdatePoolParams) (params.Pool, error)); ok {
+ return rf(ctx, entity, poolID, param)
}
- if rf, ok := ret.Get(0).(func(context.Context, string, string, params.UpdatePoolParams) params.Pool); ok {
- r0 = rf(ctx, enterpriseID, poolID, param)
+ if rf, ok := ret.Get(0).(func(context.Context, params.ForgeEntity, string, params.UpdatePoolParams) params.Pool); ok {
+ r0 = rf(ctx, entity, poolID, param)
} else {
r0 = ret.Get(0).(params.Pool)
}
- if rf, ok := ret.Get(1).(func(context.Context, string, string, params.UpdatePoolParams) error); ok {
- r1 = rf(ctx, enterpriseID, poolID, param)
+ if rf, ok := ret.Get(1).(func(context.Context, params.ForgeEntity, string, params.UpdatePoolParams) error); ok {
+ r1 = rf(ctx, entity, poolID, param)
} else {
r1 = ret.Error(1)
}
@@ -1434,23 +4454,350 @@ func (_m *Store) UpdateEnterprisePool(ctx context.Context, enterpriseID string,
return r0, r1
}
-// UpdateInstance provides a mock function with given fields: ctx, instanceID, param
-func (_m *Store) UpdateInstance(ctx context.Context, instanceID string, param params.UpdateInstanceParams) (params.Instance, error) {
- ret := _m.Called(ctx, instanceID, param)
+// Store_UpdateEntityPool_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateEntityPool'
+type Store_UpdateEntityPool_Call struct {
+ *mock.Call
+}
+
+// UpdateEntityPool is a helper method to define mock.On call
+// - ctx context.Context
+// - entity params.ForgeEntity
+// - poolID string
+// - param params.UpdatePoolParams
+func (_e *Store_Expecter) UpdateEntityPool(ctx interface{}, entity interface{}, poolID interface{}, param interface{}) *Store_UpdateEntityPool_Call {
+ return &Store_UpdateEntityPool_Call{Call: _e.mock.On("UpdateEntityPool", ctx, entity, poolID, param)}
+}
+
+func (_c *Store_UpdateEntityPool_Call) Run(run func(ctx context.Context, entity params.ForgeEntity, poolID string, param params.UpdatePoolParams)) *Store_UpdateEntityPool_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(params.ForgeEntity), args[2].(string), args[3].(params.UpdatePoolParams))
+ })
+ return _c
+}
+
+func (_c *Store_UpdateEntityPool_Call) Return(_a0 params.Pool, _a1 error) *Store_UpdateEntityPool_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_UpdateEntityPool_Call) RunAndReturn(run func(context.Context, params.ForgeEntity, string, params.UpdatePoolParams) (params.Pool, error)) *Store_UpdateEntityPool_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// UpdateEntityScaleSet provides a mock function with given fields: _a0, entity, scaleSetID, param, callback
+func (_m *Store) UpdateEntityScaleSet(_a0 context.Context, entity params.ForgeEntity, scaleSetID uint, param params.UpdateScaleSetParams, callback func(params.ScaleSet, params.ScaleSet) error) (params.ScaleSet, error) {
+ ret := _m.Called(_a0, entity, scaleSetID, param, callback)
+
+ if len(ret) == 0 {
+ panic("no return value specified for UpdateEntityScaleSet")
+ }
+
+ var r0 params.ScaleSet
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, params.ForgeEntity, uint, params.UpdateScaleSetParams, func(params.ScaleSet, params.ScaleSet) error) (params.ScaleSet, error)); ok {
+ return rf(_a0, entity, scaleSetID, param, callback)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, params.ForgeEntity, uint, params.UpdateScaleSetParams, func(params.ScaleSet, params.ScaleSet) error) params.ScaleSet); ok {
+ r0 = rf(_a0, entity, scaleSetID, param, callback)
+ } else {
+ r0 = ret.Get(0).(params.ScaleSet)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, params.ForgeEntity, uint, params.UpdateScaleSetParams, func(params.ScaleSet, params.ScaleSet) error) error); ok {
+ r1 = rf(_a0, entity, scaleSetID, param, callback)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// Store_UpdateEntityScaleSet_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateEntityScaleSet'
+type Store_UpdateEntityScaleSet_Call struct {
+ *mock.Call
+}
+
+// UpdateEntityScaleSet is a helper method to define mock.On call
+// - _a0 context.Context
+// - entity params.ForgeEntity
+// - scaleSetID uint
+// - param params.UpdateScaleSetParams
+// - callback func(params.ScaleSet, params.ScaleSet) error
+func (_e *Store_Expecter) UpdateEntityScaleSet(_a0 interface{}, entity interface{}, scaleSetID interface{}, param interface{}, callback interface{}) *Store_UpdateEntityScaleSet_Call {
+ return &Store_UpdateEntityScaleSet_Call{Call: _e.mock.On("UpdateEntityScaleSet", _a0, entity, scaleSetID, param, callback)}
+}
+
+func (_c *Store_UpdateEntityScaleSet_Call) Run(run func(_a0 context.Context, entity params.ForgeEntity, scaleSetID uint, param params.UpdateScaleSetParams, callback func(params.ScaleSet, params.ScaleSet) error)) *Store_UpdateEntityScaleSet_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(params.ForgeEntity), args[2].(uint), args[3].(params.UpdateScaleSetParams), args[4].(func(params.ScaleSet, params.ScaleSet) error))
+ })
+ return _c
+}
+
+func (_c *Store_UpdateEntityScaleSet_Call) Return(updatedScaleSet params.ScaleSet, err error) *Store_UpdateEntityScaleSet_Call {
+ _c.Call.Return(updatedScaleSet, err)
+ return _c
+}
+
+func (_c *Store_UpdateEntityScaleSet_Call) RunAndReturn(run func(context.Context, params.ForgeEntity, uint, params.UpdateScaleSetParams, func(params.ScaleSet, params.ScaleSet) error) (params.ScaleSet, error)) *Store_UpdateEntityScaleSet_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// UpdateGiteaCredentials provides a mock function with given fields: ctx, id, param
+func (_m *Store) UpdateGiteaCredentials(ctx context.Context, id uint, param params.UpdateGiteaCredentialsParams) (params.ForgeCredentials, error) {
+ ret := _m.Called(ctx, id, param)
+
+ if len(ret) == 0 {
+ panic("no return value specified for UpdateGiteaCredentials")
+ }
+
+ var r0 params.ForgeCredentials
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, uint, params.UpdateGiteaCredentialsParams) (params.ForgeCredentials, error)); ok {
+ return rf(ctx, id, param)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, uint, params.UpdateGiteaCredentialsParams) params.ForgeCredentials); ok {
+ r0 = rf(ctx, id, param)
+ } else {
+ r0 = ret.Get(0).(params.ForgeCredentials)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, uint, params.UpdateGiteaCredentialsParams) error); ok {
+ r1 = rf(ctx, id, param)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// Store_UpdateGiteaCredentials_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateGiteaCredentials'
+type Store_UpdateGiteaCredentials_Call struct {
+ *mock.Call
+}
+
+// UpdateGiteaCredentials is a helper method to define mock.On call
+// - ctx context.Context
+// - id uint
+// - param params.UpdateGiteaCredentialsParams
+func (_e *Store_Expecter) UpdateGiteaCredentials(ctx interface{}, id interface{}, param interface{}) *Store_UpdateGiteaCredentials_Call {
+ return &Store_UpdateGiteaCredentials_Call{Call: _e.mock.On("UpdateGiteaCredentials", ctx, id, param)}
+}
+
+func (_c *Store_UpdateGiteaCredentials_Call) Run(run func(ctx context.Context, id uint, param params.UpdateGiteaCredentialsParams)) *Store_UpdateGiteaCredentials_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(uint), args[2].(params.UpdateGiteaCredentialsParams))
+ })
+ return _c
+}
+
+func (_c *Store_UpdateGiteaCredentials_Call) Return(gtCreds params.ForgeCredentials, err error) *Store_UpdateGiteaCredentials_Call {
+ _c.Call.Return(gtCreds, err)
+ return _c
+}
+
+func (_c *Store_UpdateGiteaCredentials_Call) RunAndReturn(run func(context.Context, uint, params.UpdateGiteaCredentialsParams) (params.ForgeCredentials, error)) *Store_UpdateGiteaCredentials_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// UpdateGiteaEndpoint provides a mock function with given fields: _a0, name, param
+func (_m *Store) UpdateGiteaEndpoint(_a0 context.Context, name string, param params.UpdateGiteaEndpointParams) (params.ForgeEndpoint, error) {
+ ret := _m.Called(_a0, name, param)
+
+ if len(ret) == 0 {
+ panic("no return value specified for UpdateGiteaEndpoint")
+ }
+
+ var r0 params.ForgeEndpoint
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, string, params.UpdateGiteaEndpointParams) (params.ForgeEndpoint, error)); ok {
+ return rf(_a0, name, param)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, string, params.UpdateGiteaEndpointParams) params.ForgeEndpoint); ok {
+ r0 = rf(_a0, name, param)
+ } else {
+ r0 = ret.Get(0).(params.ForgeEndpoint)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, string, params.UpdateGiteaEndpointParams) error); ok {
+ r1 = rf(_a0, name, param)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// Store_UpdateGiteaEndpoint_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateGiteaEndpoint'
+type Store_UpdateGiteaEndpoint_Call struct {
+ *mock.Call
+}
+
+// UpdateGiteaEndpoint is a helper method to define mock.On call
+// - _a0 context.Context
+// - name string
+// - param params.UpdateGiteaEndpointParams
+func (_e *Store_Expecter) UpdateGiteaEndpoint(_a0 interface{}, name interface{}, param interface{}) *Store_UpdateGiteaEndpoint_Call {
+ return &Store_UpdateGiteaEndpoint_Call{Call: _e.mock.On("UpdateGiteaEndpoint", _a0, name, param)}
+}
+
+func (_c *Store_UpdateGiteaEndpoint_Call) Run(run func(_a0 context.Context, name string, param params.UpdateGiteaEndpointParams)) *Store_UpdateGiteaEndpoint_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string), args[2].(params.UpdateGiteaEndpointParams))
+ })
+ return _c
+}
+
+func (_c *Store_UpdateGiteaEndpoint_Call) Return(ghEndpoint params.ForgeEndpoint, err error) *Store_UpdateGiteaEndpoint_Call {
+ _c.Call.Return(ghEndpoint, err)
+ return _c
+}
+
+func (_c *Store_UpdateGiteaEndpoint_Call) RunAndReturn(run func(context.Context, string, params.UpdateGiteaEndpointParams) (params.ForgeEndpoint, error)) *Store_UpdateGiteaEndpoint_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// UpdateGithubCredentials provides a mock function with given fields: ctx, id, param
+func (_m *Store) UpdateGithubCredentials(ctx context.Context, id uint, param params.UpdateGithubCredentialsParams) (params.ForgeCredentials, error) {
+ ret := _m.Called(ctx, id, param)
+
+ if len(ret) == 0 {
+ panic("no return value specified for UpdateGithubCredentials")
+ }
+
+ var r0 params.ForgeCredentials
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, uint, params.UpdateGithubCredentialsParams) (params.ForgeCredentials, error)); ok {
+ return rf(ctx, id, param)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, uint, params.UpdateGithubCredentialsParams) params.ForgeCredentials); ok {
+ r0 = rf(ctx, id, param)
+ } else {
+ r0 = ret.Get(0).(params.ForgeCredentials)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, uint, params.UpdateGithubCredentialsParams) error); ok {
+ r1 = rf(ctx, id, param)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// Store_UpdateGithubCredentials_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateGithubCredentials'
+type Store_UpdateGithubCredentials_Call struct {
+ *mock.Call
+}
+
+// UpdateGithubCredentials is a helper method to define mock.On call
+// - ctx context.Context
+// - id uint
+// - param params.UpdateGithubCredentialsParams
+func (_e *Store_Expecter) UpdateGithubCredentials(ctx interface{}, id interface{}, param interface{}) *Store_UpdateGithubCredentials_Call {
+ return &Store_UpdateGithubCredentials_Call{Call: _e.mock.On("UpdateGithubCredentials", ctx, id, param)}
+}
+
+func (_c *Store_UpdateGithubCredentials_Call) Run(run func(ctx context.Context, id uint, param params.UpdateGithubCredentialsParams)) *Store_UpdateGithubCredentials_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(uint), args[2].(params.UpdateGithubCredentialsParams))
+ })
+ return _c
+}
+
+func (_c *Store_UpdateGithubCredentials_Call) Return(_a0 params.ForgeCredentials, _a1 error) *Store_UpdateGithubCredentials_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_UpdateGithubCredentials_Call) RunAndReturn(run func(context.Context, uint, params.UpdateGithubCredentialsParams) (params.ForgeCredentials, error)) *Store_UpdateGithubCredentials_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// UpdateGithubEndpoint provides a mock function with given fields: ctx, name, param
+func (_m *Store) UpdateGithubEndpoint(ctx context.Context, name string, param params.UpdateGithubEndpointParams) (params.ForgeEndpoint, error) {
+ ret := _m.Called(ctx, name, param)
+
+ if len(ret) == 0 {
+ panic("no return value specified for UpdateGithubEndpoint")
+ }
+
+ var r0 params.ForgeEndpoint
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, string, params.UpdateGithubEndpointParams) (params.ForgeEndpoint, error)); ok {
+ return rf(ctx, name, param)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, string, params.UpdateGithubEndpointParams) params.ForgeEndpoint); ok {
+ r0 = rf(ctx, name, param)
+ } else {
+ r0 = ret.Get(0).(params.ForgeEndpoint)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, string, params.UpdateGithubEndpointParams) error); ok {
+ r1 = rf(ctx, name, param)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// Store_UpdateGithubEndpoint_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateGithubEndpoint'
+type Store_UpdateGithubEndpoint_Call struct {
+ *mock.Call
+}
+
+// UpdateGithubEndpoint is a helper method to define mock.On call
+// - ctx context.Context
+// - name string
+// - param params.UpdateGithubEndpointParams
+func (_e *Store_Expecter) UpdateGithubEndpoint(ctx interface{}, name interface{}, param interface{}) *Store_UpdateGithubEndpoint_Call {
+ return &Store_UpdateGithubEndpoint_Call{Call: _e.mock.On("UpdateGithubEndpoint", ctx, name, param)}
+}
+
+func (_c *Store_UpdateGithubEndpoint_Call) Run(run func(ctx context.Context, name string, param params.UpdateGithubEndpointParams)) *Store_UpdateGithubEndpoint_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string), args[2].(params.UpdateGithubEndpointParams))
+ })
+ return _c
+}
+
+func (_c *Store_UpdateGithubEndpoint_Call) Return(_a0 params.ForgeEndpoint, _a1 error) *Store_UpdateGithubEndpoint_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_UpdateGithubEndpoint_Call) RunAndReturn(run func(context.Context, string, params.UpdateGithubEndpointParams) (params.ForgeEndpoint, error)) *Store_UpdateGithubEndpoint_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// UpdateInstance provides a mock function with given fields: ctx, instanceNameOrID, param
+func (_m *Store) UpdateInstance(ctx context.Context, instanceNameOrID string, param params.UpdateInstanceParams) (params.Instance, error) {
+ ret := _m.Called(ctx, instanceNameOrID, param)
+
+ if len(ret) == 0 {
+ panic("no return value specified for UpdateInstance")
+ }
var r0 params.Instance
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, string, params.UpdateInstanceParams) (params.Instance, error)); ok {
- return rf(ctx, instanceID, param)
+ return rf(ctx, instanceNameOrID, param)
}
if rf, ok := ret.Get(0).(func(context.Context, string, params.UpdateInstanceParams) params.Instance); ok {
- r0 = rf(ctx, instanceID, param)
+ r0 = rf(ctx, instanceNameOrID, param)
} else {
r0 = ret.Get(0).(params.Instance)
}
if rf, ok := ret.Get(1).(func(context.Context, string, params.UpdateInstanceParams) error); ok {
- r1 = rf(ctx, instanceID, param)
+ r1 = rf(ctx, instanceNameOrID, param)
} else {
r1 = ret.Error(1)
}
@@ -1458,10 +4805,44 @@ func (_m *Store) UpdateInstance(ctx context.Context, instanceID string, param pa
return r0, r1
}
+// Store_UpdateInstance_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateInstance'
+type Store_UpdateInstance_Call struct {
+ *mock.Call
+}
+
+// UpdateInstance is a helper method to define mock.On call
+// - ctx context.Context
+// - instanceNameOrID string
+// - param params.UpdateInstanceParams
+func (_e *Store_Expecter) UpdateInstance(ctx interface{}, instanceNameOrID interface{}, param interface{}) *Store_UpdateInstance_Call {
+ return &Store_UpdateInstance_Call{Call: _e.mock.On("UpdateInstance", ctx, instanceNameOrID, param)}
+}
+
+func (_c *Store_UpdateInstance_Call) Run(run func(ctx context.Context, instanceNameOrID string, param params.UpdateInstanceParams)) *Store_UpdateInstance_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string), args[2].(params.UpdateInstanceParams))
+ })
+ return _c
+}
+
+func (_c *Store_UpdateInstance_Call) Return(_a0 params.Instance, _a1 error) *Store_UpdateInstance_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_UpdateInstance_Call) RunAndReturn(run func(context.Context, string, params.UpdateInstanceParams) (params.Instance, error)) *Store_UpdateInstance_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// UpdateOrganization provides a mock function with given fields: ctx, orgID, param
func (_m *Store) UpdateOrganization(ctx context.Context, orgID string, param params.UpdateEntityParams) (params.Organization, error) {
ret := _m.Called(ctx, orgID, param)
+ if len(ret) == 0 {
+ panic("no return value specified for UpdateOrganization")
+ }
+
var r0 params.Organization
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, string, params.UpdateEntityParams) (params.Organization, error)); ok {
@@ -1482,34 +4863,44 @@ func (_m *Store) UpdateOrganization(ctx context.Context, orgID string, param par
return r0, r1
}
-// UpdateOrganizationPool provides a mock function with given fields: ctx, orgID, poolID, param
-func (_m *Store) UpdateOrganizationPool(ctx context.Context, orgID string, poolID string, param params.UpdatePoolParams) (params.Pool, error) {
- ret := _m.Called(ctx, orgID, poolID, param)
+// Store_UpdateOrganization_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateOrganization'
+type Store_UpdateOrganization_Call struct {
+ *mock.Call
+}
- var r0 params.Pool
- var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, string, string, params.UpdatePoolParams) (params.Pool, error)); ok {
- return rf(ctx, orgID, poolID, param)
- }
- if rf, ok := ret.Get(0).(func(context.Context, string, string, params.UpdatePoolParams) params.Pool); ok {
- r0 = rf(ctx, orgID, poolID, param)
- } else {
- r0 = ret.Get(0).(params.Pool)
- }
+// UpdateOrganization is a helper method to define mock.On call
+// - ctx context.Context
+// - orgID string
+// - param params.UpdateEntityParams
+func (_e *Store_Expecter) UpdateOrganization(ctx interface{}, orgID interface{}, param interface{}) *Store_UpdateOrganization_Call {
+ return &Store_UpdateOrganization_Call{Call: _e.mock.On("UpdateOrganization", ctx, orgID, param)}
+}
- if rf, ok := ret.Get(1).(func(context.Context, string, string, params.UpdatePoolParams) error); ok {
- r1 = rf(ctx, orgID, poolID, param)
- } else {
- r1 = ret.Error(1)
- }
+func (_c *Store_UpdateOrganization_Call) Run(run func(ctx context.Context, orgID string, param params.UpdateEntityParams)) *Store_UpdateOrganization_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string), args[2].(params.UpdateEntityParams))
+ })
+ return _c
+}
- return r0, r1
+func (_c *Store_UpdateOrganization_Call) Return(_a0 params.Organization, _a1 error) *Store_UpdateOrganization_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_UpdateOrganization_Call) RunAndReturn(run func(context.Context, string, params.UpdateEntityParams) (params.Organization, error)) *Store_UpdateOrganization_Call {
+ _c.Call.Return(run)
+ return _c
}
// UpdateRepository provides a mock function with given fields: ctx, repoID, param
func (_m *Store) UpdateRepository(ctx context.Context, repoID string, param params.UpdateEntityParams) (params.Repository, error) {
ret := _m.Called(ctx, repoID, param)
+ if len(ret) == 0 {
+ panic("no return value specified for UpdateRepository")
+ }
+
var r0 params.Repository
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, string, params.UpdateEntityParams) (params.Repository, error)); ok {
@@ -1530,34 +4921,44 @@ func (_m *Store) UpdateRepository(ctx context.Context, repoID string, param para
return r0, r1
}
-// UpdateRepositoryPool provides a mock function with given fields: ctx, repoID, poolID, param
-func (_m *Store) UpdateRepositoryPool(ctx context.Context, repoID string, poolID string, param params.UpdatePoolParams) (params.Pool, error) {
- ret := _m.Called(ctx, repoID, poolID, param)
+// Store_UpdateRepository_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateRepository'
+type Store_UpdateRepository_Call struct {
+ *mock.Call
+}
- var r0 params.Pool
- var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, string, string, params.UpdatePoolParams) (params.Pool, error)); ok {
- return rf(ctx, repoID, poolID, param)
- }
- if rf, ok := ret.Get(0).(func(context.Context, string, string, params.UpdatePoolParams) params.Pool); ok {
- r0 = rf(ctx, repoID, poolID, param)
- } else {
- r0 = ret.Get(0).(params.Pool)
- }
+// UpdateRepository is a helper method to define mock.On call
+// - ctx context.Context
+// - repoID string
+// - param params.UpdateEntityParams
+func (_e *Store_Expecter) UpdateRepository(ctx interface{}, repoID interface{}, param interface{}) *Store_UpdateRepository_Call {
+ return &Store_UpdateRepository_Call{Call: _e.mock.On("UpdateRepository", ctx, repoID, param)}
+}
- if rf, ok := ret.Get(1).(func(context.Context, string, string, params.UpdatePoolParams) error); ok {
- r1 = rf(ctx, repoID, poolID, param)
- } else {
- r1 = ret.Error(1)
- }
+func (_c *Store_UpdateRepository_Call) Run(run func(ctx context.Context, repoID string, param params.UpdateEntityParams)) *Store_UpdateRepository_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string), args[2].(params.UpdateEntityParams))
+ })
+ return _c
+}
- return r0, r1
+func (_c *Store_UpdateRepository_Call) Return(_a0 params.Repository, _a1 error) *Store_UpdateRepository_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_UpdateRepository_Call) RunAndReturn(run func(context.Context, string, params.UpdateEntityParams) (params.Repository, error)) *Store_UpdateRepository_Call {
+ _c.Call.Return(run)
+ return _c
}
// UpdateUser provides a mock function with given fields: ctx, user, param
func (_m *Store) UpdateUser(ctx context.Context, user string, param params.UpdateUserParams) (params.User, error) {
ret := _m.Called(ctx, user, param)
+ if len(ret) == 0 {
+ panic("no return value specified for UpdateUser")
+ }
+
var r0 params.User
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, string, params.UpdateUserParams) (params.User, error)); ok {
@@ -1578,6 +4979,36 @@ func (_m *Store) UpdateUser(ctx context.Context, user string, param params.Updat
return r0, r1
}
+// Store_UpdateUser_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateUser'
+type Store_UpdateUser_Call struct {
+ *mock.Call
+}
+
+// UpdateUser is a helper method to define mock.On call
+// - ctx context.Context
+// - user string
+// - param params.UpdateUserParams
+func (_e *Store_Expecter) UpdateUser(ctx interface{}, user interface{}, param interface{}) *Store_UpdateUser_Call {
+ return &Store_UpdateUser_Call{Call: _e.mock.On("UpdateUser", ctx, user, param)}
+}
+
+func (_c *Store_UpdateUser_Call) Run(run func(ctx context.Context, user string, param params.UpdateUserParams)) *Store_UpdateUser_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string), args[2].(params.UpdateUserParams))
+ })
+ return _c
+}
+
+func (_c *Store_UpdateUser_Call) Return(_a0 params.User, _a1 error) *Store_UpdateUser_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_UpdateUser_Call) RunAndReturn(run func(context.Context, string, params.UpdateUserParams) (params.User, error)) *Store_UpdateUser_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
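+// A minimal expecter-style setup using the typed helpers above (a sketch: it
+// assumes the standard mockery EXPECT() accessor generated alongside this
+// mock, and the "admin" value is illustrative):
+//
+//     store := NewStore(t)
+//     store.EXPECT().
+//         UpdateUser(mock.Anything, "admin", mock.Anything).
+//         Return(params.User{}, nil)
+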
// NewStore creates a new instance of Store. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewStore(t interface {
diff --git a/database/common/store.go b/database/common/store.go
new file mode 100644
index 00000000..0cf5d929
--- /dev/null
+++ b/database/common/store.go
@@ -0,0 +1,193 @@
+// Copyright 2022 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package common
+
+import (
+ "context"
+
+ "github.com/cloudbase/garm/params"
+)
+
+type GithubEndpointStore interface {
+ CreateGithubEndpoint(ctx context.Context, param params.CreateGithubEndpointParams) (params.ForgeEndpoint, error)
+ GetGithubEndpoint(ctx context.Context, name string) (params.ForgeEndpoint, error)
+ ListGithubEndpoints(ctx context.Context) ([]params.ForgeEndpoint, error)
+ UpdateGithubEndpoint(ctx context.Context, name string, param params.UpdateGithubEndpointParams) (params.ForgeEndpoint, error)
+ DeleteGithubEndpoint(ctx context.Context, name string) error
+}
+
+type GithubCredentialsStore interface {
+ CreateGithubCredentials(ctx context.Context, param params.CreateGithubCredentialsParams) (params.ForgeCredentials, error)
+ GetGithubCredentials(ctx context.Context, id uint, detailed bool) (params.ForgeCredentials, error)
+ GetGithubCredentialsByName(ctx context.Context, name string, detailed bool) (params.ForgeCredentials, error)
+ ListGithubCredentials(ctx context.Context) ([]params.ForgeCredentials, error)
+ UpdateGithubCredentials(ctx context.Context, id uint, param params.UpdateGithubCredentialsParams) (params.ForgeCredentials, error)
+ DeleteGithubCredentials(ctx context.Context, id uint) error
+}
+
+type RepoStore interface {
+ CreateRepository(ctx context.Context, owner, name string, credentials params.ForgeCredentials, webhookSecret string, poolBalancerType params.PoolBalancerType) (param params.Repository, err error)
+ GetRepository(ctx context.Context, owner, name, endpointName string) (params.Repository, error)
+ GetRepositoryByID(ctx context.Context, repoID string) (params.Repository, error)
+ ListRepositories(ctx context.Context, filter params.RepositoryFilter) ([]params.Repository, error)
+ DeleteRepository(ctx context.Context, repoID string) error
+ UpdateRepository(ctx context.Context, repoID string, param params.UpdateEntityParams) (params.Repository, error)
+}
+
+type OrgStore interface {
+ CreateOrganization(ctx context.Context, name string, credentials params.ForgeCredentials, webhookSecret string, poolBalancerType params.PoolBalancerType) (org params.Organization, err error)
+ GetOrganization(ctx context.Context, name, endpointName string) (params.Organization, error)
+ GetOrganizationByID(ctx context.Context, orgID string) (params.Organization, error)
+ ListOrganizations(ctx context.Context, filter params.OrganizationFilter) ([]params.Organization, error)
+ DeleteOrganization(ctx context.Context, orgID string) error
+ UpdateOrganization(ctx context.Context, orgID string, param params.UpdateEntityParams) (params.Organization, error)
+}
+
+type EnterpriseStore interface {
+ CreateEnterprise(ctx context.Context, name string, credentials params.ForgeCredentials, webhookSecret string, poolBalancerType params.PoolBalancerType) (params.Enterprise, error)
+ GetEnterprise(ctx context.Context, name, endpointName string) (params.Enterprise, error)
+ GetEnterpriseByID(ctx context.Context, enterpriseID string) (params.Enterprise, error)
+ ListEnterprises(ctx context.Context, filter params.EnterpriseFilter) ([]params.Enterprise, error)
+ DeleteEnterprise(ctx context.Context, enterpriseID string) error
+ UpdateEnterprise(ctx context.Context, enterpriseID string, param params.UpdateEntityParams) (params.Enterprise, error)
+}
+
+type PoolStore interface {
+ // Probably a bad idea without some kind of filter or at least pagination
+ // nolint:golangci-lint,godox
+ // TODO: add filter/pagination
+ ListAllPools(ctx context.Context) ([]params.Pool, error)
+ GetPoolByID(ctx context.Context, poolID string) (params.Pool, error)
+ DeletePoolByID(ctx context.Context, poolID string) error
+
+ ListPoolInstances(ctx context.Context, poolID string) ([]params.Instance, error)
+
+ PoolInstanceCount(ctx context.Context, poolID string) (int64, error)
+ FindPoolsMatchingAllTags(ctx context.Context, entityType params.ForgeEntityType, entityID string, tags []string) ([]params.Pool, error)
+}
+
+type UserStore interface {
+ GetUser(ctx context.Context, user string) (params.User, error)
+ GetUserByID(ctx context.Context, userID string) (params.User, error)
+ GetAdminUser(ctx context.Context) (params.User, error)
+
+ CreateUser(ctx context.Context, user params.NewUserParams) (params.User, error)
+ UpdateUser(ctx context.Context, user string, param params.UpdateUserParams) (params.User, error)
+ HasAdminUser(ctx context.Context) bool
+}
+
+type InstanceStore interface {
+ CreateInstance(ctx context.Context, poolID string, param params.CreateInstanceParams) (params.Instance, error)
+ DeleteInstance(ctx context.Context, poolID string, instanceNameOrID string) error
+ DeleteInstanceByName(ctx context.Context, instanceName string) error
+ UpdateInstance(ctx context.Context, instanceNameOrID string, param params.UpdateInstanceParams) (params.Instance, error)
+
+ // Probably a bad idea without some kind of filter or at least pagination
+ //
+ // nolint:golangci-lint,godox
+ // TODO: add filter/pagination
+ ListAllInstances(ctx context.Context) ([]params.Instance, error)
+
+ GetInstance(ctx context.Context, instanceNameOrID string) (params.Instance, error)
+ AddInstanceEvent(ctx context.Context, instanceNameOrID string, event params.EventType, eventLevel params.EventLevel, eventMessage string) error
+}
+
+type JobsStore interface {
+ CreateOrUpdateJob(ctx context.Context, job params.Job) (params.Job, error)
+ ListEntityJobsByStatus(ctx context.Context, entityType params.ForgeEntityType, entityID string, status params.JobStatus) ([]params.Job, error)
+ ListJobsByStatus(ctx context.Context, status params.JobStatus) ([]params.Job, error)
+ ListAllJobs(ctx context.Context) ([]params.Job, error)
+
+ GetJobByID(ctx context.Context, jobID int64) (params.Job, error)
+ DeleteJob(ctx context.Context, jobID int64) error
+ UnlockJob(ctx context.Context, jobID int64, entityID string) error
+ LockJob(ctx context.Context, jobID int64, entityID string) error
+ BreakLockJobIsQueued(ctx context.Context, jobID int64) error
+
+ DeleteCompletedJobs(ctx context.Context) error
+}
+
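+// Narrow interfaces like JobsStore above let consumers depend on only the
+// store surface they need, which keeps test doubles small. A short sketch
+// (the function below is hypothetical, not part of this package):
+//
+//     func pruneFinishedJobs(ctx context.Context, s JobsStore) error {
+//         return s.DeleteCompletedJobs(ctx)
+//     }
+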
+type EntityPoolStore interface {
+ CreateEntityPool(ctx context.Context, entity params.ForgeEntity, param params.CreatePoolParams) (params.Pool, error)
+ GetEntityPool(ctx context.Context, entity params.ForgeEntity, poolID string) (params.Pool, error)
+ DeleteEntityPool(ctx context.Context, entity params.ForgeEntity, poolID string) error
+ UpdateEntityPool(ctx context.Context, entity params.ForgeEntity, poolID string, param params.UpdatePoolParams) (params.Pool, error)
+
+ ListEntityPools(ctx context.Context, entity params.ForgeEntity) ([]params.Pool, error)
+ ListEntityInstances(ctx context.Context, entity params.ForgeEntity) ([]params.Instance, error)
+}
+
+type ControllerStore interface {
+ ControllerInfo() (params.ControllerInfo, error)
+ InitController() (params.ControllerInfo, error)
+ UpdateController(info params.UpdateControllerParams) (params.ControllerInfo, error)
+}
+
+type ScaleSetsStore interface {
+ ListAllScaleSets(ctx context.Context) ([]params.ScaleSet, error)
+ CreateEntityScaleSet(_ context.Context, entity params.ForgeEntity, param params.CreateScaleSetParams) (scaleSet params.ScaleSet, err error)
+ ListEntityScaleSets(_ context.Context, entity params.ForgeEntity) ([]params.ScaleSet, error)
+ UpdateEntityScaleSet(_ context.Context, entity params.ForgeEntity, scaleSetID uint, param params.UpdateScaleSetParams, callback func(old, newSet params.ScaleSet) error) (updatedScaleSet params.ScaleSet, err error)
+ GetScaleSetByID(ctx context.Context, scaleSet uint) (params.ScaleSet, error)
+ DeleteScaleSetByID(ctx context.Context, scaleSetID uint) (err error)
+ SetScaleSetLastMessageID(ctx context.Context, scaleSetID uint, lastMessageID int64) error
+ SetScaleSetDesiredRunnerCount(ctx context.Context, scaleSetID uint, desiredRunnerCount int) error
+}
+
+type ScaleSetInstanceStore interface {
+ ListScaleSetInstances(_ context.Context, scalesetID uint) ([]params.Instance, error)
+ CreateScaleSetInstance(_ context.Context, scaleSetID uint, param params.CreateInstanceParams) (instance params.Instance, err error)
+}
+
+type GiteaEndpointStore interface {
+ CreateGiteaEndpoint(_ context.Context, param params.CreateGiteaEndpointParams) (ghEndpoint params.ForgeEndpoint, err error)
+ ListGiteaEndpoints(_ context.Context) ([]params.ForgeEndpoint, error)
+ DeleteGiteaEndpoint(_ context.Context, name string) (err error)
+ GetGiteaEndpoint(_ context.Context, name string) (params.ForgeEndpoint, error)
+ UpdateGiteaEndpoint(_ context.Context, name string, param params.UpdateGiteaEndpointParams) (ghEndpoint params.ForgeEndpoint, err error)
+}
+
+type GiteaCredentialsStore interface {
+ CreateGiteaCredentials(ctx context.Context, param params.CreateGiteaCredentialsParams) (gtCreds params.ForgeCredentials, err error)
+ GetGiteaCredentialsByName(ctx context.Context, name string, detailed bool) (params.ForgeCredentials, error)
+ GetGiteaCredentials(ctx context.Context, id uint, detailed bool) (params.ForgeCredentials, error)
+ ListGiteaCredentials(ctx context.Context) ([]params.ForgeCredentials, error)
+ UpdateGiteaCredentials(ctx context.Context, id uint, param params.UpdateGiteaCredentialsParams) (gtCreds params.ForgeCredentials, err error)
+ DeleteGiteaCredentials(ctx context.Context, id uint) (err error)
+}
+
+//go:generate go run github.com/vektra/mockery/v2@latest
+type Store interface {
+ RepoStore
+ OrgStore
+ EnterpriseStore
+ PoolStore
+ UserStore
+ InstanceStore
+ JobsStore
+ GithubEndpointStore
+ GithubCredentialsStore
+ ControllerStore
+ EntityPoolStore
+ ScaleSetsStore
+ ScaleSetInstanceStore
+ GiteaEndpointStore
+ GiteaCredentialsStore
+
+ ControllerInfo() (params.ControllerInfo, error)
+ InitController() (params.ControllerInfo, error)
+ GetForgeEntity(_ context.Context, entityType params.ForgeEntityType, entityID string) (params.ForgeEntity, error)
+ AddEntityEvent(ctx context.Context, entity params.ForgeEntity, event params.EventType, eventLevel params.EventLevel, statusMessage string, maxEvents int) error
+}
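+
+// Illustrative sketch only (not part of this change): a consumer of the
+// composed Store can resolve an entity and attach a status event to it.
+// The helper name, the entity ID, and the params.StatusEvent and
+// params.EventInfo constant names are assumptions for the example.
+//
+//	func recordEvent(ctx context.Context, store Store, entityID string) error {
+//		entity, err := store.GetForgeEntity(ctx, params.ForgeEntityTypeRepository, entityID)
+//		if err != nil {
+//			return err
+//		}
+//		return store.AddEntityEvent(ctx, entity, params.StatusEvent, params.EventInfo, "pool scaled up", 30)
+//	}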
diff --git a/database/common/watcher.go b/database/common/watcher.go
new file mode 100644
index 00000000..94152094
--- /dev/null
+++ b/database/common/watcher.go
@@ -0,0 +1,69 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package common
+
+import "context"
+
+type (
+ DatabaseEntityType string
+ OperationType string
+ PayloadFilterFunc func(ChangePayload) bool
+)
+
+const (
+ RepositoryEntityType DatabaseEntityType = "repository"
+ OrganizationEntityType DatabaseEntityType = "organization"
+ EnterpriseEntityType DatabaseEntityType = "enterprise"
+ PoolEntityType DatabaseEntityType = "pool"
+ UserEntityType DatabaseEntityType = "user"
+ InstanceEntityType DatabaseEntityType = "instance"
+ JobEntityType DatabaseEntityType = "job"
+ ControllerEntityType DatabaseEntityType = "controller"
+ GithubCredentialsEntityType DatabaseEntityType = "github_credentials" // #nosec G101
+ GiteaCredentialsEntityType DatabaseEntityType = "gitea_credentials" // #nosec G101
+ GithubEndpointEntityType DatabaseEntityType = "github_endpoint"
+ ScaleSetEntityType DatabaseEntityType = "scaleset"
+)
+
+const (
+ CreateOperation OperationType = "create"
+ UpdateOperation OperationType = "update"
+ DeleteOperation OperationType = "delete"
+)
+
+type ChangePayload struct {
+ EntityType DatabaseEntityType `json:"entity-type"`
+ Operation OperationType `json:"operation"`
+ Payload interface{} `json:"payload"`
+}
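+
+// When serialized, a payload looks like (illustrative values only):
+//
+//	{"entity-type": "pool", "operation": "update", "payload": {...}}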
+
+type Consumer interface {
+ Watch() <-chan ChangePayload
+ IsClosed() bool
+ Close()
+ SetFilters(filters ...PayloadFilterFunc)
+}
+
+type Producer interface {
+ Notify(ChangePayload) error
+ IsClosed() bool
+ Close()
+}
+
+type Watcher interface {
+ RegisterProducer(ctx context.Context, ID string) (Producer, error)
+ RegisterConsumer(ctx context.Context, ID string, filters ...PayloadFilterFunc) (Consumer, error)
+ Close()
+}
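+
+// Illustrative sketch only: given a Watcher implementation, a component can
+// subscribe to a filtered stream of changes. The consumer ID used here is
+// hypothetical.
+//
+//	consumer, err := watcher.RegisterConsumer(ctx, "pool-updates",
+//		func(p ChangePayload) bool {
+//			return p.EntityType == PoolEntityType && p.Operation == UpdateOperation
+//		})
+//	if err != nil {
+//		return err
+//	}
+//	defer consumer.Close()
+//	for payload := range consumer.Watch() {
+//		// payload.Payload carries whatever the producer sent via Notify,
+//		// e.g. a params.Pool for PoolEntityType.
+//	}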
diff --git a/database/sql/common_test.go b/database/sql/common_test.go
new file mode 100644
index 00000000..a3c62e06
--- /dev/null
+++ b/database/sql/common_test.go
@@ -0,0 +1,21 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package sql
+
+const (
+ wrongPassphrase = "wrong-passphrase"
+ webhookSecret = "webhook-secret"
+ falseString = "false"
+)
diff --git a/database/sql/controller.go b/database/sql/controller.go
index 7c2baf65..5bf60763 100644
--- a/database/sql/controller.go
+++ b/database/sql/controller.go
@@ -15,26 +15,52 @@
package sql
import (
- runnerErrors "github.com/cloudbase/garm-provider-common/errors"
- "github.com/cloudbase/garm/params"
+ "errors"
+ "fmt"
+ "net/url"
"github.com/google/uuid"
- "github.com/pkg/errors"
"gorm.io/gorm"
+
+ runnerErrors "github.com/cloudbase/garm-provider-common/errors"
+ "github.com/cloudbase/garm/database/common"
+ "github.com/cloudbase/garm/params"
+ "github.com/cloudbase/garm/util/appdefaults"
)
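+// dbControllerToCommonController converts the database model into the
+// API-facing params struct. The per-controller webhook URL is derived by
+// joining the base webhook URL with the controller ID, e.g. (illustrative
+// values) "https://garm.example.com/webhooks" joined with "<uuid>" yields
+// "https://garm.example.com/webhooks/<uuid>".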
+func dbControllerToCommonController(dbInfo ControllerInfo) (params.ControllerInfo, error) {
+ url, err := url.JoinPath(dbInfo.WebhookBaseURL, dbInfo.ControllerID.String())
+ if err != nil {
+ return params.ControllerInfo{}, fmt.Errorf("error joining webhook URL: %w", err)
+ }
+
+ return params.ControllerInfo{
+ ControllerID: dbInfo.ControllerID,
+ MetadataURL: dbInfo.MetadataURL,
+ WebhookURL: dbInfo.WebhookBaseURL,
+ ControllerWebhookURL: url,
+ CallbackURL: dbInfo.CallbackURL,
+ MinimumJobAgeBackoff: dbInfo.MinimumJobAgeBackoff,
+ Version: appdefaults.GetVersion(),
+ }, nil
+}
+
func (s *sqlDatabase) ControllerInfo() (params.ControllerInfo, error) {
var info ControllerInfo
q := s.conn.Model(&ControllerInfo{}).First(&info)
if q.Error != nil {
if errors.Is(q.Error, gorm.ErrRecordNotFound) {
- return params.ControllerInfo{}, errors.Wrap(runnerErrors.ErrNotFound, "fetching controller info")
+ return params.ControllerInfo{}, fmt.Errorf("error fetching controller info: %w", runnerErrors.ErrNotFound)
}
- return params.ControllerInfo{}, errors.Wrap(q.Error, "fetching controller info")
+ return params.ControllerInfo{}, fmt.Errorf("error fetching controller info: %w", q.Error)
}
- return params.ControllerInfo{
- ControllerID: info.ControllerID,
- }, nil
+
+ paramInfo, err := dbControllerToCommonController(info)
+ if err != nil {
+ return params.ControllerInfo{}, fmt.Errorf("error converting controller info: %w", err)
+ }
+
+ return paramInfo, nil
}
func (s *sqlDatabase) InitController() (params.ControllerInfo, error) {
@@ -44,19 +70,73 @@ func (s *sqlDatabase) InitController() (params.ControllerInfo, error) {
newID, err := uuid.NewRandom()
if err != nil {
- return params.ControllerInfo{}, errors.Wrap(err, "generating UUID")
+ return params.ControllerInfo{}, fmt.Errorf("error generating UUID: %w", err)
}
newInfo := ControllerInfo{
- ControllerID: newID,
+ ControllerID: newID,
+ MinimumJobAgeBackoff: 30,
}
q := s.conn.Save(&newInfo)
if q.Error != nil {
- return params.ControllerInfo{}, errors.Wrap(q.Error, "saving controller info")
+ return params.ControllerInfo{}, fmt.Errorf("error saving controller info: %w", q.Error)
}
return params.ControllerInfo{
ControllerID: newInfo.ControllerID,
}, nil
}
+
+func (s *sqlDatabase) UpdateController(info params.UpdateControllerParams) (paramInfo params.ControllerInfo, err error) {
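+	// The named return values let this deferred closure observe the final
+	// err and payload, so watchers are only notified when the update
+	// actually succeeded.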
+ defer func() {
+ if err == nil {
+ s.sendNotify(common.ControllerEntityType, common.UpdateOperation, paramInfo)
+ }
+ }()
+ var dbInfo ControllerInfo
+ err = s.conn.Transaction(func(tx *gorm.DB) error {
+ q := tx.Model(&ControllerInfo{}).First(&dbInfo)
+ if q.Error != nil {
+ if errors.Is(q.Error, gorm.ErrRecordNotFound) {
+ return fmt.Errorf("error fetching controller info: %w", runnerErrors.ErrNotFound)
+ }
+ return fmt.Errorf("error fetching controller info: %w", q.Error)
+ }
+
+ if err := info.Validate(); err != nil {
+ return fmt.Errorf("error validating controller info: %w", err)
+ }
+
+ if info.MetadataURL != nil {
+ dbInfo.MetadataURL = *info.MetadataURL
+ }
+
+ if info.CallbackURL != nil {
+ dbInfo.CallbackURL = *info.CallbackURL
+ }
+
+ if info.WebhookURL != nil {
+ dbInfo.WebhookBaseURL = *info.WebhookURL
+ }
+
+ if info.MinimumJobAgeBackoff != nil {
+ dbInfo.MinimumJobAgeBackoff = *info.MinimumJobAgeBackoff
+ }
+
+ q = tx.Save(&dbInfo)
+ if q.Error != nil {
+ return fmt.Errorf("error saving controller info: %w", q.Error)
+ }
+ return nil
+ })
+ if err != nil {
+ return params.ControllerInfo{}, fmt.Errorf("error updating controller info: %w", err)
+ }
+
+ paramInfo, err = dbControllerToCommonController(dbInfo)
+ if err != nil {
+ return params.ControllerInfo{}, fmt.Errorf("error converting controller info: %w", err)
+ }
+ return paramInfo, nil
+}
diff --git a/database/sql/controller_test.go b/database/sql/controller_test.go
index 7f82160c..949f675f 100644
--- a/database/sql/controller_test.go
+++ b/database/sql/controller_test.go
@@ -19,11 +19,11 @@ import (
"fmt"
"testing"
+ "github.com/stretchr/testify/suite"
+
runnerErrors "github.com/cloudbase/garm-provider-common/errors"
dbCommon "github.com/cloudbase/garm/database/common"
- garmTesting "github.com/cloudbase/garm/internal/testing"
-
- "github.com/stretchr/testify/suite"
+ garmTesting "github.com/cloudbase/garm/internal/testing" //nolint:typecheck
)
type CtrlTestSuite struct {
@@ -69,6 +69,5 @@ func (s *CtrlTestSuite) TestInitControllerAlreadyInitialized() {
}
func TestCtrlTestSuite(t *testing.T) {
- t.Parallel()
suite.Run(t, new(CtrlTestSuite))
}
diff --git a/database/sql/enterprise.go b/database/sql/enterprise.go
index 0a1ea81b..d201cd21 100644
--- a/database/sql/enterprise.go
+++ b/database/sql/enterprise.go
@@ -1,84 +1,140 @@
+// Copyright 2024 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
package sql
import (
"context"
+ "errors"
+ "fmt"
+ "log/slog"
+
+ "github.com/google/uuid"
+ "gorm.io/gorm"
runnerErrors "github.com/cloudbase/garm-provider-common/errors"
"github.com/cloudbase/garm-provider-common/util"
+ "github.com/cloudbase/garm/database/common"
"github.com/cloudbase/garm/params"
-
- "github.com/google/uuid"
- "github.com/pkg/errors"
- "gorm.io/datatypes"
- "gorm.io/gorm"
)
-func (s *sqlDatabase) CreateEnterprise(ctx context.Context, name, credentialsName, webhookSecret string) (params.Enterprise, error) {
+func (s *sqlDatabase) CreateEnterprise(ctx context.Context, name string, credentials params.ForgeCredentials, webhookSecret string, poolBalancerType params.PoolBalancerType) (paramEnt params.Enterprise, err error) {
if webhookSecret == "" {
return params.Enterprise{}, errors.New("creating enterprise: missing secret")
}
- secret, err := util.Aes256EncodeString(webhookSecret, s.cfg.Passphrase)
- if err != nil {
- return params.Enterprise{}, errors.Wrap(err, "encoding secret")
+ if credentials.ForgeType != params.GithubEndpointType {
+ return params.Enterprise{}, fmt.Errorf("enterprises are not supported on this forge type: %w", runnerErrors.ErrBadRequest)
}
+
+ secret, err := util.Seal([]byte(webhookSecret), []byte(s.cfg.Passphrase))
+ if err != nil {
+ return params.Enterprise{}, fmt.Errorf("error encoding secret: %w", err)
+ }
+
+ defer func() {
+ if err == nil {
+ s.sendNotify(common.EnterpriseEntityType, common.CreateOperation, paramEnt)
+ }
+ }()
newEnterprise := Enterprise{
- Name: name,
- WebhookSecret: secret,
- CredentialsName: credentialsName,
+ Name: name,
+ WebhookSecret: secret,
+ PoolBalancerType: poolBalancerType,
}
+ err = s.conn.Transaction(func(tx *gorm.DB) error {
+ newEnterprise.CredentialsID = &credentials.ID
+ newEnterprise.EndpointName = &credentials.Endpoint.Name
- q := s.conn.Create(&newEnterprise)
- if q.Error != nil {
- return params.Enterprise{}, errors.Wrap(q.Error, "creating enterprise")
- }
+ q := tx.Create(&newEnterprise)
+ if q.Error != nil {
+ return fmt.Errorf("error creating enterprise: %w", q.Error)
+ }
- param, err := s.sqlToCommonEnterprise(newEnterprise)
+ newEnterprise, err = s.getEnterpriseByID(ctx, tx, newEnterprise.ID.String(), "Pools", "Credentials", "Endpoint", "Credentials.Endpoint")
+ if err != nil {
+ return fmt.Errorf("error creating enterprise: %w", err)
+ }
+ return nil
+ })
if err != nil {
- return params.Enterprise{}, errors.Wrap(err, "creating enterprise")
+ return params.Enterprise{}, fmt.Errorf("error creating enterprise: %w", err)
}
- return param, nil
+ ret, err := s.GetEnterpriseByID(ctx, newEnterprise.ID.String())
+ if err != nil {
+ return params.Enterprise{}, fmt.Errorf("error creating enterprise: %w", err)
+ }
+
+ return ret, nil
}
-func (s *sqlDatabase) GetEnterprise(ctx context.Context, name string) (params.Enterprise, error) {
- enterprise, err := s.getEnterprise(ctx, name)
+func (s *sqlDatabase) GetEnterprise(ctx context.Context, name, endpointName string) (params.Enterprise, error) {
+ enterprise, err := s.getEnterprise(ctx, name, endpointName)
if err != nil {
- return params.Enterprise{}, errors.Wrap(err, "fetching enterprise")
+ return params.Enterprise{}, fmt.Errorf("error fetching enterprise: %w", err)
}
- param, err := s.sqlToCommonEnterprise(enterprise)
+ param, err := s.sqlToCommonEnterprise(enterprise, true)
if err != nil {
- return params.Enterprise{}, errors.Wrap(err, "fetching enterprise")
+ return params.Enterprise{}, fmt.Errorf("error fetching enterprise: %w", err)
}
return param, nil
}
func (s *sqlDatabase) GetEnterpriseByID(ctx context.Context, enterpriseID string) (params.Enterprise, error) {
- enterprise, err := s.getEnterpriseByID(ctx, enterpriseID, "Pools")
+ preloadList := []string{
+ "Pools",
+ "Credentials",
+ "Endpoint",
+ "Credentials.Endpoint",
+ "Events",
+ }
+ enterprise, err := s.getEnterpriseByID(ctx, s.conn, enterpriseID, preloadList...)
if err != nil {
- return params.Enterprise{}, errors.Wrap(err, "fetching enterprise")
+ return params.Enterprise{}, fmt.Errorf("error fetching enterprise: %w", err)
}
- param, err := s.sqlToCommonEnterprise(enterprise)
+ param, err := s.sqlToCommonEnterprise(enterprise, true)
if err != nil {
- return params.Enterprise{}, errors.Wrap(err, "fetching enterprise")
+ return params.Enterprise{}, fmt.Errorf("error fetching enterprise: %w", err)
}
return param, nil
}
-func (s *sqlDatabase) ListEnterprises(ctx context.Context) ([]params.Enterprise, error) {
+func (s *sqlDatabase) ListEnterprises(_ context.Context, filter params.EnterpriseFilter) ([]params.Enterprise, error) {
var enterprises []Enterprise
- q := s.conn.Find(&enterprises)
+ q := s.conn.
+ Preload("Credentials").
+ Preload("Credentials.Endpoint").
+ Preload("Endpoint")
+ if filter.Name != "" {
+ q = q.Where("name = ?", filter.Name)
+ }
+ if filter.Endpoint != "" {
+ q = q.Where("endpoint_name = ?", filter.Endpoint)
+ }
+ q = q.Find(&enterprises)
if q.Error != nil {
- return []params.Enterprise{}, errors.Wrap(q.Error, "fetching enterprises")
+ return []params.Enterprise{}, fmt.Errorf("error fetching enterprises: %w", q.Error)
}
ret := make([]params.Enterprise, len(enterprises))
for idx, val := range enterprises {
var err error
- ret[idx], err = s.sqlToCommonEnterprise(val)
+ ret[idx], err = s.sqlToCommonEnterprise(val, true)
if err != nil {
- return nil, errors.Wrap(err, "fetching enterprises")
+ return nil, fmt.Errorf("error fetching enterprises: %w", err)
}
}
@@ -86,201 +142,122 @@ func (s *sqlDatabase) ListEnterprises(ctx context.Context) ([]params.Enterprise,
}
func (s *sqlDatabase) DeleteEnterprise(ctx context.Context, enterpriseID string) error {
- enterprise, err := s.getEnterpriseByID(ctx, enterpriseID)
+ enterprise, err := s.getEnterpriseByID(ctx, s.conn, enterpriseID, "Endpoint", "Credentials", "Credentials.Endpoint")
if err != nil {
- return errors.Wrap(err, "fetching enterprise")
+ return fmt.Errorf("error fetching enterprise: %w", err)
}
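+	// Capture the enterprise by value: after the unscoped delete below it can
+	// no longer be fetched, yet the delete notification still needs its details.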
+ defer func(ent Enterprise) {
+ if err == nil {
+ asParams, innerErr := s.sqlToCommonEnterprise(ent, true)
+ if innerErr == nil {
+ s.sendNotify(common.EnterpriseEntityType, common.DeleteOperation, asParams)
+ } else {
+ slog.With(slog.Any("error", innerErr)).ErrorContext(ctx, "error sending delete notification", "enterprise", enterpriseID)
+ }
+ }
+ }(enterprise)
+
q := s.conn.Unscoped().Delete(&enterprise)
if q.Error != nil && !errors.Is(q.Error, gorm.ErrRecordNotFound) {
- return errors.Wrap(q.Error, "deleting enterprise")
+ return fmt.Errorf("error deleting enterprise: %w", q.Error)
}
return nil
}
-func (s *sqlDatabase) UpdateEnterprise(ctx context.Context, enterpriseID string, param params.UpdateEntityParams) (params.Enterprise, error) {
- enterprise, err := s.getEnterpriseByID(ctx, enterpriseID)
- if err != nil {
- return params.Enterprise{}, errors.Wrap(err, "fetching enterprise")
- }
-
- if param.CredentialsName != "" {
- enterprise.CredentialsName = param.CredentialsName
- }
-
- if param.WebhookSecret != "" {
- secret, err := util.Aes256EncodeString(param.WebhookSecret, s.cfg.Passphrase)
- if err != nil {
- return params.Enterprise{}, errors.Wrap(err, "encoding secret")
+func (s *sqlDatabase) UpdateEnterprise(ctx context.Context, enterpriseID string, param params.UpdateEntityParams) (newParams params.Enterprise, err error) {
+ defer func() {
+ if err == nil {
+ s.sendNotify(common.EnterpriseEntityType, common.UpdateOperation, newParams)
+ }
+ }()
+ var enterprise Enterprise
+ var creds GithubCredentials
+ err = s.conn.Transaction(func(tx *gorm.DB) error {
+ var err error
+ enterprise, err = s.getEnterpriseByID(ctx, tx, enterpriseID)
+ if err != nil {
+ return fmt.Errorf("error fetching enterprise: %w", err)
}
- enterprise.WebhookSecret = secret
- }
- q := s.conn.Save(&enterprise)
- if q.Error != nil {
- return params.Enterprise{}, errors.Wrap(q.Error, "saving enterprise")
- }
+ if enterprise.EndpointName == nil {
+ return fmt.Errorf("error enterprise has no endpoint: %w", runnerErrors.ErrUnprocessable)
+ }
- newParams, err := s.sqlToCommonEnterprise(enterprise)
+ if param.CredentialsName != "" {
+ creds, err = s.getGithubCredentialsByName(ctx, tx, param.CredentialsName, false)
+ if err != nil {
+ return fmt.Errorf("error fetching credentials: %w", err)
+ }
+ if creds.EndpointName == nil {
+ return fmt.Errorf("error credentials have no endpoint: %w", runnerErrors.ErrUnprocessable)
+ }
+
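+			// New credentials must live on the same forge endpoint as the
+			// enterprise; a mismatch is rejected as a bad request.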
+ if *creds.EndpointName != *enterprise.EndpointName {
+ return fmt.Errorf("error endpoint mismatch: %w", runnerErrors.ErrBadRequest)
+ }
+ enterprise.CredentialsID = &creds.ID
+ }
+ if param.WebhookSecret != "" {
+ secret, err := util.Seal([]byte(param.WebhookSecret), []byte(s.cfg.Passphrase))
+ if err != nil {
+ return fmt.Errorf("error encoding secret: %w", err)
+ }
+ enterprise.WebhookSecret = secret
+ }
+
+ if param.PoolBalancerType != "" {
+ enterprise.PoolBalancerType = param.PoolBalancerType
+ }
+
+ q := tx.Save(&enterprise)
+ if q.Error != nil {
+ return fmt.Errorf("error saving enterprise: %w", q.Error)
+ }
+
+ return nil
+ })
if err != nil {
- return params.Enterprise{}, errors.Wrap(err, "updating enterprise")
+ return params.Enterprise{}, fmt.Errorf("error updating enterprise: %w", err)
+ }
+
+ enterprise, err = s.getEnterpriseByID(ctx, s.conn, enterpriseID, "Endpoint", "Credentials", "Credentials.Endpoint")
+ if err != nil {
+ return params.Enterprise{}, fmt.Errorf("error updating enterprise: %w", err)
+ }
+ newParams, err = s.sqlToCommonEnterprise(enterprise, true)
+ if err != nil {
+ return params.Enterprise{}, fmt.Errorf("error updating enterprise: %w", err)
}
return newParams, nil
}
-func (s *sqlDatabase) CreateEnterprisePool(ctx context.Context, enterpriseID string, param params.CreatePoolParams) (params.Pool, error) {
- if len(param.Tags) == 0 {
- return params.Pool{}, runnerErrors.NewBadRequestError("no tags specified")
- }
-
- enterprise, err := s.getEnterpriseByID(ctx, enterpriseID)
- if err != nil {
- return params.Pool{}, errors.Wrap(err, "fetching enterprise")
- }
-
- newPool := Pool{
- ProviderName: param.ProviderName,
- MaxRunners: param.MaxRunners,
- MinIdleRunners: param.MinIdleRunners,
- RunnerPrefix: param.GetRunnerPrefix(),
- Image: param.Image,
- Flavor: param.Flavor,
- OSType: param.OSType,
- OSArch: param.OSArch,
- EnterpriseID: &enterprise.ID,
- Enabled: param.Enabled,
- RunnerBootstrapTimeout: param.RunnerBootstrapTimeout,
- }
-
- if len(param.ExtraSpecs) > 0 {
- newPool.ExtraSpecs = datatypes.JSON(param.ExtraSpecs)
- }
-
- _, err = s.getEnterprisePoolByUniqueFields(ctx, enterpriseID, newPool.ProviderName, newPool.Image, newPool.Flavor)
- if err != nil {
- if !errors.Is(err, runnerErrors.ErrNotFound) {
- return params.Pool{}, errors.Wrap(err, "creating pool")
- }
- } else {
- return params.Pool{}, runnerErrors.NewConflictError("pool with the same image and flavor already exists on this provider")
- }
-
- tags := []Tag{}
- for _, val := range param.Tags {
- t, err := s.getOrCreateTag(val)
- if err != nil {
- return params.Pool{}, errors.Wrap(err, "fetching tag")
- }
- tags = append(tags, t)
- }
-
- q := s.conn.Create(&newPool)
- if q.Error != nil {
- return params.Pool{}, errors.Wrap(q.Error, "adding pool")
- }
-
- for _, tt := range tags {
- if err := s.conn.Model(&newPool).Association("Tags").Append(&tt); err != nil {
- return params.Pool{}, errors.Wrap(err, "saving tag")
- }
- }
-
- pool, err := s.getPoolByID(ctx, newPool.ID.String(), "Tags", "Instances", "Enterprise", "Organization", "Repository")
- if err != nil {
- return params.Pool{}, errors.Wrap(err, "fetching pool")
- }
-
- return s.sqlToCommonPool(pool), nil
-}
-
-func (s *sqlDatabase) GetEnterprisePool(ctx context.Context, enterpriseID, poolID string) (params.Pool, error) {
- pool, err := s.getEntityPool(ctx, params.EnterprisePool, enterpriseID, poolID, "Tags", "Instances")
- if err != nil {
- return params.Pool{}, errors.Wrap(err, "fetching pool")
- }
- return s.sqlToCommonPool(pool), nil
-}
-
-func (s *sqlDatabase) DeleteEnterprisePool(ctx context.Context, enterpriseID, poolID string) error {
- pool, err := s.getEntityPool(ctx, params.EnterprisePool, enterpriseID, poolID)
- if err != nil {
- return errors.Wrap(err, "looking up enterprise pool")
- }
- q := s.conn.Unscoped().Delete(&pool)
- if q.Error != nil && !errors.Is(q.Error, gorm.ErrRecordNotFound) {
- return errors.Wrap(q.Error, "deleting pool")
- }
- return nil
-}
-
-func (s *sqlDatabase) UpdateEnterprisePool(ctx context.Context, enterpriseID, poolID string, param params.UpdatePoolParams) (params.Pool, error) {
- pool, err := s.getEntityPool(ctx, params.EnterprisePool, enterpriseID, poolID, "Tags", "Instances", "Enterprise", "Organization", "Repository")
- if err != nil {
- return params.Pool{}, errors.Wrap(err, "fetching pool")
- }
-
- return s.updatePool(pool, param)
-}
-
-func (s *sqlDatabase) FindEnterprisePoolByTags(ctx context.Context, enterpriseID string, tags []string) (params.Pool, error) {
- pool, err := s.findPoolByTags(enterpriseID, params.EnterprisePool, tags)
- if err != nil {
- return params.Pool{}, errors.Wrap(err, "fetching pool")
- }
- return pool[0], nil
-}
-
-func (s *sqlDatabase) ListEnterprisePools(ctx context.Context, enterpriseID string) ([]params.Pool, error) {
- pools, err := s.listEntityPools(ctx, params.EnterprisePool, enterpriseID, "Tags", "Instances")
- if err != nil {
- return nil, errors.Wrap(err, "fetching pools")
- }
-
- ret := make([]params.Pool, len(pools))
- for idx, pool := range pools {
- ret[idx] = s.sqlToCommonPool(pool)
- }
-
- return ret, nil
-}
-
-func (s *sqlDatabase) ListEnterpriseInstances(ctx context.Context, enterpriseID string) ([]params.Instance, error) {
- pools, err := s.listEntityPools(ctx, params.EnterprisePool, enterpriseID, "Instances", "Tags")
- if err != nil {
- return nil, errors.Wrap(err, "fetching enterprise")
- }
- ret := []params.Instance{}
- for _, pool := range pools {
- for _, instance := range pool.Instances {
- ret = append(ret, s.sqlToParamsInstance(instance))
- }
- }
- return ret, nil
-}
-
-func (s *sqlDatabase) getEnterprise(ctx context.Context, name string) (Enterprise, error) {
+func (s *sqlDatabase) getEnterprise(_ context.Context, name, endpointName string) (Enterprise, error) {
var enterprise Enterprise
- q := s.conn.Where("name = ? COLLATE NOCASE", name)
- q = q.First(&enterprise)
+ q := s.conn.Where("name = ? COLLATE NOCASE and endpoint_name = ? COLLATE NOCASE", name, endpointName).
+ Preload("Credentials").
+ Preload("Credentials.Endpoint").
+ Preload("Endpoint").
+ First(&enterprise)
if q.Error != nil {
if errors.Is(q.Error, gorm.ErrRecordNotFound) {
return Enterprise{}, runnerErrors.ErrNotFound
}
- return Enterprise{}, errors.Wrap(q.Error, "fetching enterprise from database")
+ return Enterprise{}, fmt.Errorf("error fetching enterprise from database: %w", q.Error)
}
return enterprise, nil
}
-func (s *sqlDatabase) getEnterpriseByID(ctx context.Context, id string, preload ...string) (Enterprise, error) {
+func (s *sqlDatabase) getEnterpriseByID(_ context.Context, tx *gorm.DB, id string, preload ...string) (Enterprise, error) {
u, err := uuid.Parse(id)
if err != nil {
- return Enterprise{}, errors.Wrap(runnerErrors.ErrBadRequest, "parsing id")
+ return Enterprise{}, fmt.Errorf("error parsing id: %w", runnerErrors.ErrBadRequest)
}
var enterprise Enterprise
- q := s.conn
+ q := tx
if len(preload) > 0 {
for _, field := range preload {
q = q.Preload(field)
@@ -292,26 +269,7 @@ func (s *sqlDatabase) getEnterpriseByID(ctx context.Context, id string, preload
if errors.Is(q.Error, gorm.ErrRecordNotFound) {
return Enterprise{}, runnerErrors.ErrNotFound
}
- return Enterprise{}, errors.Wrap(q.Error, "fetching enterprise from database")
+ return Enterprise{}, fmt.Errorf("error fetching enterprise from database: %w", q.Error)
}
return enterprise, nil
}
-
-func (s *sqlDatabase) getEnterprisePoolByUniqueFields(ctx context.Context, enterpriseID string, provider, image, flavor string) (Pool, error) {
- enterprise, err := s.getEnterpriseByID(ctx, enterpriseID)
- if err != nil {
- return Pool{}, errors.Wrap(err, "fetching enterprise")
- }
-
- q := s.conn
- var pool []Pool
- err = q.Model(&enterprise).Association("Pools").Find(&pool, "provider_name = ? and image = ? and flavor = ?", provider, image, flavor)
- if err != nil {
- return Pool{}, errors.Wrap(err, "fetching pool")
- }
- if len(pool) == 0 {
- return Pool{}, runnerErrors.ErrNotFound
- }
-
- return pool[0], nil
-}
diff --git a/database/sql/enterprise_test.go b/database/sql/enterprise_test.go
index 91e45898..9192a362 100644
--- a/database/sql/enterprise_test.go
+++ b/database/sql/enterprise_test.go
@@ -22,17 +22,16 @@ import (
"sort"
"testing"
- "github.com/cloudbase/garm/params"
-
- runnerErrors "github.com/cloudbase/garm-provider-common/errors"
- dbCommon "github.com/cloudbase/garm/database/common"
- garmTesting "github.com/cloudbase/garm/internal/testing"
-
"github.com/stretchr/testify/suite"
"gopkg.in/DATA-DOG/go-sqlmock.v1"
"gorm.io/driver/mysql"
"gorm.io/gorm"
"gorm.io/gorm/logger"
+
+ "github.com/cloudbase/garm/auth"
+ dbCommon "github.com/cloudbase/garm/database/common"
+ garmTesting "github.com/cloudbase/garm/internal/testing"
+ "github.com/cloudbase/garm/params"
)
type EnterpriseTestFixtures struct {
@@ -50,6 +49,15 @@ type EnterpriseTestSuite struct {
Store dbCommon.Store
StoreSQLMocked *sqlDatabase
Fixtures *EnterpriseTestFixtures
+
+ adminCtx context.Context
+ adminUserID string
+
+ testCreds params.ForgeCredentials
+ ghesCreds params.ForgeCredentials
+ secondaryTestCreds params.ForgeCredentials
+ githubEndpoint params.ForgeEndpoint
+ ghesEndpoint params.ForgeEndpoint
}
func (s *EnterpriseTestSuite) equalInstancesByName(expected, actual []params.Instance) {
@@ -78,17 +86,29 @@ func (s *EnterpriseTestSuite) SetupTest() {
}
s.Store = db
+ adminCtx := garmTesting.ImpersonateAdminContext(context.Background(), db, s.T())
+ s.adminCtx = adminCtx
+ s.adminUserID = auth.UserID(adminCtx)
+ s.Require().NotEmpty(s.adminUserID)
+
+ s.githubEndpoint = garmTesting.CreateDefaultGithubEndpoint(adminCtx, db, s.T())
+ s.ghesEndpoint = garmTesting.CreateGHESEndpoint(adminCtx, db, s.T())
+ s.testCreds = garmTesting.CreateTestGithubCredentials(adminCtx, "new-creds", db, s.T(), s.githubEndpoint)
+ s.ghesCreds = garmTesting.CreateTestGithubCredentials(adminCtx, "ghes-creds", db, s.T(), s.ghesEndpoint)
+ s.secondaryTestCreds = garmTesting.CreateTestGithubCredentials(adminCtx, "secondary-creds", db, s.T(), s.githubEndpoint)
+
// create some enterprise objects in the database, for testing purposes
enterprises := []params.Enterprise{}
for i := 1; i <= 3; i++ {
enterprise, err := db.CreateEnterprise(
- context.Background(),
+ s.adminCtx,
fmt.Sprintf("test-enterprise-%d", i),
- fmt.Sprintf("test-creds-%d", i),
+ s.testCreds,
fmt.Sprintf("test-webhook-secret-%d", i),
+ params.PoolBalancerTypeRoundRobin,
)
if err != nil {
- s.FailNow(fmt.Sprintf("failed to create database object (test-enterprise-%d)", i))
+ s.FailNow(fmt.Sprintf("failed to create database object (test-enterprise-%d): %q", i, err))
}
enterprises = append(enterprises, enterprise)
@@ -105,7 +125,7 @@ func (s *EnterpriseTestSuite) SetupTest() {
SkipInitializeWithVersion: true,
}
gormConfig := &gorm.Config{}
- if flag.Lookup("test.v").Value.String() == "false" {
+ if flag.Lookup("test.v").Value.String() == falseString {
gormConfig.Logger = logger.Default.LogMode(logger.Silent)
}
gormConn, err := gorm.Open(mysql.New(mysqlConfig), gormConfig)
@@ -124,7 +144,7 @@ func (s *EnterpriseTestSuite) SetupTest() {
Enterprises: enterprises,
CreateEnterpriseParams: params.CreateEnterpriseParams{
Name: "new-test-enterprise",
- CredentialsName: "new-creds",
+ CredentialsName: s.testCreds.Name,
WebhookSecret: "new-webhook-secret",
},
CreatePoolParams: params.CreatePoolParams{
@@ -136,14 +156,14 @@ func (s *EnterpriseTestSuite) SetupTest() {
Flavor: "test-flavor",
OSType: "linux",
OSArch: "amd64",
- Tags: []string{"self-hosted", "arm64", "linux"},
+ Tags: []string{"amd64-linux-runner"},
},
CreateInstanceParams: params.CreateInstanceParams{
Name: "test-instance-name",
OSType: "linux",
},
UpdateRepoParams: params.UpdateEntityParams{
- CredentialsName: "test-update-creds",
+ CredentialsName: s.secondaryTestCreds.Name,
WebhookSecret: "test-update-repo-webhook-secret",
},
UpdatePoolParams: params.UpdatePoolParams{
@@ -160,19 +180,20 @@ func (s *EnterpriseTestSuite) SetupTest() {
func (s *EnterpriseTestSuite) TestCreateEnterprise() {
// call tested function
enterprise, err := s.Store.CreateEnterprise(
- context.Background(),
+ s.adminCtx,
s.Fixtures.CreateEnterpriseParams.Name,
- s.Fixtures.CreateEnterpriseParams.CredentialsName,
- s.Fixtures.CreateEnterpriseParams.WebhookSecret)
+ s.testCreds,
+ s.Fixtures.CreateEnterpriseParams.WebhookSecret,
+ params.PoolBalancerTypeRoundRobin)
// assertions
s.Require().Nil(err)
- storeEnterprise, err := s.Store.GetEnterpriseByID(context.Background(), enterprise.ID)
+ storeEnterprise, err := s.Store.GetEnterpriseByID(s.adminCtx, enterprise.ID)
if err != nil {
s.FailNow(fmt.Sprintf("failed to get enterprise by id: %v", err))
}
s.Require().Equal(storeEnterprise.Name, enterprise.Name)
- s.Require().Equal(storeEnterprise.CredentialsName, enterprise.CredentialsName)
+ s.Require().Equal(storeEnterprise.Credentials.Name, enterprise.Credentials.Name)
s.Require().Equal(storeEnterprise.WebhookSecret, enterprise.WebhookSecret)
}
@@ -183,20 +204,21 @@ func (s *EnterpriseTestSuite) TestCreateEnterpriseInvalidDBPassphrase() {
s.FailNow(fmt.Sprintf("failed to create db connection: %s", err))
}
// make sure we use a 'sqlDatabase' struct with a wrong 'cfg.Passphrase'
- cfg.Passphrase = "wrong-passphrase" // it must have a size different than 32
+	cfg.Passphrase = wrongPassphrase // its length must differ from 32 so encryption fails
sqlDB := &sqlDatabase{
conn: conn,
cfg: cfg,
}
_, err = sqlDB.CreateEnterprise(
- context.Background(),
+ s.adminCtx,
s.Fixtures.CreateEnterpriseParams.Name,
- s.Fixtures.CreateEnterpriseParams.CredentialsName,
- s.Fixtures.CreateEnterpriseParams.WebhookSecret)
+ s.testCreds,
+ s.Fixtures.CreateEnterpriseParams.WebhookSecret,
+ params.PoolBalancerTypeRoundRobin)
s.Require().NotNil(err)
- s.Require().Equal("encoding secret: invalid passphrase length (expected length 32 characters)", err.Error())
+ s.Require().Equal("error encoding secret: invalid passphrase length (expected length 32 characters)", err.Error())
}
func (s *EnterpriseTestSuite) TestCreateEnterpriseDBCreateErr() {
@@ -207,18 +229,19 @@ func (s *EnterpriseTestSuite) TestCreateEnterpriseDBCreateErr() {
s.Fixtures.SQLMock.ExpectRollback()
_, err := s.StoreSQLMocked.CreateEnterprise(
- context.Background(),
+ s.adminCtx,
s.Fixtures.CreateEnterpriseParams.Name,
- s.Fixtures.CreateEnterpriseParams.CredentialsName,
- s.Fixtures.CreateEnterpriseParams.WebhookSecret)
+ s.testCreds,
+ s.Fixtures.CreateEnterpriseParams.WebhookSecret,
+ params.PoolBalancerTypeRoundRobin)
- s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("creating enterprise: creating enterprise mock error", err.Error())
+ s.Require().Equal("error creating enterprise: error creating enterprise: creating enterprise mock error", err.Error())
+ s.assertSQLMockExpectations()
}
func (s *EnterpriseTestSuite) TestGetEnterprise() {
- enterprise, err := s.Store.GetEnterprise(context.Background(), s.Fixtures.Enterprises[0].Name)
+ enterprise, err := s.Store.GetEnterprise(s.adminCtx, s.Fixtures.Enterprises[0].Name, s.Fixtures.Enterprises[0].Endpoint.Name)
s.Require().Nil(err)
s.Require().Equal(s.Fixtures.Enterprises[0].Name, enterprise.Name)
@@ -226,71 +249,121 @@ func (s *EnterpriseTestSuite) TestGetEnterprise() {
}
func (s *EnterpriseTestSuite) TestGetEnterpriseCaseInsensitive() {
- enterprise, err := s.Store.GetEnterprise(context.Background(), "TeSt-eNtErPriSe-1")
+ enterprise, err := s.Store.GetEnterprise(s.adminCtx, "TeSt-eNtErPriSe-1", "github.com")
s.Require().Nil(err)
s.Require().Equal("test-enterprise-1", enterprise.Name)
}
func (s *EnterpriseTestSuite) TestGetEnterpriseNotFound() {
- _, err := s.Store.GetEnterprise(context.Background(), "dummy-name")
+ _, err := s.Store.GetEnterprise(s.adminCtx, "dummy-name", "github.com")
s.Require().NotNil(err)
- s.Require().Equal("fetching enterprise: not found", err.Error())
+ s.Require().Equal("error fetching enterprise: not found", err.Error())
}
func (s *EnterpriseTestSuite) TestGetEnterpriseDBDecryptingErr() {
s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `enterprises` WHERE name = ? COLLATE NOCASE AND `enterprises`.`deleted_at` IS NULL ORDER BY `enterprises`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Enterprises[0].Name).
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `enterprises` WHERE (name = ? COLLATE NOCASE and endpoint_name = ? COLLATE NOCASE) AND `enterprises`.`deleted_at` IS NULL ORDER BY `enterprises`.`id` LIMIT ?")).
+ WithArgs(s.Fixtures.Enterprises[0].Name, s.Fixtures.Enterprises[0].Endpoint.Name, 1).
WillReturnRows(sqlmock.NewRows([]string{"name"}).AddRow(s.Fixtures.Enterprises[0].Name))
- _, err := s.StoreSQLMocked.GetEnterprise(context.Background(), s.Fixtures.Enterprises[0].Name)
+ _, err := s.StoreSQLMocked.GetEnterprise(s.adminCtx, s.Fixtures.Enterprises[0].Name, s.Fixtures.Enterprises[0].Endpoint.Name)
- s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("fetching enterprise: missing secret", err.Error())
+ s.Require().Equal("error fetching enterprise: missing secret", err.Error())
+ s.assertSQLMockExpectations()
}
func (s *EnterpriseTestSuite) TestListEnterprises() {
- enterprises, err := s.Store.ListEnterprises(context.Background())
+ enterprises, err := s.Store.ListEnterprises(s.adminCtx, params.EnterpriseFilter{})
s.Require().Nil(err)
garmTesting.EqualDBEntityByName(s.T(), s.Fixtures.Enterprises, enterprises)
}
+func (s *EnterpriseTestSuite) TestListEnterprisesWithFilter() {
+ enterprise, err := s.Store.CreateEnterprise(
+ s.adminCtx,
+ "test-enterprise",
+ s.ghesCreds,
+ "test-secret",
+ params.PoolBalancerTypeRoundRobin,
+ )
+ s.Require().NoError(err)
+
+ enterprise2, err := s.Store.CreateEnterprise(
+ s.adminCtx,
+ "test-enterprise",
+ s.testCreds,
+ "test-secret",
+ params.PoolBalancerTypeRoundRobin,
+ )
+ s.Require().NoError(err)
+
+ enterprise3, err := s.Store.CreateEnterprise(
+ s.adminCtx,
+ "test-enterprise2",
+ s.testCreds,
+ "test-secret",
+ params.PoolBalancerTypeRoundRobin,
+ )
+ s.Require().NoError(err)
+ enterprises, err := s.Store.ListEnterprises(s.adminCtx, params.EnterpriseFilter{
+ Name: "test-enterprise",
+ })
+
+ s.Require().Nil(err)
+ garmTesting.EqualDBEntityByName(s.T(), []params.Enterprise{enterprise, enterprise2}, enterprises)
+
+ enterprises, err = s.Store.ListEnterprises(s.adminCtx, params.EnterpriseFilter{
+ Name: "test-enterprise",
+ Endpoint: s.ghesEndpoint.Name,
+ })
+
+ s.Require().Nil(err)
+ garmTesting.EqualDBEntityByName(s.T(), []params.Enterprise{enterprise}, enterprises)
+
+ enterprises, err = s.Store.ListEnterprises(s.adminCtx, params.EnterpriseFilter{
+ Name: "test-enterprise2",
+ })
+
+ s.Require().Nil(err)
+ garmTesting.EqualDBEntityByName(s.T(), []params.Enterprise{enterprise3}, enterprises)
+}
+
func (s *EnterpriseTestSuite) TestListEnterprisesDBFetchErr() {
s.Fixtures.SQLMock.
ExpectQuery(regexp.QuoteMeta("SELECT * FROM `enterprises` WHERE `enterprises`.`deleted_at` IS NULL")).
WillReturnError(fmt.Errorf("fetching user from database mock error"))
- _, err := s.StoreSQLMocked.ListEnterprises(context.Background())
+ _, err := s.StoreSQLMocked.ListEnterprises(s.adminCtx, params.EnterpriseFilter{})
s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("fetching enterprises: fetching user from database mock error", err.Error())
+ s.Require().Equal("error fetching enterprises: fetching user from database mock error", err.Error())
}
func (s *EnterpriseTestSuite) TestDeleteEnterprise() {
- err := s.Store.DeleteEnterprise(context.Background(), s.Fixtures.Enterprises[0].ID)
+ err := s.Store.DeleteEnterprise(s.adminCtx, s.Fixtures.Enterprises[0].ID)
s.Require().Nil(err)
- _, err = s.Store.GetEnterpriseByID(context.Background(), s.Fixtures.Enterprises[0].ID)
+ _, err = s.Store.GetEnterpriseByID(s.adminCtx, s.Fixtures.Enterprises[0].ID)
s.Require().NotNil(err)
- s.Require().Equal("fetching enterprise: not found", err.Error())
+ s.Require().Equal("error fetching enterprise: not found", err.Error())
}
func (s *EnterpriseTestSuite) TestDeleteEnterpriseInvalidEnterpriseID() {
- err := s.Store.DeleteEnterprise(context.Background(), "dummy-enterprise-id")
+ err := s.Store.DeleteEnterprise(s.adminCtx, "dummy-enterprise-id")
s.Require().NotNil(err)
- s.Require().Equal("fetching enterprise: parsing id: invalid request", err.Error())
+ s.Require().Equal("error fetching enterprise: error parsing id: invalid request", err.Error())
}
func (s *EnterpriseTestSuite) TestDeleteEnterpriseDBDeleteErr() {
s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `enterprises` WHERE id = ? AND `enterprises`.`deleted_at` IS NULL ORDER BY `enterprises`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Enterprises[0].ID).
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `enterprises` WHERE id = ? AND `enterprises`.`deleted_at` IS NULL ORDER BY `enterprises`.`id` LIMIT ?")).
+ WithArgs(s.Fixtures.Enterprises[0].ID, 1).
WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Enterprises[0].ID))
s.Fixtures.SQLMock.ExpectBegin()
s.Fixtures.SQLMock.
@@ -299,114 +372,153 @@ func (s *EnterpriseTestSuite) TestDeleteEnterpriseDBDeleteErr() {
WillReturnError(fmt.Errorf("mocked delete enterprise error"))
s.Fixtures.SQLMock.ExpectRollback()
- err := s.StoreSQLMocked.DeleteEnterprise(context.Background(), s.Fixtures.Enterprises[0].ID)
+ err := s.StoreSQLMocked.DeleteEnterprise(s.adminCtx, s.Fixtures.Enterprises[0].ID)
- s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("deleting enterprise: mocked delete enterprise error", err.Error())
+ s.Require().Equal("error deleting enterprise: mocked delete enterprise error", err.Error())
+ s.assertSQLMockExpectations()
}
func (s *EnterpriseTestSuite) TestUpdateEnterprise() {
- enterprise, err := s.Store.UpdateEnterprise(context.Background(), s.Fixtures.Enterprises[0].ID, s.Fixtures.UpdateRepoParams)
+ enterprise, err := s.Store.UpdateEnterprise(s.adminCtx, s.Fixtures.Enterprises[0].ID, s.Fixtures.UpdateRepoParams)
s.Require().Nil(err)
- s.Require().Equal(s.Fixtures.UpdateRepoParams.CredentialsName, enterprise.CredentialsName)
+ s.Require().Equal(s.Fixtures.UpdateRepoParams.CredentialsName, enterprise.Credentials.Name)
s.Require().Equal(s.Fixtures.UpdateRepoParams.WebhookSecret, enterprise.WebhookSecret)
}
func (s *EnterpriseTestSuite) TestUpdateEnterpriseInvalidEnterpriseID() {
- _, err := s.Store.UpdateEnterprise(context.Background(), "dummy-enterprise-id", s.Fixtures.UpdateRepoParams)
+ _, err := s.Store.UpdateEnterprise(s.adminCtx, "dummy-enterprise-id", s.Fixtures.UpdateRepoParams)
s.Require().NotNil(err)
- s.Require().Equal("fetching enterprise: parsing id: invalid request", err.Error())
+ s.Require().Equal("error updating enterprise: error fetching enterprise: error parsing id: invalid request", err.Error())
}
func (s *EnterpriseTestSuite) TestUpdateEnterpriseDBEncryptErr() {
- s.StoreSQLMocked.cfg.Passphrase = "wrong-passphrase"
-
+ s.StoreSQLMocked.cfg.Passphrase = wrongPassphrase
+ s.Fixtures.SQLMock.ExpectBegin()
s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `enterprises` WHERE id = ? AND `enterprises`.`deleted_at` IS NULL ORDER BY `enterprises`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Enterprises[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Enterprises[0].ID))
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `enterprises` WHERE id = ? AND `enterprises`.`deleted_at` IS NULL ORDER BY `enterprises`.`id` LIMIT ?")).
+ WithArgs(s.Fixtures.Enterprises[0].ID, 1).
+ WillReturnRows(sqlmock.NewRows([]string{"id", "endpoint_name"}).
+ AddRow(s.Fixtures.Enterprises[0].ID, s.Fixtures.Enterprises[0].Endpoint.Name))
+ s.Fixtures.SQLMock.
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `github_credentials` WHERE user_id = ? AND name = ? AND `github_credentials`.`deleted_at` IS NULL ORDER BY `github_credentials`.`id` LIMIT ?")).
+ WithArgs(s.adminUserID, s.secondaryTestCreds.Name, 1).
+ WillReturnRows(sqlmock.NewRows([]string{"id", "endpoint_name"}).
+ AddRow(s.secondaryTestCreds.ID, s.secondaryTestCreds.Endpoint.Name))
+ s.Fixtures.SQLMock.ExpectQuery(regexp.QuoteMeta("SELECT * FROM `github_endpoints` WHERE `github_endpoints`.`name` = ? AND `github_endpoints`.`deleted_at` IS NULL")).
+ WithArgs(s.testCreds.Endpoint.Name).
+ WillReturnRows(sqlmock.NewRows([]string{"name"}).
+ AddRow(s.secondaryTestCreds.Endpoint.Name))
+ s.Fixtures.SQLMock.ExpectRollback()
- _, err := s.StoreSQLMocked.UpdateEnterprise(context.Background(), s.Fixtures.Enterprises[0].ID, s.Fixtures.UpdateRepoParams)
+ _, err := s.StoreSQLMocked.UpdateEnterprise(s.adminCtx, s.Fixtures.Enterprises[0].ID, s.Fixtures.UpdateRepoParams)
- s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("encoding secret: invalid passphrase length (expected length 32 characters)", err.Error())
+ s.Require().Equal("error updating enterprise: error encoding secret: invalid passphrase length (expected length 32 characters)", err.Error())
+ s.assertSQLMockExpectations()
}
func (s *EnterpriseTestSuite) TestUpdateEnterpriseDBSaveErr() {
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `enterprises` WHERE id = ? AND `enterprises`.`deleted_at` IS NULL ORDER BY `enterprises`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Enterprises[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Enterprises[0].ID))
s.Fixtures.SQLMock.ExpectBegin()
+ s.Fixtures.SQLMock.
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `enterprises` WHERE id = ? AND `enterprises`.`deleted_at` IS NULL ORDER BY `enterprises`.`id` LIMIT ?")).
+ WithArgs(s.Fixtures.Enterprises[0].ID, 1).
+ WillReturnRows(sqlmock.NewRows([]string{"id", "endpoint_name"}).
+ AddRow(s.Fixtures.Enterprises[0].ID, s.Fixtures.Enterprises[0].Endpoint.Name))
+ s.Fixtures.SQLMock.
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `github_credentials` WHERE user_id = ? AND name = ? AND `github_credentials`.`deleted_at` IS NULL ORDER BY `github_credentials`.`id` LIMIT ?")).
+ WithArgs(s.adminUserID, s.secondaryTestCreds.Name, 1).
+ WillReturnRows(sqlmock.NewRows([]string{"id", "endpoint_name"}).
+ AddRow(s.secondaryTestCreds.ID, s.secondaryTestCreds.Endpoint.Name))
+ s.Fixtures.SQLMock.ExpectQuery(regexp.QuoteMeta("SELECT * FROM `github_endpoints` WHERE `github_endpoints`.`name` = ? AND `github_endpoints`.`deleted_at` IS NULL")).
+ WithArgs(s.testCreds.Endpoint.Name).
+ WillReturnRows(sqlmock.NewRows([]string{"name"}).
+ AddRow(s.secondaryTestCreds.Endpoint.Name))
s.Fixtures.SQLMock.
ExpectExec(("UPDATE `enterprises` SET")).
WillReturnError(fmt.Errorf("saving enterprise mock error"))
s.Fixtures.SQLMock.ExpectRollback()
- _, err := s.StoreSQLMocked.UpdateEnterprise(context.Background(), s.Fixtures.Enterprises[0].ID, s.Fixtures.UpdateRepoParams)
+ _, err := s.StoreSQLMocked.UpdateEnterprise(s.adminCtx, s.Fixtures.Enterprises[0].ID, s.Fixtures.UpdateRepoParams)
- s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("saving enterprise: saving enterprise mock error", err.Error())
+ s.Require().Equal("error updating enterprise: error saving enterprise: saving enterprise mock error", err.Error())
+ s.assertSQLMockExpectations()
}
func (s *EnterpriseTestSuite) TestUpdateEnterpriseDBDecryptingErr() {
- s.StoreSQLMocked.cfg.Passphrase = "wrong-passphrase"
- s.Fixtures.UpdateRepoParams.WebhookSecret = "webhook-secret"
+ s.StoreSQLMocked.cfg.Passphrase = wrongPassphrase
+ s.Fixtures.UpdateRepoParams.WebhookSecret = webhookSecret
+ s.Fixtures.SQLMock.ExpectBegin()
s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `enterprises` WHERE id = ? AND `enterprises`.`deleted_at` IS NULL ORDER BY `enterprises`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Enterprises[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Enterprises[0].ID))
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `enterprises` WHERE id = ? AND `enterprises`.`deleted_at` IS NULL ORDER BY `enterprises`.`id` LIMIT ?")).
+ WithArgs(s.Fixtures.Enterprises[0].ID, 1).
+ WillReturnRows(sqlmock.NewRows([]string{"id", "endpoint_name"}).
+ AddRow(s.Fixtures.Enterprises[0].ID, s.Fixtures.Enterprises[0].Endpoint.Name))
+ s.Fixtures.SQLMock.
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `github_credentials` WHERE user_id = ? AND name = ? AND `github_credentials`.`deleted_at` IS NULL ORDER BY `github_credentials`.`id` LIMIT ?")).
+ WithArgs(s.adminUserID, s.secondaryTestCreds.Name, 1).
+ WillReturnRows(sqlmock.NewRows([]string{"id", "endpoint_name"}).
+ AddRow(s.secondaryTestCreds.ID, s.secondaryTestCreds.Endpoint.Name))
+ s.Fixtures.SQLMock.ExpectQuery(regexp.QuoteMeta("SELECT * FROM `github_endpoints` WHERE `github_endpoints`.`name` = ? AND `github_endpoints`.`deleted_at` IS NULL")).
+ WithArgs(s.testCreds.Endpoint.Name).
+ WillReturnRows(sqlmock.NewRows([]string{"name"}).
+ AddRow(s.secondaryTestCreds.Endpoint.Name))
+ s.Fixtures.SQLMock.ExpectRollback()
- _, err := s.StoreSQLMocked.UpdateEnterprise(context.Background(), s.Fixtures.Enterprises[0].ID, s.Fixtures.UpdateRepoParams)
+ _, err := s.StoreSQLMocked.UpdateEnterprise(s.adminCtx, s.Fixtures.Enterprises[0].ID, s.Fixtures.UpdateRepoParams)
- s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("encoding secret: invalid passphrase length (expected length 32 characters)", err.Error())
+ s.Require().Equal("error updating enterprise: error encoding secret: invalid passphrase length (expected length 32 characters)", err.Error())
+ s.assertSQLMockExpectations()
}
func (s *EnterpriseTestSuite) TestGetEnterpriseByID() {
- enterprise, err := s.Store.GetEnterpriseByID(context.Background(), s.Fixtures.Enterprises[0].ID)
+ enterprise, err := s.Store.GetEnterpriseByID(s.adminCtx, s.Fixtures.Enterprises[0].ID)
s.Require().Nil(err)
s.Require().Equal(s.Fixtures.Enterprises[0].ID, enterprise.ID)
}
func (s *EnterpriseTestSuite) TestGetEnterpriseByIDInvalidEnterpriseID() {
- _, err := s.Store.GetEnterpriseByID(context.Background(), "dummy-enterprise-id")
+ _, err := s.Store.GetEnterpriseByID(s.adminCtx, "dummy-enterprise-id")
s.Require().NotNil(err)
- s.Require().Equal("fetching enterprise: parsing id: invalid request", err.Error())
+ s.Require().Equal("error fetching enterprise: error parsing id: invalid request", err.Error())
}
func (s *EnterpriseTestSuite) TestGetEnterpriseByIDDBDecryptingErr() {
s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `enterprises` WHERE id = ? AND `enterprises`.`deleted_at` IS NULL ORDER BY `enterprises`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Enterprises[0].ID).
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `enterprises` WHERE id = ? AND `enterprises`.`deleted_at` IS NULL ORDER BY `enterprises`.`id` LIMIT ?")).
+ WithArgs(s.Fixtures.Enterprises[0].ID, 1).
WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Enterprises[0].ID))
+ s.Fixtures.SQLMock.
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `enterprise_events` WHERE `enterprise_events`.`enterprise_id` = ? AND `enterprise_events`.`deleted_at` IS NULL")).
+ WithArgs(s.Fixtures.Enterprises[0].ID).
+ WillReturnRows(sqlmock.NewRows([]string{"enterprise_id"}).AddRow(s.Fixtures.Enterprises[0].ID))
s.Fixtures.SQLMock.
ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE `pools`.`enterprise_id` = ? AND `pools`.`deleted_at` IS NULL")).
WithArgs(s.Fixtures.Enterprises[0].ID).
WillReturnRows(sqlmock.NewRows([]string{"enterprise_id"}).AddRow(s.Fixtures.Enterprises[0].ID))
- _, err := s.StoreSQLMocked.GetEnterpriseByID(context.Background(), s.Fixtures.Enterprises[0].ID)
+ _, err := s.StoreSQLMocked.GetEnterpriseByID(s.adminCtx, s.Fixtures.Enterprises[0].ID)
s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("fetching enterprise: missing secret", err.Error())
+ s.Require().Equal("error fetching enterprise: missing secret", err.Error())
}
func (s *EnterpriseTestSuite) TestCreateEnterprisePool() {
- pool, err := s.Store.CreateEnterprisePool(context.Background(), s.Fixtures.Enterprises[0].ID, s.Fixtures.CreatePoolParams)
+ entity, err := s.Fixtures.Enterprises[0].GetEntity()
+ s.Require().Nil(err)
+ pool, err := s.Store.CreateEntityPool(s.adminCtx, entity, s.Fixtures.CreatePoolParams)
s.Require().Nil(err)
- enterprise, err := s.Store.GetEnterpriseByID(context.Background(), s.Fixtures.Enterprises[0].ID)
+ enterprise, err := s.Store.GetEnterpriseByID(s.adminCtx, s.Fixtures.Enterprises[0].ID)
if err != nil {
s.FailNow(fmt.Sprintf("cannot get enterprise by ID: %v", err))
}
@@ -419,216 +531,119 @@ func (s *EnterpriseTestSuite) TestCreateEnterprisePool() {
func (s *EnterpriseTestSuite) TestCreateEnterprisePoolMissingTags() {
s.Fixtures.CreatePoolParams.Tags = []string{}
-
- _, err := s.Store.CreateEnterprisePool(context.Background(), s.Fixtures.Enterprises[0].ID, s.Fixtures.CreatePoolParams)
+ entity, err := s.Fixtures.Enterprises[0].GetEntity()
+ s.Require().Nil(err)
+ _, err = s.Store.CreateEntityPool(s.adminCtx, entity, s.Fixtures.CreatePoolParams)
s.Require().NotNil(err)
s.Require().Equal("no tags specified", err.Error())
}
func (s *EnterpriseTestSuite) TestCreateEnterprisePoolInvalidEnterpriseID() {
- _, err := s.Store.CreateEnterprisePool(context.Background(), "dummy-enterprise-id", s.Fixtures.CreatePoolParams)
+ entity := params.ForgeEntity{
+ ID: "dummy-enterprise-id",
+ EntityType: params.ForgeEntityTypeEnterprise,
+ }
+ _, err := s.Store.CreateEntityPool(s.adminCtx, entity, s.Fixtures.CreatePoolParams)
s.Require().NotNil(err)
- s.Require().Equal("fetching enterprise: parsing id: invalid request", err.Error())
-}
-
-func (s *EnterpriseTestSuite) TestCreateEnterprisePoolDBCreateErr() {
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `enterprises` WHERE id = ? AND `enterprises`.`deleted_at` IS NULL ORDER BY `enterprises`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Enterprises[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Enterprises[0].ID))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `enterprises` WHERE id = ? AND `enterprises`.`deleted_at` IS NULL ORDER BY `enterprises`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Enterprises[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Enterprises[0].ID))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE `pools`.`enterprise_id` = ? AND (provider_name = ? and image = ? and flavor = ?) AND `pools`.`deleted_at` IS NULL")).
- WillReturnError(fmt.Errorf("mocked creating pool error"))
-
- _, err := s.StoreSQLMocked.CreateEnterprisePool(context.Background(), s.Fixtures.Enterprises[0].ID, s.Fixtures.CreatePoolParams)
-
- s.assertSQLMockExpectations()
- s.Require().NotNil(err)
- s.Require().Equal("creating pool: fetching pool: mocked creating pool error", err.Error())
-}
-
-func (s *EnterpriseTestSuite) TestCreateEnterpriseDBPoolAlreadyExistErr() {
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `enterprises` WHERE id = ? AND `enterprises`.`deleted_at` IS NULL ORDER BY `enterprises`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Enterprises[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Enterprises[0].ID))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `enterprises` WHERE id = ? AND `enterprises`.`deleted_at` IS NULL ORDER BY `enterprises`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Enterprises[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Enterprises[0].ID))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE `pools`.`enterprise_id` = ? AND (provider_name = ? and image = ? and flavor = ?) AND `pools`.`deleted_at` IS NULL")).
- WithArgs(
- s.Fixtures.Enterprises[0].ID,
- s.Fixtures.CreatePoolParams.ProviderName,
- s.Fixtures.CreatePoolParams.Image,
- s.Fixtures.CreatePoolParams.Flavor).
- WillReturnRows(sqlmock.NewRows([]string{"enterprise_id", "provider_name", "image", "flavor"}).
- AddRow(
- s.Fixtures.Enterprises[0].ID,
- s.Fixtures.CreatePoolParams.ProviderName,
- s.Fixtures.CreatePoolParams.Image,
- s.Fixtures.CreatePoolParams.Flavor))
-
- _, err := s.StoreSQLMocked.CreateEnterprisePool(context.Background(), s.Fixtures.Enterprises[0].ID, s.Fixtures.CreatePoolParams)
-
- s.assertSQLMockExpectations()
- s.Require().NotNil(err)
- s.Require().Equal(runnerErrors.NewConflictError("pool with the same image and flavor already exists on this provider"), err)
+ s.Require().Equal("error parsing id: invalid request", err.Error())
}
func (s *EnterpriseTestSuite) TestCreateEnterprisePoolDBFetchTagErr() {
+ s.Fixtures.SQLMock.ExpectBegin()
s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `enterprises` WHERE id = ? AND `enterprises`.`deleted_at` IS NULL ORDER BY `enterprises`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Enterprises[0].ID).
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `enterprises` WHERE id = ? AND `enterprises`.`deleted_at` IS NULL ORDER BY `enterprises`.`id` LIMIT ?")).
+ WithArgs(s.Fixtures.Enterprises[0].ID, 1).
WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Enterprises[0].ID))
s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `enterprises` WHERE id = ? AND `enterprises`.`deleted_at` IS NULL ORDER BY `enterprises`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Enterprises[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Enterprises[0].ID))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE `pools`.`enterprise_id` = ? AND (provider_name = ? and image = ? and flavor = ?) AND `pools`.`deleted_at` IS NULL")).
- WithArgs(
- s.Fixtures.Enterprises[0].ID,
- s.Fixtures.CreatePoolParams.ProviderName,
- s.Fixtures.CreatePoolParams.Image,
- s.Fixtures.CreatePoolParams.Flavor).
- WillReturnRows(sqlmock.NewRows([]string{"enterprise_id"}))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT 1")).
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? COLLATE NOCASE AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT ?")).
WillReturnError(fmt.Errorf("mocked fetching tag error"))
- _, err := s.StoreSQLMocked.CreateEnterprisePool(context.Background(), s.Fixtures.Enterprises[0].ID, s.Fixtures.CreatePoolParams)
+ entity, err := s.Fixtures.Enterprises[0].GetEntity()
+ s.Require().Nil(err)
+ _, err = s.StoreSQLMocked.CreateEntityPool(s.adminCtx, entity, s.Fixtures.CreatePoolParams)
- s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("fetching tag: fetching tag from database: mocked fetching tag error", err.Error())
+ s.Require().Equal("error creating tag: error fetching tag from database: mocked fetching tag error", err.Error())
+ s.assertSQLMockExpectations()
}

func (s *EnterpriseTestSuite) TestCreateEnterprisePoolDBAddingPoolErr() {
s.Fixtures.CreatePoolParams.Tags = []string{"linux"}
-
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `enterprises` WHERE id = ? AND `enterprises`.`deleted_at` IS NULL ORDER BY `enterprises`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Enterprises[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Enterprises[0].ID))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `enterprises` WHERE id = ? AND `enterprises`.`deleted_at` IS NULL ORDER BY `enterprises`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Enterprises[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Enterprises[0].ID))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE `pools`.`enterprise_id` = ? AND (provider_name = ? and image = ? and flavor = ?) AND `pools`.`deleted_at` IS NULL")).
- WithArgs(
- s.Fixtures.Enterprises[0].ID,
- s.Fixtures.CreatePoolParams.ProviderName,
- s.Fixtures.CreatePoolParams.Image,
- s.Fixtures.CreatePoolParams.Flavor).
- WillReturnRows(sqlmock.NewRows([]string{"enterprise_id"}))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT 1")).
- WillReturnRows(sqlmock.NewRows([]string{"linux"}))
s.Fixtures.SQLMock.ExpectBegin()
+ s.Fixtures.SQLMock.
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `enterprises` WHERE id = ? AND `enterprises`.`deleted_at` IS NULL ORDER BY `enterprises`.`id` LIMIT ?")).
+ WithArgs(s.Fixtures.Enterprises[0].ID, 1).
+ WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Enterprises[0].ID))
+ s.Fixtures.SQLMock.
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? COLLATE NOCASE AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT ?")).
+ WillReturnRows(sqlmock.NewRows([]string{"linux"}))
s.Fixtures.SQLMock.
ExpectExec(regexp.QuoteMeta("INSERT INTO `tags`")).
WillReturnResult(sqlmock.NewResult(1, 1))
- s.Fixtures.SQLMock.ExpectCommit()
- s.Fixtures.SQLMock.ExpectBegin()
s.Fixtures.SQLMock.
ExpectExec(regexp.QuoteMeta("INSERT INTO `pools`")).
WillReturnError(fmt.Errorf("mocked adding pool error"))
s.Fixtures.SQLMock.ExpectRollback()
- _, err := s.StoreSQLMocked.CreateEnterprisePool(context.Background(), s.Fixtures.Enterprises[0].ID, s.Fixtures.CreatePoolParams)
+ entity, err := s.Fixtures.Enterprises[0].GetEntity()
+ s.Require().Nil(err)
+ _, err = s.StoreSQLMocked.CreateEntityPool(s.adminCtx, entity, s.Fixtures.CreatePoolParams)
- s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("adding pool: mocked adding pool error", err.Error())
+ s.Require().Equal("error creating pool: mocked adding pool error", err.Error())
+ s.assertSQLMockExpectations()
}

func (s *EnterpriseTestSuite) TestCreateEnterprisePoolDBSaveTagErr() {
s.Fixtures.CreatePoolParams.Tags = []string{"linux"}
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `enterprises` WHERE id = ? AND `enterprises`.`deleted_at` IS NULL ORDER BY `enterprises`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Enterprises[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Enterprises[0].ID))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `enterprises` WHERE id = ? AND `enterprises`.`deleted_at` IS NULL ORDER BY `enterprises`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Enterprises[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Enterprises[0].ID))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE `pools`.`enterprise_id` = ? AND (provider_name = ? and image = ? and flavor = ?) AND `pools`.`deleted_at` IS NULL")).
- WithArgs(
- s.Fixtures.Enterprises[0].ID,
- s.Fixtures.CreatePoolParams.ProviderName,
- s.Fixtures.CreatePoolParams.Image,
- s.Fixtures.CreatePoolParams.Flavor).
- WillReturnRows(sqlmock.NewRows([]string{"enterprise_id"}))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT 1")).
- WillReturnRows(sqlmock.NewRows([]string{"linux"}))
s.Fixtures.SQLMock.ExpectBegin()
+ s.Fixtures.SQLMock.
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `enterprises` WHERE id = ? AND `enterprises`.`deleted_at` IS NULL ORDER BY `enterprises`.`id` LIMIT ?")).
+ WithArgs(s.Fixtures.Enterprises[0].ID, 1).
+ WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Enterprises[0].ID))
+ s.Fixtures.SQLMock.
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? COLLATE NOCASE AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT ?")).
+ WillReturnRows(sqlmock.NewRows([]string{"linux"}))
s.Fixtures.SQLMock.
ExpectExec(regexp.QuoteMeta("INSERT INTO `tags`")).
WillReturnResult(sqlmock.NewResult(1, 1))
- s.Fixtures.SQLMock.ExpectCommit()
- s.Fixtures.SQLMock.ExpectBegin()
s.Fixtures.SQLMock.
ExpectExec(regexp.QuoteMeta("INSERT INTO `pools`")).
WillReturnResult(sqlmock.NewResult(1, 1))
- s.Fixtures.SQLMock.ExpectCommit()
- s.Fixtures.SQLMock.ExpectBegin()
s.Fixtures.SQLMock.
ExpectExec(regexp.QuoteMeta("UPDATE `pools` SET")).
WillReturnError(fmt.Errorf("mocked saving tag error"))
s.Fixtures.SQLMock.ExpectRollback()
- _, err := s.StoreSQLMocked.CreateEnterprisePool(context.Background(), s.Fixtures.Enterprises[0].ID, s.Fixtures.CreatePoolParams)
+ entity, err := s.Fixtures.Enterprises[0].GetEntity()
+ s.Require().Nil(err)
+ _, err = s.StoreSQLMocked.CreateEntityPool(s.adminCtx, entity, s.Fixtures.CreatePoolParams)
- s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("saving tag: mocked saving tag error", err.Error())
+ s.Require().Equal("error associating tags: mocked saving tag error", err.Error())
+ s.assertSQLMockExpectations()
}

func (s *EnterpriseTestSuite) TestCreateEnterprisePoolDBFetchPoolErr() {
s.Fixtures.CreatePoolParams.Tags = []string{"linux"}
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `enterprises` WHERE id = ? AND `enterprises`.`deleted_at` IS NULL ORDER BY `enterprises`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Enterprises[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Enterprises[0].ID))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `enterprises` WHERE id = ? AND `enterprises`.`deleted_at` IS NULL ORDER BY `enterprises`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Enterprises[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Enterprises[0].ID))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE `pools`.`enterprise_id` = ? AND (provider_name = ? and image = ? and flavor = ?) AND `pools`.`deleted_at` IS NULL")).
- WithArgs(
- s.Fixtures.Enterprises[0].ID,
- s.Fixtures.CreatePoolParams.ProviderName,
- s.Fixtures.CreatePoolParams.Image,
- s.Fixtures.CreatePoolParams.Flavor).
- WillReturnRows(sqlmock.NewRows([]string{"enterprise_id"}))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT 1")).
- WillReturnRows(sqlmock.NewRows([]string{"linux"}))
s.Fixtures.SQLMock.ExpectBegin()
+ s.Fixtures.SQLMock.
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `enterprises` WHERE id = ? AND `enterprises`.`deleted_at` IS NULL ORDER BY `enterprises`.`id` LIMIT ?")).
+ WithArgs(s.Fixtures.Enterprises[0].ID, 1).
+ WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Enterprises[0].ID))
+ s.Fixtures.SQLMock.
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? COLLATE NOCASE AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT ?")).
+ WillReturnRows(sqlmock.NewRows([]string{"linux"}))
s.Fixtures.SQLMock.
ExpectExec(regexp.QuoteMeta("INSERT INTO `tags`")).
WillReturnResult(sqlmock.NewResult(1, 1))
- s.Fixtures.SQLMock.ExpectCommit()
- s.Fixtures.SQLMock.ExpectBegin()
s.Fixtures.SQLMock.
ExpectExec(regexp.QuoteMeta("INSERT INTO `pools`")).
WillReturnResult(sqlmock.NewResult(1, 1))
- s.Fixtures.SQLMock.ExpectCommit()
- s.Fixtures.SQLMock.ExpectBegin()
s.Fixtures.SQLMock.
ExpectExec(regexp.QuoteMeta("UPDATE `pools` SET")).
WillReturnResult(sqlmock.NewResult(1, 1))
@@ -640,161 +655,163 @@ func (s *EnterpriseTestSuite) TestCreateEnterprisePoolDBFetchPoolErr() {
WillReturnResult(sqlmock.NewResult(1, 1))
s.Fixtures.SQLMock.ExpectCommit()
s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE id = ? AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT 1")).
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE id = ? AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT ?")).
WillReturnRows(sqlmock.NewRows([]string{"id"}))
- _, err := s.StoreSQLMocked.CreateEnterprisePool(context.Background(), s.Fixtures.Enterprises[0].ID, s.Fixtures.CreatePoolParams)
+ entity, err := s.Fixtures.Enterprises[0].GetEntity()
+ s.Require().Nil(err)
+ _, err = s.StoreSQLMocked.CreateEntityPool(s.adminCtx, entity, s.Fixtures.CreatePoolParams)
- s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("fetching pool: not found", err.Error())
+ s.Require().Equal("error fetching pool by ID: not found", err.Error())
+ s.assertSQLMockExpectations()
}

func (s *EnterpriseTestSuite) TestListEnterprisePools() {
enterprisePools := []params.Pool{}
+ entity, err := s.Fixtures.Enterprises[0].GetEntity()
+ s.Require().Nil(err)
for i := 1; i <= 2; i++ {
s.Fixtures.CreatePoolParams.Flavor = fmt.Sprintf("test-flavor-%v", i)
- pool, err := s.Store.CreateEnterprisePool(context.Background(), s.Fixtures.Enterprises[0].ID, s.Fixtures.CreatePoolParams)
+ pool, err := s.Store.CreateEntityPool(s.adminCtx, entity, s.Fixtures.CreatePoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create enterprise pool: %v", err))
}
enterprisePools = append(enterprisePools, pool)
}
- pools, err := s.Store.ListEnterprisePools(context.Background(), s.Fixtures.Enterprises[0].ID)
+ pools, err := s.Store.ListEntityPools(s.adminCtx, entity)
s.Require().Nil(err)
garmTesting.EqualDBEntityID(s.T(), enterprisePools, pools)
}

func (s *EnterpriseTestSuite) TestListEnterprisePoolsInvalidEnterpriseID() {
- _, err := s.Store.ListEnterprisePools(context.Background(), "dummy-enterprise-id")
+ entity := params.ForgeEntity{
+ ID: "dummy-enterprise-id",
+ EntityType: params.ForgeEntityTypeEnterprise,
+ }
+ _, err := s.Store.ListEntityPools(s.adminCtx, entity)
s.Require().NotNil(err)
- s.Require().Equal("fetching pools: parsing id: invalid request", err.Error())
+ s.Require().Equal("error fetching pools: error parsing id: invalid request", err.Error())
}

func (s *EnterpriseTestSuite) TestGetEnterprisePool() {
- pool, err := s.Store.CreateEnterprisePool(context.Background(), s.Fixtures.Enterprises[0].ID, s.Fixtures.CreatePoolParams)
+ entity, err := s.Fixtures.Enterprises[0].GetEntity()
+ s.Require().Nil(err)
+ pool, err := s.Store.CreateEntityPool(s.adminCtx, entity, s.Fixtures.CreatePoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create enterprise pool: %v", err))
}
- enterprisePool, err := s.Store.GetEnterprisePool(context.Background(), s.Fixtures.Enterprises[0].ID, pool.ID)
+ enterprisePool, err := s.Store.GetEntityPool(s.adminCtx, entity, pool.ID)
s.Require().Nil(err)
s.Require().Equal(enterprisePool.ID, pool.ID)
}

func (s *EnterpriseTestSuite) TestGetEnterprisePoolInvalidEnterpriseID() {
- _, err := s.Store.GetEnterprisePool(context.Background(), "dummy-enterprise-id", "dummy-pool-id")
+ entity := params.ForgeEntity{
+ ID: "dummy-enterprise-id",
+ EntityType: params.ForgeEntityTypeEnterprise,
+ }
+ _, err := s.Store.GetEntityPool(s.adminCtx, entity, "dummy-pool-id")
s.Require().NotNil(err)
- s.Require().Equal("fetching pool: parsing id: invalid request", err.Error())
+ s.Require().Equal("fetching pool: error parsing id: invalid request", err.Error())
}

func (s *EnterpriseTestSuite) TestDeleteEnterprisePool() {
- pool, err := s.Store.CreateEnterprisePool(context.Background(), s.Fixtures.Enterprises[0].ID, s.Fixtures.CreatePoolParams)
+ entity, err := s.Fixtures.Enterprises[0].GetEntity()
+ s.Require().Nil(err)
+ pool, err := s.Store.CreateEntityPool(s.adminCtx, entity, s.Fixtures.CreatePoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create enterprise pool: %v", err))
}
- err = s.Store.DeleteEnterprisePool(context.Background(), s.Fixtures.Enterprises[0].ID, pool.ID)
+ err = s.Store.DeleteEntityPool(s.adminCtx, entity, pool.ID)
s.Require().Nil(err)
- _, err = s.Store.GetEnterprisePool(context.Background(), s.Fixtures.Enterprises[0].ID, pool.ID)
- s.Require().Equal("fetching pool: finding pool: not found", err.Error())
+ _, err = s.Store.GetEntityPool(s.adminCtx, entity, pool.ID)
+ s.Require().Equal("fetching pool: error finding pool: not found", err.Error())
}

func (s *EnterpriseTestSuite) TestDeleteEnterprisePoolInvalidEnterpriseID() {
- err := s.Store.DeleteEnterprisePool(context.Background(), "dummy-enterprise-id", "dummy-pool-id")
+ entity := params.ForgeEntity{
+ ID: "dummy-enterprise-id",
+ EntityType: params.ForgeEntityTypeEnterprise,
+ }
+ err := s.Store.DeleteEntityPool(s.adminCtx, entity, "dummy-pool-id")
s.Require().NotNil(err)
- s.Require().Equal("looking up enterprise pool: parsing id: invalid request", err.Error())
+ s.Require().Equal("error parsing id: invalid request", err.Error())
}

func (s *EnterpriseTestSuite) TestDeleteEnterprisePoolDBDeleteErr() {
- pool, err := s.Store.CreateEnterprisePool(context.Background(), s.Fixtures.Enterprises[0].ID, s.Fixtures.CreatePoolParams)
+ entity, err := s.Fixtures.Enterprises[0].GetEntity()
+ s.Require().Nil(err)
+ pool, err := s.Store.CreateEntityPool(s.adminCtx, entity, s.Fixtures.CreatePoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create enterprise pool: %v", err))
}
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE (id = ? and enterprise_id = ?) AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT 1")).
- WithArgs(pool.ID, s.Fixtures.Enterprises[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"enterprise_id", "id"}).AddRow(s.Fixtures.Enterprises[0].ID, pool.ID))
s.Fixtures.SQLMock.ExpectBegin()
s.Fixtures.SQLMock.
- ExpectExec(regexp.QuoteMeta("DELETE FROM `pools` WHERE `pools`.`id` = ?")).
- WithArgs(pool.ID).
+ ExpectExec(regexp.QuoteMeta("DELETE FROM `pools` WHERE id = ? and enterprise_id = ?")).
+ WithArgs(pool.ID, s.Fixtures.Enterprises[0].ID).
WillReturnError(fmt.Errorf("mocked deleting pool error"))
s.Fixtures.SQLMock.ExpectRollback()
- err = s.StoreSQLMocked.DeleteEnterprisePool(context.Background(), s.Fixtures.Enterprises[0].ID, pool.ID)
-
+ err = s.StoreSQLMocked.DeleteEntityPool(s.adminCtx, entity, pool.ID)
+ s.Require().NotNil(err)
+ s.Require().Equal("error removing pool: mocked deleting pool error", err.Error())
s.assertSQLMockExpectations()
- s.Require().NotNil(err)
- s.Require().Equal("deleting pool: mocked deleting pool error", err.Error())
-}
-
-func (s *EnterpriseTestSuite) TestFindEnterprisePoolByTags() {
- enterprisePool, err := s.Store.CreateEnterprisePool(context.Background(), s.Fixtures.Enterprises[0].ID, s.Fixtures.CreatePoolParams)
- if err != nil {
- s.FailNow(fmt.Sprintf("cannot create enterprise pool: %v", err))
- }
-
- pool, err := s.Store.FindEnterprisePoolByTags(context.Background(), s.Fixtures.Enterprises[0].ID, s.Fixtures.CreatePoolParams.Tags)
-
- s.Require().Nil(err)
- s.Require().Equal(enterprisePool.ID, pool.ID)
- s.Require().Equal(enterprisePool.Image, pool.Image)
- s.Require().Equal(enterprisePool.Flavor, pool.Flavor)
-}
-
-func (s *EnterpriseTestSuite) TestFindEnterprisePoolByTagsMissingTags() {
- tags := []string{}
-
- _, err := s.Store.FindEnterprisePoolByTags(context.Background(), s.Fixtures.Enterprises[0].ID, tags)
-
- s.Require().NotNil(err)
- s.Require().Equal("fetching pool: missing tags", err.Error())
}

func (s *EnterpriseTestSuite) TestListEnterpriseInstances() {
- pool, err := s.Store.CreateEnterprisePool(context.Background(), s.Fixtures.Enterprises[0].ID, s.Fixtures.CreatePoolParams)
+ entity, err := s.Fixtures.Enterprises[0].GetEntity()
+ s.Require().Nil(err)
+ pool, err := s.Store.CreateEntityPool(s.adminCtx, entity, s.Fixtures.CreatePoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create enterprise pool: %v", err))
}
poolInstances := []params.Instance{}
for i := 1; i <= 3; i++ {
s.Fixtures.CreateInstanceParams.Name = fmt.Sprintf("test-enterprise-%v", i)
- instance, err := s.Store.CreateInstance(context.Background(), pool.ID, s.Fixtures.CreateInstanceParams)
+ instance, err := s.Store.CreateInstance(s.adminCtx, pool.ID, s.Fixtures.CreateInstanceParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create instance: %s", err))
}
poolInstances = append(poolInstances, instance)
}
- instances, err := s.Store.ListEnterpriseInstances(context.Background(), s.Fixtures.Enterprises[0].ID)
+ instances, err := s.Store.ListEntityInstances(s.adminCtx, entity)
s.Require().Nil(err)
s.equalInstancesByName(poolInstances, instances)
}

func (s *EnterpriseTestSuite) TestListEnterpriseInstancesInvalidEnterpriseID() {
- _, err := s.Store.ListEnterpriseInstances(context.Background(), "dummy-enterprise-id")
+ entity := params.ForgeEntity{
+ ID: "dummy-enterprise-id",
+ EntityType: params.ForgeEntityTypeEnterprise,
+ }
+ _, err := s.Store.ListEntityInstances(s.adminCtx, entity)
s.Require().NotNil(err)
- s.Require().Equal("fetching enterprise: parsing id: invalid request", err.Error())
+ s.Require().Equal("error fetching entity: error parsing id: invalid request", err.Error())
}

func (s *EnterpriseTestSuite) TestUpdateEnterprisePool() {
- pool, err := s.Store.CreateEnterprisePool(context.Background(), s.Fixtures.Enterprises[0].ID, s.Fixtures.CreatePoolParams)
+ entity, err := s.Fixtures.Enterprises[0].GetEntity()
+ s.Require().Nil(err)
+ pool, err := s.Store.CreateEntityPool(s.adminCtx, entity, s.Fixtures.CreatePoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create enterprise pool: %v", err))
}
- pool, err = s.Store.UpdateEnterprisePool(context.Background(), s.Fixtures.Enterprises[0].ID, pool.ID, s.Fixtures.UpdatePoolParams)
+ pool, err = s.Store.UpdateEntityPool(s.adminCtx, entity, pool.ID, s.Fixtures.UpdatePoolParams)
s.Require().Nil(err)
s.Require().Equal(*s.Fixtures.UpdatePoolParams.MaxRunners, pool.MaxRunners)
@@ -804,13 +821,38 @@ func (s *EnterpriseTestSuite) TestUpdateEnterprisePool() {
}

func (s *EnterpriseTestSuite) TestUpdateEnterprisePoolInvalidEnterpriseID() {
- _, err := s.Store.UpdateEnterprisePool(context.Background(), "dummy-enterprise-id", "dummy-pool-id", s.Fixtures.UpdatePoolParams)
+ entity := params.ForgeEntity{
+ ID: "dummy-enterprise-id",
+ EntityType: params.ForgeEntityTypeEnterprise,
+ }
+ _, err := s.Store.UpdateEntityPool(s.adminCtx, entity, "dummy-pool-id", s.Fixtures.UpdatePoolParams)
s.Require().NotNil(err)
- s.Require().Equal("fetching pool: parsing id: invalid request", err.Error())
+ s.Require().Equal("error fetching pool: error parsing id: invalid request", err.Error())
+}
+
+func (s *EnterpriseTestSuite) TestAddEnterpriseEntityEvent() {
+ enterprise, err := s.Store.CreateEnterprise(
+ s.adminCtx,
+ s.Fixtures.CreateEnterpriseParams.Name,
+ s.testCreds,
+ s.Fixtures.CreateEnterpriseParams.WebhookSecret,
+ params.PoolBalancerTypeRoundRobin)
+
+ s.Require().Nil(err)
+ entity, err := enterprise.GetEntity()
+ s.Require().Nil(err)
+ err = s.Store.AddEntityEvent(s.adminCtx, entity, params.StatusEvent, params.EventInfo, "this is a test", 20)
+ s.Require().Nil(err)
+
+ enterprise, err = s.Store.GetEnterpriseByID(s.adminCtx, enterprise.ID)
+ s.Require().Nil(err)
+ s.Require().Equal(1, len(enterprise.Events))
+ s.Require().Equal(params.StatusEvent, enterprise.Events[0].EventType)
+ s.Require().Equal(params.EventInfo, enterprise.Events[0].EventLevel)
+ s.Require().Equal("this is a test", enterprise.Events[0].Message)
}

func TestEnterpriseTestSuite(t *testing.T) {
- t.Parallel()
suite.Run(t, new(EnterpriseTestSuite))
}
diff --git a/database/sql/gitea.go b/database/sql/gitea.go
new file mode 100644
index 00000000..a9edde09
--- /dev/null
+++ b/database/sql/gitea.go
@@ -0,0 +1,486 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package sql
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "log/slog"
+
+ "gorm.io/gorm"
+
+ runnerErrors "github.com/cloudbase/garm-provider-common/errors"
+ "github.com/cloudbase/garm/auth"
+ "github.com/cloudbase/garm/database/common"
+ "github.com/cloudbase/garm/params"
+)
+
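+// CreateGiteaEndpoint creates a new Gitea endpoint. Gitea endpoints are
+// stored in the same model as GitHub endpoints (GithubEndpoint) and are
+// differentiated by the EndpointType field. Creating an endpoint with a
+// name that already exists returns ErrDuplicateEntity.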
+func (s *sqlDatabase) CreateGiteaEndpoint(_ context.Context, param params.CreateGiteaEndpointParams) (ghEndpoint params.ForgeEndpoint, err error) {
+ defer func() {
+ if err == nil {
+ s.sendNotify(common.GithubEndpointEntityType, common.CreateOperation, ghEndpoint)
+ }
+ }()
+ var endpoint GithubEndpoint
+ err = s.conn.Transaction(func(tx *gorm.DB) error {
+ if err := tx.Where("name = ?", param.Name).First(&endpoint).Error; err == nil {
+ return fmt.Errorf("gitea endpoint already exists: %w", runnerErrors.ErrDuplicateEntity)
+ }
+ endpoint = GithubEndpoint{
+ Name: param.Name,
+ Description: param.Description,
+ APIBaseURL: param.APIBaseURL,
+ BaseURL: param.BaseURL,
+ CACertBundle: param.CACertBundle,
+ EndpointType: params.GiteaEndpointType,
+ }
+
+ if err := tx.Create(&endpoint).Error; err != nil {
+ return fmt.Errorf("error creating gitea endpoint: %w", err)
+ }
+ return nil
+ })
+ if err != nil {
+ return params.ForgeEndpoint{}, fmt.Errorf("error creating gitea endpoint: %w", err)
+ }
+ ghEndpoint, err = s.sqlToCommonGithubEndpoint(endpoint)
+ if err != nil {
+ return params.ForgeEndpoint{}, fmt.Errorf("error converting gitea endpoint: %w", err)
+ }
+ return ghEndpoint, nil
+}
+
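+// ListGiteaEndpoints returns all endpoints with the Gitea endpoint type.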
+func (s *sqlDatabase) ListGiteaEndpoints(_ context.Context) ([]params.ForgeEndpoint, error) {
+ var endpoints []GithubEndpoint
+ err := s.conn.Where("endpoint_type = ?", params.GiteaEndpointType).Find(&endpoints).Error
+ if err != nil {
+ return nil, fmt.Errorf("error fetching gitea endpoints: %w", err)
+ }
+
+ var ret []params.ForgeEndpoint
+ for _, ep := range endpoints {
+ commonEp, err := s.sqlToCommonGithubEndpoint(ep)
+ if err != nil {
+ return nil, fmt.Errorf("error converting gitea endpoint: %w", err)
+ }
+ ret = append(ret, commonEp)
+ }
+ return ret, nil
+}
+
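+// UpdateGiteaEndpoint updates the Gitea endpoint identified by name. The
+// base URLs may only be changed while no credentials reference the endpoint;
+// otherwise a bad request error is returned.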
+func (s *sqlDatabase) UpdateGiteaEndpoint(_ context.Context, name string, param params.UpdateGiteaEndpointParams) (ghEndpoint params.ForgeEndpoint, err error) {
+ defer func() {
+ if err == nil {
+ s.sendNotify(common.GithubEndpointEntityType, common.UpdateOperation, ghEndpoint)
+ }
+ }()
+ var endpoint GithubEndpoint
+ err = s.conn.Transaction(func(tx *gorm.DB) error {
+ if err := tx.Where("name = ? and endpoint_type = ?", name, params.GiteaEndpointType).First(&endpoint).Error; err != nil {
+ if errors.Is(err, gorm.ErrRecordNotFound) {
+ return runnerErrors.NewNotFoundError("gitea endpoint %q not found", name)
+ }
+ return fmt.Errorf("error fetching gitea endpoint: %w", err)
+ }
+
+ var credsCount int64
+ if err := tx.Model(&GiteaCredentials{}).Where("endpoint_name = ?", endpoint.Name).Count(&credsCount).Error; err != nil {
+ if !errors.Is(err, gorm.ErrRecordNotFound) {
+ return fmt.Errorf("error fetching gitea credentials: %w", err)
+ }
+ }
+ if credsCount > 0 && (param.APIBaseURL != nil || param.BaseURL != nil) {
+ return runnerErrors.NewBadRequestError("cannot update endpoint URLs with existing credentials")
+ }
+
+ if param.APIBaseURL != nil {
+ endpoint.APIBaseURL = *param.APIBaseURL
+ }
+
+ if param.BaseURL != nil {
+ endpoint.BaseURL = *param.BaseURL
+ }
+
+ if param.CACertBundle != nil {
+ endpoint.CACertBundle = param.CACertBundle
+ }
+
+ if param.Description != nil {
+ endpoint.Description = *param.Description
+ }
+
+ if err := tx.Save(&endpoint).Error; err != nil {
+ return fmt.Errorf("error updating gitea endpoint: %w", err)
+ }
+
+ return nil
+ })
+ if err != nil {
+ return params.ForgeEndpoint{}, fmt.Errorf("error updating gitea endpoint: %w", err)
+ }
+ ghEndpoint, err = s.sqlToCommonGithubEndpoint(endpoint)
+ if err != nil {
+ return params.ForgeEndpoint{}, fmt.Errorf("error converting gitea endpoint: %w", err)
+ }
+ return ghEndpoint, nil
+}
+
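+// GetGiteaEndpoint fetches a single Gitea endpoint by name, returning a
+// not found error if it does not exist.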
+func (s *sqlDatabase) GetGiteaEndpoint(_ context.Context, name string) (params.ForgeEndpoint, error) {
+ var endpoint GithubEndpoint
+ err := s.conn.Where("name = ? and endpoint_type = ?", name, params.GiteaEndpointType).First(&endpoint).Error
+ if err != nil {
+ if errors.Is(err, gorm.ErrRecordNotFound) {
+ return params.ForgeEndpoint{}, runnerErrors.NewNotFoundError("gitea endpoint %q not found", name)
+ }
+ return params.ForgeEndpoint{}, fmt.Errorf("error fetching gitea endpoint: %w", err)
+ }
+
+ return s.sqlToCommonGithubEndpoint(endpoint)
+}
+
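+// DeleteGiteaEndpoint removes the Gitea endpoint identified by name.
+// Deleting a nonexistent endpoint is a no-op, while an endpoint that still
+// has credentials, repositories or organizations associated with it cannot
+// be removed.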
+func (s *sqlDatabase) DeleteGiteaEndpoint(_ context.Context, name string) (err error) {
+ defer func() {
+ if err == nil {
+ s.sendNotify(common.GithubEndpointEntityType, common.DeleteOperation, params.ForgeEndpoint{Name: name})
+ }
+ }()
+ err = s.conn.Transaction(func(tx *gorm.DB) error {
+ var endpoint GithubEndpoint
+ if err := tx.Where("name = ? and endpoint_type = ?", name, params.GiteaEndpointType).First(&endpoint).Error; err != nil {
+ if errors.Is(err, gorm.ErrRecordNotFound) {
+ return nil
+ }
+ return fmt.Errorf("error fetching gitea endpoint: %w", err)
+ }
+
+ var credsCount int64
+ if err := tx.Model(&GiteaCredentials{}).Where("endpoint_name = ?", endpoint.Name).Count(&credsCount).Error; err != nil {
+ if !errors.Is(err, gorm.ErrRecordNotFound) {
+ return fmt.Errorf("error fetching gitea credentials: %w", err)
+ }
+ }
+
+ var repoCnt int64
+ if err := tx.Model(&Repository{}).Where("endpoint_name = ?", endpoint.Name).Count(&repoCnt).Error; err != nil {
+ if !errors.Is(err, gorm.ErrRecordNotFound) {
+ return fmt.Errorf("error fetching gitea repositories: %w", err)
+ }
+ }
+
+ var orgCnt int64
+ if err := tx.Model(&Organization{}).Where("endpoint_name = ?", endpoint.Name).Count(&orgCnt).Error; err != nil {
+ if !errors.Is(err, gorm.ErrRecordNotFound) {
+ return fmt.Errorf("error fetching gitea organizations: %w", err)
+ }
+ }
+
+ if credsCount > 0 || repoCnt > 0 || orgCnt > 0 {
+ return runnerErrors.NewBadRequestError("cannot delete endpoint with associated entities")
+ }
+
+ if err := tx.Unscoped().Delete(&endpoint).Error; err != nil {
+ return fmt.Errorf("error deleting gitea endpoint: %w", err)
+ }
+ return nil
+ })
+ if err != nil {
+ return fmt.Errorf("error deleting gitea endpoint: %w", err)
+ }
+ return nil
+}
+
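+// CreateGiteaCredentials creates a new set of Gitea credentials for the
+// user in the supplied context. Credential names are unique per user, and
+// PAT is currently the only supported auth type.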
+func (s *sqlDatabase) CreateGiteaCredentials(ctx context.Context, param params.CreateGiteaCredentialsParams) (gtCreds params.ForgeCredentials, err error) {
+ userID, err := getUIDFromContext(ctx)
+ if err != nil {
+ return params.ForgeCredentials{}, fmt.Errorf("error creating gitea credentials: %w", err)
+ }
+ if param.Endpoint == "" {
+ return params.ForgeCredentials{}, runnerErrors.NewBadRequestError("endpoint name is required")
+ }
+
+ defer func() {
+ if err == nil {
+ s.sendNotify(common.GiteaCredentialsEntityType, common.CreateOperation, gtCreds)
+ }
+ }()
+ var creds GiteaCredentials
+ err = s.conn.Transaction(func(tx *gorm.DB) error {
+ var endpoint GithubEndpoint
+ if err := tx.Where("name = ? and endpoint_type = ?", param.Endpoint, params.GiteaEndpointType).First(&endpoint).Error; err != nil {
+ if errors.Is(err, gorm.ErrRecordNotFound) {
+ return runnerErrors.NewNotFoundError("gitea endpoint %q not found", param.Endpoint)
+ }
+ return fmt.Errorf("error fetching gitea endpoint: %w", err)
+ }
+
+ if err := tx.Where("name = ? and user_id = ?", param.Name, userID).First(&creds).Error; err == nil {
+ return fmt.Errorf("gitea credentials already exist: %w", runnerErrors.ErrDuplicateEntity)
+ }
+
+ var data []byte
+ var err error
+ switch param.AuthType {
+ case params.ForgeAuthTypePAT:
+ data, err = s.marshalAndSeal(param.PAT)
+ default:
+ return runnerErrors.NewBadRequestError("invalid auth type %q", param.AuthType)
+ }
+ if err != nil {
+ return fmt.Errorf("error marshaling and sealing credentials: %w", err)
+ }
+
+ creds = GiteaCredentials{
+ Name: param.Name,
+ Description: param.Description,
+ EndpointName: &endpoint.Name,
+ AuthType: param.AuthType,
+ Payload: data,
+ UserID: &userID,
+ }
+
+ if err := tx.Create(&creds).Error; err != nil {
+ return fmt.Errorf("error creating gitea credentials: %w", err)
+ }
+ // Skip making an extra query.
+ creds.Endpoint = endpoint
+
+ return nil
+ })
+ if err != nil {
+ return params.ForgeCredentials{}, fmt.Errorf("error creating gitea credentials: %w", err)
+ }
+ gtCreds, err = s.sqlGiteaToCommonForgeCredentials(creds)
+ if err != nil {
+ return params.ForgeCredentials{}, fmt.Errorf("error converting gitea credentials: %w", err)
+ }
+ return gtCreds, nil
+}
+
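+// getGiteaCredentialsByName fetches the credentials with the given name
+// owned by the user in the supplied context. When detailed is true, the
+// associated repositories and organizations are preloaded as well.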
+func (s *sqlDatabase) getGiteaCredentialsByName(ctx context.Context, tx *gorm.DB, name string, detailed bool) (GiteaCredentials, error) {
+ var creds GiteaCredentials
+ q := tx.Preload("Endpoint")
+
+ if detailed {
+ q = q.
+ Preload("Repositories").
+ Preload("Organizations").
+ Preload("Repositories.GiteaCredentials").
+ Preload("Organizations.GiteaCredentials").
+ Preload("Repositories.Credentials").
+ Preload("Organizations.Credentials")
+ }
+
+ userID, err := getUIDFromContext(ctx)
+ if err != nil {
+ return GiteaCredentials{}, fmt.Errorf("error fetching gitea credentials: %w", err)
+ }
+ q = q.Where("user_id = ?", userID)
+
+ err = q.Where("name = ?", name).First(&creds).Error
+ if err != nil {
+ if errors.Is(err, gorm.ErrRecordNotFound) {
+ return GiteaCredentials{}, runnerErrors.NewNotFoundError("gitea credentials %q not found", name)
+ }
+ return GiteaCredentials{}, fmt.Errorf("error fetching gitea credentials: %w", err)
+ }
+
+ return creds, nil
+}
+
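+// GetGiteaCredentialsByName returns the Gitea credentials with the given
+// name, owned by the user in the supplied context.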
+func (s *sqlDatabase) GetGiteaCredentialsByName(ctx context.Context, name string, detailed bool) (params.ForgeCredentials, error) {
+ creds, err := s.getGiteaCredentialsByName(ctx, s.conn, name, detailed)
+ if err != nil {
+ return params.ForgeCredentials{}, fmt.Errorf("error fetching gitea credentials: %w", err)
+ }
+
+ return s.sqlGiteaToCommonForgeCredentials(creds)
+}
+
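+// GetGiteaCredentials returns a set of Gitea credentials by ID. Admins can
+// fetch any credentials; normal users can only fetch their own.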
+func (s *sqlDatabase) GetGiteaCredentials(ctx context.Context, id uint, detailed bool) (params.ForgeCredentials, error) {
+ var creds GiteaCredentials
+ q := s.conn.Preload("Endpoint")
+
+ if detailed {
+ q = q.
+ Preload("Repositories").
+ Preload("Organizations").
+ Preload("Repositories.GiteaCredentials").
+ Preload("Organizations.GiteaCredentials").
+ Preload("Repositories.Credentials").
+ Preload("Organizations.Credentials")
+ }
+
+ if !auth.IsAdmin(ctx) {
+ userID, err := getUIDFromContext(ctx)
+ if err != nil {
+ return params.ForgeCredentials{}, fmt.Errorf("error fetching gitea credentials: %w", err)
+ }
+ q = q.Where("user_id = ?", userID)
+ }
+
+ err := q.Where("id = ?", id).First(&creds).Error
+ if err != nil {
+ if errors.Is(err, gorm.ErrRecordNotFound) {
+ return params.ForgeCredentials{}, runnerErrors.NewNotFoundError("gitea credentials not found")
+ }
+ return params.ForgeCredentials{}, fmt.Errorf("error fetching gitea credentials: %w", err)
+ }
+
+ return s.sqlGiteaToCommonForgeCredentials(creds)
+}
+
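+// ListGiteaCredentials lists Gitea credentials. Admins get all credentials
+// defined in the system; normal users only get their own.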
+func (s *sqlDatabase) ListGiteaCredentials(ctx context.Context) ([]params.ForgeCredentials, error) {
+ q := s.conn.Preload("Endpoint")
+ if !auth.IsAdmin(ctx) {
+ userID, err := getUIDFromContext(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("error fetching gitea credentials: %w", err)
+ }
+ q = q.Where("user_id = ?", userID)
+ }
+
+ var creds []GiteaCredentials
+ err := q.Find(&creds).Error
+ if err != nil {
+ return nil, fmt.Errorf("error fetching gitea credentials: %w", err)
+ }
+
+ var ret []params.ForgeCredentials
+ for _, c := range creds {
+ commonCreds, err := s.sqlGiteaToCommonForgeCredentials(c)
+ if err != nil {
+ return nil, fmt.Errorf("error converting gitea credentials: %w", err)
+ }
+ ret = append(ret, commonCreds)
+ }
+ return ret, nil
+}
+
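+// UpdateGiteaCredentials updates a set of Gitea credentials by ID. Normal
+// users can only update credentials they own. The secret payload is only
+// replaced when a new PAT is supplied.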
+func (s *sqlDatabase) UpdateGiteaCredentials(ctx context.Context, id uint, param params.UpdateGiteaCredentialsParams) (gtCreds params.ForgeCredentials, err error) {
+ defer func() {
+ if err == nil {
+ s.sendNotify(common.GiteaCredentialsEntityType, common.UpdateOperation, gtCreds)
+ }
+ }()
+ var creds GiteaCredentials
+ err = s.conn.Transaction(func(tx *gorm.DB) error {
+ q := tx.Preload("Endpoint")
+ if !auth.IsAdmin(ctx) {
+ userID, err := getUIDFromContext(ctx)
+ if err != nil {
+ return fmt.Errorf("error updating gitea credentials: %w", err)
+ }
+ q = q.Where("user_id = ?", userID)
+ }
+
+ if err := q.Where("id = ?", id).First(&creds).Error; err != nil {
+ if errors.Is(err, gorm.ErrRecordNotFound) {
+ return runnerErrors.NewNotFoundError("gitea credentials not found")
+ }
+ return fmt.Errorf("error fetching gitea credentials: %w", err)
+ }
+
+ if param.Name != nil {
+ creds.Name = *param.Name
+ }
+ if param.Description != nil {
+ creds.Description = *param.Description
+ }
+
+ var data []byte
+ var err error
+ switch creds.AuthType {
+ case params.ForgeAuthTypePAT:
+ if param.PAT != nil {
+ data, err = s.marshalAndSeal(param.PAT)
+ }
+ default:
+ return runnerErrors.NewBadRequestError("invalid auth type %q", creds.AuthType)
+ }
+
+ if err != nil {
+ return fmt.Errorf("error marshaling and sealing credentials: %w", err)
+ }
+ if len(data) > 0 {
+ creds.Payload = data
+ }
+
+ if err := tx.Save(&creds).Error; err != nil {
+ return fmt.Errorf("error updating gitea credentials: %w", err)
+ }
+ return nil
+ })
+ if err != nil {
+ return params.ForgeCredentials{}, fmt.Errorf("error updating gitea credentials: %w", err)
+ }
+
+ gtCreds, err = s.sqlGiteaToCommonForgeCredentials(creds)
+ if err != nil {
+ return params.ForgeCredentials{}, fmt.Errorf("error converting gitea credentials: %w", err)
+ }
+ return gtCreds, nil
+}
+
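+// DeleteGiteaCredentials removes a set of Gitea credentials by ID. Deleting
+// credentials that do not exist (or are not visible to the caller) is a
+// no-op, while credentials still referenced by repositories or organizations
+// cannot be removed.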
+func (s *sqlDatabase) DeleteGiteaCredentials(ctx context.Context, id uint) (err error) {
+ var creds GiteaCredentials
+ defer func() {
+ if err == nil {
+ forgeCreds, innerErr := s.sqlGiteaToCommonForgeCredentials(creds)
+ if innerErr != nil {
+ slog.ErrorContext(ctx, "converting gitea credentials", "error", innerErr)
+ return
+ }
+ if creds.ID == 0 || creds.Name == "" {
+ return
+ }
+ s.sendNotify(common.GiteaCredentialsEntityType, common.DeleteOperation, forgeCreds)
+ }
+ }()
+ err = s.conn.Transaction(func(tx *gorm.DB) error {
+ q := tx.Where("id = ?", id).
+ Preload("Repositories").
+ Preload("Organizations")
+ if !auth.IsAdmin(ctx) {
+ userID, err := getUIDFromContext(ctx)
+ if err != nil {
+ return fmt.Errorf("error deleting gitea credentials: %w", err)
+ }
+ q = q.Where("user_id = ?", userID)
+ }
+
+ err := q.First(&creds).Error
+ if err != nil {
+ if errors.Is(err, gorm.ErrRecordNotFound) {
+ return nil
+ }
+ return fmt.Errorf("error fetching gitea credentials: %w", err)
+ }
+
+ if len(creds.Repositories) > 0 {
+ return runnerErrors.NewBadRequestError("cannot delete credentials with repositories")
+ }
+ if len(creds.Organizations) > 0 {
+ return runnerErrors.NewBadRequestError("cannot delete credentials with organizations")
+ }
+ if err := tx.Unscoped().Delete(&creds).Error; err != nil {
+ return fmt.Errorf("error deleting gitea credentials: %w", err)
+ }
+ return nil
+ })
+ if err != nil {
+ return fmt.Errorf("error deleting gitea credentials: %w", err)
+ }
+ return nil
+}
diff --git a/database/sql/gitea_test.go b/database/sql/gitea_test.go
new file mode 100644
index 00000000..dff5c471
--- /dev/null
+++ b/database/sql/gitea_test.go
@@ -0,0 +1,848 @@
+// Copyright 2024 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package sql
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "testing"
+
+ "github.com/stretchr/testify/suite"
+
+ runnerErrors "github.com/cloudbase/garm-provider-common/errors"
+ "github.com/cloudbase/garm/auth"
+ "github.com/cloudbase/garm/database/common"
+ garmTesting "github.com/cloudbase/garm/internal/testing"
+ "github.com/cloudbase/garm/params"
+)
+
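+// GiteaTestSuite exercises the Gitea-specific store operations: endpoint
+// and credentials management.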
+type GiteaTestSuite struct {
+ suite.Suite
+
+ giteaEndpoint params.ForgeEndpoint
+ db common.Store
+}
+
+func (s *GiteaTestSuite) SetupTest() {
+ db, err := NewSQLDatabase(context.Background(), garmTesting.GetTestSqliteDBConfig(s.T()))
+ if err != nil {
+ s.FailNow(fmt.Sprintf("failed to create db connection: %s", err))
+ }
+
+ s.db = db
+
+ createEpParams := params.CreateGiteaEndpointParams{
+ Name: testEndpointName,
+ Description: testEndpointDescription,
+ APIBaseURL: testAPIBaseURL,
+ BaseURL: testBaseURL,
+ }
+ endpoint, err := s.db.CreateGiteaEndpoint(context.Background(), createEpParams)
+ s.Require().NoError(err)
+ s.Require().NotNil(endpoint)
+ s.Require().Equal(testEndpointName, endpoint.Name)
+ s.giteaEndpoint = endpoint
+}
+
+func (s *GiteaTestSuite) TestCreatingEndpoint() {
+ ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+ createEpParams := params.CreateGiteaEndpointParams{
+ Name: alternetTestEndpointName,
+ Description: testEndpointDescription,
+ APIBaseURL: testAPIBaseURL,
+ BaseURL: testBaseURL,
+ }
+
+ endpoint, err := s.db.CreateGiteaEndpoint(ctx, createEpParams)
+ s.Require().NoError(err)
+ s.Require().NotNil(endpoint)
+ s.Require().Equal(alternetTestEndpointName, endpoint.Name)
+}
+
+func (s *GiteaTestSuite) TestCreatingDuplicateEndpointFails() {
+ ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+ createEpParams := params.CreateGiteaEndpointParams{
+ Name: alternetTestEndpointName,
+ Description: testEndpointDescription,
+ APIBaseURL: testAPIBaseURL,
+ BaseURL: testBaseURL,
+ }
+
+ _, err := s.db.CreateGiteaEndpoint(ctx, createEpParams)
+ s.Require().NoError(err)
+
+ _, err = s.db.CreateGiteaEndpoint(ctx, createEpParams)
+ s.Require().Error(err)
+ s.Require().ErrorIs(err, runnerErrors.ErrDuplicateEntity)
+}
+
+func (s *GiteaTestSuite) TestGetEndpoint() {
+ ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+ createEpParams := params.CreateGiteaEndpointParams{
+ Name: alternetTestEndpointName,
+ Description: testEndpointDescription,
+ APIBaseURL: testAPIBaseURL,
+ BaseURL: testBaseURL,
+ }
+
+ newEndpoint, err := s.db.CreateGiteaEndpoint(ctx, createEpParams)
+ s.Require().NoError(err)
+
+ endpoint, err := s.db.GetGiteaEndpoint(ctx, createEpParams.Name)
+ s.Require().NoError(err)
+ s.Require().NotNil(endpoint)
+ s.Require().Equal(newEndpoint.Name, endpoint.Name)
+}
+
+func (s *GiteaTestSuite) TestGetNonExistingEndpointFailsWithNotFoundError() {
+ ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+ _, err := s.db.GetGiteaEndpoint(ctx, "non-existing")
+ s.Require().Error(err)
+ s.Require().ErrorIs(err, runnerErrors.ErrNotFound)
+}
+
+func (s *GiteaTestSuite) TestDeletingNonExistingEndpointIsANoop() {
+ ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+ err := s.db.DeleteGiteaEndpoint(ctx, "non-existing")
+ s.Require().NoError(err)
+}
+
+func (s *GiteaTestSuite) TestDeletingEndpoint() {
+ ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+ createEpParams := params.CreateGiteaEndpointParams{
+ Name: alternetTestEndpointName,
+ Description: testEndpointDescription,
+ APIBaseURL: testAPIBaseURL,
+ BaseURL: testBaseURL,
+ }
+
+ endpoint, err := s.db.CreateGiteaEndpoint(ctx, createEpParams)
+ s.Require().NoError(err)
+ s.Require().NotNil(endpoint)
+
+ err = s.db.DeleteGiteaEndpoint(ctx, alternetTestEndpointName)
+ s.Require().NoError(err)
+
+ _, err = s.db.GetGiteaEndpoint(ctx, alternetTestEndpointName)
+ s.Require().Error(err)
+ s.Require().ErrorIs(err, runnerErrors.ErrNotFound)
+}
+
+func (s *GiteaTestSuite) TestUpdateEndpoint() {
+ ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+ createEpParams := params.CreateGiteaEndpointParams{
+ Name: "deleteme",
+ Description: testEndpointDescription,
+ APIBaseURL: testAPIBaseURL,
+ BaseURL: testBaseURL,
+ }
+
+ endpoint, err := s.db.CreateGiteaEndpoint(ctx, createEpParams)
+ s.Require().NoError(err)
+ s.Require().NotNil(endpoint)
+
+ newDescription := "another description"
+ newAPIBaseURL := "https://updated.example.com"
+ newBaseURL := "https://updated.example.com"
+ caCertBundle, err := os.ReadFile("../../testdata/certs/srv-pub.pem")
+ s.Require().NoError(err)
+ updateEpParams := params.UpdateGiteaEndpointParams{
+ Description: &newDescription,
+ APIBaseURL: &newAPIBaseURL,
+ BaseURL: &newBaseURL,
+ CACertBundle: caCertBundle,
+ }
+
+ updatedEndpoint, err := s.db.UpdateGiteaEndpoint(ctx, testEndpointName, updateEpParams)
+ s.Require().NoError(err)
+ s.Require().NotNil(updatedEndpoint)
+ s.Require().Equal(newDescription, updatedEndpoint.Description)
+ s.Require().Equal(newAPIBaseURL, updatedEndpoint.APIBaseURL)
+ s.Require().Equal(newBaseURL, updatedEndpoint.BaseURL)
+ s.Require().Equal(caCertBundle, updatedEndpoint.CACertBundle)
+}
+
+func (s *GiteaTestSuite) TestUpdatingNonExistingEndpointReturnsNotFoundError() {
+ ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+ newDescription := "test desc"
+ updateEpParams := params.UpdateGiteaEndpointParams{
+ Description: &newDescription,
+ }
+
+ _, err := s.db.UpdateGiteaEndpoint(ctx, "non-existing", updateEpParams)
+ s.Require().Error(err)
+ s.Require().ErrorIs(err, runnerErrors.ErrNotFound)
+}
+
+func (s *GiteaTestSuite) TestListEndpoints() {
+ ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+ createEpParams := params.CreateGiteaEndpointParams{
+ Name: alternetTestEndpointName,
+ Description: testEndpointDescription,
+ APIBaseURL: testAPIBaseURL,
+ BaseURL: testBaseURL,
+ }
+
+ _, err := s.db.CreateGiteaEndpoint(ctx, createEpParams)
+ s.Require().NoError(err)
+
+ endpoints, err := s.db.ListGiteaEndpoints(ctx)
+ s.Require().NoError(err)
+ s.Require().Len(endpoints, 2)
+}
+
+func (s *GiteaTestSuite) TestCreateCredentialsFailsWithUnauthorizedForAnonUser() {
+ ctx := context.Background()
+
+ _, err := s.db.CreateGiteaCredentials(ctx, params.CreateGiteaCredentialsParams{})
+ s.Require().Error(err)
+ s.Require().ErrorIs(err, runnerErrors.ErrUnauthorized)
+}
+
+func (s *GiteaTestSuite) TestCreateCredentialsFailsWhenEndpointNameIsEmpty() {
+ ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+ _, err := s.db.CreateGiteaCredentials(ctx, params.CreateGiteaCredentialsParams{})
+ s.Require().Error(err)
+ s.Require().ErrorIs(err, runnerErrors.ErrBadRequest)
+ s.Require().Regexp("endpoint name is required", err.Error())
+}
+
+func (s *GiteaTestSuite) TestCreateCredentialsFailsWhenEndpointDoesNotExist() {
+ ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+ _, err := s.db.CreateGiteaCredentials(ctx, params.CreateGiteaCredentialsParams{Endpoint: "non-existing"})
+ s.Require().Error(err)
+ s.Require().ErrorIs(err, runnerErrors.ErrNotFound)
+ s.Require().Regexp("error creating gitea credentials: gitea endpoint \"non-existing\" not found", err.Error())
+}
+
+func (s *GiteaTestSuite) TestCreateCredentialsFailsWhenAuthTypeIsInvalid() {
+ ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+ _, err := s.db.CreateGiteaCredentials(ctx, params.CreateGiteaCredentialsParams{Endpoint: s.giteaEndpoint.Name, AuthType: "invalid"})
+ s.Require().Error(err)
+ s.Require().ErrorIs(err, runnerErrors.ErrBadRequest)
+ s.Require().Regexp("invalid auth type", err.Error())
+}
+
+func (s *GiteaTestSuite) TestCreateCredentials() {
+ ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+ credParams := params.CreateGiteaCredentialsParams{
+ Name: testCredsName,
+ Description: testCredsDescription,
+ Endpoint: s.giteaEndpoint.Name,
+ AuthType: params.ForgeAuthTypePAT,
+ PAT: params.GithubPAT{
+ OAuth2Token: "test",
+ },
+ }
+
+ creds, err := s.db.CreateGiteaCredentials(ctx, credParams)
+ s.Require().NoError(err)
+ s.Require().NotNil(creds)
+ s.Require().Equal(credParams.Name, creds.Name)
+ s.Require().Equal(credParams.Description, creds.Description)
+ s.Require().Equal(credParams.Endpoint, creds.Endpoint.Name)
+ s.Require().Equal(credParams.AuthType, creds.AuthType)
+}
+
+func (s *GiteaTestSuite) TestCreateCredentialsFailsOnDuplicateCredentials() {
+ ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+ testUser := garmTesting.CreateGARMTestUser(ctx, "testuser", s.db, s.T())
+ testUserCtx := auth.PopulateContext(context.Background(), testUser, nil)
+
+ credParams := params.CreateGiteaCredentialsParams{
+ Name: testCredsName,
+ Description: testCredsDescription,
+ Endpoint: s.giteaEndpoint.Name,
+ AuthType: params.ForgeAuthTypePAT,
+ PAT: params.GithubPAT{
+ OAuth2Token: "test",
+ },
+ }
+
+ _, err := s.db.CreateGiteaCredentials(ctx, credParams)
+ s.Require().NoError(err)
+
+ // Creating creds with the same parameters should fail for the same user.
+ _, err = s.db.CreateGiteaCredentials(ctx, credParams)
+ s.Require().Error(err)
+ s.Require().ErrorIs(err, runnerErrors.ErrDuplicateEntity)
+
+ // Creating creds with the same parameters should work for different users.
+ _, err = s.db.CreateGiteaCredentials(testUserCtx, credParams)
+ s.Require().NoError(err)
+}
+
+func (s *GiteaTestSuite) TestNormalUsersCanOnlySeeTheirOwnCredentialsAdminCanSeeAll() {
+ ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+ testUser := garmTesting.CreateGARMTestUser(ctx, "testuser1", s.db, s.T())
+ testUser2 := garmTesting.CreateGARMTestUser(ctx, "testuser2", s.db, s.T())
+ testUserCtx := auth.PopulateContext(context.Background(), testUser, nil)
+ testUser2Ctx := auth.PopulateContext(context.Background(), testUser2, nil)
+
+ credParams := params.CreateGiteaCredentialsParams{
+ Name: testCredsName,
+ Description: testCredsDescription,
+ Endpoint: s.giteaEndpoint.Name,
+ AuthType: params.ForgeAuthTypePAT,
+ PAT: params.GithubPAT{
+ OAuth2Token: "test",
+ },
+ }
+
+ creds, err := s.db.CreateGiteaCredentials(ctx, credParams)
+ s.Require().NoError(err)
+ s.Require().NotNil(creds)
+
+ credParams.Name = "test-creds2"
+ creds2, err := s.db.CreateGiteaCredentials(testUserCtx, credParams)
+ s.Require().NoError(err)
+ s.Require().NotNil(creds2)
+
+ credParams.Name = "test-creds3"
+ creds3, err := s.db.CreateGiteaCredentials(testUser2Ctx, credParams)
+ s.Require().NoError(err)
+ s.Require().NotNil(creds3)
+
+ credsList, err := s.db.ListGiteaCredentials(ctx)
+ s.Require().NoError(err)
+ s.Require().Len(credsList, 3)
+
+ credsList, err = s.db.ListGiteaCredentials(testUserCtx)
+ s.Require().NoError(err)
+ s.Require().Len(credsList, 1)
+ s.Require().Equal("test-creds2", credsList[0].Name)
+
+ credsList, err = s.db.ListGiteaCredentials(testUser2Ctx)
+ s.Require().NoError(err)
+ s.Require().Len(credsList, 1)
+ s.Require().Equal("test-creds3", credsList[0].Name)
+}
+
+func (s *GiteaTestSuite) TestGetGiteaCredentialsFailsWhenCredentialsDontExist() {
+ ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+ _, err := s.db.GetGiteaCredentials(ctx, 1, true)
+ s.Require().Error(err)
+ s.Require().ErrorIs(err, runnerErrors.ErrNotFound)
+
+ _, err = s.db.GetGiteaCredentialsByName(ctx, "non-existing", true)
+ s.Require().Error(err)
+ s.Require().ErrorIs(err, runnerErrors.ErrNotFound)
+}
+
+func (s *GiteaTestSuite) TestGetGiteaCredentialsByNameReturnsOnlyCurrentUserCredentials() {
+ ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+ testUser := garmTesting.CreateGARMTestUser(ctx, "test-user1", s.db, s.T())
+ testUserCtx := auth.PopulateContext(context.Background(), testUser, nil)
+
+ credParams := params.CreateGiteaCredentialsParams{
+ Name: testCredsName,
+ Description: testCredsDescription,
+ Endpoint: s.giteaEndpoint.Name,
+ AuthType: params.ForgeAuthTypePAT,
+ PAT: params.GithubPAT{
+ OAuth2Token: "test",
+ },
+ }
+
+ creds, err := s.db.CreateGiteaCredentials(ctx, credParams)
+ s.Require().NoError(err)
+ s.Require().NotNil(creds)
+
+ creds2, err := s.db.CreateGiteaCredentials(testUserCtx, credParams)
+ s.Require().NoError(err)
+ s.Require().NotNil(creds2)
+
+ creds2Get, err := s.db.GetGiteaCredentialsByName(testUserCtx, testCredsName, true)
+ s.Require().NoError(err)
+ s.Require().NotNil(creds2)
+ s.Require().Equal(testCredsName, creds2Get.Name)
+ s.Require().Equal(creds2.ID, creds2Get.ID)
+
+ credsGet, err := s.db.GetGiteaCredentialsByName(ctx, testCredsName, true)
+ s.Require().NoError(err)
+ s.Require().NotNil(creds)
+ s.Require().Equal(testCredsName, credsGet.Name)
+ s.Require().Equal(creds.ID, credsGet.ID)
+
+ // Admin can get any creds by ID
+ credsGet, err = s.db.GetGiteaCredentials(ctx, creds2.ID, true)
+ s.Require().NoError(err)
+ s.Require().NotNil(creds2)
+ s.Require().Equal(creds2.ID, credsGet.ID)
+
+ // Normal user cannot get other user creds by ID
+ _, err = s.db.GetGiteaCredentials(testUserCtx, creds.ID, true)
+ s.Require().Error(err)
+ s.Require().ErrorIs(err, runnerErrors.ErrNotFound)
+}
+
+func (s *GiteaTestSuite) TestGetGiteaCredentials() {
+ ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+ credParams := params.CreateGiteaCredentialsParams{
+ Name: testCredsName,
+ Description: testCredsDescription,
+ Endpoint: s.giteaEndpoint.Name,
+ AuthType: params.ForgeAuthTypePAT,
+ PAT: params.GithubPAT{
+ OAuth2Token: "test",
+ },
+ }
+
+ creds, err := s.db.CreateGiteaCredentials(ctx, credParams)
+ s.Require().NoError(err)
+ s.Require().NotNil(creds)
+
+ creds2, err := s.db.GetGiteaCredentialsByName(ctx, testCredsName, true)
+ s.Require().NoError(err)
+ s.Require().NotNil(creds2)
+ s.Require().Equal(creds.Name, creds2.Name)
+ s.Require().Equal(creds.ID, creds2.ID)
+
+ creds2, err = s.db.GetGiteaCredentials(ctx, creds.ID, true)
+ s.Require().NoError(err)
+ s.Require().NotNil(creds2)
+ s.Require().Equal(creds.Name, creds2.Name)
+ s.Require().Equal(creds.ID, creds2.ID)
+}
+
+func (s *GiteaTestSuite) TestDeleteGiteaCredentials() {
+ ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+ credParams := params.CreateGiteaCredentialsParams{
+ Name: testCredsName,
+ Description: testCredsDescription,
+ Endpoint: s.giteaEndpoint.Name,
+ AuthType: params.ForgeAuthTypePAT,
+ PAT: params.GithubPAT{
+ OAuth2Token: "test",
+ },
+ }
+
+ creds, err := s.db.CreateGiteaCredentials(ctx, credParams)
+ s.Require().NoError(err)
+ s.Require().NotNil(creds)
+
+ err = s.db.DeleteGiteaCredentials(ctx, creds.ID)
+ s.Require().NoError(err)
+
+ _, err = s.db.GetGiteaCredentials(ctx, creds.ID, true)
+ s.Require().Error(err)
+ s.Require().ErrorIs(err, runnerErrors.ErrNotFound)
+}
+
+func (s *GiteaTestSuite) TestDeleteGiteaCredentialsByNonAdminUser() {
+ ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+ testUser := garmTesting.CreateGARMTestUser(ctx, "test-user4", s.db, s.T())
+ testUserCtx := auth.PopulateContext(context.Background(), testUser, nil)
+
+ credParams := params.CreateGiteaCredentialsParams{
+ Name: testCredsName,
+ Description: testCredsDescription,
+ Endpoint: s.giteaEndpoint.Name,
+ AuthType: params.ForgeAuthTypePAT,
+ PAT: params.GithubPAT{
+ OAuth2Token: "test-creds4",
+ },
+ }
+
+ // Create creds as admin
+ creds, err := s.db.CreateGiteaCredentials(ctx, credParams)
+ s.Require().NoError(err)
+ s.Require().NotNil(creds)
+
+ // Deleting nonexistent creds returns a nil error. The creds created by
+ // the admin are not visible to the test user, so the lookup comes back
+ // not found, which in turn results in a no-op and no error.
+ err = s.db.DeleteGiteaCredentials(testUserCtx, creds.ID)
+ s.Require().NoError(err)
+
+ // Check that the creds created by the admin are still there.
+ credsGet, err := s.db.GetGiteaCredentials(ctx, creds.ID, true)
+ s.Require().NoError(err)
+ s.Require().NotNil(credsGet)
+ s.Require().Equal(creds.ID, credsGet.ID)
+
+ // Create the same creds with the test user.
+ creds2, err := s.db.CreateGiteaCredentials(testUserCtx, credParams)
+ s.Require().NoError(err)
+ s.Require().NotNil(creds2)
+
+ // Remove creds created by test user.
+ err = s.db.DeleteGiteaCredentials(testUserCtx, creds2.ID)
+ s.Require().NoError(err)
+
+ // The creds created by the test user should be gone.
+ _, err = s.db.GetGiteaCredentials(testUserCtx, creds2.ID, true)
+ s.Require().Error(err)
+ s.Require().ErrorIs(err, runnerErrors.ErrNotFound)
+}
+
+func (s *GiteaTestSuite) TestDeleteCredentialsFailsIfReposOrgsOrEntitiesUseIt() {
+ ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+ credParams := params.CreateGiteaCredentialsParams{
+ Name: testCredsName,
+ Description: testCredsDescription,
+ Endpoint: s.giteaEndpoint.Name,
+ AuthType: params.ForgeAuthTypePAT,
+ PAT: params.GithubPAT{
+ OAuth2Token: "test",
+ },
+ }
+
+ creds, err := s.db.CreateGiteaCredentials(ctx, credParams)
+ s.Require().NoError(err)
+ s.Require().NotNil(creds)
+
+ repo, err := s.db.CreateRepository(ctx, "test-owner", "test-repo", creds, "superSecret@123BlaBla", params.PoolBalancerTypeRoundRobin)
+ s.Require().NoError(err)
+ s.Require().NotNil(repo)
+
+ err = s.db.DeleteGiteaCredentials(ctx, creds.ID)
+ s.Require().Error(err)
+ s.Require().ErrorIs(err, runnerErrors.ErrBadRequest)
+
+ err = s.db.DeleteRepository(ctx, repo.ID)
+ s.Require().NoError(err)
+
+ org, err := s.db.CreateOrganization(ctx, "test-org", creds, "superSecret@123BlaBla", params.PoolBalancerTypeRoundRobin)
+ s.Require().NoError(err)
+ s.Require().NotNil(org)
+
+ err = s.db.DeleteGiteaCredentials(ctx, creds.ID)
+ s.Require().Error(err)
+ s.Require().ErrorIs(err, runnerErrors.ErrBadRequest)
+
+ err = s.db.DeleteOrganization(ctx, org.ID)
+ s.Require().NoError(err)
+
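+ // Gitea has no equivalent of GitHub enterprises, so creating an enterprise
+ // with Gitea credentials is expected to fail with a bad request error.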
+ enterprise, err := s.db.CreateEnterprise(ctx, "test-enterprise", creds, "superSecret@123BlaBla", params.PoolBalancerTypeRoundRobin)
+ s.Require().ErrorIs(err, runnerErrors.ErrBadRequest)
+ s.Require().Equal(params.Enterprise{}, enterprise)
+
+ err = s.db.DeleteGiteaCredentials(ctx, creds.ID)
+ s.Require().NoError(err)
+
+ _, err = s.db.GetGiteaCredentials(ctx, creds.ID, true)
+ s.Require().Error(err)
+ s.Require().ErrorIs(err, runnerErrors.ErrNotFound)
+}
+
+func (s *GiteaTestSuite) TestUpdateCredentials() {
+ ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+ credParams := params.CreateGiteaCredentialsParams{
+ Name: testCredsName,
+ Description: testCredsDescription,
+ Endpoint: s.giteaEndpoint.Name,
+ AuthType: params.ForgeAuthTypePAT,
+ PAT: params.GithubPAT{
+ OAuth2Token: "test",
+ },
+ }
+
+ creds, err := s.db.CreateGiteaCredentials(ctx, credParams)
+ s.Require().NoError(err)
+ s.Require().NotNil(creds)
+
+ newDescription := "just a description"
+ newName := "new-name"
+ newToken := "new-token"
+ updateCredParams := params.UpdateGiteaCredentialsParams{
+ Description: &newDescription,
+ Name: &newName,
+ PAT: ¶ms.GithubPAT{
+ OAuth2Token: newToken,
+ },
+ }
+
+ updatedCreds, err := s.db.UpdateGiteaCredentials(ctx, creds.ID, updateCredParams)
+ s.Require().NoError(err)
+ s.Require().NotNil(updatedCreds)
+ s.Require().Equal(newDescription, updatedCreds.Description)
+ s.Require().Equal(newName, updatedCreds.Name)
+}
+
+func (s *GiteaTestSuite) TestUpdateCredentialsFailsForNonExistingCredentials() {
+ ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+ updateCredParams := params.UpdateGiteaCredentialsParams{
+ Description: nil,
+ }
+
+ _, err := s.db.UpdateGiteaCredentials(ctx, 1, updateCredParams)
+ s.Require().Error(err)
+ s.Require().ErrorIs(err, runnerErrors.ErrNotFound)
+}
+
+func (s *GiteaTestSuite) TestUpdateCredentialsFailsIfCredentialsAreOwnedByNonAdminUser() {
+ ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+ testUser := garmTesting.CreateGARMTestUser(ctx, "test-user5", s.db, s.T())
+ testUserCtx := auth.PopulateContext(context.Background(), testUser, nil)
+
+ credParams := params.CreateGiteaCredentialsParams{
+ Name: testCredsName,
+ Description: testCredsDescription,
+ Endpoint: s.giteaEndpoint.Name,
+ AuthType: params.ForgeAuthTypePAT,
+ PAT: params.GithubPAT{
+ OAuth2Token: "test-creds5",
+ },
+ }
+
+ creds, err := s.db.CreateGiteaCredentials(ctx, credParams)
+ s.Require().NoError(err)
+ s.Require().NotNil(creds)
+
+ newDescription := "new params desc"
+ updateCredParams := params.UpdateGiteaCredentialsParams{
+ Description: &newDescription,
+ }
+
+ _, err = s.db.UpdateGiteaCredentials(testUserCtx, creds.ID, updateCredParams)
+ s.Require().Error(err)
+ s.Require().ErrorIs(err, runnerErrors.ErrNotFound)
+}
+
+func (s *GiteaTestSuite) TestAdminUserCanUpdateAnyGiteaCredentials() {
+ ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+ testUser := garmTesting.CreateGARMTestUser(ctx, "test-user5", s.db, s.T())
+ testUserCtx := auth.PopulateContext(context.Background(), testUser, nil)
+
+ credParams := params.CreateGiteaCredentialsParams{
+ Name: testCredsName,
+ Description: testCredsDescription,
+ Endpoint: s.giteaEndpoint.Name,
+ AuthType: params.ForgeAuthTypePAT,
+ PAT: params.GithubPAT{
+ OAuth2Token: "test-creds5",
+ },
+ }
+
+ creds, err := s.db.CreateGiteaCredentials(testUserCtx, credParams)
+ s.Require().NoError(err)
+ s.Require().NotNil(creds)
+
+ newDescription := "another new description"
+ updateCredParams := params.UpdateGiteaCredentialsParams{
+ Description: &newDescription,
+ }
+
+ newCreds, err := s.db.UpdateGiteaCredentials(ctx, creds.ID, updateCredParams)
+ s.Require().NoError(err)
+ s.Require().Equal(newDescription, newCreds.Description)
+}
+
+func (s *GiteaTestSuite) TestDeleteCredentialsWithOrgsOrReposFails() {
+ ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+ credParams := params.CreateGiteaCredentialsParams{
+ Name: testCredsName,
+ Description: testCredsDescription,
+ Endpoint: s.giteaEndpoint.Name,
+ AuthType: params.ForgeAuthTypePAT,
+ PAT: params.GithubPAT{
+ OAuth2Token: "test-creds5",
+ },
+ }
+
+ creds, err := s.db.CreateGiteaCredentials(ctx, credParams)
+ s.Require().NoError(err)
+ s.Require().NotNil(creds)
+
+ repo, err := s.db.CreateRepository(ctx, "test-owner", "test-repo", creds, "superSecret@123BlaBla", params.PoolBalancerTypeRoundRobin)
+ s.Require().NoError(err)
+ s.Require().NotNil(repo)
+
+ err = s.db.DeleteGiteaCredentials(ctx, creds.ID)
+ s.Require().Error(err)
+ s.Require().ErrorIs(err, runnerErrors.ErrBadRequest)
+
+ err = s.db.DeleteRepository(ctx, repo.ID)
+ s.Require().NoError(err)
+
+ org, err := s.db.CreateOrganization(ctx, "test-org", creds, "superSecret@123BlaBla", params.PoolBalancerTypeRoundRobin)
+ s.Require().NoError(err)
+ s.Require().NotNil(org)
+
+ err = s.db.DeleteGiteaCredentials(ctx, creds.ID)
+ s.Require().Error(err)
+ s.Require().ErrorIs(err, runnerErrors.ErrBadRequest)
+
+ err = s.db.DeleteOrganization(ctx, org.ID)
+ s.Require().NoError(err)
+
+ err = s.db.DeleteGiteaCredentials(ctx, creds.ID)
+ s.Require().NoError(err)
+
+ _, err = s.db.GetGiteaCredentials(ctx, creds.ID, true)
+ s.Require().Error(err)
+ s.Require().ErrorIs(err, runnerErrors.ErrNotFound)
+}
+
+func (s *GiteaTestSuite) TestDeleteGiteaEndpointFailsWithOrgsReposOrCredentials() {
+ ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+ endpointParams := params.CreateGiteaEndpointParams{
+ Name: "deleteme",
+ Description: testEndpointDescription,
+ APIBaseURL: testAPIBaseURL,
+ BaseURL: testBaseURL,
+ }
+
+ ep, err := s.db.CreateGiteaEndpoint(ctx, endpointParams)
+ s.Require().NoError(err)
+ s.Require().NotNil(ep)
+
+ credParams := params.CreateGiteaCredentialsParams{
+ Name: testCredsName,
+ Description: testCredsDescription,
+ Endpoint: ep.Name,
+ AuthType: params.ForgeAuthTypePAT,
+ PAT: params.GithubPAT{
+ OAuth2Token: "test-creds5",
+ },
+ }
+
+ creds, err := s.db.CreateGiteaCredentials(ctx, credParams)
+ s.Require().NoError(err)
+ s.Require().NotNil(creds)
+
+ repo, err := s.db.CreateRepository(ctx, "test-owner", "test-repo", creds, "superSecret@123BlaBla", params.PoolBalancerTypeRoundRobin)
+ s.Require().NoError(err)
+ s.Require().NotNil(repo)
+
+ badRequest := &runnerErrors.BadRequestError{}
+ err = s.db.DeleteGiteaEndpoint(ctx, ep.Name)
+ s.Require().Error(err)
+ s.Require().ErrorAs(err, &badRequest)
+
+ err = s.db.DeleteRepository(ctx, repo.ID)
+ s.Require().NoError(err)
+
+ org, err := s.db.CreateOrganization(ctx, "test-org", creds, "superSecret@123BlaBla", params.PoolBalancerTypeRoundRobin)
+ s.Require().NoError(err)
+ s.Require().NotNil(org)
+
+ err = s.db.DeleteGiteaEndpoint(ctx, ep.Name)
+ s.Require().Error(err)
+ s.Require().ErrorAs(err, &badRequest)
+
+ err = s.db.DeleteOrganization(ctx, org.ID)
+ s.Require().NoError(err)
+
+ err = s.db.DeleteGiteaCredentials(ctx, creds.ID)
+ s.Require().NoError(err)
+
+ err = s.db.DeleteGiteaEndpoint(ctx, ep.Name)
+ s.Require().NoError(err)
+
+ _, err = s.db.GetGiteaEndpoint(ctx, ep.Name)
+ s.Require().Error(err)
+ s.Require().ErrorIs(err, runnerErrors.ErrNotFound)
+}
+
+func (s *GiteaTestSuite) TestUpdateEndpointURLsFailsIfCredentialsAreAssociated() {
+ ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+ createEpParams := params.CreateGiteaEndpointParams{
+ Name: "deleteme",
+ Description: testEndpointDescription,
+ APIBaseURL: testAPIBaseURL,
+ BaseURL: testBaseURL,
+ }
+
+ endpoint, err := s.db.CreateGiteaEndpoint(ctx, createEpParams)
+ s.Require().NoError(err)
+ s.Require().NotNil(endpoint)
+
+ credParams := params.CreateGiteaCredentialsParams{
+ Name: testCredsName,
+ Description: testCredsDescription,
+ Endpoint: testEndpointName,
+ AuthType: params.ForgeAuthTypePAT,
+ PAT: params.GithubPAT{
+ OAuth2Token: "test",
+ },
+ }
+
+ _, err = s.db.CreateGiteaCredentials(ctx, credParams)
+ s.Require().NoError(err)
+
+ newDescription := "new gitea description"
+ newBaseURL := "https://new-gitea.example.com"
+ newAPIBaseURL := "https://new-gitea-api.example.com"
+ updateEpParams := params.UpdateGiteaEndpointParams{
+ BaseURL: &newBaseURL,
+ }
+
+ _, err = s.db.UpdateGiteaEndpoint(ctx, testEndpointName, updateEpParams)
+ s.Require().Error(err)
+ s.Require().ErrorIs(err, runnerErrors.ErrBadRequest)
+ s.Require().EqualError(err, "error updating gitea endpoint: cannot update endpoint URLs with existing credentials: invalid request")
+
+ updateEpParams = params.UpdateGiteaEndpointParams{
+ APIBaseURL: &newAPIBaseURL,
+ }
+ _, err = s.db.UpdateGiteaEndpoint(ctx, testEndpointName, updateEpParams)
+ s.Require().Error(err)
+ s.Require().ErrorIs(err, runnerErrors.ErrBadRequest)
+ s.Require().EqualError(err, "error updating gitea endpoint: cannot update endpoint URLs with existing credentials: invalid request")
+
+ updateEpParams = params.UpdateGiteaEndpointParams{
+ Description: &newDescription,
+ }
+ ret, err := s.db.UpdateGiteaEndpoint(ctx, testEndpointName, updateEpParams)
+ s.Require().NoError(err)
+ s.Require().Equal(newDescription, ret.Description)
+}
+
+func (s *GiteaTestSuite) TestListGiteaEndpoints() {
+ ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+ createEpParams := params.CreateGiteaEndpointParams{
+ Name: "deleteme",
+ Description: testEndpointDescription,
+ APIBaseURL: testAPIBaseURL,
+ BaseURL: testBaseURL,
+ }
+
+ _, err := s.db.CreateGiteaEndpoint(ctx, createEpParams)
+ s.Require().NoError(err)
+
+ endpoints, err := s.db.ListGiteaEndpoints(ctx)
+ s.Require().NoError(err)
+ s.Require().Len(endpoints, 2)
+}
+
+func TestGiteaTestSuite(t *testing.T) {
+ suite.Run(t, new(GiteaTestSuite))
+}
diff --git a/database/sql/github.go b/database/sql/github.go
new file mode 100644
index 00000000..626d138f
--- /dev/null
+++ b/database/sql/github.go
@@ -0,0 +1,513 @@
+// Copyright 2024 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package sql
+
+import (
+ "context"
+ "errors"
+ "fmt"
+
+ "gorm.io/gorm"
+
+ runnerErrors "github.com/cloudbase/garm-provider-common/errors"
+ "github.com/cloudbase/garm/auth"
+ "github.com/cloudbase/garm/database/common"
+ "github.com/cloudbase/garm/params"
+)
+
+func (s *sqlDatabase) CreateGithubEndpoint(_ context.Context, param params.CreateGithubEndpointParams) (ghEndpoint params.ForgeEndpoint, err error) {
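+ // The deferred notify below fires only on success, emitting a create
+ // event for any subscribed watchers.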
+ defer func() {
+ if err == nil {
+ s.sendNotify(common.GithubEndpointEntityType, common.CreateOperation, ghEndpoint)
+ }
+ }()
+ var endpoint GithubEndpoint
+ err = s.conn.Transaction(func(tx *gorm.DB) error {
+ if err := tx.Where("name = ?", param.Name).First(&endpoint).Error; err == nil {
+ return fmt.Errorf("error github endpoint already exists: %w", runnerErrors.ErrDuplicateEntity)
+ }
+ endpoint = GithubEndpoint{
+ Name: param.Name,
+ Description: param.Description,
+ APIBaseURL: param.APIBaseURL,
+ BaseURL: param.BaseURL,
+ UploadBaseURL: param.UploadBaseURL,
+ CACertBundle: param.CACertBundle,
+ EndpointType: params.GithubEndpointType,
+ }
+
+ if err := tx.Create(&endpoint).Error; err != nil {
+ return fmt.Errorf("error creating github endpoint: %w", err)
+ }
+ return nil
+ })
+ if err != nil {
+ return params.ForgeEndpoint{}, fmt.Errorf("error creating github endpoint: %w", err)
+ }
+ ghEndpoint, err = s.sqlToCommonGithubEndpoint(endpoint)
+ if err != nil {
+ return params.ForgeEndpoint{}, fmt.Errorf("error converting github endpoint: %w", err)
+ }
+ return ghEndpoint, nil
+}
+
+func (s *sqlDatabase) ListGithubEndpoints(_ context.Context) ([]params.ForgeEndpoint, error) {
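+ // GitHub and Gitea endpoints share the same table; the endpoint_type
+ // column discriminates between the two forges.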
+ var endpoints []GithubEndpoint
+ err := s.conn.Where("endpoint_type = ?", params.GithubEndpointType).Find(&endpoints).Error
+ if err != nil {
+ return nil, fmt.Errorf("error fetching github endpoints: %w", err)
+ }
+
+ var ret []params.ForgeEndpoint
+ for _, ep := range endpoints {
+ commonEp, err := s.sqlToCommonGithubEndpoint(ep)
+ if err != nil {
+ return nil, fmt.Errorf("error converting github endpoint: %w", err)
+ }
+ ret = append(ret, commonEp)
+ }
+ return ret, nil
+}
+
+func (s *sqlDatabase) UpdateGithubEndpoint(_ context.Context, name string, param params.UpdateGithubEndpointParams) (ghEndpoint params.ForgeEndpoint, err error) {
+ defer func() {
+ if err == nil {
+ s.sendNotify(common.GithubEndpointEntityType, common.UpdateOperation, ghEndpoint)
+ }
+ }()
+ var endpoint GithubEndpoint
+ err = s.conn.Transaction(func(tx *gorm.DB) error {
+ if err := tx.Where("name = ? and endpoint_type = ?", name, params.GithubEndpointType).First(&endpoint).Error; err != nil {
+ if errors.Is(err, gorm.ErrRecordNotFound) {
+ return fmt.Errorf("error github endpoint not found: %w", runnerErrors.ErrNotFound)
+ }
+ return fmt.Errorf("error fetching github endpoint: %w", err)
+ }
+
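+ // Refuse to change endpoint URLs while credentials still reference this
+ // endpoint; doing so would silently repoint those credentials at a
+ // different instance.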
+ var credsCount int64
+ if err := tx.Model(&GithubCredentials{}).Where("endpoint_name = ?", endpoint.Name).Count(&credsCount).Error; err != nil {
+ if !errors.Is(err, gorm.ErrRecordNotFound) {
+ return fmt.Errorf("error fetching github credentials: %w", err)
+ }
+ }
+ if credsCount > 0 && (param.APIBaseURL != nil || param.BaseURL != nil || param.UploadBaseURL != nil) {
+ return fmt.Errorf("cannot update endpoint URLs with existing credentials: %w", runnerErrors.ErrBadRequest)
+ }
+
+ if param.APIBaseURL != nil {
+ endpoint.APIBaseURL = *param.APIBaseURL
+ }
+
+ if param.BaseURL != nil {
+ endpoint.BaseURL = *param.BaseURL
+ }
+
+ if param.UploadBaseURL != nil {
+ endpoint.UploadBaseURL = *param.UploadBaseURL
+ }
+
+ if param.CACertBundle != nil {
+ endpoint.CACertBundle = param.CACertBundle
+ }
+
+ if param.Description != nil {
+ endpoint.Description = *param.Description
+ }
+
+ if err := tx.Save(&endpoint).Error; err != nil {
+ return fmt.Errorf("error updating github endpoint: %w", err)
+ }
+
+ return nil
+ })
+ if err != nil {
+ return params.ForgeEndpoint{}, fmt.Errorf("error updating github endpoint: %w", err)
+ }
+ ghEndpoint, err = s.sqlToCommonGithubEndpoint(endpoint)
+ if err != nil {
+ return params.ForgeEndpoint{}, fmt.Errorf("error converting github endpoint: %w", err)
+ }
+ return ghEndpoint, nil
+}
+
+func (s *sqlDatabase) GetGithubEndpoint(_ context.Context, name string) (params.ForgeEndpoint, error) {
+ var endpoint GithubEndpoint
+
+ err := s.conn.Where("name = ? and endpoint_type = ?", name, params.GithubEndpointType).First(&endpoint).Error
+ if err != nil {
+ if errors.Is(err, gorm.ErrRecordNotFound) {
+ return params.ForgeEndpoint{}, fmt.Errorf("github endpoint not found: %w", runnerErrors.ErrNotFound)
+ }
+ return params.ForgeEndpoint{}, fmt.Errorf("error fetching github endpoint: %w", err)
+ }
+
+ return s.sqlToCommonGithubEndpoint(endpoint)
+}
+
+func (s *sqlDatabase) DeleteGithubEndpoint(_ context.Context, name string) (err error) {
+ defer func() {
+ if err == nil {
+ s.sendNotify(common.GithubEndpointEntityType, common.DeleteOperation, params.ForgeEndpoint{Name: name})
+ }
+ }()
+ err = s.conn.Transaction(func(tx *gorm.DB) error {
+ var endpoint GithubEndpoint
+ if err := tx.Where("name = ? and endpoint_type = ?", name, params.GithubEndpointType).First(&endpoint).Error; err != nil {
+ if errors.Is(err, gorm.ErrRecordNotFound) {
+ return nil
+ }
+ return fmt.Errorf("error fetching github endpoint: %w", err)
+ }
+
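+ // An endpoint can only be removed once nothing references it. Count the
+ // credentials, repos, orgs and enterprises that still point at it.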
+ var credsCount int64
+ if err := tx.Model(&GithubCredentials{}).Where("endpoint_name = ?", endpoint.Name).Count(&credsCount).Error; err != nil {
+ if !errors.Is(err, gorm.ErrRecordNotFound) {
+ return fmt.Errorf("error fetching github credentials: %w", err)
+ }
+ }
+
+ var repoCnt int64
+ if err := tx.Model(&Repository{}).Where("endpoint_name = ?", endpoint.Name).Count(&repoCnt).Error; err != nil {
+ if !errors.Is(err, gorm.ErrRecordNotFound) {
+ return fmt.Errorf("error fetching github repositories: %w", err)
+ }
+ }
+
+ var orgCnt int64
+ if err := tx.Model(&Organization{}).Where("endpoint_name = ?", endpoint.Name).Count(&orgCnt).Error; err != nil {
+ if !errors.Is(err, gorm.ErrRecordNotFound) {
+ return fmt.Errorf("error fetching github organizations: %w", err)
+ }
+ }
+
+ var entCnt int64
+ if err := tx.Model(&Enterprise{}).Where("endpoint_name = ?", endpoint.Name).Count(&entCnt).Error; err != nil {
+ if !errors.Is(err, gorm.ErrRecordNotFound) {
+ return fmt.Errorf("error fetching github enterprises: %w", err)
+ }
+ }
+
+ if credsCount > 0 || repoCnt > 0 || orgCnt > 0 || entCnt > 0 {
+ return fmt.Errorf("cannot delete endpoint with associated entities: %w", runnerErrors.ErrBadRequest)
+ }
+
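+ // Unscoped() performs a hard delete, bypassing gorm's soft-delete
+ // behavior.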
+ if err := tx.Unscoped().Delete(&endpoint).Error; err != nil {
+ return fmt.Errorf("error deleting github endpoint: %w", err)
+ }
+ return nil
+ })
+ if err != nil {
+ return fmt.Errorf("error deleting github endpoint: %w", err)
+ }
+ return nil
+}
+
+func (s *sqlDatabase) CreateGithubCredentials(ctx context.Context, param params.CreateGithubCredentialsParams) (ghCreds params.ForgeCredentials, err error) {
+ userID, err := getUIDFromContext(ctx)
+ if err != nil {
+ return params.ForgeCredentials{}, fmt.Errorf("error creating github credentials: %w", err)
+ }
+ if param.Endpoint == "" {
+ return params.ForgeCredentials{}, fmt.Errorf("endpoint name is required: %w", runnerErrors.ErrBadRequest)
+ }
+
+ defer func() {
+ if err == nil {
+ s.sendNotify(common.GithubCredentialsEntityType, common.CreateOperation, ghCreds)
+ }
+ }()
+ var creds GithubCredentials
+ err = s.conn.Transaction(func(tx *gorm.DB) error {
+ var endpoint GithubEndpoint
+ if err := tx.Where("name = ? and endpoint_type = ?", param.Endpoint, params.GithubEndpointType).First(&endpoint).Error; err != nil {
+ if errors.Is(err, gorm.ErrRecordNotFound) {
+ return fmt.Errorf("github endpoint not found: %w", runnerErrors.ErrNotFound)
+ }
+ return fmt.Errorf("error fetching github endpoint: %w", err)
+ }
+
+ if err := tx.Where("name = ? and user_id = ?", param.Name, userID).First(&creds).Error; err == nil {
+ return fmt.Errorf("github credentials already exists: %w", runnerErrors.ErrDuplicateEntity)
+ }
+
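+ // Only the payload matching the requested auth type is marshaled and
+ // sealed (encrypted) before being persisted.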
+ var data []byte
+ var err error
+ switch param.AuthType {
+ case params.ForgeAuthTypePAT:
+ data, err = s.marshalAndSeal(param.PAT)
+ case params.ForgeAuthTypeApp:
+ data, err = s.marshalAndSeal(param.App)
+ default:
+ return fmt.Errorf("invalid auth type: %w", runnerErrors.ErrBadRequest)
+ }
+ if err != nil {
+ return fmt.Errorf("error marshaling and sealing credentials: %w", err)
+ }
+
+ creds = GithubCredentials{
+ Name: param.Name,
+ Description: param.Description,
+ EndpointName: &endpoint.Name,
+ AuthType: param.AuthType,
+ Payload: data,
+ UserID: &userID,
+ }
+
+ if err := tx.Create(&creds).Error; err != nil {
+ return fmt.Errorf("error creating github credentials: %w", err)
+ }
+ // Skip making an extra query.
+ creds.Endpoint = endpoint
+
+ return nil
+ })
+ if err != nil {
+ return params.ForgeCredentials{}, fmt.Errorf("error creating github credentials: %w", err)
+ }
+ ghCreds, err = s.sqlToCommonForgeCredentials(creds)
+ if err != nil {
+ return params.ForgeCredentials{}, fmt.Errorf("error converting github credentials: %w", err)
+ }
+ return ghCreds, nil
+}
+
+func (s *sqlDatabase) getGithubCredentialsByName(ctx context.Context, tx *gorm.DB, name string, detailed bool) (GithubCredentials, error) {
+ var creds GithubCredentials
+ q := tx.Preload("Endpoint")
+
+ if detailed {
+ q = q.
+ Preload("Repositories").
+ Preload("Repositories.Credentials").
+ Preload("Organizations").
+ Preload("Organizations.Credentials").
+ Preload("Enterprises").
+ Preload("Enterprises.Credentials")
+ }
+
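+ // Lookups by name are always scoped to the calling user; credential
+ // names are only unique per user.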
+ userID, err := getUIDFromContext(ctx)
+ if err != nil {
+ return GithubCredentials{}, fmt.Errorf("error fetching github credentials: %w", err)
+ }
+ q = q.Where("user_id = ?", userID)
+
+ err = q.Where("name = ?", name).First(&creds).Error
+ if err != nil {
+ if errors.Is(err, gorm.ErrRecordNotFound) {
+ return GithubCredentials{}, fmt.Errorf("github credentials not found: %w", runnerErrors.ErrNotFound)
+ }
+ return GithubCredentials{}, fmt.Errorf("error fetching github credentials: %w", err)
+ }
+
+ return creds, nil
+}
+
+func (s *sqlDatabase) GetGithubCredentialsByName(ctx context.Context, name string, detailed bool) (params.ForgeCredentials, error) {
+ creds, err := s.getGithubCredentialsByName(ctx, s.conn, name, detailed)
+ if err != nil {
+ return params.ForgeCredentials{}, fmt.Errorf("error fetching github credentials: %w", err)
+ }
+ return s.sqlToCommonForgeCredentials(creds)
+}
+
+func (s *sqlDatabase) GetGithubCredentials(ctx context.Context, id uint, detailed bool) (params.ForgeCredentials, error) {
+ var creds GithubCredentials
+ q := s.conn.Preload("Endpoint")
+
+ if detailed {
+ q = q.
+ Preload("Repositories").
+ Preload("Repositories.Credentials").
+ Preload("Organizations").
+ Preload("Organizations.Credentials").
+ Preload("Enterprises").
+ Preload("Enterprises.Credentials")
+ }
+
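+ // Admins may fetch any credentials by ID; regular users are restricted
+ // to their own.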
+ if !auth.IsAdmin(ctx) {
+ userID, err := getUIDFromContext(ctx)
+ if err != nil {
+ return params.ForgeCredentials{}, fmt.Errorf("error fetching github credentials: %w", err)
+ }
+ q = q.Where("user_id = ?", userID)
+ }
+
+ err := q.Where("id = ?", id).First(&creds).Error
+ if err != nil {
+ if errors.Is(err, gorm.ErrRecordNotFound) {
+ return params.ForgeCredentials{}, fmt.Errorf("github credentials not found: %w", runnerErrors.ErrNotFound)
+ }
+ return params.ForgeCredentials{}, fmt.Errorf("error fetching github credentials: %w", err)
+ }
+
+ return s.sqlToCommonForgeCredentials(creds)
+}
+
+func (s *sqlDatabase) ListGithubCredentials(ctx context.Context) ([]params.ForgeCredentials, error) {
+ q := s.conn.Preload("Endpoint")
+ if !auth.IsAdmin(ctx) {
+ userID, err := getUIDFromContext(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("error fetching github credentials: %w", err)
+ }
+ q = q.Where("user_id = ?", userID)
+ }
+
+ var creds []GithubCredentials
+ err := q.Preload("Endpoint").Find(&creds).Error
+ if err != nil {
+ return nil, fmt.Errorf("error fetching github credentials: %w", err)
+ }
+
+ var ret []params.ForgeCredentials
+ for _, c := range creds {
+ commonCreds, err := s.sqlToCommonForgeCredentials(c)
+ if err != nil {
+ return nil, fmt.Errorf("error converting github credentials: %w", err)
+ }
+ ret = append(ret, commonCreds)
+ }
+ return ret, nil
+}
+
+func (s *sqlDatabase) UpdateGithubCredentials(ctx context.Context, id uint, param params.UpdateGithubCredentialsParams) (ghCreds params.ForgeCredentials, err error) {
+ defer func() {
+ if err == nil {
+ s.sendNotify(common.GithubCredentialsEntityType, common.UpdateOperation, ghCreds)
+ }
+ }()
+ var creds GithubCredentials
+ err = s.conn.Transaction(func(tx *gorm.DB) error {
+ q := tx.Preload("Endpoint")
+ if !auth.IsAdmin(ctx) {
+ userID, err := getUIDFromContext(ctx)
+ if err != nil {
+ return fmt.Errorf("error updating github credentials: %w", err)
+ }
+ q = q.Where("user_id = ?", userID)
+ }
+
+ if err := q.Where("id = ?", id).First(&creds).Error; err != nil {
+ if errors.Is(err, gorm.ErrRecordNotFound) {
+ return fmt.Errorf("github credentials not found: %w", runnerErrors.ErrNotFound)
+ }
+ return fmt.Errorf("error fetching github credentials: %w", err)
+ }
+
+ if param.Name != nil {
+ creds.Name = *param.Name
+ }
+ if param.Description != nil {
+ creds.Description = *param.Description
+ }
+
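+ // The auth type of existing credentials is immutable; only the secret
+ // matching the current type may be rotated.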
+ var data []byte
+ var err error
+ switch creds.AuthType {
+ case params.ForgeAuthTypePAT:
+ if param.PAT != nil {
+ data, err = s.marshalAndSeal(param.PAT)
+ }
+
+ if param.App != nil {
+ return fmt.Errorf("cannot update app credentials for PAT: %w", runnerErrors.ErrBadRequest)
+ }
+ case params.ForgeAuthTypeApp:
+ if param.App != nil {
+ data, err = s.marshalAndSeal(param.App)
+ }
+
+ if param.PAT != nil {
+ return fmt.Errorf("cannot update PAT credentials for app: %w", runnerErrors.ErrBadRequest)
+ }
+ default:
+ // This should never happen, unless there was a bug in the DB migration code,
+ // or the DB was manually modified.
+ return fmt.Errorf("invalid auth type: %w", runnerErrors.ErrBadRequest)
+ }
+
+ if err != nil {
+ return fmt.Errorf("error marshaling and sealing credentials: %w", err)
+ }
+ if len(data) > 0 {
+ creds.Payload = data
+ }
+
+ if err := tx.Save(&creds).Error; err != nil {
+ return fmt.Errorf("error updating github credentials: %w", err)
+ }
+ return nil
+ })
+ if err != nil {
+ return params.ForgeCredentials{}, fmt.Errorf("error updating github credentials: %w", err)
+ }
+
+ ghCreds, err = s.sqlToCommonForgeCredentials(creds)
+ if err != nil {
+ return params.ForgeCredentials{}, fmt.Errorf("error converting github credentials: %w", err)
+ }
+ return ghCreds, nil
+}
+
+func (s *sqlDatabase) DeleteGithubCredentials(ctx context.Context, id uint) (err error) {
+ var name string
+ defer func() {
+ if err == nil {
+ s.sendNotify(common.GithubCredentialsEntityType, common.DeleteOperation, params.ForgeCredentials{ID: id, Name: name})
+ }
+ }()
+ err = s.conn.Transaction(func(tx *gorm.DB) error {
+ q := tx.Where("id = ?", id).
+ Preload("Repositories").
+ Preload("Organizations").
+ Preload("Enterprises")
+ if !auth.IsAdmin(ctx) {
+ userID, err := getUIDFromContext(ctx)
+ if err != nil {
+ return fmt.Errorf("error deleting github credentials: %w", err)
+ }
+ q = q.Where("user_id = ?", userID)
+ }
+
+ var creds GithubCredentials
+ err := q.First(&creds).Error
+ if err != nil {
+ if errors.Is(err, gorm.ErrRecordNotFound) {
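+ // Credentials that do not exist, or are not visible to the
+ // caller, are treated as already deleted.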
+ return nil
+ }
+ return fmt.Errorf("error fetching github credentials: %w", err)
+ }
+ name = creds.Name
+
+ if len(creds.Repositories) > 0 {
+ return fmt.Errorf("cannot delete credentials with repositories: %w", runnerErrors.ErrBadRequest)
+ }
+ if len(creds.Organizations) > 0 {
+ return fmt.Errorf("cannot delete credentials with organizations: %w", runnerErrors.ErrBadRequest)
+ }
+ if len(creds.Enterprises) > 0 {
+ return fmt.Errorf("cannot delete credentials with enterprises: %w", runnerErrors.ErrBadRequest)
+ }
+
+ if err := tx.Unscoped().Delete(&creds).Error; err != nil {
+ return fmt.Errorf("error deleting github credentials: %w", err)
+ }
+ return nil
+ })
+ if err != nil {
+ return fmt.Errorf("error deleting github credentials: %w", err)
+ }
+ return nil
+}
diff --git a/database/sql/github_test.go b/database/sql/github_test.go
new file mode 100644
index 00000000..ae3a3954
--- /dev/null
+++ b/database/sql/github_test.go
@@ -0,0 +1,1041 @@
+// Copyright 2024 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package sql
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "os"
+ "testing"
+
+ "github.com/stretchr/testify/suite"
+
+ runnerErrors "github.com/cloudbase/garm-provider-common/errors"
+ "github.com/cloudbase/garm/auth"
+ "github.com/cloudbase/garm/config"
+ "github.com/cloudbase/garm/database/common"
+ garmTesting "github.com/cloudbase/garm/internal/testing"
+ "github.com/cloudbase/garm/params"
+)
+
+const (
+ testUploadBaseURL string = "https://uploads.example.com"
+ testBaseURL string = "https://example.com"
+ testAPIBaseURL string = "https://api.example.com"
+ testEndpointName string = "test-endpoint"
+ alternetTestEndpointName string = "test-endpoint-alternate"
+ testEndpointDescription string = "test description"
+ testCredsName string = "test-creds"
+ testCredsDescription string = "test creds"
+ defaultGithubEndpoint string = "github.com"
+)
+
+type GithubTestSuite struct {
+ suite.Suite
+
+ db common.Store
+}
+
+func (s *GithubTestSuite) SetupTest() {
+ db, err := NewSQLDatabase(context.Background(), garmTesting.GetTestSqliteDBConfig(s.T()))
+ if err != nil {
+ s.FailNow(fmt.Sprintf("failed to create db connection: %s", err))
+ }
+ s.db = db
+}
+
+func (s *GithubTestSuite) TestDefaultEndpointGetsCreatedAutomaticallyIfNoOtherEndpointExists() {
+ ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+ endpoint, err := s.db.GetGithubEndpoint(ctx, defaultGithubEndpoint)
+ s.Require().NoError(err)
+ s.Require().NotNil(endpoint)
+}
+
+func (s *GithubTestSuite) TestDeletingDefaultEndpointWorksIfNoCredentials() {
+ ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+ err := s.db.DeleteGithubEndpoint(ctx, defaultGithubEndpoint)
+ s.Require().NoError(err)
+}
+
+func (s *GithubTestSuite) TestCreatingEndpoint() {
+ ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+ createEpParams := params.CreateGithubEndpointParams{
+ Name: testEndpointName,
+ Description: testEndpointDescription,
+ APIBaseURL: testAPIBaseURL,
+ UploadBaseURL: testUploadBaseURL,
+ BaseURL: testBaseURL,
+ }
+
+ endpoint, err := s.db.CreateGithubEndpoint(ctx, createEpParams)
+ s.Require().NoError(err)
+ s.Require().NotNil(endpoint)
+ s.Require().Equal(testEndpointName, endpoint.Name)
+}
+
+func (s *GithubTestSuite) TestCreatingDuplicateEndpointFails() {
+ ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+ createEpParams := params.CreateGithubEndpointParams{
+ Name: testEndpointName,
+ Description: testEndpointDescription,
+ APIBaseURL: testAPIBaseURL,
+ UploadBaseURL: testUploadBaseURL,
+ BaseURL: testBaseURL,
+ }
+
+ _, err := s.db.CreateGithubEndpoint(ctx, createEpParams)
+ s.Require().NoError(err)
+
+ _, err = s.db.CreateGithubEndpoint(ctx, createEpParams)
+ s.Require().Error(err)
+ s.Require().ErrorIs(err, runnerErrors.ErrDuplicateEntity)
+}
+
+func (s *GithubTestSuite) TestGetEndpoint() {
+ ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+ endpoint, err := s.db.GetGithubEndpoint(ctx, defaultGithubEndpoint)
+ s.Require().NoError(err)
+ s.Require().NotNil(endpoint)
+ s.Require().Equal(defaultGithubEndpoint, endpoint.Name)
+}
+
+func (s *GithubTestSuite) TestGetNonExistingEndpointFailsWithNotFoundError() {
+ ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+ _, err := s.db.GetGithubEndpoint(ctx, "non-existing")
+ s.Require().Error(err)
+ s.Require().ErrorIs(err, runnerErrors.ErrNotFound)
+}
+
+func (s *GithubTestSuite) TestDeletingNonExistingEndpointIsANoop() {
+ ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+ err := s.db.DeleteGithubEndpoint(ctx, "non-existing")
+ s.Require().NoError(err)
+}
+
+func (s *GithubTestSuite) TestDeletingEndpoint() {
+ ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+ createEpParams := params.CreateGithubEndpointParams{
+ Name: testEndpointName,
+ Description: testEndpointDescription,
+ APIBaseURL: testAPIBaseURL,
+ UploadBaseURL: testUploadBaseURL,
+ BaseURL: testBaseURL,
+ }
+
+ endpoint, err := s.db.CreateGithubEndpoint(ctx, createEpParams)
+ s.Require().NoError(err)
+ s.Require().NotNil(endpoint)
+
+ err = s.db.DeleteGithubEndpoint(ctx, testEndpointName)
+ s.Require().NoError(err)
+
+ _, err = s.db.GetGithubEndpoint(ctx, testEndpointName)
+ s.Require().Error(err)
+ s.Require().ErrorIs(err, runnerErrors.ErrNotFound)
+}
+
+func (s *GithubTestSuite) TestDeleteGithubEndpointFailsWhenCredentialsExist() {
+ ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+ createEpParams := params.CreateGithubEndpointParams{
+ Name: testEndpointName,
+ Description: testEndpointDescription,
+ APIBaseURL: testAPIBaseURL,
+ UploadBaseURL: testUploadBaseURL,
+ BaseURL: testBaseURL,
+ }
+
+ endpoint, err := s.db.CreateGithubEndpoint(ctx, createEpParams)
+ s.Require().NoError(err)
+ s.Require().NotNil(endpoint)
+
+ credParams := params.CreateGithubCredentialsParams{
+ Name: testCredsName,
+ Description: testCredsDescription,
+ Endpoint: testEndpointName,
+ AuthType: params.ForgeAuthTypePAT,
+ PAT: params.GithubPAT{
+ OAuth2Token: "test",
+ },
+ }
+
+ _, err = s.db.CreateGithubCredentials(ctx, credParams)
+ s.Require().NoError(err)
+
+ err = s.db.DeleteGithubEndpoint(ctx, testEndpointName)
+ s.Require().Error(err)
+ s.Require().ErrorIs(err, runnerErrors.ErrBadRequest)
+}
+
+func (s *GithubTestSuite) TestUpdateEndpoint() {
+ ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+ createEpParams := params.CreateGithubEndpointParams{
+ Name: testEndpointName,
+ Description: testEndpointDescription,
+ APIBaseURL: testAPIBaseURL,
+ UploadBaseURL: testUploadBaseURL,
+ BaseURL: testBaseURL,
+ }
+
+ endpoint, err := s.db.CreateGithubEndpoint(ctx, createEpParams)
+ s.Require().NoError(err)
+ s.Require().NotNil(endpoint)
+
+ newDescription := "the new description"
+ newAPIBaseURL := "https://new-api.example.com"
+ newUploadBaseURL := "https://new-uploads.example.com"
+ newBaseURL := "https://new.example.com"
+ caCertBundle, err := os.ReadFile("../../testdata/certs/srv-pub.pem")
+ s.Require().NoError(err)
+ updateEpParams := params.UpdateGithubEndpointParams{
+ Description: &newDescription,
+ APIBaseURL: &newAPIBaseURL,
+ UploadBaseURL: &newUploadBaseURL,
+ BaseURL: &newBaseURL,
+ CACertBundle: caCertBundle,
+ }
+
+ updatedEndpoint, err := s.db.UpdateGithubEndpoint(ctx, testEndpointName, updateEpParams)
+ s.Require().NoError(err)
+ s.Require().NotNil(updatedEndpoint)
+ s.Require().Equal(newDescription, updatedEndpoint.Description)
+ s.Require().Equal(newAPIBaseURL, updatedEndpoint.APIBaseURL)
+ s.Require().Equal(newUploadBaseURL, updatedEndpoint.UploadBaseURL)
+ s.Require().Equal(newBaseURL, updatedEndpoint.BaseURL)
+ s.Require().Equal(caCertBundle, updatedEndpoint.CACertBundle)
+}
+
+func (s *GithubTestSuite) TestUpdateEndpointURLsFailsIfCredentialsAreAssociated() {
+ ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+ createEpParams := params.CreateGithubEndpointParams{
+ Name: testEndpointName,
+ Description: testEndpointDescription,
+ APIBaseURL: testAPIBaseURL,
+ UploadBaseURL: testUploadBaseURL,
+ BaseURL: testBaseURL,
+ }
+
+ endpoint, err := s.db.CreateGithubEndpoint(ctx, createEpParams)
+ s.Require().NoError(err)
+ s.Require().NotNil(endpoint)
+
+ credParams := params.CreateGithubCredentialsParams{
+ Name: testCredsName,
+ Description: testCredsDescription,
+ Endpoint: testEndpointName,
+ AuthType: params.ForgeAuthTypePAT,
+ PAT: params.GithubPAT{
+ OAuth2Token: "test",
+ },
+ }
+
+ _, err = s.db.CreateGithubCredentials(ctx, credParams)
+ s.Require().NoError(err)
+
+ newDescription := "new description"
+ newBaseURL := "https://new.example.com"
+ newAPIBaseURL := "https://new-api.example.com"
+ newUploadBaseURL := "https://new-uploads.example.com"
+ updateEpParams := params.UpdateGithubEndpointParams{
+ BaseURL: &newBaseURL,
+ }
+
+ _, err = s.db.UpdateGithubEndpoint(ctx, testEndpointName, updateEpParams)
+ s.Require().Error(err)
+ s.Require().ErrorIs(err, runnerErrors.ErrBadRequest)
+ s.Require().EqualError(err, "error updating github endpoint: cannot update endpoint URLs with existing credentials: invalid request")
+
+ updateEpParams = params.UpdateGithubEndpointParams{
+ UploadBaseURL: &newUploadBaseURL,
+ }
+
+ _, err = s.db.UpdateGithubEndpoint(ctx, testEndpointName, updateEpParams)
+ s.Require().Error(err)
+ s.Require().ErrorIs(err, runnerErrors.ErrBadRequest)
+ s.Require().EqualError(err, "error updating github endpoint: cannot update endpoint URLs with existing credentials: invalid request")
+
+ updateEpParams = params.UpdateGithubEndpointParams{
+ APIBaseURL: &newAPIBaseURL,
+ }
+ _, err = s.db.UpdateGithubEndpoint(ctx, testEndpointName, updateEpParams)
+ s.Require().Error(err)
+ s.Require().ErrorIs(err, runnerErrors.ErrBadRequest)
+ s.Require().EqualError(err, "error updating github endpoint: cannot update endpoint URLs with existing credentials: invalid request")
+
+ updateEpParams = params.UpdateGithubEndpointParams{
+ Description: &newDescription,
+ }
+ ret, err := s.db.UpdateGithubEndpoint(ctx, testEndpointName, updateEpParams)
+ s.Require().NoError(err)
+ s.Require().Equal(newDescription, ret.Description)
+}
+
+func (s *GithubTestSuite) TestUpdatingNonExistingEndpointReturnsNotFoundError() {
+ ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+ newDescription := "test"
+ updateEpParams := params.UpdateGithubEndpointParams{
+ Description: &newDescription,
+ }
+
+ _, err := s.db.UpdateGithubEndpoint(ctx, "non-existing", updateEpParams)
+ s.Require().Error(err)
+ s.Require().ErrorIs(err, runnerErrors.ErrNotFound)
+}
+
+func (s *GithubTestSuite) TestListEndpoints() {
+ ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+ createEpParams := params.CreateGithubEndpointParams{
+ Name: testEndpointName,
+ Description: testEndpointDescription,
+ APIBaseURL: testAPIBaseURL,
+ UploadBaseURL: testUploadBaseURL,
+ BaseURL: testBaseURL,
+ }
+
+ _, err := s.db.CreateGithubEndpoint(ctx, createEpParams)
+ s.Require().NoError(err)
+
+ endpoints, err := s.db.ListGithubEndpoints(ctx)
+ s.Require().NoError(err)
+ s.Require().Len(endpoints, 2)
+}
+
+func (s *GithubTestSuite) TestCreateCredentialsFailsWithUnauthorizedForAnonUser() {
+ ctx := context.Background()
+
+ _, err := s.db.CreateGithubCredentials(ctx, params.CreateGithubCredentialsParams{})
+ s.Require().Error(err)
+ s.Require().ErrorIs(err, runnerErrors.ErrUnauthorized)
+}
+
+func (s *GithubTestSuite) TestCreateCredentialsFailsWhenEndpointNameIsEmpty() {
+ ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+ _, err := s.db.CreateGithubCredentials(ctx, params.CreateGithubCredentialsParams{})
+ s.Require().Error(err)
+ s.Require().ErrorIs(err, runnerErrors.ErrBadRequest)
+ s.Require().Regexp("endpoint name is required", err.Error())
+}
+
+func (s *GithubTestSuite) TestCreateCredentialsFailsWhenEndpointDoesNotExist() {
+ ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+ _, err := s.db.CreateGithubCredentials(ctx, params.CreateGithubCredentialsParams{Endpoint: "non-existing"})
+ s.Require().Error(err)
+ s.Require().ErrorIs(err, runnerErrors.ErrNotFound)
+ s.Require().Regexp("endpoint not found", err.Error())
+}
+
+func (s *GithubTestSuite) TestCreateCredentialsFailsWhenAuthTypeIsInvalid() {
+ ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+ _, err := s.db.CreateGithubCredentials(ctx, params.CreateGithubCredentialsParams{Endpoint: defaultGithubEndpoint, AuthType: "invalid"})
+ s.Require().Error(err)
+ s.Require().ErrorIs(err, runnerErrors.ErrBadRequest)
+ s.Require().Regexp("invalid auth type", err.Error())
+}
+
+func (s *GithubTestSuite) TestCreateCredentials() {
+ ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+ credParams := params.CreateGithubCredentialsParams{
+ Name: testCredsName,
+ Description: testCredsDescription,
+ Endpoint: defaultGithubEndpoint,
+ AuthType: params.ForgeAuthTypePAT,
+ PAT: params.GithubPAT{
+ OAuth2Token: "test",
+ },
+ }
+
+ creds, err := s.db.CreateGithubCredentials(ctx, credParams)
+ s.Require().NoError(err)
+ s.Require().NotNil(creds)
+ s.Require().Equal(credParams.Name, creds.Name)
+ s.Require().Equal(credParams.Description, creds.Description)
+ s.Require().Equal(credParams.Endpoint, creds.Endpoint.Name)
+ s.Require().Equal(credParams.AuthType, creds.AuthType)
+}
+
+func (s *GithubTestSuite) TestCreateCredentialsFailsOnDuplicateCredentials() {
+ ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+ testUser := garmTesting.CreateGARMTestUser(ctx, "testuser", s.db, s.T())
+ testUserCtx := auth.PopulateContext(context.Background(), testUser, nil)
+
+ credParams := params.CreateGithubCredentialsParams{
+ Name: testCredsName,
+ Description: testCredsDescription,
+ Endpoint: defaultGithubEndpoint,
+ AuthType: params.ForgeAuthTypePAT,
+ PAT: params.GithubPAT{
+ OAuth2Token: "test",
+ },
+ }
+
+ _, err := s.db.CreateGithubCredentials(ctx, credParams)
+ s.Require().NoError(err)
+
+ // Creating creds with the same parameters should fail for the same user.
+ _, err = s.db.CreateGithubCredentials(ctx, credParams)
+ s.Require().Error(err)
+ s.Require().ErrorIs(err, runnerErrors.ErrDuplicateEntity)
+
+ // Creating creds with the same parameters should work for different users.
+ _, err = s.db.CreateGithubCredentials(testUserCtx, credParams)
+ s.Require().NoError(err)
+}
+
+func (s *GithubTestSuite) TestNormalUsersCanOnlySeeTheirOwnCredentialsAdminCanSeeAll() {
+ ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+ testUser := garmTesting.CreateGARMTestUser(ctx, "testuser1", s.db, s.T())
+ testUser2 := garmTesting.CreateGARMTestUser(ctx, "testuser2", s.db, s.T())
+ testUserCtx := auth.PopulateContext(context.Background(), testUser, nil)
+ testUser2Ctx := auth.PopulateContext(context.Background(), testUser2, nil)
+
+ credParams := params.CreateGithubCredentialsParams{
+ Name: testCredsName,
+ Description: testCredsDescription,
+ Endpoint: defaultGithubEndpoint,
+ AuthType: params.ForgeAuthTypePAT,
+ PAT: params.GithubPAT{
+ OAuth2Token: "test",
+ },
+ }
+
+ creds, err := s.db.CreateGithubCredentials(ctx, credParams)
+ s.Require().NoError(err)
+ s.Require().NotNil(creds)
+
+ credParams.Name = "test-creds2"
+ creds2, err := s.db.CreateGithubCredentials(testUserCtx, credParams)
+ s.Require().NoError(err)
+ s.Require().NotNil(creds2)
+
+ credParams.Name = "test-creds3"
+ creds3, err := s.db.CreateGithubCredentials(testUser2Ctx, credParams)
+ s.Require().NoError(err)
+ s.Require().NotNil(creds3)
+
+ credsList, err := s.db.ListGithubCredentials(ctx)
+ s.Require().NoError(err)
+ s.Require().Len(credsList, 3)
+
+ credsList, err = s.db.ListGithubCredentials(testUserCtx)
+ s.Require().NoError(err)
+ s.Require().Len(credsList, 1)
+ s.Require().Equal("test-creds2", credsList[0].Name)
+
+ credsList, err = s.db.ListGithubCredentials(testUser2Ctx)
+ s.Require().NoError(err)
+ s.Require().Len(credsList, 1)
+ s.Require().Equal("test-creds3", credsList[0].Name)
+}
+
+func (s *GithubTestSuite) TestGetGithubCredentialsFailsWhenCredentialsDontExist() {
+ ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+ _, err := s.db.GetGithubCredentials(ctx, 1, true)
+ s.Require().Error(err)
+ s.Require().ErrorIs(err, runnerErrors.ErrNotFound)
+
+ _, err = s.db.GetGithubCredentialsByName(ctx, "non-existing", true)
+ s.Require().Error(err)
+ s.Require().ErrorIs(err, runnerErrors.ErrNotFound)
+}
+
+func (s *GithubTestSuite) TestGetGithubCredentialsByNameReturnsOnlyCurrentUserCredentials() {
+ ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+ testUser := garmTesting.CreateGARMTestUser(ctx, "test-user1", s.db, s.T())
+ testUserCtx := auth.PopulateContext(context.Background(), testUser, nil)
+
+ credParams := params.CreateGithubCredentialsParams{
+ Name: testCredsName,
+ Description: testCredsDescription,
+ Endpoint: defaultGithubEndpoint,
+ AuthType: params.ForgeAuthTypePAT,
+ PAT: params.GithubPAT{
+ OAuth2Token: "test",
+ },
+ }
+
+ creds, err := s.db.CreateGithubCredentials(ctx, credParams)
+ s.Require().NoError(err)
+ s.Require().NotNil(creds)
+
+ creds2, err := s.db.CreateGithubCredentials(testUserCtx, credParams)
+ s.Require().NoError(err)
+ s.Require().NotNil(creds2)
+
+ creds2Get, err := s.db.GetGithubCredentialsByName(testUserCtx, testCredsName, true)
+ s.Require().NoError(err)
+ s.Require().NotNil(creds2)
+ s.Require().Equal(testCredsName, creds2Get.Name)
+ s.Require().Equal(creds2.ID, creds2Get.ID)
+
+ credsGet, err := s.db.GetGithubCredentialsByName(ctx, testCredsName, true)
+ s.Require().NoError(err)
+ s.Require().NotNil(creds)
+ s.Require().Equal(testCredsName, credsGet.Name)
+ s.Require().Equal(creds.ID, credsGet.ID)
+
+ // Admin can get any creds by ID
+ credsGet, err = s.db.GetGithubCredentials(ctx, creds2.ID, true)
+ s.Require().NoError(err)
+ s.Require().NotNil(creds2)
+ s.Require().Equal(creds2.ID, credsGet.ID)
+
+ // Normal user cannot get other user creds by ID
+ _, err = s.db.GetGithubCredentials(testUserCtx, creds.ID, true)
+ s.Require().Error(err)
+ s.Require().ErrorIs(err, runnerErrors.ErrNotFound)
+}
+
+func (s *GithubTestSuite) TestGetGithubCredentials() {
+ ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+ credParams := params.CreateGithubCredentialsParams{
+ Name: testCredsName,
+ Description: testCredsDescription,
+ Endpoint: defaultGithubEndpoint,
+ AuthType: params.ForgeAuthTypePAT,
+ PAT: params.GithubPAT{
+ OAuth2Token: "test",
+ },
+ }
+
+ creds, err := s.db.CreateGithubCredentials(ctx, credParams)
+ s.Require().NoError(err)
+ s.Require().NotNil(creds)
+
+ creds2, err := s.db.GetGithubCredentialsByName(ctx, testCredsName, true)
+ s.Require().NoError(err)
+ s.Require().NotNil(creds2)
+ s.Require().Equal(creds.Name, creds2.Name)
+ s.Require().Equal(creds.ID, creds2.ID)
+
+ creds2, err = s.db.GetGithubCredentials(ctx, creds.ID, true)
+ s.Require().NoError(err)
+ s.Require().NotNil(creds2)
+ s.Require().Equal(creds.Name, creds2.Name)
+ s.Require().Equal(creds.ID, creds2.ID)
+}
+
+func (s *GithubTestSuite) TestDeleteGithubCredentials() {
+ ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+ credParams := params.CreateGithubCredentialsParams{
+ Name: testCredsName,
+ Description: testCredsDescription,
+ Endpoint: defaultGithubEndpoint,
+ AuthType: params.ForgeAuthTypePAT,
+ PAT: params.GithubPAT{
+ OAuth2Token: "test",
+ },
+ }
+
+ creds, err := s.db.CreateGithubCredentials(ctx, credParams)
+ s.Require().NoError(err)
+ s.Require().NotNil(creds)
+
+ err = s.db.DeleteGithubCredentials(ctx, creds.ID)
+ s.Require().NoError(err)
+
+ _, err = s.db.GetGithubCredentials(ctx, creds.ID, true)
+ s.Require().Error(err)
+ s.Require().ErrorIs(err, runnerErrors.ErrNotFound)
+}
+
+func (s *GithubTestSuite) TestDeleteGithubCredentialsByNonAdminUser() {
+ ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+ testUser := garmTesting.CreateGARMTestUser(ctx, "test-user4", s.db, s.T())
+ testUserCtx := auth.PopulateContext(context.Background(), testUser, nil)
+
+ credParams := params.CreateGithubCredentialsParams{
+ Name: testCredsName,
+ Description: testCredsDescription,
+ Endpoint: defaultGithubEndpoint,
+ AuthType: params.ForgeAuthTypePAT,
+ PAT: params.GithubPAT{
+ OAuth2Token: "test-creds4",
+ },
+ }
+
+ // Create creds as admin
+ creds, err := s.db.CreateGithubCredentials(ctx, credParams)
+ s.Require().NoError(err)
+ s.Require().NotNil(creds)
+
+ // Deleting non-existent creds returns a nil error. The creds created by the
+ // admin are not visible to the test user, so the delete resolves to not
+ // found, which in turn returns no error.
+ err = s.db.DeleteGithubCredentials(testUserCtx, creds.ID)
+ s.Require().NoError(err)
+
+ // Check that the creds created by the admin are still there.
+ credsGet, err := s.db.GetGithubCredentials(ctx, creds.ID, true)
+ s.Require().NoError(err)
+ s.Require().NotNil(credsGet)
+ s.Require().Equal(creds.ID, credsGet.ID)
+
+ // Create the same creds with the test user.
+ creds2, err := s.db.CreateGithubCredentials(testUserCtx, credParams)
+ s.Require().NoError(err)
+ s.Require().NotNil(creds2)
+
+ // Remove creds created by test user.
+ err = s.db.DeleteGithubCredentials(testUserCtx, creds2.ID)
+ s.Require().NoError(err)
+
+ // The creds created by the test user should be gone.
+ _, err = s.db.GetGithubCredentials(testUserCtx, creds2.ID, true)
+ s.Require().Error(err)
+ s.Require().ErrorIs(err, runnerErrors.ErrNotFound)
+}
+
+func (s *GithubTestSuite) TestDeleteCredentialsFailsIfReposOrgsOrEntitiesUseIt() {
+ ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+ credParams := params.CreateGithubCredentialsParams{
+ Name: testCredsName,
+ Description: testCredsDescription,
+ Endpoint: defaultGithubEndpoint,
+ AuthType: params.ForgeAuthTypePAT,
+ PAT: params.GithubPAT{
+ OAuth2Token: "test",
+ },
+ }
+
+ creds, err := s.db.CreateGithubCredentials(ctx, credParams)
+ s.Require().NoError(err)
+ s.Require().NotNil(creds)
+
+ repo, err := s.db.CreateRepository(ctx, "test-owner", "test-repo", creds, "superSecret@123BlaBla", params.PoolBalancerTypeRoundRobin)
+ s.Require().NoError(err)
+ s.Require().NotNil(repo)
+
+ err = s.db.DeleteGithubCredentials(ctx, creds.ID)
+ s.Require().Error(err)
+ s.Require().ErrorIs(err, runnerErrors.ErrBadRequest)
+
+ err = s.db.DeleteRepository(ctx, repo.ID)
+ s.Require().NoError(err)
+
+ org, err := s.db.CreateOrganization(ctx, "test-org", creds, "superSecret@123BlaBla", params.PoolBalancerTypeRoundRobin)
+ s.Require().NoError(err)
+ s.Require().NotNil(org)
+
+ err = s.db.DeleteGithubCredentials(ctx, creds.ID)
+ s.Require().Error(err)
+ s.Require().ErrorIs(err, runnerErrors.ErrBadRequest)
+
+ err = s.db.DeleteOrganization(ctx, org.ID)
+ s.Require().NoError(err)
+
+ enterprise, err := s.db.CreateEnterprise(ctx, "test-enterprise", creds, "superSecret@123BlaBla", params.PoolBalancerTypeRoundRobin)
+ s.Require().NoError(err)
+ s.Require().NotNil(enterprise)
+
+ err = s.db.DeleteGithubCredentials(ctx, creds.ID)
+ s.Require().Error(err)
+ s.Require().ErrorIs(err, runnerErrors.ErrBadRequest)
+
+ err = s.db.DeleteEnterprise(ctx, enterprise.ID)
+ s.Require().NoError(err)
+
+ err = s.db.DeleteGithubCredentials(ctx, creds.ID)
+ s.Require().NoError(err)
+
+ _, err = s.db.GetGithubCredentials(ctx, creds.ID, true)
+ s.Require().Error(err)
+ s.Require().ErrorIs(err, runnerErrors.ErrNotFound)
+}
+
+func (s *GithubTestSuite) TestUpdateCredentials() {
+ ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+ credParams := params.CreateGithubCredentialsParams{
+ Name: testCredsName,
+ Description: testCredsDescription,
+ Endpoint: defaultGithubEndpoint,
+ AuthType: params.ForgeAuthTypePAT,
+ PAT: params.GithubPAT{
+ OAuth2Token: "test",
+ },
+ }
+
+ creds, err := s.db.CreateGithubCredentials(ctx, credParams)
+ s.Require().NoError(err)
+ s.Require().NotNil(creds)
+
+ newDescription := "new description"
+ newName := "new-name"
+ newToken := "new-token"
+ updateCredParams := params.UpdateGithubCredentialsParams{
+ Description: &newDescription,
+ Name: &newName,
+ PAT: ¶ms.GithubPAT{
+ OAuth2Token: newToken,
+ },
+ }
+
+ updatedCreds, err := s.db.UpdateGithubCredentials(ctx, creds.ID, updateCredParams)
+ s.Require().NoError(err)
+ s.Require().NotNil(updatedCreds)
+ s.Require().Equal(newDescription, updatedCreds.Description)
+ s.Require().Equal(newName, updatedCreds.Name)
+}
+
+func (s *GithubTestSuite) TestUpdateGithubCredentialsFailIfWrongCredentialTypeIsPassed() {
+ ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+ credParams := params.CreateGithubCredentialsParams{
+ Name: testCredsName,
+ Description: testCredsDescription,
+ Endpoint: defaultGithubEndpoint,
+ AuthType: params.ForgeAuthTypePAT,
+ PAT: params.GithubPAT{
+ OAuth2Token: "test",
+ },
+ }
+
+ creds, err := s.db.CreateGithubCredentials(ctx, credParams)
+ s.Require().NoError(err)
+ s.Require().NotNil(creds)
+
+ updateCredParams := params.UpdateGithubCredentialsParams{
+ App: ¶ms.GithubApp{
+ AppID: 1,
+ InstallationID: 2,
+ PrivateKeyBytes: []byte("test"),
+ },
+ }
+
+ _, err = s.db.UpdateGithubCredentials(ctx, creds.ID, updateCredParams)
+ s.Require().Error(err)
+ s.Require().ErrorIs(err, runnerErrors.ErrBadRequest)
+ s.Require().EqualError(err, "error updating github credentials: cannot update app credentials for PAT: invalid request")
+
+ credParamsWithApp := params.CreateGithubCredentialsParams{
+ Name: "test-credsApp",
+ Description: "test credsApp",
+ Endpoint: defaultGithubEndpoint,
+ AuthType: params.ForgeAuthTypeApp,
+ App: params.GithubApp{
+ AppID: 1,
+ InstallationID: 2,
+ PrivateKeyBytes: []byte("test"),
+ },
+ }
+
+ credsApp, err := s.db.CreateGithubCredentials(ctx, credParamsWithApp)
+ s.Require().NoError(err)
+ s.Require().NotNil(credsApp)
+
+ updateCredParams = params.UpdateGithubCredentialsParams{
+ PAT: ¶ms.GithubPAT{
+ OAuth2Token: "test",
+ },
+ }
+
+ _, err = s.db.UpdateGithubCredentials(ctx, credsApp.ID, updateCredParams)
+ s.Require().Error(err)
+ s.Require().ErrorIs(err, runnerErrors.ErrBadRequest)
+ s.Require().EqualError(err, "error updating github credentials: cannot update PAT credentials for app: invalid request")
+}
+
+func (s *GithubTestSuite) TestUpdateCredentialsFailsForNonExistingCredentials() {
+ ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+ updateCredParams := params.UpdateGithubCredentialsParams{
+ Description: nil,
+ }
+
+ _, err := s.db.UpdateGithubCredentials(ctx, 1, updateCredParams)
+ s.Require().Error(err)
+ s.Require().ErrorIs(err, runnerErrors.ErrNotFound)
+}
+
+func (s *GithubTestSuite) TestUpdateCredentialsFailsIfCredentialsAreOwnedByNonAdminUser() {
+ ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+ testUser := garmTesting.CreateGARMTestUser(ctx, "test-user5", s.db, s.T())
+ testUserCtx := auth.PopulateContext(context.Background(), testUser, nil)
+
+ credParams := params.CreateGithubCredentialsParams{
+ Name: testCredsName,
+ Description: testCredsDescription,
+ Endpoint: defaultGithubEndpoint,
+ AuthType: params.ForgeAuthTypePAT,
+ PAT: params.GithubPAT{
+ OAuth2Token: "test-creds5",
+ },
+ }
+
+ creds, err := s.db.CreateGithubCredentials(ctx, credParams)
+ s.Require().NoError(err)
+ s.Require().NotNil(creds)
+
+ newDescription := "new description2"
+ updateCredParams := params.UpdateGithubCredentialsParams{
+ Description: &newDescription,
+ }
+
+ _, err = s.db.UpdateGithubCredentials(testUserCtx, creds.ID, updateCredParams)
+ s.Require().Error(err)
+ s.Require().ErrorIs(err, runnerErrors.ErrNotFound)
+}
+
+func (s *GithubTestSuite) TestAdminUserCanUpdateAnyGithubCredentials() {
+ ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+ testUser := garmTesting.CreateGARMTestUser(ctx, "test-user5", s.db, s.T())
+ testUserCtx := auth.PopulateContext(context.Background(), testUser, nil)
+
+ credParams := params.CreateGithubCredentialsParams{
+ Name: testCredsName,
+ Description: testCredsDescription,
+ Endpoint: defaultGithubEndpoint,
+ AuthType: params.ForgeAuthTypePAT,
+ PAT: params.GithubPAT{
+ OAuth2Token: "test-creds5",
+ },
+ }
+
+ creds, err := s.db.CreateGithubCredentials(testUserCtx, credParams)
+ s.Require().NoError(err)
+ s.Require().NotNil(creds)
+
+ newDescription := "new description2"
+ updateCredParams := params.UpdateGithubCredentialsParams{
+ Description: &newDescription,
+ }
+
+ newCreds, err := s.db.UpdateGithubCredentials(ctx, creds.ID, updateCredParams)
+ s.Require().NoError(err)
+ s.Require().Equal(newDescription, newCreds.Description)
+}
+
+func (s *GithubTestSuite) TestDeleteGithubEndpointFailsWithOrgsReposOrCredentials() {
+ ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+ endpointParams := params.CreateGithubEndpointParams{
+ Name: "deleteme",
+ Description: testEndpointDescription,
+ APIBaseURL: testAPIBaseURL,
+ BaseURL: testBaseURL,
+ }
+
+ ep, err := s.db.CreateGithubEndpoint(ctx, endpointParams)
+ s.Require().NoError(err)
+ s.Require().NotNil(ep)
+
+ credParams := params.CreateGithubCredentialsParams{
+ Name: testCredsName,
+ Description: testCredsDescription,
+ Endpoint: ep.Name,
+ AuthType: params.ForgeAuthTypePAT,
+ PAT: params.GithubPAT{
+ OAuth2Token: "test-creds5",
+ },
+ }
+
+ creds, err := s.db.CreateGithubCredentials(ctx, credParams)
+ s.Require().NoError(err)
+ s.Require().NotNil(creds)
+
+ repo, err := s.db.CreateRepository(ctx, "test-owner", "test-repo", creds, "superSecret@123BlaBla", params.PoolBalancerTypeRoundRobin)
+ s.Require().NoError(err)
+ s.Require().NotNil(repo)
+
+ badRequest := &runnerErrors.BadRequestError{}
+ err = s.db.DeleteGithubEndpoint(ctx, ep.Name)
+ s.Require().Error(err)
+ s.Require().ErrorAs(err, &badRequest)
+
+ err = s.db.DeleteRepository(ctx, repo.ID)
+ s.Require().NoError(err)
+
+ org, err := s.db.CreateOrganization(ctx, "test-org", creds, "superSecret@123BlaBla", params.PoolBalancerTypeRoundRobin)
+ s.Require().NoError(err)
+ s.Require().NotNil(org)
+
+ err = s.db.DeleteGithubEndpoint(ctx, ep.Name)
+ s.Require().Error(err)
+ s.Require().ErrorAs(err, &badRequest)
+
+ err = s.db.DeleteOrganization(ctx, org.ID)
+ s.Require().NoError(err)
+
+ err = s.db.DeleteGithubCredentials(ctx, creds.ID)
+ s.Require().NoError(err)
+
+ err = s.db.DeleteGithubEndpoint(ctx, ep.Name)
+ s.Require().NoError(err)
+
+ _, err = s.db.GetGithubEndpoint(ctx, ep.Name)
+ s.Require().Error(err)
+ s.Require().ErrorIs(err, runnerErrors.ErrNotFound)
+}
+
+func TestGithubTestSuite(t *testing.T) {
+ suite.Run(t, new(GithubTestSuite))
+}
+
+func TestCredentialsAndEndpointMigration(t *testing.T) {
+ cfg := garmTesting.GetTestSqliteDBConfig(t)
+
+ // Copy the sample DB
+ data, err := os.ReadFile("../../testdata/db/v0.1.4/garm.db")
+ if err != nil {
+ t.Fatalf("failed to read test data: %s", err)
+ }
+
+ if cfg.SQLite.DBFile == "" {
+ t.Fatalf("DB file not set")
+ }
+ if err := os.WriteFile(cfg.SQLite.DBFile, data, 0o600); err != nil {
+ t.Fatalf("failed to write test data: %s", err)
+ }
+
+ // Define some credentials to migrate.
+ credentials := []config.Github{
+ {
+ Name: "test-creds",
+ Description: "test creds",
+ AuthType: config.GithubAuthTypePAT,
+ PAT: config.GithubPAT{
+ OAuth2Token: "test",
+ },
+ },
+ {
+ Name: "ghes-test",
+ Description: "ghes creds",
+ APIBaseURL: testAPIBaseURL,
+ UploadBaseURL: testUploadBaseURL,
+ BaseURL: testBaseURL,
+ AuthType: config.GithubAuthTypeApp,
+ App: config.GithubApp{
+ AppID: 1,
+ InstallationID: 99,
+ PrivateKeyPath: "../../testdata/certs/srv-key.pem",
+ },
+ },
+ }
+ // Set the config credentials in the cfg. This is what happens in the
+ // main function of GARM as well.
+ cfg.MigrateCredentials = credentials
+
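+ // Opening the DB runs the schema migrations, which import the
+ // file-based credentials and endpoints into their new tables.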
+ db, err := NewSQLDatabase(context.Background(), cfg)
+ if err != nil {
+ t.Fatalf("failed to create db connection: %s", err)
+ }
+
+ // We expect the migrated DB to contain 2 endpoints and 2 credentials.
+ ctx := garmTesting.ImpersonateAdminContext(context.Background(), db, t)
+
+ endpoints, err := db.ListGithubEndpoints(ctx)
+ if err != nil {
+ t.Fatalf("failed to list endpoints: %s", err)
+ }
+ if len(endpoints) != 2 {
+ t.Fatalf("expected 2 endpoints, got %d", len(endpoints))
+ }
+ if endpoints[0].Name != defaultGithubEndpoint {
+ t.Fatalf("expected default endpoint to exist, got %s", endpoints[0].Name)
+ }
+ if endpoints[1].Name != "example.com" {
+ t.Fatalf("expected example.com endpoint to exist, got %s", endpoints[1].Name)
+ }
+ if endpoints[1].UploadBaseURL != testUploadBaseURL {
+ t.Fatalf("expected upload base URL to be %s, got %s", testUploadBaseURL, endpoints[1].UploadBaseURL)
+ }
+ if endpoints[1].BaseURL != testBaseURL {
+ t.Fatalf("expected base URL to be %s, got %s", testBaseURL, endpoints[1].BaseURL)
+ }
+ if endpoints[1].APIBaseURL != testAPIBaseURL {
+ t.Fatalf("expected API base URL to be %s, got %s", testAPIBaseURL, endpoints[1].APIBaseURL)
+ }
+
+ creds, err := db.ListGithubCredentials(ctx)
+ if err != nil {
+ t.Fatalf("failed to list credentials: %s", err)
+ }
+ if len(creds) != 2 {
+ t.Fatalf("expected 2 credentials, got %d", len(creds))
+ }
+ if creds[0].Name != "test-creds" {
+ t.Fatalf("expected test-creds to exist, got %s", creds[0].Name)
+ }
+ if creds[1].Name != "ghes-test" {
+ t.Fatalf("expected ghes-test to exist, got %s", creds[1].Name)
+ }
+ if creds[0].Endpoint.Name != defaultGithubEndpoint {
+ t.Fatalf("expected test-creds to be associated with default endpoint, got %s", creds[0].Endpoint.Name)
+ }
+ if creds[1].Endpoint.Name != "example.com" {
+ t.Fatalf("expected ghes-test to be associated with example.com endpoint, got %s", creds[1].Endpoint.Name)
+ }
+
+ if creds[0].AuthType != params.ForgeAuthTypePAT {
+ t.Fatalf("expected test-creds to have PAT auth type, got %s", creds[0].AuthType)
+ }
+ if creds[1].AuthType != params.ForgeAuthTypeApp {
+ t.Fatalf("expected ghes-test to have App auth type, got %s", creds[1].AuthType)
+ }
+ if len(creds[0].CredentialsPayload) == 0 {
+ t.Fatalf("expected test-creds to have credentials payload, got empty")
+ }
+
+ var pat params.GithubPAT
+ if err := json.Unmarshal(creds[0].CredentialsPayload, &pat); err != nil {
+ t.Fatalf("failed to unmarshal test-creds credentials payload: %s", err)
+ }
+ if pat.OAuth2Token != "test" {
+ t.Fatalf("expected test-creds to have PAT token test, got %s", pat.OAuth2Token)
+ }
+
+ var app params.GithubApp
+ if err := json.Unmarshal(creds[1].CredentialsPayload, &app); err != nil {
+ t.Fatalf("failed to unmarshal ghes-test credentials payload: %s", err)
+ }
+ if app.AppID != 1 {
+ t.Fatalf("expected ghes-test to have app ID 1, got %d", app.AppID)
+ }
+ if app.InstallationID != 99 {
+ t.Fatalf("expected ghes-test to have installation ID 99, got %d", app.InstallationID)
+ }
+ if app.PrivateKeyBytes == nil {
+ t.Fatalf("expected ghes-test to have private key bytes, got nil")
+ }
+
+ privateKeyBytes, err := credentials[1].App.PrivateKeyBytes()
+ if err != nil {
+ t.Fatalf("failed to read private key: %s", err)
+ }
+
+ if !bytes.Equal(app.PrivateKeyBytes, privateKeyBytes) {
+ t.Fatalf("expected ghes-test private key to match the key file on disk")
+ }
+}
diff --git a/database/sql/instances.go b/database/sql/instances.go
index 12c833af..5f9d018e 100644
--- a/database/sql/instances.go
+++ b/database/sql/instances.go
@@ -17,28 +17,45 @@ package sql
import (
"context"
"encoding/json"
-
- runnerErrors "github.com/cloudbase/garm-provider-common/errors"
- "github.com/cloudbase/garm/params"
+ "errors"
+ "fmt"
+ "log/slog"
"github.com/google/uuid"
- "github.com/pkg/errors"
"gorm.io/datatypes"
"gorm.io/gorm"
"gorm.io/gorm/clause"
+
+ runnerErrors "github.com/cloudbase/garm-provider-common/errors"
+ "github.com/cloudbase/garm/database/common"
+ "github.com/cloudbase/garm/params"
)
-func (s *sqlDatabase) CreateInstance(ctx context.Context, poolID string, param params.CreateInstanceParams) (params.Instance, error) {
- pool, err := s.getPoolByID(ctx, poolID)
+func (s *sqlDatabase) CreateInstance(_ context.Context, poolID string, param params.CreateInstanceParams) (instance params.Instance, err error) {
+ pool, err := s.getPoolByID(s.conn, poolID)
if err != nil {
- return params.Instance{}, errors.Wrap(err, "fetching pool")
+ return params.Instance{}, fmt.Errorf("error fetching pool: %w", err)
}
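+ // Emit a creation notification only after the function returns
+ // successfully; the named return values let the deferred closure see
+ // the final error.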
+ defer func() {
+ if err == nil {
+ s.sendNotify(common.InstanceEntityType, common.CreateOperation, instance)
+ }
+ }()
+
var labels datatypes.JSON
if len(param.AditionalLabels) > 0 {
labels, err = json.Marshal(param.AditionalLabels)
if err != nil {
- return params.Instance{}, errors.Wrap(err, "marshalling labels")
+ return params.Instance{}, fmt.Errorf("error marshalling labels: %w", err)
+ }
+ }
+
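+ // The JIT configuration contains runner registration secrets, so it
+ // is marshaled and sealed (encrypted) before being persisted.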
+ var secret []byte
+ if len(param.JitConfiguration) > 0 {
+ secret, err = s.marshalAndSeal(param.JitConfiguration)
+ if err != nil {
+ return params.Instance{}, fmt.Errorf("error marshalling jit config: %w", err)
}
}
@@ -52,36 +69,22 @@ func (s *sqlDatabase) CreateInstance(ctx context.Context, poolID string, param p
CallbackURL: param.CallbackURL,
MetadataURL: param.MetadataURL,
GitHubRunnerGroup: param.GitHubRunnerGroup,
+ JitConfiguration: secret,
AditionalLabels: labels,
+ AgentID: param.AgentID,
}
q := s.conn.Create(&newInstance)
if q.Error != nil {
- return params.Instance{}, errors.Wrap(q.Error, "creating instance")
+ return params.Instance{}, fmt.Errorf("error creating instance: %w", q.Error)
}
- return s.sqlToParamsInstance(newInstance), nil
+ return s.sqlToParamsInstance(newInstance)
}
-func (s *sqlDatabase) getInstanceByID(ctx context.Context, instanceID string) (Instance, error) {
- u, err := uuid.Parse(instanceID)
+func (s *sqlDatabase) getPoolInstanceByName(poolID string, instanceName string) (Instance, error) {
+ pool, err := s.getPoolByID(s.conn, poolID)
if err != nil {
- return Instance{}, errors.Wrap(runnerErrors.ErrBadRequest, "parsing id")
- }
- var instance Instance
- q := s.conn.Model(&Instance{}).
- Preload(clause.Associations).
- Where("id = ?", u).
- First(&instance)
- if q.Error != nil {
- return Instance{}, errors.Wrap(q.Error, "fetching instance")
- }
- return instance, nil
-}
-
-func (s *sqlDatabase) getPoolInstanceByName(ctx context.Context, poolID string, instanceName string) (Instance, error) {
- pool, err := s.getPoolByID(ctx, poolID)
- if err != nil {
- return Instance{}, errors.Wrap(err, "fetching pool")
+ return Instance{}, fmt.Errorf("error fetching pool: %w", err)
}
var instance Instance
@@ -91,16 +94,25 @@ func (s *sqlDatabase) getPoolInstanceByName(ctx context.Context, poolID string,
First(&instance)
if q.Error != nil {
if errors.Is(q.Error, gorm.ErrRecordNotFound) {
- return Instance{}, errors.Wrap(runnerErrors.ErrNotFound, "fetching pool instance by name")
+ return Instance{}, fmt.Errorf("error fetching pool instance by name: %w", runnerErrors.ErrNotFound)
}
- return Instance{}, errors.Wrap(q.Error, "fetching pool instance by name")
+ return Instance{}, fmt.Errorf("error fetching pool instance by name: %w", q.Error)
}
+
+ instance.Pool = pool
return instance, nil
}
-func (s *sqlDatabase) getInstanceByName(ctx context.Context, instanceName string, preload ...string) (Instance, error) {
+func (s *sqlDatabase) getInstance(_ context.Context, instanceNameOrID string, preload ...string) (Instance, error) {
var instance Instance
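+ // Accept either an instance name or an ID. If the argument parses as
+ // a UUID, look the instance up by ID; otherwise fall back to a lookup
+ // by name.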
+ var whereArg any = instanceNameOrID
+ whereClause := "name = ?"
+ id, err := uuid.Parse(instanceNameOrID)
+ if err == nil {
+ whereArg = id
+ whereClause = "id = ?"
+ }
q := s.conn
if len(preload) > 0 {
@@ -111,79 +123,115 @@ func (s *sqlDatabase) getInstanceByName(ctx context.Context, instanceName string
q = q.Model(&Instance{}).
Preload(clause.Associations).
- Where("name = ?", instanceName).
+ Where(whereClause, whereArg).
First(&instance)
if q.Error != nil {
if errors.Is(q.Error, gorm.ErrRecordNotFound) {
- return Instance{}, errors.Wrap(runnerErrors.ErrNotFound, "fetching instance by name")
+ return Instance{}, fmt.Errorf("error fetching instance by name: %w", runnerErrors.ErrNotFound)
}
- return Instance{}, errors.Wrap(q.Error, "fetching instance by name")
+ return Instance{}, fmt.Errorf("error fetching instance by name: %w", q.Error)
}
return instance, nil
}
-func (s *sqlDatabase) GetPoolInstanceByName(ctx context.Context, poolID string, instanceName string) (params.Instance, error) {
- instance, err := s.getPoolInstanceByName(ctx, poolID, instanceName)
+func (s *sqlDatabase) GetInstance(ctx context.Context, instanceName string) (params.Instance, error) {
+ instance, err := s.getInstance(ctx, instanceName, "StatusMessages", "Pool", "ScaleSet")
if err != nil {
- return params.Instance{}, errors.Wrap(err, "fetching instance")
+ return params.Instance{}, fmt.Errorf("error fetching instance: %w", err)
}
- return s.sqlToParamsInstance(instance), nil
+ return s.sqlToParamsInstance(instance)
}
-func (s *sqlDatabase) GetInstanceByName(ctx context.Context, instanceName string) (params.Instance, error) {
- instance, err := s.getInstanceByName(ctx, instanceName, "StatusMessages")
+func (s *sqlDatabase) DeleteInstance(_ context.Context, poolID string, instanceName string) (err error) {
+ instance, err := s.getPoolInstanceByName(poolID, instanceName)
if err != nil {
- return params.Instance{}, errors.Wrap(err, "fetching instance")
+ if errors.Is(err, runnerErrors.ErrNotFound) {
+ return nil
+ }
+ return fmt.Errorf("error deleting instance: %w", err)
}
- return s.sqlToParamsInstance(instance), nil
-}
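+ // Build the delete notification payload from the instance we just
+ // fetched and send it only if the delete below succeeds.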
+ defer func() {
+ if err == nil {
+ var providerID string
+ if instance.ProviderID != nil {
+ providerID = *instance.ProviderID
+ }
+ instanceNotif := params.Instance{
+ ID: instance.ID.String(),
+ Name: instance.Name,
+ ProviderID: providerID,
+ AgentID: instance.AgentID,
+ }
+ switch {
+ case instance.PoolID != nil:
+ instanceNotif.PoolID = instance.PoolID.String()
+ case instance.ScaleSetFkID != nil:
+ instanceNotif.ScaleSetID = *instance.ScaleSetFkID
+ }
+
+ if notifyErr := s.sendNotify(common.InstanceEntityType, common.DeleteOperation, instanceNotif); notifyErr != nil {
+ slog.With(slog.Any("error", notifyErr)).Error("failed to send notify")
+ }
+ }
+ }()
-func (s *sqlDatabase) DeleteInstance(ctx context.Context, poolID string, instanceName string) error {
- instance, err := s.getPoolInstanceByName(ctx, poolID, instanceName)
- if err != nil {
- return errors.Wrap(err, "deleting instance")
- }
if q := s.conn.Unscoped().Delete(&instance); q.Error != nil {
if errors.Is(q.Error, gorm.ErrRecordNotFound) {
return nil
}
- return errors.Wrap(q.Error, "deleting instance")
+ return fmt.Errorf("error deleting instance: %w", q.Error)
}
return nil
}
-func (s *sqlDatabase) ListInstanceEvents(ctx context.Context, instanceID string, eventType params.EventType, eventLevel params.EventLevel) ([]params.StatusMessage, error) {
- var events []InstanceStatusUpdate
- query := s.conn.Model(&InstanceStatusUpdate{}).Where("instance_id = ?", instanceID)
- if eventLevel != "" {
- query = query.Where("event_level = ?", eventLevel)
- }
-
- if eventType != "" {
- query = query.Where("event_type = ?", eventType)
- }
-
- if result := query.Find(&events); result.Error != nil {
- return nil, errors.Wrap(result.Error, "fetching events")
- }
-
- eventParams := make([]params.StatusMessage, len(events))
- for idx, val := range events {
- eventParams[idx] = params.StatusMessage{
- Message: val.Message,
- EventType: val.EventType,
- EventLevel: val.EventLevel,
+func (s *sqlDatabase) DeleteInstanceByName(ctx context.Context, instanceName string) (err error) {
+ instance, err := s.getInstance(ctx, instanceName, "Pool", "ScaleSet")
+ if err != nil {
+ if errors.Is(err, runnerErrors.ErrNotFound) {
+ return nil
}
+ return fmt.Errorf("error deleting instance: %w", err)
}
- return eventParams, nil
+
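+ // As in DeleteInstance, the named return value lets this deferred
+ // closure send the delete notification only when the removal below
+ // succeeds.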
+ defer func() {
+ if err == nil {
+ var providerID string
+ if instance.ProviderID != nil {
+ providerID = *instance.ProviderID
+ }
+ payload := params.Instance{
+ ID: instance.ID.String(),
+ Name: instance.Name,
+ ProviderID: providerID,
+ AgentID: instance.AgentID,
+ }
+ if instance.PoolID != nil {
+ payload.PoolID = instance.PoolID.String()
+ }
+ if instance.ScaleSetFkID != nil {
+ payload.ScaleSetID = *instance.ScaleSetFkID
+ }
+ if notifyErr := s.sendNotify(common.InstanceEntityType, common.DeleteOperation, payload); notifyErr != nil {
+ slog.With(slog.Any("error", notifyErr)).Error("failed to send notify")
+ }
+ }
+ }()
+
+ if q := s.conn.Unscoped().Delete(&instance); q.Error != nil {
+ if errors.Is(q.Error, gorm.ErrRecordNotFound) {
+ return nil
+ }
+ return fmt.Errorf("error deleting instance: %w", q.Error)
+ }
+ return nil
}
-func (s *sqlDatabase) AddInstanceEvent(ctx context.Context, instanceID string, event params.EventType, eventLevel params.EventLevel, statusMessage string) error {
- instance, err := s.getInstanceByID(ctx, instanceID)
+func (s *sqlDatabase) AddInstanceEvent(ctx context.Context, instanceName string, event params.EventType, eventLevel params.EventLevel, statusMessage string) error {
+ instance, err := s.getInstance(ctx, instanceName)
if err != nil {
- return errors.Wrap(err, "updating instance")
+ return fmt.Errorf("error updating instance: %w", err)
}
msg := InstanceStatusUpdate{
@@ -193,15 +241,15 @@ func (s *sqlDatabase) AddInstanceEvent(ctx context.Context, instanceID string, e
}
if err := s.conn.Model(&instance).Association("StatusMessages").Append(&msg); err != nil {
- return errors.Wrap(err, "adding status message")
+ return fmt.Errorf("error adding status message: %w", err)
}
return nil
}
-func (s *sqlDatabase) UpdateInstance(ctx context.Context, instanceID string, param params.UpdateInstanceParams) (params.Instance, error) {
- instance, err := s.getInstanceByID(ctx, instanceID)
+func (s *sqlDatabase) UpdateInstance(ctx context.Context, instanceName string, param params.UpdateInstanceParams) (params.Instance, error) {
+ instance, err := s.getInstance(ctx, instanceName, "Pool", "ScaleSet")
if err != nil {
- return params.Instance{}, errors.Wrap(err, "updating instance")
+ return params.Instance{}, fmt.Errorf("error updating instance: %w", err)
}
if param.AgentID != 0 {
@@ -235,11 +283,19 @@ func (s *sqlDatabase) UpdateInstance(ctx context.Context, instanceID string, par
instance.TokenFetched = *param.TokenFetched
}
+ if param.JitConfiguration != nil {
+ secret, err := s.marshalAndSeal(param.JitConfiguration)
+ if err != nil {
+ return params.Instance{}, fmt.Errorf("error marshalling jit config: %w", err)
+ }
+ instance.JitConfiguration = secret
+ }
+
instance.ProviderFault = param.ProviderFault
q := s.conn.Save(&instance)
if q.Error != nil {
- return params.Instance{}, errors.Wrap(q.Error, "updating instance")
+ return params.Instance{}, fmt.Errorf("error updating instance: %w", q.Error)
}
if len(param.Addresses) > 0 {
@@ -251,57 +307,75 @@ func (s *sqlDatabase) UpdateInstance(ctx context.Context, instanceID string, par
})
}
if err := s.conn.Model(&instance).Association("Addresses").Replace(addrs); err != nil {
- return params.Instance{}, errors.Wrap(err, "updating addresses")
+ return params.Instance{}, fmt.Errorf("error updating addresses: %w", err)
}
}
-
- return s.sqlToParamsInstance(instance), nil
+ inst, err := s.sqlToParamsInstance(instance)
+ if err != nil {
+ return params.Instance{}, fmt.Errorf("error converting instance: %w", err)
+ }
+ s.sendNotify(common.InstanceEntityType, common.UpdateOperation, inst)
+ return inst, nil
}
-func (s *sqlDatabase) ListPoolInstances(ctx context.Context, poolID string) ([]params.Instance, error) {
+func (s *sqlDatabase) ListPoolInstances(_ context.Context, poolID string) ([]params.Instance, error) {
u, err := uuid.Parse(poolID)
if err != nil {
- return nil, errors.Wrap(runnerErrors.ErrBadRequest, "parsing id")
+ return nil, fmt.Errorf("error parsing id: %w", runnerErrors.ErrBadRequest)
}
var instances []Instance
- query := s.conn.Model(&Instance{}).Where("pool_id = ?", u)
+ query := s.conn.
+ Preload("Pool").
+ Preload("Job").
+ Where("pool_id = ?", u)
if err := query.Find(&instances); err.Error != nil {
- return nil, errors.Wrap(err.Error, "fetching instances")
+ return nil, fmt.Errorf("error fetching instances: %w", err.Error)
}
ret := make([]params.Instance, len(instances))
for idx, inst := range instances {
- ret[idx] = s.sqlToParamsInstance(inst)
+ ret[idx], err = s.sqlToParamsInstance(inst)
+ if err != nil {
+ return nil, fmt.Errorf("error converting instance: %w", err)
+ }
}
return ret, nil
}
-func (s *sqlDatabase) ListAllInstances(ctx context.Context) ([]params.Instance, error) {
+func (s *sqlDatabase) ListAllInstances(_ context.Context) ([]params.Instance, error) {
var instances []Instance
- q := s.conn.Model(&Instance{}).Find(&instances)
+ q := s.conn.
+ Preload("Pool").
+ Preload("ScaleSet").
+ Preload("Job").
+ Find(&instances)
if q.Error != nil {
- return nil, errors.Wrap(q.Error, "fetching instances")
+ return nil, fmt.Errorf("error fetching instances: %w", q.Error)
}
ret := make([]params.Instance, len(instances))
+ var err error
for idx, instance := range instances {
- ret[idx] = s.sqlToParamsInstance(instance)
+ ret[idx], err = s.sqlToParamsInstance(instance)
+ if err != nil {
+ return nil, fmt.Errorf("error converting instance: %w", err)
+ }
}
return ret, nil
}
-func (s *sqlDatabase) PoolInstanceCount(ctx context.Context, poolID string) (int64, error) {
- pool, err := s.getPoolByID(ctx, poolID)
+func (s *sqlDatabase) PoolInstanceCount(_ context.Context, poolID string) (int64, error) {
+ pool, err := s.getPoolByID(s.conn, poolID)
if err != nil {
- return 0, errors.Wrap(err, "fetching pool")
+ return 0, fmt.Errorf("error fetching pool: %w", err)
}
var cnt int64
q := s.conn.Model(&Instance{}).Where("pool_id = ?", pool.ID).Count(&cnt)
if q.Error != nil {
- return 0, errors.Wrap(q.Error, "fetching instance count")
+ return 0, fmt.Errorf("error fetching instance count: %w", q.Error)
}
return cnt, nil
}
diff --git a/database/sql/instances_test.go b/database/sql/instances_test.go
index d47b265e..5ec55107 100644
--- a/database/sql/instances_test.go
+++ b/database/sql/instances_test.go
@@ -22,18 +22,16 @@ import (
"sort"
"testing"
- commonParams "github.com/cloudbase/garm-provider-common/params"
-
- dbCommon "github.com/cloudbase/garm/database/common"
- garmTesting "github.com/cloudbase/garm/internal/testing"
- "github.com/cloudbase/garm/params"
-
- "gopkg.in/DATA-DOG/go-sqlmock.v1"
-
"github.com/stretchr/testify/suite"
+ "gopkg.in/DATA-DOG/go-sqlmock.v1"
"gorm.io/driver/mysql"
"gorm.io/gorm"
"gorm.io/gorm/logger"
+
+ commonParams "github.com/cloudbase/garm-provider-common/params"
+ dbCommon "github.com/cloudbase/garm/database/common"
+ garmTesting "github.com/cloudbase/garm/internal/testing"
+ "github.com/cloudbase/garm/params"
)
type InstancesTestFixtures struct {
@@ -50,6 +48,7 @@ type InstancesTestSuite struct {
Store dbCommon.Store
StoreSQLMocked *sqlDatabase
Fixtures *InstancesTestFixtures
+ adminCtx context.Context
}
func (s *InstancesTestSuite) equalInstancesByName(expected, actual []params.Instance) {
@@ -78,8 +77,14 @@ func (s *InstancesTestSuite) SetupTest() {
}
s.Store = db
+ adminCtx := garmTesting.ImpersonateAdminContext(context.Background(), db, s.T())
+ s.adminCtx = adminCtx
+
+ githubEndpoint := garmTesting.CreateDefaultGithubEndpoint(adminCtx, db, s.T())
+ creds := garmTesting.CreateTestGithubCredentials(adminCtx, "new-creds", db, s.T(), githubEndpoint)
+
// create an organization for testing purposes
- org, err := s.Store.CreateOrganization(context.Background(), "test-org", "test-creds", "test-webhookSecret")
+ org, err := s.Store.CreateOrganization(s.adminCtx, "test-org", creds, "test-webhookSecret", params.PoolBalancerTypeRoundRobin)
if err != nil {
s.FailNow(fmt.Sprintf("failed to create org: %s", err))
}
@@ -92,9 +97,11 @@ func (s *InstancesTestSuite) SetupTest() {
Image: "test-image",
Flavor: "test-flavor",
OSType: "linux",
- Tags: []string{"self-hosted", "amd64", "linux"},
+ Tags: []string{"amd64", "linux"},
}
- pool, err := s.Store.CreateOrganizationPool(context.Background(), org.ID, createPoolParams)
+ entity, err := org.GetEntity()
+ s.Require().Nil(err)
+ pool, err := s.Store.CreateEntityPool(s.adminCtx, entity, createPoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("failed to create org pool: %s", err))
}
@@ -103,7 +110,7 @@ func (s *InstancesTestSuite) SetupTest() {
instances := []params.Instance{}
for i := 1; i <= 3; i++ {
instance, err := db.CreateInstance(
- context.Background(),
+ s.adminCtx,
pool.ID,
params.CreateInstanceParams{
Name: fmt.Sprintf("test-instance-%d", i),
@@ -112,6 +119,12 @@ func (s *InstancesTestSuite) SetupTest() {
CallbackURL: "https://garm.example.com/",
Status: commonParams.InstanceRunning,
RunnerStatus: params.RunnerIdle,
+ JitConfiguration: map[string]string{
+ "secret": fmt.Sprintf("secret-%d", i),
+ },
+ AditionalLabels: []string{
+ fmt.Sprintf("label-%d", i),
+ },
},
)
if err != nil {
@@ -131,7 +144,7 @@ func (s *InstancesTestSuite) SetupTest() {
SkipInitializeWithVersion: true,
}
gormConfig := &gorm.Config{}
- if flag.Lookup("test.v").Value.String() == "false" {
+ if flag.Lookup("test.v").Value.String() == falseString {
gormConfig.Logger = logger.Default.LogMode(logger.Silent)
}
gormConn, err := gorm.Open(mysql.New(mysqlConfig), gormConfig)
@@ -179,11 +192,11 @@ func (s *InstancesTestSuite) SetupTest() {
func (s *InstancesTestSuite) TestCreateInstance() {
// call tested function
- instance, err := s.Store.CreateInstance(context.Background(), s.Fixtures.Pool.ID, s.Fixtures.CreateInstanceParams)
+ instance, err := s.Store.CreateInstance(s.adminCtx, s.Fixtures.Pool.ID, s.Fixtures.CreateInstanceParams)
// assertions
s.Require().Nil(err)
- storeInstance, err := s.Store.GetInstanceByName(context.Background(), s.Fixtures.CreateInstanceParams.Name)
+ storeInstance, err := s.Store.GetInstance(s.adminCtx, s.Fixtures.CreateInstanceParams.Name)
if err != nil {
s.FailNow(fmt.Sprintf("failed to get instance: %v", err))
}
@@ -195,17 +208,17 @@ func (s *InstancesTestSuite) TestCreateInstance() {
}
func (s *InstancesTestSuite) TestCreateInstanceInvalidPoolID() {
- _, err := s.Store.CreateInstance(context.Background(), "dummy-pool-id", params.CreateInstanceParams{})
+ _, err := s.Store.CreateInstance(s.adminCtx, "dummy-pool-id", params.CreateInstanceParams{})
- s.Require().Equal("fetching pool: parsing id: invalid request", err.Error())
+ s.Require().Equal("error fetching pool: error parsing id: invalid request", err.Error())
}
func (s *InstancesTestSuite) TestCreateInstanceDBCreateErr() {
pool := s.Fixtures.Pool
s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE id = ? AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT 1")).
- WithArgs(pool.ID).
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE id = ? AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT ?")).
+ WithArgs(pool.ID, 1).
WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(pool.ID))
s.Fixtures.SQLMock.ExpectBegin()
s.Fixtures.SQLMock.
@@ -216,36 +229,17 @@ func (s *InstancesTestSuite) TestCreateInstanceDBCreateErr() {
WillReturnError(fmt.Errorf("mocked insert instance error"))
s.Fixtures.SQLMock.ExpectRollback()
- _, err := s.StoreSQLMocked.CreateInstance(context.Background(), pool.ID, s.Fixtures.CreateInstanceParams)
+ _, err := s.StoreSQLMocked.CreateInstance(s.adminCtx, pool.ID, s.Fixtures.CreateInstanceParams)
s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("creating instance: mocked insert instance error", err.Error())
-}
-
-func (s *InstancesTestSuite) TestGetPoolInstanceByName() {
- storeInstance := s.Fixtures.Instances[0] // this is already created in `SetupTest()`
-
- instance, err := s.Store.GetPoolInstanceByName(context.Background(), s.Fixtures.Pool.ID, storeInstance.Name)
-
- s.Require().Nil(err)
- s.Require().Equal(storeInstance.Name, instance.Name)
- s.Require().Equal(storeInstance.PoolID, instance.PoolID)
- s.Require().Equal(storeInstance.OSArch, instance.OSArch)
- s.Require().Equal(storeInstance.OSType, instance.OSType)
- s.Require().Equal(storeInstance.CallbackURL, instance.CallbackURL)
-}
-
-func (s *InstancesTestSuite) TestGetPoolInstanceByNameNotFound() {
- _, err := s.Store.GetPoolInstanceByName(context.Background(), s.Fixtures.Pool.ID, "not-existent-instance-name")
-
- s.Require().Equal("fetching instance: fetching pool instance by name: not found", err.Error())
+ s.Require().Equal("error creating instance: mocked insert instance error", err.Error())
}
func (s *InstancesTestSuite) TestGetInstanceByName() {
storeInstance := s.Fixtures.Instances[1]
- instance, err := s.Store.GetInstanceByName(context.Background(), storeInstance.Name)
+ instance, err := s.Store.GetInstance(s.adminCtx, storeInstance.Name)
s.Require().Nil(err)
s.Require().Equal(storeInstance.Name, instance.Name)
@@ -256,26 +250,43 @@ func (s *InstancesTestSuite) TestGetInstanceByName() {
}
func (s *InstancesTestSuite) TestGetInstanceByNameFetchInstanceFailed() {
- _, err := s.Store.GetInstanceByName(context.Background(), "not-existent-instance-name")
+ _, err := s.Store.GetInstance(s.adminCtx, "not-existent-instance-name")
- s.Require().Equal("fetching instance: fetching instance by name: not found", err.Error())
+ s.Require().Equal("error fetching instance: error fetching instance by name: not found", err.Error())
}
func (s *InstancesTestSuite) TestDeleteInstance() {
storeInstance := s.Fixtures.Instances[0]
- err := s.Store.DeleteInstance(context.Background(), s.Fixtures.Pool.ID, storeInstance.Name)
+ err := s.Store.DeleteInstance(s.adminCtx, s.Fixtures.Pool.ID, storeInstance.Name)
s.Require().Nil(err)
- _, err = s.Store.GetPoolInstanceByName(context.Background(), s.Fixtures.Pool.ID, storeInstance.Name)
- s.Require().Equal("fetching instance: fetching pool instance by name: not found", err.Error())
+ _, err = s.Store.GetInstance(s.adminCtx, storeInstance.Name)
+ s.Require().Equal("error fetching instance: error fetching instance by name: not found", err.Error())
+
+ err = s.Store.DeleteInstance(s.adminCtx, s.Fixtures.Pool.ID, storeInstance.Name)
+ s.Require().Nil(err)
+}
+
+func (s *InstancesTestSuite) TestDeleteInstanceByName() {
+ storeInstance := s.Fixtures.Instances[0]
+
+ err := s.Store.DeleteInstanceByName(s.adminCtx, storeInstance.Name)
+
+ s.Require().Nil(err)
+
+ _, err = s.Store.GetInstance(s.adminCtx, storeInstance.Name)
+ s.Require().Equal("error fetching instance: error fetching instance by name: not found", err.Error())
+
+ err = s.Store.DeleteInstanceByName(s.adminCtx, storeInstance.Name)
+ s.Require().Nil(err)
}
func (s *InstancesTestSuite) TestDeleteInstanceInvalidPoolID() {
- err := s.Store.DeleteInstance(context.Background(), "dummy-pool-id", "dummy-instance-name")
+ err := s.Store.DeleteInstance(s.adminCtx, "dummy-pool-id", "dummy-instance-name")
- s.Require().Equal("deleting instance: fetching pool: parsing id: invalid request", err.Error())
+ s.Require().Equal("error deleting instance: error fetching pool: error parsing id: invalid request", err.Error())
}
func (s *InstancesTestSuite) TestDeleteInstanceDBRecordNotFoundErr() {
@@ -283,17 +294,21 @@ func (s *InstancesTestSuite) TestDeleteInstanceDBRecordNotFoundErr() {
instance := s.Fixtures.Instances[0]
s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE id = ? AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT 1")).
- WithArgs(pool.ID).
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE id = ? AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT ?")).
+ WithArgs(pool.ID, 1).
WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(pool.ID))
s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `instances` WHERE (name = ? and pool_id = ?) AND `instances`.`deleted_at` IS NULL ORDER BY `instances`.`id` LIMIT 1")).
- WithArgs(instance.Name, pool.ID).
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `instances` WHERE (name = ? and pool_id = ?) AND `instances`.`deleted_at` IS NULL ORDER BY `instances`.`id` LIMIT ?")).
+ WithArgs(instance.Name, pool.ID, 1).
WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(instance.ID))
s.Fixtures.SQLMock.
ExpectQuery(regexp.QuoteMeta("SELECT * FROM `addresses` WHERE `addresses`.`instance_id` = ? AND `addresses`.`deleted_at` IS NULL")).
WithArgs(instance.ID).
WillReturnRows(sqlmock.NewRows([]string{"address", "type", "instance_id"}).AddRow("10.10.1.10", "private", instance.ID))
+ s.Fixtures.SQLMock.
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `workflow_jobs` WHERE `workflow_jobs`.`instance_id` = ? AND `workflow_jobs`.`deleted_at` IS NULL")).
+ WithArgs(instance.ID).
+ WillReturnRows(sqlmock.NewRows([]string{}))
s.Fixtures.SQLMock.
ExpectQuery(regexp.QuoteMeta("SELECT * FROM `instance_status_updates` WHERE `instance_status_updates`.`instance_id` = ? AND `instance_status_updates`.`deleted_at` IS NULL")).
WithArgs(instance.ID).
@@ -305,7 +320,7 @@ func (s *InstancesTestSuite) TestDeleteInstanceDBRecordNotFoundErr() {
WillReturnError(gorm.ErrRecordNotFound)
s.Fixtures.SQLMock.ExpectRollback()
- err := s.StoreSQLMocked.DeleteInstance(context.Background(), pool.ID, instance.Name)
+ err := s.StoreSQLMocked.DeleteInstance(s.adminCtx, pool.ID, instance.Name)
s.assertSQLMockExpectations()
s.Require().Nil(err)
@@ -316,17 +331,21 @@ func (s *InstancesTestSuite) TestDeleteInstanceDBDeleteErr() {
instance := s.Fixtures.Instances[0]
s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE id = ? AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT 1")).
- WithArgs(pool.ID).
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE id = ? AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT ?")).
+ WithArgs(pool.ID, 1).
WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(pool.ID))
s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `instances` WHERE (name = ? and pool_id = ?) AND `instances`.`deleted_at` IS NULL ORDER BY `instances`.`id` LIMIT 1")).
- WithArgs(instance.Name, pool.ID).
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `instances` WHERE (name = ? and pool_id = ?) AND `instances`.`deleted_at` IS NULL ORDER BY `instances`.`id` LIMIT ?")).
+ WithArgs(instance.Name, pool.ID, 1).
WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(instance.ID))
s.Fixtures.SQLMock.
ExpectQuery(regexp.QuoteMeta("SELECT * FROM `addresses` WHERE `addresses`.`instance_id` = ? AND `addresses`.`deleted_at` IS NULL")).
WithArgs(instance.ID).
WillReturnRows(sqlmock.NewRows([]string{"address", "type", "instance_id"}).AddRow("12.10.12.13", "public", instance.ID))
+ s.Fixtures.SQLMock.
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `workflow_jobs` WHERE `workflow_jobs`.`instance_id` = ? AND `workflow_jobs`.`deleted_at` IS NULL")).
+ WithArgs(instance.ID).
+ WillReturnRows(sqlmock.NewRows([]string{}))
s.Fixtures.SQLMock.
ExpectQuery(regexp.QuoteMeta("SELECT * FROM `instance_status_updates` WHERE `instance_status_updates`.`instance_id` = ? AND `instance_status_updates`.`deleted_at` IS NULL")).
WithArgs(instance.ID).
@@ -338,21 +357,21 @@ func (s *InstancesTestSuite) TestDeleteInstanceDBDeleteErr() {
WillReturnError(fmt.Errorf("mocked delete instance error"))
s.Fixtures.SQLMock.ExpectRollback()
- err := s.StoreSQLMocked.DeleteInstance(context.Background(), pool.ID, instance.Name)
+ err := s.StoreSQLMocked.DeleteInstance(s.adminCtx, pool.ID, instance.Name)
s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("deleting instance: mocked delete instance error", err.Error())
+ s.Require().Equal("error deleting instance: mocked delete instance error", err.Error())
}
func (s *InstancesTestSuite) TestAddInstanceEvent() {
storeInstance := s.Fixtures.Instances[0]
statusMsg := "test-status-message"
- err := s.Store.AddInstanceEvent(context.Background(), storeInstance.ID, params.StatusEvent, params.EventInfo, statusMsg)
+ err := s.Store.AddInstanceEvent(s.adminCtx, storeInstance.Name, params.StatusEvent, params.EventInfo, statusMsg)
s.Require().Nil(err)
- instance, err := s.Store.GetInstanceByName(context.Background(), storeInstance.Name)
+ instance, err := s.Store.GetInstance(s.adminCtx, storeInstance.Name)
if err != nil {
s.FailNow(fmt.Sprintf("failed to get db instance: %s", err))
}
@@ -360,24 +379,22 @@ func (s *InstancesTestSuite) TestAddInstanceEvent() {
s.Require().Equal(statusMsg, instance.StatusMessages[0].Message)
}
-func (s *InstancesTestSuite) TestAddInstanceEventInvalidPoolID() {
- err := s.Store.AddInstanceEvent(context.Background(), "dummy-id", params.StatusEvent, params.EventInfo, "dummy-message")
-
- s.Require().Equal("updating instance: parsing id: invalid request", err.Error())
-}
-
func (s *InstancesTestSuite) TestAddInstanceEventDBUpdateErr() {
instance := s.Fixtures.Instances[0]
statusMsg := "test-status-message"
s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `instances` WHERE id = ? AND `instances`.`deleted_at` IS NULL ORDER BY `instances`.`id` LIMIT 1")).
- WithArgs(instance.ID).
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `instances` WHERE name = ? AND `instances`.`deleted_at` IS NULL ORDER BY `instances`.`id` LIMIT ?")).
+ WithArgs(instance.Name, 1).
WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(instance.ID))
s.Fixtures.SQLMock.
ExpectQuery(regexp.QuoteMeta("SELECT * FROM `addresses` WHERE `addresses`.`instance_id` = ? AND `addresses`.`deleted_at` IS NULL")).
WithArgs(instance.ID).
WillReturnRows(sqlmock.NewRows([]string{"address", "type", "instance_id"}).AddRow("10.10.1.10", "private", instance.ID))
+ s.Fixtures.SQLMock.
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `workflow_jobs` WHERE `workflow_jobs`.`instance_id` = ? AND `workflow_jobs`.`deleted_at` IS NULL")).
+ WithArgs(instance.ID).
+ WillReturnRows(sqlmock.NewRows([]string{}))
s.Fixtures.SQLMock.
ExpectQuery(regexp.QuoteMeta("SELECT * FROM `instance_status_updates` WHERE `instance_status_updates`.`instance_id` = ? AND `instance_status_updates`.`deleted_at` IS NULL")).
WithArgs(instance.ID).
@@ -392,15 +409,15 @@ func (s *InstancesTestSuite) TestAddInstanceEventDBUpdateErr() {
WillReturnError(fmt.Errorf("mocked add status message error"))
s.Fixtures.SQLMock.ExpectRollback()
- err := s.StoreSQLMocked.AddInstanceEvent(context.Background(), instance.ID, params.StatusEvent, params.EventInfo, statusMsg)
+ err := s.StoreSQLMocked.AddInstanceEvent(s.adminCtx, instance.Name, params.StatusEvent, params.EventInfo, statusMsg)
- s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("adding status message: mocked add status message error", err.Error())
+ s.Require().Equal("error adding status message: mocked add status message error", err.Error())
+ s.assertSQLMockExpectations()
}
func (s *InstancesTestSuite) TestUpdateInstance() {
- instance, err := s.Store.UpdateInstance(context.Background(), s.Fixtures.Instances[0].ID, s.Fixtures.UpdateInstanceParams)
+ instance, err := s.Store.UpdateInstance(s.adminCtx, s.Fixtures.Instances[0].Name, s.Fixtures.UpdateInstanceParams)
s.Require().Nil(err)
s.Require().Equal(s.Fixtures.UpdateInstanceParams.ProviderID, instance.ProviderID)
@@ -412,23 +429,21 @@ func (s *InstancesTestSuite) TestUpdateInstance() {
s.Require().Equal(s.Fixtures.UpdateInstanceParams.CreateAttempt, instance.CreateAttempt)
}
-func (s *InstancesTestSuite) TestUpdateInstanceInvalidPoolID() {
- _, err := s.Store.UpdateInstance(context.Background(), "dummy-id", params.UpdateInstanceParams{})
-
- s.Require().Equal("updating instance: parsing id: invalid request", err.Error())
-}
-
func (s *InstancesTestSuite) TestUpdateInstanceDBUpdateInstanceErr() {
instance := s.Fixtures.Instances[0]
s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `instances` WHERE id = ? AND `instances`.`deleted_at` IS NULL ORDER BY `instances`.`id` LIMIT 1")).
- WithArgs(instance.ID).
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `instances` WHERE name = ? AND `instances`.`deleted_at` IS NULL ORDER BY `instances`.`id` LIMIT ?")).
+ WithArgs(instance.Name, 1).
WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(instance.ID))
s.Fixtures.SQLMock.
ExpectQuery(regexp.QuoteMeta("SELECT * FROM `addresses` WHERE `addresses`.`instance_id` = ? AND `addresses`.`deleted_at` IS NULL")).
WithArgs(instance.ID).
WillReturnRows(sqlmock.NewRows([]string{"address", "type", "instance_id"}).AddRow("10.10.1.10", "private", instance.ID))
+ s.Fixtures.SQLMock.
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `workflow_jobs` WHERE `workflow_jobs`.`instance_id` = ? AND `workflow_jobs`.`deleted_at` IS NULL")).
+ WithArgs(instance.ID).
+ WillReturnRows(sqlmock.NewRows([]string{}))
s.Fixtures.SQLMock.
ExpectQuery(regexp.QuoteMeta("SELECT * FROM `instance_status_updates` WHERE `instance_status_updates`.`instance_id` = ? AND `instance_status_updates`.`deleted_at` IS NULL")).
WithArgs(instance.ID).
@@ -439,24 +454,28 @@ func (s *InstancesTestSuite) TestUpdateInstanceDBUpdateInstanceErr() {
WillReturnError(fmt.Errorf("mocked update instance error"))
s.Fixtures.SQLMock.ExpectRollback()
- _, err := s.StoreSQLMocked.UpdateInstance(context.Background(), instance.ID, s.Fixtures.UpdateInstanceParams)
+ _, err := s.StoreSQLMocked.UpdateInstance(s.adminCtx, instance.Name, s.Fixtures.UpdateInstanceParams)
- s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("updating instance: mocked update instance error", err.Error())
+ s.Require().Equal("error updating instance: mocked update instance error", err.Error())
+ s.assertSQLMockExpectations()
}
func (s *InstancesTestSuite) TestUpdateInstanceDBUpdateAddressErr() {
instance := s.Fixtures.Instances[0]
s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `instances` WHERE id = ? AND `instances`.`deleted_at` IS NULL ORDER BY `instances`.`id` LIMIT 1")).
- WithArgs(instance.ID).
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `instances` WHERE name = ? AND `instances`.`deleted_at` IS NULL ORDER BY `instances`.`id` LIMIT ?")).
+ WithArgs(instance.Name, 1).
WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(instance.ID))
s.Fixtures.SQLMock.
ExpectQuery(regexp.QuoteMeta("SELECT * FROM `addresses` WHERE `addresses`.`instance_id` = ? AND `addresses`.`deleted_at` IS NULL")).
WithArgs(instance.ID).
WillReturnRows(sqlmock.NewRows([]string{"address", "type", "instance_id"}).AddRow("10.10.1.10", "private", instance.ID))
+ s.Fixtures.SQLMock.
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `workflow_jobs` WHERE `workflow_jobs`.`instance_id` = ? AND `workflow_jobs`.`deleted_at` IS NULL")).
+ WithArgs(instance.ID).
+ WillReturnRows(sqlmock.NewRows([]string{}))
s.Fixtures.SQLMock.
ExpectQuery(regexp.QuoteMeta("SELECT * FROM `instance_status_updates` WHERE `instance_status_updates`.`instance_id` = ? AND `instance_status_updates`.`deleted_at` IS NULL")).
WithArgs(instance.ID).
@@ -481,28 +500,28 @@ func (s *InstancesTestSuite) TestUpdateInstanceDBUpdateAddressErr() {
WillReturnError(fmt.Errorf("update addresses mock error"))
s.Fixtures.SQLMock.ExpectRollback()
- _, err := s.StoreSQLMocked.UpdateInstance(context.Background(), instance.ID, s.Fixtures.UpdateInstanceParams)
+ _, err := s.StoreSQLMocked.UpdateInstance(s.adminCtx, instance.Name, s.Fixtures.UpdateInstanceParams)
- s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("updating addresses: update addresses mock error", err.Error())
+ s.Require().Equal("error updating addresses: update addresses mock error", err.Error())
+ s.assertSQLMockExpectations()
}
func (s *InstancesTestSuite) TestListPoolInstances() {
- instances, err := s.Store.ListPoolInstances(context.Background(), s.Fixtures.Pool.ID)
+ instances, err := s.Store.ListPoolInstances(s.adminCtx, s.Fixtures.Pool.ID)
s.Require().Nil(err)
s.equalInstancesByName(s.Fixtures.Instances, instances)
}
func (s *InstancesTestSuite) TestListPoolInstancesInvalidPoolID() {
- _, err := s.Store.ListPoolInstances(context.Background(), "dummy-pool-id")
+ _, err := s.Store.ListPoolInstances(s.adminCtx, "dummy-pool-id")
- s.Require().Equal("parsing id: invalid request", err.Error())
+ s.Require().Equal("error parsing id: invalid request", err.Error())
}
func (s *InstancesTestSuite) TestListAllInstances() {
- instances, err := s.Store.ListAllInstances(context.Background())
+ instances, err := s.Store.ListAllInstances(s.adminCtx)
s.Require().Nil(err)
s.equalInstancesByName(s.Fixtures.Instances, instances)
@@ -513,46 +532,45 @@ func (s *InstancesTestSuite) TestListAllInstancesDBFetchErr() {
ExpectQuery(regexp.QuoteMeta("SELECT * FROM `instances` WHERE `instances`.`deleted_at` IS NULL")).
WillReturnError(fmt.Errorf("fetch instances mock error"))
- _, err := s.StoreSQLMocked.ListAllInstances(context.Background())
+ _, err := s.StoreSQLMocked.ListAllInstances(s.adminCtx)
s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("fetching instances: fetch instances mock error", err.Error())
+ s.Require().Equal("error fetching instances: fetch instances mock error", err.Error())
}
func (s *InstancesTestSuite) TestPoolInstanceCount() {
- instancesCount, err := s.Store.PoolInstanceCount(context.Background(), s.Fixtures.Pool.ID)
+ instancesCount, err := s.Store.PoolInstanceCount(s.adminCtx, s.Fixtures.Pool.ID)
s.Require().Nil(err)
s.Require().Equal(int64(len(s.Fixtures.Instances)), instancesCount)
}
func (s *InstancesTestSuite) TestPoolInstanceCountInvalidPoolID() {
- _, err := s.Store.PoolInstanceCount(context.Background(), "dummy-pool-id")
+ _, err := s.Store.PoolInstanceCount(s.adminCtx, "dummy-pool-id")
- s.Require().Equal("fetching pool: parsing id: invalid request", err.Error())
+ s.Require().Equal("error fetching pool: error parsing id: invalid request", err.Error())
}
func (s *InstancesTestSuite) TestPoolInstanceCountDBCountErr() {
pool := s.Fixtures.Pool
s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE id = ? AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT 1")).
- WithArgs(pool.ID).
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE id = ? AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT ?")).
+ WithArgs(pool.ID, 1).
WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(pool.ID))
s.Fixtures.SQLMock.
ExpectQuery(regexp.QuoteMeta("SELECT count(*) FROM `instances` WHERE pool_id = ? AND `instances`.`deleted_at` IS NULL")).
WithArgs(pool.ID).
WillReturnError(fmt.Errorf("count mock error"))
- _, err := s.StoreSQLMocked.PoolInstanceCount(context.Background(), pool.ID)
+ _, err := s.StoreSQLMocked.PoolInstanceCount(s.adminCtx, pool.ID)
s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("fetching instance count: count mock error", err.Error())
+ s.Require().Equal("error fetching instance count: count mock error", err.Error())
}
func TestInstTestSuite(t *testing.T) {
- t.Parallel()
suite.Run(t, new(InstancesTestSuite))
}
diff --git a/database/sql/jobs.go b/database/sql/jobs.go
index 091dfd7c..ffa3a7b5 100644
--- a/database/sql/jobs.go
+++ b/database/sql/jobs.go
@@ -1,16 +1,33 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
package sql
import (
"context"
"encoding/json"
+ "errors"
+ "fmt"
+ "log/slog"
+
+ "github.com/google/uuid"
+ "gorm.io/gorm"
+ "gorm.io/gorm/clause"
runnerErrors "github.com/cloudbase/garm-provider-common/errors"
"github.com/cloudbase/garm/database/common"
"github.com/cloudbase/garm/params"
- "github.com/google/uuid"
- "github.com/pkg/errors"
- "gorm.io/gorm"
- "gorm.io/gorm/clause"
)
var _ common.JobsStore = &sqlDatabase{}
@@ -19,12 +36,14 @@ func sqlWorkflowJobToParamsJob(job WorkflowJob) (params.Job, error) {
labels := []string{}
if job.Labels != nil {
if err := json.Unmarshal(job.Labels, &labels); err != nil {
- return params.Job{}, errors.Wrap(err, "unmarshaling labels")
+ return params.Job{}, fmt.Errorf("error unmarshaling labels: %w", err)
}
}
- return params.Job{
+ jobParam := params.Job{
ID: job.ID,
+ WorkflowJobID: job.WorkflowJobID,
+ ScaleSetJobID: job.ScaleSetJobID,
RunID: job.RunID,
Action: job.Action,
Status: job.Status,
@@ -33,7 +52,6 @@ func sqlWorkflowJobToParamsJob(job WorkflowJob) (params.Job, error) {
StartedAt: job.StartedAt,
CompletedAt: job.CompletedAt,
GithubRunnerID: job.GithubRunnerID,
- RunnerName: job.RunnerName,
RunnerGroupID: job.RunnerGroupID,
RunnerGroupName: job.RunnerGroupName,
RepositoryName: job.RepositoryName,
@@ -45,16 +63,23 @@ func sqlWorkflowJobToParamsJob(job WorkflowJob) (params.Job, error) {
CreatedAt: job.CreatedAt,
UpdatedAt: job.UpdatedAt,
LockedBy: job.LockedBy,
- }, nil
+ }
+
+ if job.InstanceID != nil {
+ jobParam.RunnerName = job.Instance.Name
+ }
+ return jobParam, nil
}
-func paramsJobToWorkflowJob(job params.Job) (WorkflowJob, error) {
- asJson, err := json.Marshal(job.Labels)
+func (s *sqlDatabase) paramsJobToWorkflowJob(ctx context.Context, job params.Job) (WorkflowJob, error) {
+ asJSON, err := json.Marshal(job.Labels)
if err != nil {
- return WorkflowJob{}, errors.Wrap(err, "marshaling labels")
+ return WorkflowJob{}, fmt.Errorf("error marshaling labels: %w", err)
}
- return WorkflowJob{
- ID: job.ID,
+
+ workflowJob := WorkflowJob{
+ ScaleSetJobID: job.ScaleSetJobID,
+ WorkflowJobID: job.WorkflowJobID,
RunID: job.RunID,
Action: job.Action,
Status: job.Status,
@@ -63,7 +88,6 @@ func paramsJobToWorkflowJob(job params.Job) (WorkflowJob, error) {
StartedAt: job.StartedAt,
CompletedAt: job.CompletedAt,
GithubRunnerID: job.GithubRunnerID,
- RunnerName: job.RunnerName,
RunnerGroupID: job.RunnerGroupID,
RunnerGroupName: job.RunnerGroupName,
RepositoryName: job.RepositoryName,
@@ -71,35 +95,67 @@ func paramsJobToWorkflowJob(job params.Job) (WorkflowJob, error) {
RepoID: job.RepoID,
OrgID: job.OrgID,
EnterpriseID: job.EnterpriseID,
- Labels: asJson,
+ Labels: asJSON,
LockedBy: job.LockedBy,
- }, nil
+ }
+
+ if job.RunnerName != "" {
+ instance, err := s.getInstance(ctx, job.RunnerName)
+ if err != nil {
+ // This is usually expected, as not all jobs run on our runners.
+ slog.DebugContext(ctx, "failed to get instance by name", "instance_name", job.RunnerName)
+ } else {
+ workflowJob.InstanceID = &instance.ID
+ }
+ }
+
+ return workflowJob, nil
}
-func (s *sqlDatabase) DeleteJob(ctx context.Context, jobID int64) error {
- q := s.conn.Delete(&WorkflowJob{}, jobID)
+func (s *sqlDatabase) DeleteJob(_ context.Context, jobID int64) (err error) {
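+ // Fetch the job first so a delete notification with the full payload
+ // can be emitted after the row is removed.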
+ var workflowJob WorkflowJob
+ q := s.conn.Where("workflow_job_id = ?", jobID).Preload("Instance").First(&workflowJob)
if q.Error != nil {
if errors.Is(q.Error, gorm.ErrRecordNotFound) {
return nil
}
- return errors.Wrap(q.Error, "deleting job")
+ return fmt.Errorf("error fetching job: %w", q.Error)
+ }
+ removedJob, err := sqlWorkflowJobToParamsJob(workflowJob)
+ if err != nil {
+ return fmt.Errorf("error converting job: %w", err)
+ }
+
+ defer func() {
+ if err == nil {
+ if notifyErr := s.sendNotify(common.JobEntityType, common.DeleteOperation, removedJob); notifyErr != nil {
+ slog.With(slog.Any("error", notifyErr)).Error("failed to send notify")
+ }
+ }
+ }()
+ q = s.conn.Delete(&workflowJob)
+ if q.Error != nil {
+ if errors.Is(q.Error, gorm.ErrRecordNotFound) {
+ return nil
+ }
+ return fmt.Errorf("error deleting job: %w", q.Error)
}
return nil
}
-func (s *sqlDatabase) LockJob(ctx context.Context, jobID int64, entityID string) error {
+func (s *sqlDatabase) LockJob(_ context.Context, jobID int64, entityID string) error {
entityUUID, err := uuid.Parse(entityID)
if err != nil {
- return errors.Wrap(err, "parsing entity id")
+ return fmt.Errorf("error parsing entity id: %w", err)
}
var workflowJob WorkflowJob
- q := s.conn.Clauses(clause.Locking{Strength: "UPDATE"}).Where("id = ?", jobID).First(&workflowJob)
+ q := s.conn.Preload("Instance").Where("workflow_job_id = ?", jobID).First(&workflowJob)
if q.Error != nil {
if errors.Is(q.Error, gorm.ErrRecordNotFound) {
return runnerErrors.ErrNotFound
}
- return errors.Wrap(q.Error, "fetching job")
+ return fmt.Errorf("error fetching job: %w", q.Error)
}
if workflowJob.LockedBy.String() == entityID {
@@ -114,21 +170,27 @@ func (s *sqlDatabase) LockJob(ctx context.Context, jobID int64, entityID string)
workflowJob.LockedBy = entityUUID
if err := s.conn.Save(&workflowJob).Error; err != nil {
- return errors.Wrap(err, "saving job")
+ return fmt.Errorf("error saving job: %w", err)
}
+ asParams, err := sqlWorkflowJobToParamsJob(workflowJob)
+ if err != nil {
+ return fmt.Errorf("error converting job: %w", err)
+ }
+ s.sendNotify(common.JobEntityType, common.UpdateOperation, asParams)
+
return nil
}
-func (s *sqlDatabase) BreakLockJobIsQueued(ctx context.Context, jobID int64) error {
+func (s *sqlDatabase) BreakLockJobIsQueued(_ context.Context, jobID int64) (err error) {
var workflowJob WorkflowJob
- q := s.conn.Clauses(clause.Locking{Strength: "UPDATE"}).Where("id = ? and status = ?", jobID, params.JobStatusQueued).First(&workflowJob)
+ q := s.conn.Clauses(clause.Locking{Strength: "UPDATE"}).Preload("Instance").Where("workflow_job_id = ? and status = ?", jobID, params.JobStatusQueued).First(&workflowJob)
if q.Error != nil {
if errors.Is(q.Error, gorm.ErrRecordNotFound) {
return nil
}
- return errors.Wrap(q.Error, "fetching job")
+ return fmt.Errorf("error fetching job: %w", q.Error)
}
if workflowJob.LockedBy == uuid.Nil {
@@ -138,21 +200,25 @@ func (s *sqlDatabase) BreakLockJobIsQueued(ctx context.Context, jobID int64) err
workflowJob.LockedBy = uuid.Nil
if err := s.conn.Save(&workflowJob).Error; err != nil {
- return errors.Wrap(err, "saving job")
+ return fmt.Errorf("error saving job: %w", err)
}
-
+ asParams, err := sqlWorkflowJobToParamsJob(workflowJob)
+ if err != nil {
+ return fmt.Errorf("error converting job: %w", err)
+ }
+ s.sendNotify(common.JobEntityType, common.UpdateOperation, asParams)
return nil
}
-func (s *sqlDatabase) UnlockJob(ctx context.Context, jobID int64, entityID string) error {
+func (s *sqlDatabase) UnlockJob(_ context.Context, jobID int64, entityID string) error {
var workflowJob WorkflowJob
- q := s.conn.Clauses(clause.Locking{Strength: "UPDATE"}).Where("id = ?", jobID).First(&workflowJob)
+ q := s.conn.Clauses(clause.Locking{Strength: "UPDATE"}).Where("workflow_job_id = ?", jobID).First(&workflowJob)
if q.Error != nil {
if errors.Is(q.Error, gorm.ErrRecordNotFound) {
return runnerErrors.ErrNotFound
}
- return errors.Wrap(q.Error, "fetching job")
+ return fmt.Errorf("error fetching job: %w", q.Error)
}
if workflowJob.LockedBy == uuid.Nil {
@@ -166,24 +232,39 @@ func (s *sqlDatabase) UnlockJob(ctx context.Context, jobID int64, entityID strin
workflowJob.LockedBy = uuid.Nil
if err := s.conn.Save(&workflowJob).Error; err != nil {
- return errors.Wrap(err, "saving job")
+ return fmt.Errorf("error saving job: %w", err)
}
+ asParams, err := sqlWorkflowJobToParamsJob(workflowJob)
+ if err != nil {
+ return fmt.Errorf("error converting job: %w", err)
+ }
+ s.sendNotify(common.JobEntityType, common.UpdateOperation, asParams)
return nil
}
func (s *sqlDatabase) CreateOrUpdateJob(ctx context.Context, job params.Job) (params.Job, error) {
var workflowJob WorkflowJob
- q := s.conn.Clauses(clause.Locking{Strength: "UPDATE"}).Where("id = ?", job.ID).First(&workflowJob)
+ var err error
+
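+ // Scale set jobs are identified by their scale set job ID; plain
+ // workflow jobs by their workflow job ID.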
+ searchField := "workflow_job_id = ?"
+ var searchVal any = job.WorkflowJobID
+ if job.ScaleSetJobID != "" {
+ searchField = "scale_set_job_id = ?"
+ searchVal = job.ScaleSetJobID
+ }
+ q := s.conn.Preload("Instance").Where(searchField, searchVal).First(&workflowJob)
if q.Error != nil {
if !errors.Is(q.Error, gorm.ErrRecordNotFound) {
- return params.Job{}, errors.Wrap(q.Error, "fetching job")
+ return params.Job{}, fmt.Errorf("error fetching job: %w", q.Error)
}
}
-
+ var operation common.OperationType
if workflowJob.ID != 0 {
// Update workflowJob with values from job.
+ operation = common.UpdateOperation
+
workflowJob.Status = job.Status
workflowJob.Action = job.Action
workflowJob.Conclusion = job.Conclusion
@@ -192,13 +273,22 @@ func (s *sqlDatabase) CreateOrUpdateJob(ctx context.Context, job params.Job) (pa
workflowJob.GithubRunnerID = job.GithubRunnerID
workflowJob.RunnerGroupID = job.RunnerGroupID
workflowJob.RunnerGroupName = job.RunnerGroupName
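+ // Backfill the run ID if it was not known when the job was first
+ // recorded.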
+ if job.RunID != 0 && workflowJob.RunID == 0 {
+ workflowJob.RunID = job.RunID
+ }
if job.LockedBy != uuid.Nil {
workflowJob.LockedBy = job.LockedBy
}
if job.RunnerName != "" {
- workflowJob.RunnerName = job.RunnerName
+ instance, err := s.getInstance(ctx, job.RunnerName)
+ if err == nil {
+ workflowJob.InstanceID = &instance.ID
+ } else {
+ // This is usually expected, as not all jobs run on our runners.
+ slog.DebugContext(ctx, "failed to get instance by name", "instance_name", job.RunnerName)
+ }
}
if job.RepoID != nil {
@@ -213,25 +303,33 @@ func (s *sqlDatabase) CreateOrUpdateJob(ctx context.Context, job params.Job) (pa
workflowJob.EnterpriseID = job.EnterpriseID
}
if err := s.conn.Save(&workflowJob).Error; err != nil {
- return params.Job{}, errors.Wrap(err, "saving job")
+ return params.Job{}, fmt.Errorf("error saving job: %w", err)
}
} else {
- workflowJob, err := paramsJobToWorkflowJob(job)
+ operation = common.CreateOperation
+
+ workflowJob, err = s.paramsJobToWorkflowJob(ctx, job)
if err != nil {
- return params.Job{}, errors.Wrap(err, "converting job")
+ return params.Job{}, fmt.Errorf("error converting job: %w", err)
}
if err := s.conn.Create(&workflowJob).Error; err != nil {
- return params.Job{}, errors.Wrap(err, "creating job")
+ return params.Job{}, fmt.Errorf("error creating job: %w", err)
}
}
- return sqlWorkflowJobToParamsJob(workflowJob)
+ asParams, err := sqlWorkflowJobToParamsJob(workflowJob)
+ if err != nil {
+ return params.Job{}, fmt.Errorf("error converting job: %w", err)
+ }
+ s.sendNotify(common.JobEntityType, operation, asParams)
+
+ return asParams, nil
}
// ListJobsByStatus lists all jobs for a given status.
-func (s *sqlDatabase) ListJobsByStatus(ctx context.Context, status params.JobStatus) ([]params.Job, error) {
+func (s *sqlDatabase) ListJobsByStatus(_ context.Context, status params.JobStatus) ([]params.Job, error) {
var jobs []WorkflowJob
- query := s.conn.Model(&WorkflowJob{}).Where("status = ?", status)
+ query := s.conn.Model(&WorkflowJob{}).Preload("Instance").Where("status = ?", status)
if err := query.Find(&jobs); err.Error != nil {
return nil, err.Error
@@ -241,7 +339,7 @@ func (s *sqlDatabase) ListJobsByStatus(ctx context.Context, status params.JobSta
for idx, job := range jobs {
jobParam, err := sqlWorkflowJobToParamsJob(job)
if err != nil {
- return nil, errors.Wrap(err, "converting job")
+ return nil, fmt.Errorf("error converting job: %w", err)
}
ret[idx] = jobParam
}
@@ -249,21 +347,25 @@ func (s *sqlDatabase) ListJobsByStatus(ctx context.Context, status params.JobSta
}
// ListEntityJobsByStatus lists all jobs for a given entity type and id.
-func (s *sqlDatabase) ListEntityJobsByStatus(ctx context.Context, entityType params.PoolType, entityID string, status params.JobStatus) ([]params.Job, error) {
+func (s *sqlDatabase) ListEntityJobsByStatus(_ context.Context, entityType params.ForgeEntityType, entityID string, status params.JobStatus) ([]params.Job, error) {
u, err := uuid.Parse(entityID)
if err != nil {
return nil, err
}
var jobs []WorkflowJob
- query := s.conn.Model(&WorkflowJob{}).Where("status = ?", status)
+ query := s.conn.
+ Model(&WorkflowJob{}).
+ Preload("Instance").
+ Where("status = ?", status).
+ Where("workflow_job_id > 0")
switch entityType {
- case params.OrganizationPool:
+ case params.ForgeEntityTypeOrganization:
query = query.Where("org_id = ?", u)
- case params.RepositoryPool:
+ case params.ForgeEntityTypeRepository:
query = query.Where("repo_id = ?", u)
- case params.EnterprisePool:
+ case params.ForgeEntityTypeEnterprise:
query = query.Where("enterprise_id = ?", u)
}
@@ -278,18 +380,18 @@ func (s *sqlDatabase) ListEntityJobsByStatus(ctx context.Context, entityType par
for idx, job := range jobs {
jobParam, err := sqlWorkflowJobToParamsJob(job)
if err != nil {
- return nil, errors.Wrap(err, "converting job")
+ return nil, fmt.Errorf("error converting job: %w", err)
}
ret[idx] = jobParam
}
return ret, nil
}
-func (s *sqlDatabase) ListAllJobs(ctx context.Context) ([]params.Job, error) {
+func (s *sqlDatabase) ListAllJobs(_ context.Context) ([]params.Job, error) {
var jobs []WorkflowJob
query := s.conn.Model(&WorkflowJob{})
- if err := query.Find(&jobs); err.Error != nil {
+ if err := query.Preload("Instance").Find(&jobs); err.Error != nil {
if errors.Is(err.Error, gorm.ErrRecordNotFound) {
return []params.Job{}, nil
}
@@ -300,7 +402,7 @@ func (s *sqlDatabase) ListAllJobs(ctx context.Context) ([]params.Job, error) {
for idx, job := range jobs {
jobParam, err := sqlWorkflowJobToParamsJob(job)
if err != nil {
- return nil, errors.Wrap(err, "converting job")
+ return nil, fmt.Errorf("error converting job: %w", err)
}
ret[idx] = jobParam
}
@@ -308,9 +410,9 @@ func (s *sqlDatabase) ListAllJobs(ctx context.Context) ([]params.Job, error) {
}
// GetJobByID gets a job by id.
-func (s *sqlDatabase) GetJobByID(ctx context.Context, jobID int64) (params.Job, error) {
+func (s *sqlDatabase) GetJobByID(_ context.Context, jobID int64) (params.Job, error) {
var job WorkflowJob
- query := s.conn.Model(&WorkflowJob{}).Where("id = ?", jobID)
+ query := s.conn.Model(&WorkflowJob{}).Preload("Instance").Where("workflow_job_id = ?", jobID)
if err := query.First(&job); err.Error != nil {
if errors.Is(err.Error, gorm.ErrRecordNotFound) {
@@ -323,7 +425,7 @@ func (s *sqlDatabase) GetJobByID(ctx context.Context, jobID int64) (params.Job,
}
// DeleteCompletedJobs deletes all completed jobs.
-func (s *sqlDatabase) DeleteCompletedJobs(ctx context.Context) error {
+func (s *sqlDatabase) DeleteCompletedJobs(_ context.Context) error {
query := s.conn.Model(&WorkflowJob{}).Where("status = ?", params.JobStatusCompleted)
if err := query.Unscoped().Delete(&WorkflowJob{}); err.Error != nil {
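
The jobs.go changes above consistently add Preload("Instance") to job queries: the old RunnerName column was replaced by an InstanceID foreign key, so the runner now arrives as an association. A minimal, self-contained GORM sketch (pared-down stand-ins, not GARM's actual models) of what the preload buys:

package main

import (
	"fmt"

	"gorm.io/driver/sqlite"
	"gorm.io/gorm"
)

// Pared-down stand-ins for the real models, for illustration only.
type Instance struct {
	ID   uint
	Name string
}

type WorkflowJob struct {
	ID         uint
	InstanceID *uint
	Instance   Instance // belongs-to association resolved via InstanceID
}

func main() {
	db, err := gorm.Open(sqlite.Open(":memory:"), &gorm.Config{})
	if err != nil {
		panic(err)
	}
	if err := db.AutoMigrate(&Instance{}, &WorkflowJob{}); err != nil {
		panic(err)
	}

	inst := Instance{Name: "runner-1"}
	db.Create(&inst)
	db.Create(&WorkflowJob{InstanceID: &inst.ID})

	var bare, loaded WorkflowJob
	db.First(&bare)                       // association left zero-valued
	db.Preload("Instance").First(&loaded) // association populated

	fmt.Println(bare.Instance.Name == "", loaded.Instance.Name) // true runner-1
}

Without the preload, GORM leaves the association at its zero value, so a conversion helper like sqlWorkflowJobToParamsJob would see an empty Instance.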
diff --git a/database/sql/models.go b/database/sql/models.go
index 86a343cc..d3cb044a 100644
--- a/database/sql/models.go
+++ b/database/sql/models.go
@@ -1,4 +1,4 @@
-// Copyright 2022 Cloudbase Solutions SRL
+// Copyright 2025 Cloudbase Solutions SRL
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License. You may obtain
@@ -15,15 +15,15 @@
package sql
import (
+ "fmt"
"time"
+ "github.com/google/uuid"
+ "gorm.io/datatypes"
+ "gorm.io/gorm"
+
commonParams "github.com/cloudbase/garm-provider-common/params"
"github.com/cloudbase/garm/params"
-
- "github.com/google/uuid"
- "github.com/pkg/errors"
- "gorm.io/datatypes"
- "gorm.io/gorm"
)
type Base struct {
@@ -33,19 +33,35 @@ type Base struct {
DeletedAt gorm.DeletedAt `gorm:"index"`
}
-func (b *Base) BeforeCreate(tx *gorm.DB) error {
- emptyId := uuid.UUID{}
- if b.ID != emptyId {
+func (b *Base) BeforeCreate(_ *gorm.DB) error {
+ emptyID := uuid.UUID{}
+ if b.ID != emptyID {
return nil
}
newID, err := uuid.NewRandom()
if err != nil {
- return errors.Wrap(err, "generating id")
+ return fmt.Errorf("error generating id: %w", err)
}
b.ID = newID
return nil
}
+type ControllerInfo struct {
+ Base
+
+ ControllerID uuid.UUID
+
+ CallbackURL string
+ MetadataURL string
+ WebhookBaseURL string
+ // MinimumJobAgeBackoff is the minimum time that a job must be in the queue
+ // before GARM will attempt to allocate a runner to service it. This backoff
+ // is useful if you have idle runners in various pools that could potentially
+ // pick up the job. GARM would allow this amount of time for runners to react
+ // before spinning up a new one and potentially having to scale down later.
+ MinimumJobAgeBackoff uint
+}
+
type Tag struct {
Base
@@ -83,37 +99,155 @@ type Pool struct {
Enterprise Enterprise `gorm:"foreignKey:EnterpriseID"`
Instances []Instance `gorm:"foreignKey:PoolID"`
+ Priority uint `gorm:"index:idx_pool_priority"`
+}
+
+// ScaleSet represents a GitHub scale set. Scale sets are almost identical to pools, with a few
+// notable exceptions:
+// - Labels are no longer relevant.
+// - Workflows use the scale set name to target runners.
+// - A scale set is a standalone unit. If a workflow targets a scale set, no other runner will pick up that job.
+type ScaleSet struct {
+ gorm.Model
+
+ // ScaleSetID is the GitHub ID of the scale set. This field may not be set if
+ // the scale set was created in GARM but has not yet been created in GitHub.
+ // The scale set ID is also not globally unique. It is only unique within the context
+ // of an entity.
+ ScaleSetID int `gorm:"index:idx_scale_set"`
+ Name string `gorm:"unique_index:idx_name"`
+ GitHubRunnerGroup string `gorm:"unique_index:idx_name"`
+ DisableUpdate bool
+
+ // State stores the provisioning state of the scale set in GitHub
+ State params.ScaleSetState
+ // ExtendedState stores a more detailed message regarding the State.
+ // If an error occurs, the reason for the error will be stored here.
+ ExtendedState string
+
+ ProviderName string
+ RunnerPrefix string
+ MaxRunners uint
+ MinIdleRunners uint
+ RunnerBootstrapTimeout uint
+ Image string
+ Flavor string
+ OSType commonParams.OSType
+ OSArch commonParams.OSArch
+ Enabled bool
+ LastMessageID int64
+ DesiredRunnerCount int
+ // ExtraSpecs is an opaque json that gets sent to the provider
+ // as part of the bootstrap params for instances. It can contain
+ // any kind of data needed by providers.
+ ExtraSpecs datatypes.JSON
+
+ RepoID *uuid.UUID `gorm:"index"`
+ Repository Repository `gorm:"foreignKey:RepoID;"`
+
+ OrgID *uuid.UUID `gorm:"index"`
+ Organization Organization `gorm:"foreignKey:OrgID"`
+
+ EnterpriseID *uuid.UUID `gorm:"index"`
+ Enterprise Enterprise `gorm:"foreignKey:EnterpriseID"`
+
+ Instances []Instance `gorm:"foreignKey:ScaleSetFkID"`
+}
+
+type RepositoryEvent struct {
+ gorm.Model
+
+ EventType params.EventType
+ EventLevel params.EventLevel
+ Message string `gorm:"type:text"`
+
+ RepoID uuid.UUID `gorm:"index:idx_repo_event"`
+ Repo Repository `gorm:"foreignKey:RepoID"`
}
type Repository struct {
Base
- CredentialsName string
- Owner string `gorm:"index:idx_owner_nocase,unique,collate:nocase"`
- Name string `gorm:"index:idx_owner_nocase,unique,collate:nocase"`
- WebhookSecret []byte
- Pools []Pool `gorm:"foreignKey:RepoID"`
- Jobs []WorkflowJob `gorm:"foreignKey:RepoID;constraint:OnDelete:SET NULL"`
+ CredentialsID *uint `gorm:"index"`
+ Credentials GithubCredentials `gorm:"foreignKey:CredentialsID;constraint:OnDelete:SET NULL"`
+
+ GiteaCredentialsID *uint `gorm:"index"`
+ GiteaCredentials GiteaCredentials `gorm:"foreignKey:GiteaCredentialsID;constraint:OnDelete:SET NULL"`
+
+ Owner string `gorm:"index:idx_owner_nocase,unique,collate:nocase"`
+ Name string `gorm:"index:idx_owner_nocase,unique,collate:nocase"`
+ WebhookSecret []byte
+ Pools []Pool `gorm:"foreignKey:RepoID"`
+ ScaleSets []ScaleSet `gorm:"foreignKey:RepoID"`
+ Jobs []WorkflowJob `gorm:"foreignKey:RepoID;constraint:OnDelete:SET NULL"`
+ PoolBalancerType params.PoolBalancerType `gorm:"type:varchar(64)"`
+
+ EndpointName *string `gorm:"index:idx_owner_nocase,unique,collate:nocase"`
+ Endpoint GithubEndpoint `gorm:"foreignKey:EndpointName;constraint:OnDelete:SET NULL"`
+
+ Events []RepositoryEvent `gorm:"foreignKey:RepoID;constraint:OnDelete:CASCADE,OnUpdate:CASCADE;"`
}
+type OrganizationEvent struct {
+ gorm.Model
+
+ EventType params.EventType
+ EventLevel params.EventLevel
+ Message string `gorm:"type:text"`
+
+ OrgID uuid.UUID `gorm:"index:idx_org_event"`
+ Org Organization `gorm:"foreignKey:OrgID"`
+}
type Organization struct {
Base
- CredentialsName string
- Name string `gorm:"index:idx_org_name_nocase,collate:nocase"`
- WebhookSecret []byte
- Pools []Pool `gorm:"foreignKey:OrgID"`
- Jobs []WorkflowJob `gorm:"foreignKey:OrgID;constraint:OnDelete:SET NULL"`
+ CredentialsID *uint `gorm:"index"`
+ Credentials GithubCredentials `gorm:"foreignKey:CredentialsID;constraint:OnDelete:SET NULL"`
+
+ GiteaCredentialsID *uint `gorm:"index"`
+ GiteaCredentials GiteaCredentials `gorm:"foreignKey:GiteaCredentialsID;constraint:OnDelete:SET NULL"`
+
+ Name string `gorm:"index:idx_org_name_nocase,collate:nocase"`
+ WebhookSecret []byte
+ Pools []Pool `gorm:"foreignKey:OrgID"`
+ ScaleSet []ScaleSet `gorm:"foreignKey:OrgID"`
+ Jobs []WorkflowJob `gorm:"foreignKey:OrgID;constraint:OnDelete:SET NULL"`
+ PoolBalancerType params.PoolBalancerType `gorm:"type:varchar(64)"`
+
+ EndpointName *string `gorm:"index:idx_org_name_nocase,collate:nocase"`
+ Endpoint GithubEndpoint `gorm:"foreignKey:EndpointName;constraint:OnDelete:SET NULL"`
+
+ Events []OrganizationEvent `gorm:"foreignKey:OrgID;constraint:OnDelete:CASCADE,OnUpdate:CASCADE;"`
+}
+
+type EnterpriseEvent struct {
+ gorm.Model
+
+ EventType params.EventType
+ EventLevel params.EventLevel
+ Message string `gorm:"type:text"`
+
+ EnterpriseID uuid.UUID `gorm:"index:idx_enterprise_event"`
+ Enterprise Enterprise `gorm:"foreignKey:EnterpriseID"`
}
type Enterprise struct {
Base
- CredentialsName string
- Name string `gorm:"index:idx_ent_name_nocase,collate:nocase"`
- WebhookSecret []byte
- Pools []Pool `gorm:"foreignKey:EnterpriseID"`
- Jobs []WorkflowJob `gorm:"foreignKey:EnterpriseID;constraint:OnDelete:SET NULL"`
+ CredentialsID *uint `gorm:"index"`
+ Credentials GithubCredentials `gorm:"foreignKey:CredentialsID;constraint:OnDelete:SET NULL"`
+
+ Name string `gorm:"index:idx_ent_name_nocase,collate:nocase"`
+ WebhookSecret []byte
+ Pools []Pool `gorm:"foreignKey:EnterpriseID"`
+ ScaleSet []ScaleSet `gorm:"foreignKey:EnterpriseID"`
+ Jobs []WorkflowJob `gorm:"foreignKey:EnterpriseID;constraint:OnDelete:SET NULL"`
+ PoolBalancerType params.PoolBalancerType `gorm:"type:varchar(64)"`
+
+ EndpointName *string `gorm:"index:idx_ent_name_nocase,collate:nocase"`
+ Endpoint GithubEndpoint `gorm:"foreignKey:EndpointName;constraint:OnDelete:SET NULL"`
+
+ Events []EnterpriseEvent `gorm:"foreignKey:EnterpriseID;constraint:OnDelete:CASCADE,OnUpdate:CASCADE;"`
}
type Address struct {
@@ -155,35 +289,42 @@ type Instance struct {
ProviderFault []byte `gorm:"type:longblob"`
CreateAttempt int
TokenFetched bool
+ JitConfiguration []byte `gorm:"type:longblob"`
GitHubRunnerGroup string
AditionalLabels datatypes.JSON
- PoolID uuid.UUID
+ PoolID *uuid.UUID
Pool Pool `gorm:"foreignKey:PoolID"`
+ ScaleSetFkID *uint
+ ScaleSet ScaleSet `gorm:"foreignKey:ScaleSetFkID"`
+
StatusMessages []InstanceStatusUpdate `gorm:"foreignKey:InstanceID;constraint:OnDelete:CASCADE,OnUpdate:CASCADE;"`
+
+ Job *WorkflowJob `gorm:"foreignKey:InstanceID;constraint:OnDelete:CASCADE,OnUpdate:CASCADE;"`
}
type User struct {
Base
- Username string `gorm:"uniqueIndex;varchar(64)"`
- FullName string `gorm:"type:varchar(254)"`
- Email string `gorm:"type:varchar(254);unique;index:idx_email"`
- Password string `gorm:"type:varchar(60)"`
- IsAdmin bool
- Enabled bool
-}
-
-type ControllerInfo struct {
- Base
-
- ControllerID uuid.UUID
+ Username string `gorm:"uniqueIndex;varchar(64)"`
+ FullName string `gorm:"type:varchar(254)"`
+ Email string `gorm:"type:varchar(254);unique;index:idx_email"`
+ Password string `gorm:"type:varchar(60)"`
+ Generation uint
+ IsAdmin bool
+ Enabled bool
}
type WorkflowJob struct {
// ID is the ID of the job.
ID int64 `gorm:"index"`
+
+ // WorkflowJobID is the ID of the workflow job.
+ WorkflowJobID int64 `gorm:"index:workflow_job_id_idx"`
+ // ScaleSetJobID is the job ID for a scaleset job.
+ ScaleSetJobID string `gorm:"index:scaleset_job_id_idx"`
+
// RunID is the ID of the workflow run. A run may have multiple jobs.
RunID int64
// Action is the specific activity that triggered the event.
@@ -201,8 +342,11 @@ type WorkflowJob struct {
StartedAt time.Time
CompletedAt time.Time
- GithubRunnerID int64
- RunnerName string
+ GithubRunnerID int64
+
+ InstanceID *uuid.UUID `gorm:"index:idx_instance_job"`
+ Instance Instance `gorm:"foreignKey:InstanceID"`
+
RunnerGroupID int64
RunnerGroupName string
@@ -236,3 +380,55 @@ type WorkflowJob struct {
UpdatedAt time.Time
DeletedAt gorm.DeletedAt `gorm:"index"`
}
+
+type GithubEndpoint struct {
+ Name string `gorm:"type:varchar(64) collate nocase;primary_key;"`
+ CreatedAt time.Time
+ UpdatedAt time.Time
+ DeletedAt gorm.DeletedAt `gorm:"index"`
+
+ EndpointType params.EndpointType `gorm:"index:idx_endpoint_type"`
+
+ Description string `gorm:"type:text"`
+ APIBaseURL string `gorm:"type:text collate nocase"`
+ UploadBaseURL string `gorm:"type:text collate nocase"`
+ BaseURL string `gorm:"type:text collate nocase"`
+ CACertBundle []byte `gorm:"type:longblob"`
+}
+
+type GithubCredentials struct {
+ gorm.Model
+
+ Name string `gorm:"index:idx_github_credentials,unique;type:varchar(64) collate nocase"`
+ UserID *uuid.UUID `gorm:"index:idx_github_credentials,unique"`
+ User User `gorm:"foreignKey:UserID"`
+
+ Description string `gorm:"type:text"`
+ AuthType params.ForgeAuthType `gorm:"index"`
+ Payload []byte `gorm:"type:longblob"`
+
+ Endpoint GithubEndpoint `gorm:"foreignKey:EndpointName"`
+ EndpointName *string `gorm:"index"`
+
+ Repositories []Repository `gorm:"foreignKey:CredentialsID"`
+ Organizations []Organization `gorm:"foreignKey:CredentialsID"`
+ Enterprises []Enterprise `gorm:"foreignKey:CredentialsID"`
+}
+
+type GiteaCredentials struct {
+ gorm.Model
+
+ Name string `gorm:"index:idx_gitea_credentials,unique;type:varchar(64) collate nocase"`
+ UserID *uuid.UUID `gorm:"index:idx_gitea_credentials,unique"`
+ User User `gorm:"foreignKey:UserID"`
+
+ Description string `gorm:"type:text"`
+ AuthType params.ForgeAuthType `gorm:"index"`
+ Payload []byte `gorm:"type:longblob"`
+
+ Endpoint GithubEndpoint `gorm:"foreignKey:EndpointName"`
+ EndpointName *string `gorm:"index"`
+
+ Repositories []Repository `gorm:"foreignKey:GiteaCredentialsID"`
+ Organizations []Organization `gorm:"foreignKey:GiteaCredentialsID"`
+}
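
The MinimumJobAgeBackoff comment on ControllerInfo above describes a scheduling gate: a queued job must age past the backoff before a new runner is provisioned, giving idle runners a chance to claim it first. A minimal sketch of that gate (the helper is illustrative, not GARM's real scheduler, and the unit of the stored uint is assumed to be seconds here):

package main

import (
	"fmt"
	"time"
)

// jobIsRipe reports whether a queued job has waited at least the configured
// backoff. Until then, the scheduler holds off on spinning up a new runner.
func jobIsRipe(queuedAt time.Time, backoff time.Duration, now time.Time) bool {
	return now.Sub(queuedAt) >= backoff
}

func main() {
	// Assumed to come from ControllerInfo.MinimumJobAgeBackoff (a bare uint).
	backoff := 30 * time.Second
	queuedAt := time.Now().Add(-10 * time.Second)
	fmt.Println(jobIsRipe(queuedAt, backoff, time.Now())) // false: keep waiting
}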
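Separately, Instance now has two optional parents: PoolID became a nullable *uuid.UUID and ScaleSetFkID was added for scale set membership. A hedged sketch of how calling code might resolve which parent an instance belongs to (the helper name and the orphan handling are illustrative, not from this patch):

package main

import (
	"fmt"

	"github.com/google/uuid"
)

// Pared-down Instance carrying just the two parent keys from the model above.
type Instance struct {
	Name         string
	PoolID       *uuid.UUID
	ScaleSetFkID *uint
}

// parentOf resolves the parent of an instance; exactly one key is expected set.
func parentOf(i Instance) (string, error) {
	switch {
	case i.PoolID != nil && i.ScaleSetFkID != nil:
		return "", fmt.Errorf("instance %s has both a pool and a scale set parent", i.Name)
	case i.PoolID != nil:
		return "pool:" + i.PoolID.String(), nil
	case i.ScaleSetFkID != nil:
		return fmt.Sprintf("scaleset:%d", *i.ScaleSetFkID), nil
	default:
		return "", fmt.Errorf("instance %s has no parent", i.Name)
	}
}

func main() {
	id := uuid.New()
	parent, err := parentOf(Instance{Name: "runner-1", PoolID: &id})
	if err != nil {
		panic(err)
	}
	fmt.Println(parent)
}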
diff --git a/database/sql/organizations.go b/database/sql/organizations.go
index c0a48d4d..22be6272 100644
--- a/database/sql/organizations.go
+++ b/database/sql/organizations.go
@@ -16,298 +16,237 @@ package sql
import (
"context"
+ "errors"
"fmt"
+ "log/slog"
+
+ "github.com/google/uuid"
+ "gorm.io/gorm"
runnerErrors "github.com/cloudbase/garm-provider-common/errors"
"github.com/cloudbase/garm-provider-common/util"
+ "github.com/cloudbase/garm/database/common"
"github.com/cloudbase/garm/params"
-
- "github.com/google/uuid"
- "github.com/pkg/errors"
- "gorm.io/datatypes"
- "gorm.io/gorm"
)
-func (s *sqlDatabase) CreateOrganization(ctx context.Context, name, credentialsName, webhookSecret string) (params.Organization, error) {
+func (s *sqlDatabase) CreateOrganization(ctx context.Context, name string, credentials params.ForgeCredentials, webhookSecret string, poolBalancerType params.PoolBalancerType) (param params.Organization, err error) {
if webhookSecret == "" {
return params.Organization{}, errors.New("creating org: missing secret")
}
- secret, err := util.Aes256EncodeString(webhookSecret, s.cfg.Passphrase)
+ secret, err := util.Seal([]byte(webhookSecret), []byte(s.cfg.Passphrase))
if err != nil {
- return params.Organization{}, fmt.Errorf("failed to encrypt string")
+ return params.Organization{}, fmt.Errorf("error encoding secret: %w", err)
}
+
+ defer func() {
+ if err == nil {
+ s.sendNotify(common.OrganizationEntityType, common.CreateOperation, param)
+ }
+ }()
newOrg := Organization{
- Name: name,
- WebhookSecret: secret,
- CredentialsName: credentialsName,
+ Name: name,
+ WebhookSecret: secret,
+ PoolBalancerType: poolBalancerType,
}
- q := s.conn.Create(&newOrg)
- if q.Error != nil {
- return params.Organization{}, errors.Wrap(q.Error, "creating org")
- }
+ err = s.conn.Transaction(func(tx *gorm.DB) error {
+ switch credentials.ForgeType {
+ case params.GithubEndpointType:
+ newOrg.CredentialsID = &credentials.ID
+ case params.GiteaEndpointType:
+ newOrg.GiteaCredentialsID = &credentials.ID
+ default:
+ return fmt.Errorf("unsupported credentials type: %w", runnerErrors.ErrBadRequest)
+ }
- param, err := s.sqlToCommonOrganization(newOrg)
+ newOrg.EndpointName = &credentials.Endpoint.Name
+ q := tx.Create(&newOrg)
+ if q.Error != nil {
+ return fmt.Errorf("error creating org: %w", q.Error)
+ }
+ return nil
+ })
if err != nil {
- return params.Organization{}, errors.Wrap(err, "creating org")
+ return params.Organization{}, fmt.Errorf("error creating org: %w", err)
}
- param.WebhookSecret = webhookSecret
- return param, nil
+ ret, err := s.GetOrganizationByID(ctx, newOrg.ID.String())
+ if err != nil {
+ return params.Organization{}, fmt.Errorf("error creating org: %w", err)
+ }
+
+ return ret, nil
}
-func (s *sqlDatabase) GetOrganization(ctx context.Context, name string) (params.Organization, error) {
- org, err := s.getOrg(ctx, name)
+func (s *sqlDatabase) GetOrganization(ctx context.Context, name, endpointName string) (params.Organization, error) {
+ org, err := s.getOrg(ctx, name, endpointName)
if err != nil {
- return params.Organization{}, errors.Wrap(err, "fetching org")
+ return params.Organization{}, fmt.Errorf("error fetching org: %w", err)
}
- param, err := s.sqlToCommonOrganization(org)
+ param, err := s.sqlToCommonOrganization(org, true)
if err != nil {
- return params.Organization{}, errors.Wrap(err, "fetching org")
+ return params.Organization{}, fmt.Errorf("error fetching org: %w", err)
}
return param, nil
}
-func (s *sqlDatabase) ListOrganizations(ctx context.Context) ([]params.Organization, error) {
+func (s *sqlDatabase) ListOrganizations(_ context.Context, filter params.OrganizationFilter) ([]params.Organization, error) {
var orgs []Organization
- q := s.conn.Find(&orgs)
+ q := s.conn.
+ Preload("Credentials").
+ Preload("GiteaCredentials").
+ Preload("Credentials.Endpoint").
+ Preload("GiteaCredentials.Endpoint").
+ Preload("Endpoint")
+
+ if filter.Name != "" {
+ q = q.Where("name = ?", filter.Name)
+ }
+
+ if filter.Endpoint != "" {
+ q = q.Where("endpoint_name = ?", filter.Endpoint)
+ }
+ q = q.Find(&orgs)
if q.Error != nil {
- return []params.Organization{}, errors.Wrap(q.Error, "fetching org from database")
+ return []params.Organization{}, fmt.Errorf("error fetching org from database: %w", q.Error)
}
ret := make([]params.Organization, len(orgs))
for idx, val := range orgs {
var err error
- ret[idx], err = s.sqlToCommonOrganization(val)
+ ret[idx], err = s.sqlToCommonOrganization(val, true)
if err != nil {
- return nil, errors.Wrap(err, "fetching org")
+ return nil, fmt.Errorf("error fetching org: %w", err)
}
}
return ret, nil
}
-func (s *sqlDatabase) DeleteOrganization(ctx context.Context, orgID string) error {
- org, err := s.getOrgByID(ctx, orgID)
+func (s *sqlDatabase) DeleteOrganization(ctx context.Context, orgID string) (err error) {
+ org, err := s.getOrgByID(ctx, s.conn, orgID, "Endpoint", "Credentials", "Credentials.Endpoint", "GiteaCredentials", "GiteaCredentials.Endpoint")
if err != nil {
- return errors.Wrap(err, "fetching org")
+ return fmt.Errorf("error fetching org: %w", err)
}
+ defer func(org Organization) {
+ if err == nil {
+ asParam, innerErr := s.sqlToCommonOrganization(org, true)
+ if innerErr == nil {
+ s.sendNotify(common.OrganizationEntityType, common.DeleteOperation, asParam)
+ } else {
+ slog.With(slog.Any("error", innerErr)).ErrorContext(ctx, "error sending delete notification", "org", orgID)
+ }
+ }
+ }(org)
+
q := s.conn.Unscoped().Delete(&org)
if q.Error != nil && !errors.Is(q.Error, gorm.ErrRecordNotFound) {
- return errors.Wrap(q.Error, "deleting org")
+ return fmt.Errorf("error deleting org: %w", q.Error)
}
return nil
}
-func (s *sqlDatabase) UpdateOrganization(ctx context.Context, orgID string, param params.UpdateEntityParams) (params.Organization, error) {
- org, err := s.getOrgByID(ctx, orgID)
- if err != nil {
- return params.Organization{}, errors.Wrap(err, "fetching org")
- }
-
- if param.CredentialsName != "" {
- org.CredentialsName = param.CredentialsName
- }
-
- if param.WebhookSecret != "" {
- secret, err := util.Aes256EncodeString(param.WebhookSecret, s.cfg.Passphrase)
- if err != nil {
- return params.Organization{}, fmt.Errorf("saving org: failed to encrypt string: %w", err)
+func (s *sqlDatabase) UpdateOrganization(ctx context.Context, orgID string, param params.UpdateEntityParams) (paramOrg params.Organization, err error) {
+ defer func() {
+ if err == nil {
+ s.sendNotify(common.OrganizationEntityType, common.UpdateOperation, paramOrg)
+ }
+ }()
+ var org Organization
+ var creds GithubCredentials
+ err = s.conn.Transaction(func(tx *gorm.DB) error {
+ var err error
+ org, err = s.getOrgByID(ctx, tx, orgID)
+ if err != nil {
+ return fmt.Errorf("error fetching org: %w", err)
+ }
+ if org.EndpointName == nil {
+ return fmt.Errorf("error org has no endpoint: %w", runnerErrors.ErrUnprocessable)
}
- org.WebhookSecret = secret
- }
- q := s.conn.Save(&org)
- if q.Error != nil {
- return params.Organization{}, errors.Wrap(q.Error, "saving org")
- }
+ if param.CredentialsName != "" {
+ creds, err = s.getGithubCredentialsByName(ctx, tx, param.CredentialsName, false)
+ if err != nil {
+ return fmt.Errorf("error fetching credentials: %w", err)
+ }
+ if creds.EndpointName == nil {
+ return fmt.Errorf("error credentials have no endpoint: %w", runnerErrors.ErrUnprocessable)
+ }
- newParams, err := s.sqlToCommonOrganization(org)
+ if *creds.EndpointName != *org.EndpointName {
+ return fmt.Errorf("error endpoint mismatch: %w", runnerErrors.ErrBadRequest)
+ }
+ org.CredentialsID = &creds.ID
+ }
+
+ if param.WebhookSecret != "" {
+ secret, err := util.Seal([]byte(param.WebhookSecret), []byte(s.cfg.Passphrase))
+ if err != nil {
+ return fmt.Errorf("saving org: failed to encrypt string: %w", err)
+ }
+ org.WebhookSecret = secret
+ }
+
+ if param.PoolBalancerType != "" {
+ org.PoolBalancerType = param.PoolBalancerType
+ }
+
+ q := tx.Save(&org)
+ if q.Error != nil {
+ return fmt.Errorf("error saving org: %w", q.Error)
+ }
+
+ return nil
+ })
if err != nil {
- return params.Organization{}, errors.Wrap(err, "saving org")
+ return params.Organization{}, fmt.Errorf("error saving org: %w", err)
}
- return newParams, nil
+
+ org, err = s.getOrgByID(ctx, s.conn, orgID, "Endpoint", "Credentials", "Credentials.Endpoint", "GiteaCredentials", "GiteaCredentials.Endpoint")
+ if err != nil {
+ return params.Organization{}, fmt.Errorf("error updating enterprise: %w", err)
+ }
+ paramOrg, err = s.sqlToCommonOrganization(org, true)
+ if err != nil {
+ return params.Organization{}, fmt.Errorf("error saving org: %w", err)
+ }
+ return paramOrg, nil
}
func (s *sqlDatabase) GetOrganizationByID(ctx context.Context, orgID string) (params.Organization, error) {
- org, err := s.getOrgByID(ctx, orgID, "Pools")
+ preloadList := []string{
+ "Pools",
+ "Credentials",
+ "Endpoint",
+ "Credentials.Endpoint",
+ "GiteaCredentials",
+ "GiteaCredentials.Endpoint",
+ "Events",
+ }
+ org, err := s.getOrgByID(ctx, s.conn, orgID, preloadList...)
if err != nil {
- return params.Organization{}, errors.Wrap(err, "fetching org")
+ return params.Organization{}, fmt.Errorf("error fetching org: %w", err)
}
- param, err := s.sqlToCommonOrganization(org)
+ param, err := s.sqlToCommonOrganization(org, true)
if err != nil {
- return params.Organization{}, errors.Wrap(err, "fetching enterprise")
+ return params.Organization{}, fmt.Errorf("error fetching org: %w", err)
}
return param, nil
}
-func (s *sqlDatabase) CreateOrganizationPool(ctx context.Context, orgId string, param params.CreatePoolParams) (params.Pool, error) {
- if len(param.Tags) == 0 {
- return params.Pool{}, runnerErrors.NewBadRequestError("no tags specified")
- }
-
- org, err := s.getOrgByID(ctx, orgId)
- if err != nil {
- return params.Pool{}, errors.Wrap(err, "fetching org")
- }
-
- newPool := Pool{
- ProviderName: param.ProviderName,
- MaxRunners: param.MaxRunners,
- MinIdleRunners: param.MinIdleRunners,
- RunnerPrefix: param.GetRunnerPrefix(),
- Image: param.Image,
- Flavor: param.Flavor,
- OSType: param.OSType,
- OSArch: param.OSArch,
- OrgID: &org.ID,
- Enabled: param.Enabled,
- RunnerBootstrapTimeout: param.RunnerBootstrapTimeout,
- }
-
- if len(param.ExtraSpecs) > 0 {
- newPool.ExtraSpecs = datatypes.JSON(param.ExtraSpecs)
- }
-
- _, err = s.getOrgPoolByUniqueFields(ctx, orgId, newPool.ProviderName, newPool.Image, newPool.Flavor)
- if err != nil {
- if !errors.Is(err, runnerErrors.ErrNotFound) {
- return params.Pool{}, errors.Wrap(err, "creating pool")
- }
- } else {
- return params.Pool{}, runnerErrors.NewConflictError("pool with the same image and flavor already exists on this provider")
- }
-
- tags := []Tag{}
- for _, val := range param.Tags {
- t, err := s.getOrCreateTag(val)
- if err != nil {
- return params.Pool{}, errors.Wrap(err, "fetching tag")
- }
- tags = append(tags, t)
- }
-
- q := s.conn.Create(&newPool)
- if q.Error != nil {
- return params.Pool{}, errors.Wrap(q.Error, "adding pool")
- }
-
- for _, tt := range tags {
- if err := s.conn.Model(&newPool).Association("Tags").Append(&tt); err != nil {
- return params.Pool{}, errors.Wrap(err, "saving tag")
- }
- }
-
- pool, err := s.getPoolByID(ctx, newPool.ID.String(), "Tags", "Instances", "Enterprise", "Organization", "Repository")
- if err != nil {
- return params.Pool{}, errors.Wrap(err, "fetching pool")
- }
-
- return s.sqlToCommonPool(pool), nil
-}
-
-func (s *sqlDatabase) ListOrgPools(ctx context.Context, orgID string) ([]params.Pool, error) {
- pools, err := s.listEntityPools(ctx, params.OrganizationPool, orgID, "Tags", "Instances")
- if err != nil {
- return nil, errors.Wrap(err, "fetching pools")
- }
-
- ret := make([]params.Pool, len(pools))
- for idx, pool := range pools {
- ret[idx] = s.sqlToCommonPool(pool)
- }
-
- return ret, nil
-}
-
-func (s *sqlDatabase) GetOrganizationPool(ctx context.Context, orgID, poolID string) (params.Pool, error) {
- pool, err := s.getEntityPool(ctx, params.OrganizationPool, orgID, poolID, "Tags", "Instances")
- if err != nil {
- return params.Pool{}, errors.Wrap(err, "fetching pool")
- }
- return s.sqlToCommonPool(pool), nil
-}
-
-func (s *sqlDatabase) DeleteOrganizationPool(ctx context.Context, orgID, poolID string) error {
- pool, err := s.getEntityPool(ctx, params.OrganizationPool, orgID, poolID)
- if err != nil {
- return errors.Wrap(err, "looking up org pool")
- }
- q := s.conn.Unscoped().Delete(&pool)
- if q.Error != nil && !errors.Is(q.Error, gorm.ErrRecordNotFound) {
- return errors.Wrap(q.Error, "deleting pool")
- }
- return nil
-}
-
-func (s *sqlDatabase) FindOrganizationPoolByTags(ctx context.Context, orgID string, tags []string) (params.Pool, error) {
- pool, err := s.findPoolByTags(orgID, params.OrganizationPool, tags)
- if err != nil {
- return params.Pool{}, errors.Wrap(err, "fetching pool")
- }
- return pool[0], nil
-}
-
-func (s *sqlDatabase) ListOrgInstances(ctx context.Context, orgID string) ([]params.Instance, error) {
- pools, err := s.listEntityPools(ctx, params.OrganizationPool, orgID, "Tags", "Instances")
- if err != nil {
- return nil, errors.Wrap(err, "fetching org")
- }
- ret := []params.Instance{}
- for _, pool := range pools {
- for _, instance := range pool.Instances {
- ret = append(ret, s.sqlToParamsInstance(instance))
- }
- }
- return ret, nil
-}
-
-func (s *sqlDatabase) UpdateOrganizationPool(ctx context.Context, orgID, poolID string, param params.UpdatePoolParams) (params.Pool, error) {
- pool, err := s.getEntityPool(ctx, params.OrganizationPool, orgID, poolID, "Tags", "Instances", "Enterprise", "Organization", "Repository")
- if err != nil {
- return params.Pool{}, errors.Wrap(err, "fetching pool")
- }
-
- return s.updatePool(pool, param)
-}
-
-func (s *sqlDatabase) getPoolByID(ctx context.Context, poolID string, preload ...string) (Pool, error) {
- u, err := uuid.Parse(poolID)
- if err != nil {
- return Pool{}, errors.Wrap(runnerErrors.ErrBadRequest, "parsing id")
- }
- var pool Pool
- q := s.conn.Model(&Pool{})
- if len(preload) > 0 {
- for _, item := range preload {
- q = q.Preload(item)
- }
- }
-
- q = q.Where("id = ?", u).First(&pool)
-
- if q.Error != nil {
- if errors.Is(q.Error, gorm.ErrRecordNotFound) {
- return Pool{}, runnerErrors.ErrNotFound
- }
- return Pool{}, errors.Wrap(q.Error, "fetching org from database")
- }
- return pool, nil
-}
-
-func (s *sqlDatabase) getOrgByID(ctx context.Context, id string, preload ...string) (Organization, error) {
+func (s *sqlDatabase) getOrgByID(_ context.Context, db *gorm.DB, id string, preload ...string) (Organization, error) {
u, err := uuid.Parse(id)
if err != nil {
- return Organization{}, errors.Wrap(runnerErrors.ErrBadRequest, "parsing id")
+ return Organization{}, fmt.Errorf("error parsing id: %w", runnerErrors.ErrBadRequest)
}
var org Organization
- q := s.conn
+ q := db
if len(preload) > 0 {
for _, field := range preload {
q = q.Preload(field)
@@ -319,40 +258,26 @@ func (s *sqlDatabase) getOrgByID(ctx context.Context, id string, preload ...stri
if errors.Is(q.Error, gorm.ErrRecordNotFound) {
return Organization{}, runnerErrors.ErrNotFound
}
- return Organization{}, errors.Wrap(q.Error, "fetching org from database")
+ return Organization{}, fmt.Errorf("error fetching org from database: %w", q.Error)
}
return org, nil
}
-func (s *sqlDatabase) getOrg(ctx context.Context, name string) (Organization, error) {
+func (s *sqlDatabase) getOrg(_ context.Context, name, endpointName string) (Organization, error) {
var org Organization
- q := s.conn.Where("name = ? COLLATE NOCASE", name)
- q = q.First(&org)
+ q := s.conn.Where("name = ? COLLATE NOCASE and endpoint_name = ? COLLATE NOCASE", name, endpointName).
+ Preload("Credentials").
+ Preload("GiteaCredentials").
+ Preload("Credentials.Endpoint").
+ Preload("GiteaCredentials.Endpoint").
+ Preload("Endpoint").
+ First(&org)
if q.Error != nil {
if errors.Is(q.Error, gorm.ErrRecordNotFound) {
return Organization{}, runnerErrors.ErrNotFound
}
- return Organization{}, errors.Wrap(q.Error, "fetching org from database")
+ return Organization{}, fmt.Errorf("error fetching org from database: %w", q.Error)
}
return org, nil
}
-
-func (s *sqlDatabase) getOrgPoolByUniqueFields(ctx context.Context, orgID string, provider, image, flavor string) (Pool, error) {
- org, err := s.getOrgByID(ctx, orgID)
- if err != nil {
- return Pool{}, errors.Wrap(err, "fetching org")
- }
-
- q := s.conn
- var pool []Pool
- err = q.Model(&org).Association("Pools").Find(&pool, "provider_name = ? and image = ? and flavor = ?", provider, image, flavor)
- if err != nil {
- return Pool{}, errors.Wrap(err, "fetching pool")
- }
- if len(pool) == 0 {
- return Pool{}, runnerErrors.ErrNotFound
- }
-
- return pool[0], nil
-}
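
CreateOrganization, UpdateOrganization and DeleteOrganization above all share one shape: a named error return, the real work inside a gorm transaction, and a deferred notification that fires only when err is nil, so watchers never hear about rolled-back writes. A minimal sketch of the pattern (types and the notify signature are simplified stand-ins for the real ones):

package main

import "fmt"

type store struct{}

// sendNotify stands in for sqlDatabase.sendNotify; the real method takes an
// entity type, an operation and the entity payload.
func (s *store) sendNotify(op string, payload any) {
	fmt.Println("notify:", op, payload)
}

// createOrg mirrors the shape used above: the deferred func reads the named
// return values, so the notification is gated on the final outcome.
func (s *store) createOrg(name string) (org string, err error) {
	defer func() {
		if err == nil {
			s.sendNotify("create", org)
		}
	}()

	// The transactional insert would go here; any error it returns
	// suppresses the notification above.
	org = name
	return org, nil
}

func main() {
	org, err := (&store{}).createOrg("test-org")
	if err != nil {
		panic(err)
	}
	fmt.Println("created", org)
}

One subtlety: the defer must read the named returns (or, as in DeleteOrganization, capture the row fetched before deletion), otherwise it would observe zero values instead of the final result.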
diff --git a/database/sql/organizations_test.go b/database/sql/organizations_test.go
index b664fc8b..245b3c1f 100644
--- a/database/sql/organizations_test.go
+++ b/database/sql/organizations_test.go
@@ -22,16 +22,16 @@ import (
"sort"
"testing"
- runnerErrors "github.com/cloudbase/garm-provider-common/errors"
- dbCommon "github.com/cloudbase/garm/database/common"
- garmTesting "github.com/cloudbase/garm/internal/testing"
- "github.com/cloudbase/garm/params"
-
"github.com/stretchr/testify/suite"
"gopkg.in/DATA-DOG/go-sqlmock.v1"
"gorm.io/driver/mysql"
"gorm.io/gorm"
"gorm.io/gorm/logger"
+
+ "github.com/cloudbase/garm/auth"
+ dbCommon "github.com/cloudbase/garm/database/common"
+ garmTesting "github.com/cloudbase/garm/internal/testing"
+ "github.com/cloudbase/garm/params"
)
type OrgTestFixtures struct {
@@ -49,6 +49,15 @@ type OrgTestSuite struct {
Store dbCommon.Store
StoreSQLMocked *sqlDatabase
Fixtures *OrgTestFixtures
+
+ adminCtx context.Context
+ adminUserID string
+
+ testCreds params.ForgeCredentials
+ testCredsGitea params.ForgeCredentials
+ secondaryTestCreds params.ForgeCredentials
+ githubEndpoint params.ForgeEndpoint
+ giteaEndpoint params.ForgeEndpoint
}
func (s *OrgTestSuite) equalInstancesByName(expected, actual []params.Instance) {
@@ -71,23 +80,36 @@ func (s *OrgTestSuite) assertSQLMockExpectations() {
func (s *OrgTestSuite) SetupTest() {
// create testing sqlite database
- db, err := NewSQLDatabase(context.Background(), garmTesting.GetTestSqliteDBConfig(s.T()))
+ dbConfig := garmTesting.GetTestSqliteDBConfig(s.T())
+ db, err := NewSQLDatabase(context.Background(), dbConfig)
if err != nil {
s.FailNow(fmt.Sprintf("failed to create db connection: %s", err))
}
s.Store = db
+ adminCtx := garmTesting.ImpersonateAdminContext(context.Background(), db, s.T())
+ s.adminCtx = adminCtx
+ s.adminUserID = auth.UserID(adminCtx)
+ s.Require().NotEmpty(s.adminUserID)
+
+ s.githubEndpoint = garmTesting.CreateDefaultGithubEndpoint(adminCtx, db, s.T())
+ s.giteaEndpoint = garmTesting.CreateDefaultGiteaEndpoint(adminCtx, db, s.T())
+ s.testCreds = garmTesting.CreateTestGithubCredentials(adminCtx, "new-creds", db, s.T(), s.githubEndpoint)
+ s.testCredsGitea = garmTesting.CreateTestGiteaCredentials(adminCtx, "new-creds", db, s.T(), s.giteaEndpoint)
+ s.secondaryTestCreds = garmTesting.CreateTestGithubCredentials(adminCtx, "secondary-creds", db, s.T(), s.githubEndpoint)
+
// create some organization objects in the database, for testing purposes
orgs := []params.Organization{}
for i := 1; i <= 3; i++ {
org, err := db.CreateOrganization(
- context.Background(),
+ s.adminCtx,
fmt.Sprintf("test-org-%d", i),
- fmt.Sprintf("test-creds-%d", i),
+ s.testCreds,
fmt.Sprintf("test-webhook-secret-%d", i),
+ params.PoolBalancerTypeRoundRobin,
)
if err != nil {
- s.FailNow(fmt.Sprintf("failed to create database object (test-org-%d)", i))
+ s.FailNow(fmt.Sprintf("failed to create database object (test-org-%d): %q", i, err))
}
orgs = append(orgs, org)
@@ -104,7 +126,7 @@ func (s *OrgTestSuite) SetupTest() {
SkipInitializeWithVersion: true,
}
gormConfig := &gorm.Config{}
- if flag.Lookup("test.v").Value.String() == "false" {
+ if flag.Lookup("test.v").Value.String() == falseString {
gormConfig.Logger = logger.Default.LogMode(logger.Silent)
}
gormConn, err := gorm.Open(mysql.New(mysqlConfig), gormConfig)
@@ -113,7 +135,7 @@ func (s *OrgTestSuite) SetupTest() {
}
s.StoreSQLMocked = &sqlDatabase{
conn: gormConn,
- cfg: garmTesting.GetTestSqliteDBConfig(s.T()),
+ cfg: dbConfig,
}
// setup test fixtures
@@ -122,8 +144,8 @@ func (s *OrgTestSuite) SetupTest() {
fixtures := &OrgTestFixtures{
Orgs: orgs,
CreateOrgParams: params.CreateOrgParams{
- Name: "new-test-org",
- CredentialsName: "new-creds",
+ Name: s.testCreds.Name,
+ CredentialsName: s.testCreds.Name,
WebhookSecret: "new-webhook-secret",
},
CreatePoolParams: params.CreatePoolParams{
@@ -135,14 +157,14 @@ func (s *OrgTestSuite) SetupTest() {
Flavor: "test-flavor",
OSType: "linux",
OSArch: "amd64",
- Tags: []string{"self-hosted", "arm64", "linux"},
+ Tags: []string{"amd64-linux-runner"},
},
CreateInstanceParams: params.CreateInstanceParams{
Name: "test-instance-name",
OSType: "linux",
},
UpdateRepoParams: params.UpdateEntityParams{
- CredentialsName: "test-update-creds",
+ CredentialsName: s.secondaryTestCreds.Name,
WebhookSecret: "test-update-repo-webhook-secret",
},
UpdatePoolParams: params.UpdatePoolParams{
@@ -159,20 +181,77 @@ func (s *OrgTestSuite) SetupTest() {
func (s *OrgTestSuite) TestCreateOrganization() {
// call tested function
org, err := s.Store.CreateOrganization(
- context.Background(),
+ s.adminCtx,
s.Fixtures.CreateOrgParams.Name,
- s.Fixtures.CreateOrgParams.CredentialsName,
- s.Fixtures.CreateOrgParams.WebhookSecret)
+ s.testCreds,
+ s.Fixtures.CreateOrgParams.WebhookSecret,
+ params.PoolBalancerTypeRoundRobin)
// assertions
s.Require().Nil(err)
- storeOrg, err := s.Store.GetOrganizationByID(context.Background(), org.ID)
+ storeOrg, err := s.Store.GetOrganizationByID(s.adminCtx, org.ID)
if err != nil {
s.FailNow(fmt.Sprintf("failed to get organization by id: %v", err))
}
s.Require().Equal(storeOrg.Name, org.Name)
- s.Require().Equal(storeOrg.CredentialsName, org.CredentialsName)
+ s.Require().Equal(storeOrg.Credentials.Name, org.Credentials.Name)
s.Require().Equal(storeOrg.WebhookSecret, org.WebhookSecret)
+
+ entity, err := org.GetEntity()
+ s.Require().Nil(err)
+ s.Require().Equal(entity.EntityType, params.ForgeEntityTypeOrganization)
+ s.Require().Equal(entity.ID, org.ID)
+
+ forgeType, err := entity.GetForgeType()
+ s.Require().Nil(err)
+ s.Require().Equal(forgeType, params.GithubEndpointType)
+}
+
+func (s *OrgTestSuite) TestCreateOrgForGitea() {
+ // call tested function
+ org, err := s.Store.CreateOrganization(
+ s.adminCtx,
+ s.Fixtures.CreateOrgParams.Name,
+ s.testCredsGitea,
+ s.Fixtures.CreateOrgParams.WebhookSecret,
+ params.PoolBalancerTypeRoundRobin)
+
+ // assertions
+ s.Require().Nil(err)
+ storeOrg, err := s.Store.GetOrganizationByID(s.adminCtx, org.ID)
+ if err != nil {
+ s.FailNow(fmt.Sprintf("failed to get organization by id: %v", err))
+ }
+ s.Require().Equal(storeOrg.Name, org.Name)
+ s.Require().Equal(storeOrg.Credentials.Name, org.Credentials.Name)
+ s.Require().Equal(storeOrg.WebhookSecret, org.WebhookSecret)
+
+ entity, err := org.GetEntity()
+ s.Require().Nil(err)
+ s.Require().Equal(entity.EntityType, params.ForgeEntityTypeOrganization)
+ s.Require().Equal(entity.ID, org.ID)
+
+ forgeType, err := entity.GetForgeType()
+ s.Require().Nil(err)
+ s.Require().Equal(forgeType, params.GiteaEndpointType)
+}
+
+func (s *OrgTestSuite) TestCreateOrganizationInvalidForgeType() {
+ credentials := params.ForgeCredentials{
+ Name: "test-creds",
+ Endpoint: s.githubEndpoint,
+ ID: 99,
+ ForgeType: params.EndpointType("invalid-forge-type"),
+ }
+
+ _, err := s.Store.CreateOrganization(
+ s.adminCtx,
+ s.Fixtures.CreateOrgParams.Name,
+ credentials,
+ s.Fixtures.CreateOrgParams.WebhookSecret,
+ params.PoolBalancerTypeRoundRobin)
+ s.Require().NotNil(err)
+ s.Require().Equal("error creating org: unsupported credentials type: invalid request", err.Error())
}
func (s *OrgTestSuite) TestCreateOrganizationInvalidDBPassphrase() {
@@ -182,20 +261,21 @@ func (s *OrgTestSuite) TestCreateOrganizationInvalidDBPassphrase() {
s.FailNow(fmt.Sprintf("failed to create db connection: %s", err))
}
// make sure we use a 'sqlDatabase' struct with a wrong 'cfg.Passphrase'
- cfg.Passphrase = "wrong-passphrase" // it must have a size different than 32
+ cfg.Passphrase = wrongPassphrase // its length must be different from 32
sqlDB := &sqlDatabase{
conn: conn,
cfg: cfg,
}
_, err = sqlDB.CreateOrganization(
- context.Background(),
+ s.adminCtx,
s.Fixtures.CreateOrgParams.Name,
- s.Fixtures.CreateOrgParams.CredentialsName,
- s.Fixtures.CreateOrgParams.WebhookSecret)
+ s.testCreds,
+ s.Fixtures.CreateOrgParams.WebhookSecret,
+ params.PoolBalancerTypeRoundRobin)
s.Require().NotNil(err)
- s.Require().Equal("failed to encrypt string", err.Error())
+ s.Require().Equal("error encoding secret: invalid passphrase length (expected length 32 characters)", err.Error())
}
func (s *OrgTestSuite) TestCreateOrganizationDBCreateErr() {
@@ -206,18 +286,19 @@ func (s *OrgTestSuite) TestCreateOrganizationDBCreateErr() {
s.Fixtures.SQLMock.ExpectRollback()
_, err := s.StoreSQLMocked.CreateOrganization(
- context.Background(),
+ s.adminCtx,
s.Fixtures.CreateOrgParams.Name,
- s.Fixtures.CreateOrgParams.CredentialsName,
- s.Fixtures.CreateOrgParams.WebhookSecret)
+ s.testCreds,
+ s.Fixtures.CreateOrgParams.WebhookSecret,
+ params.PoolBalancerTypeRoundRobin)
- s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("creating org: creating org mock error", err.Error())
+ s.Require().Equal("error creating org: error creating org: creating org mock error", err.Error())
+ s.assertSQLMockExpectations()
}
func (s *OrgTestSuite) TestGetOrganization() {
- org, err := s.Store.GetOrganization(context.Background(), s.Fixtures.Orgs[0].Name)
+ org, err := s.Store.GetOrganization(s.adminCtx, s.Fixtures.Orgs[0].Name, s.Fixtures.Orgs[0].Endpoint.Name)
s.Require().Nil(err)
s.Require().Equal(s.Fixtures.Orgs[0].Name, org.Name)
@@ -225,71 +306,127 @@ func (s *OrgTestSuite) TestGetOrganization() {
}
func (s *OrgTestSuite) TestGetOrganizationCaseInsensitive() {
- org, err := s.Store.GetOrganization(context.Background(), "TeSt-oRg-1")
+ org, err := s.Store.GetOrganization(s.adminCtx, "TeSt-oRg-1", "github.com")
s.Require().Nil(err)
s.Require().Equal("test-org-1", org.Name)
}
func (s *OrgTestSuite) TestGetOrganizationNotFound() {
- _, err := s.Store.GetOrganization(context.Background(), "dummy-name")
+ _, err := s.Store.GetOrganization(s.adminCtx, "dummy-name", "github.com")
s.Require().NotNil(err)
- s.Require().Equal("fetching org: not found", err.Error())
+ s.Require().Equal("error fetching org: not found", err.Error())
}
func (s *OrgTestSuite) TestGetOrganizationDBDecryptingErr() {
s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `organizations` WHERE name = ? COLLATE NOCASE AND `organizations`.`deleted_at` IS NULL ORDER BY `organizations`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Orgs[0].Name).
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `organizations` WHERE (name = ? COLLATE NOCASE and endpoint_name = ? COLLATE NOCASE) AND `organizations`.`deleted_at` IS NULL ORDER BY `organizations`.`id` LIMIT ?")).
+ WithArgs(s.Fixtures.Orgs[0].Name, s.Fixtures.Orgs[0].Endpoint.Name, 1).
WillReturnRows(sqlmock.NewRows([]string{"name"}).AddRow(s.Fixtures.Orgs[0].Name))
- _, err := s.StoreSQLMocked.GetOrganization(context.Background(), s.Fixtures.Orgs[0].Name)
+ _, err := s.StoreSQLMocked.GetOrganization(s.adminCtx, s.Fixtures.Orgs[0].Name, s.Fixtures.Orgs[0].Endpoint.Name)
- s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("fetching org: missing secret", err.Error())
+ s.Require().Equal("error fetching org: missing secret", err.Error())
+ s.assertSQLMockExpectations()
}
func (s *OrgTestSuite) TestListOrganizations() {
- orgs, err := s.Store.ListOrganizations(context.Background())
+ orgs, err := s.Store.ListOrganizations(s.adminCtx, params.OrganizationFilter{})
s.Require().Nil(err)
garmTesting.EqualDBEntityByName(s.T(), s.Fixtures.Orgs, orgs)
}
+func (s *OrgTestSuite) TestListOrganizationsWithFilters() {
+ org, err := s.Store.CreateOrganization(
+ s.adminCtx,
+ "test-org",
+ s.testCreds,
+ "super secret",
+ params.PoolBalancerTypeRoundRobin,
+ )
+ s.Require().NoError(err)
+
+ org2, err := s.Store.CreateOrganization(
+ s.adminCtx,
+ "test-org",
+ s.testCredsGitea,
+ "super secret",
+ params.PoolBalancerTypeRoundRobin,
+ )
+ s.Require().NoError(err)
+
+ org3, err := s.Store.CreateOrganization(
+ s.adminCtx,
+ "test-org2",
+ s.testCreds,
+ "super secret",
+ params.PoolBalancerTypeRoundRobin,
+ )
+ s.Require().NoError(err)
+ orgs, err := s.Store.ListOrganizations(
+ s.adminCtx,
+ params.OrganizationFilter{
+ Name: "test-org",
+ })
+
+ s.Require().Nil(err)
+ garmTesting.EqualDBEntityByName(s.T(), []params.Organization{org, org2}, orgs)
+
+ orgs, err = s.Store.ListOrganizations(
+ s.adminCtx,
+ params.OrganizationFilter{
+ Name: "test-org",
+ Endpoint: s.giteaEndpoint.Name,
+ })
+
+ s.Require().Nil(err)
+ garmTesting.EqualDBEntityByName(s.T(), []params.Organization{org2}, orgs)
+
+ orgs, err = s.Store.ListOrganizations(
+ s.adminCtx,
+ params.OrganizationFilter{
+ Name: "test-org2",
+ })
+
+ s.Require().Nil(err)
+ garmTesting.EqualDBEntityByName(s.T(), []params.Organization{org3}, orgs)
+}
+
func (s *OrgTestSuite) TestListOrganizationsDBFetchErr() {
s.Fixtures.SQLMock.
ExpectQuery(regexp.QuoteMeta("SELECT * FROM `organizations` WHERE `organizations`.`deleted_at` IS NULL")).
WillReturnError(fmt.Errorf("fetching user from database mock error"))
- _, err := s.StoreSQLMocked.ListOrganizations(context.Background())
+ _, err := s.StoreSQLMocked.ListOrganizations(s.adminCtx, params.OrganizationFilter{})
s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("fetching org from database: fetching user from database mock error", err.Error())
+ s.Require().Equal("error fetching org from database: fetching user from database mock error", err.Error())
}
func (s *OrgTestSuite) TestDeleteOrganization() {
- err := s.Store.DeleteOrganization(context.Background(), s.Fixtures.Orgs[0].ID)
+ err := s.Store.DeleteOrganization(s.adminCtx, s.Fixtures.Orgs[0].ID)
s.Require().Nil(err)
- _, err = s.Store.GetOrganizationByID(context.Background(), s.Fixtures.Orgs[0].ID)
+ _, err = s.Store.GetOrganizationByID(s.adminCtx, s.Fixtures.Orgs[0].ID)
s.Require().NotNil(err)
- s.Require().Equal("fetching org: not found", err.Error())
+ s.Require().Equal("error fetching org: not found", err.Error())
}
func (s *OrgTestSuite) TestDeleteOrganizationInvalidOrgID() {
- err := s.Store.DeleteOrganization(context.Background(), "dummy-org-id")
+ err := s.Store.DeleteOrganization(s.adminCtx, "dummy-org-id")
s.Require().NotNil(err)
- s.Require().Equal("fetching org: parsing id: invalid request", err.Error())
+ s.Require().Equal("error fetching org: error parsing id: invalid request", err.Error())
}
func (s *OrgTestSuite) TestDeleteOrganizationDBDeleteErr() {
s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `organizations` WHERE id = ? AND `organizations`.`deleted_at` IS NULL ORDER BY `organizations`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Orgs[0].ID).
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `organizations` WHERE id = ? AND `organizations`.`deleted_at` IS NULL ORDER BY `organizations`.`id` LIMIT ?")).
+ WithArgs(s.Fixtures.Orgs[0].ID, 1).
WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Orgs[0].ID))
s.Fixtures.SQLMock.ExpectBegin()
s.Fixtures.SQLMock.
@@ -298,114 +435,153 @@ func (s *OrgTestSuite) TestDeleteOrganizationDBDeleteErr() {
WillReturnError(fmt.Errorf("mocked delete org error"))
s.Fixtures.SQLMock.ExpectRollback()
- err := s.StoreSQLMocked.DeleteOrganization(context.Background(), s.Fixtures.Orgs[0].ID)
+ err := s.StoreSQLMocked.DeleteOrganization(s.adminCtx, s.Fixtures.Orgs[0].ID)
s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("deleting org: mocked delete org error", err.Error())
+ s.Require().Equal("error deleting org: mocked delete org error", err.Error())
}
func (s *OrgTestSuite) TestUpdateOrganization() {
- org, err := s.Store.UpdateOrganization(context.Background(), s.Fixtures.Orgs[0].ID, s.Fixtures.UpdateRepoParams)
+ org, err := s.Store.UpdateOrganization(s.adminCtx, s.Fixtures.Orgs[0].ID, s.Fixtures.UpdateRepoParams)
s.Require().Nil(err)
- s.Require().Equal(s.Fixtures.UpdateRepoParams.CredentialsName, org.CredentialsName)
+ s.Require().Equal(s.Fixtures.UpdateRepoParams.CredentialsName, org.Credentials.Name)
s.Require().Equal(s.Fixtures.UpdateRepoParams.WebhookSecret, org.WebhookSecret)
}
func (s *OrgTestSuite) TestUpdateOrganizationInvalidOrgID() {
- _, err := s.Store.UpdateOrganization(context.Background(), "dummy-org-id", s.Fixtures.UpdateRepoParams)
+ _, err := s.Store.UpdateOrganization(s.adminCtx, "dummy-org-id", s.Fixtures.UpdateRepoParams)
s.Require().NotNil(err)
- s.Require().Equal("fetching org: parsing id: invalid request", err.Error())
+ s.Require().Equal("error saving org: error fetching org: error parsing id: invalid request", err.Error())
}
func (s *OrgTestSuite) TestUpdateOrganizationDBEncryptErr() {
- s.StoreSQLMocked.cfg.Passphrase = "wrong-passphrase"
-
+ s.StoreSQLMocked.cfg.Passphrase = wrongPassphrase
+ s.Fixtures.SQLMock.ExpectBegin()
s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `organizations` WHERE id = ? AND `organizations`.`deleted_at` IS NULL ORDER BY `organizations`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Orgs[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Orgs[0].ID))
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `organizations` WHERE id = ? AND `organizations`.`deleted_at` IS NULL ORDER BY `organizations`.`id` LIMIT ?")).
+ WithArgs(s.Fixtures.Orgs[0].ID, 1).
+ WillReturnRows(sqlmock.NewRows([]string{"id", "endpoint_name"}).
+ AddRow(s.Fixtures.Orgs[0].ID, s.Fixtures.Orgs[0].Endpoint.Name))
+ s.Fixtures.SQLMock.
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `github_credentials` WHERE user_id = ? AND name = ? AND `github_credentials`.`deleted_at` IS NULL ORDER BY `github_credentials`.`id` LIMIT ?")).
+ WithArgs(s.adminUserID, s.secondaryTestCreds.Name, 1).
+ WillReturnRows(sqlmock.NewRows([]string{"id", "endpoint_name"}).
+ AddRow(s.secondaryTestCreds.ID, s.secondaryTestCreds.Endpoint.Name))
+ s.Fixtures.SQLMock.ExpectQuery(regexp.QuoteMeta("SELECT * FROM `github_endpoints` WHERE `github_endpoints`.`name` = ? AND `github_endpoints`.`deleted_at` IS NULL")).
+ WithArgs(s.testCreds.Endpoint.Name).
+ WillReturnRows(sqlmock.NewRows([]string{"name"}).
+ AddRow(s.secondaryTestCreds.Endpoint.Name))
+ s.Fixtures.SQLMock.ExpectRollback()
- _, err := s.StoreSQLMocked.UpdateOrganization(context.Background(), s.Fixtures.Orgs[0].ID, s.Fixtures.UpdateRepoParams)
+ _, err := s.StoreSQLMocked.UpdateOrganization(s.adminCtx, s.Fixtures.Orgs[0].ID, s.Fixtures.UpdateRepoParams)
- s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("saving org: failed to encrypt string: invalid passphrase length (expected length 32 characters)", err.Error())
+ s.Require().Equal("error saving org: saving org: failed to encrypt string: invalid passphrase length (expected length 32 characters)", err.Error())
+ s.assertSQLMockExpectations()
}
func (s *OrgTestSuite) TestUpdateOrganizationDBSaveErr() {
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `organizations` WHERE id = ? AND `organizations`.`deleted_at` IS NULL ORDER BY `organizations`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Orgs[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Orgs[0].ID))
s.Fixtures.SQLMock.ExpectBegin()
+ s.Fixtures.SQLMock.
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `organizations` WHERE id = ? AND `organizations`.`deleted_at` IS NULL ORDER BY `organizations`.`id` LIMIT ?")).
+ WithArgs(s.Fixtures.Orgs[0].ID, 1).
+ WillReturnRows(sqlmock.NewRows([]string{"id", "endpoint_name"}).
+ AddRow(s.Fixtures.Orgs[0].ID, s.Fixtures.Orgs[0].Endpoint.Name))
+ s.Fixtures.SQLMock.
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `github_credentials` WHERE user_id = ? AND name = ? AND `github_credentials`.`deleted_at` IS NULL ORDER BY `github_credentials`.`id` LIMIT ?")).
+ WithArgs(s.adminUserID, s.secondaryTestCreds.Name, 1).
+ WillReturnRows(sqlmock.NewRows([]string{"id", "endpoint_name"}).
+ AddRow(s.secondaryTestCreds.ID, s.secondaryTestCreds.Endpoint.Name))
+ s.Fixtures.SQLMock.ExpectQuery(regexp.QuoteMeta("SELECT * FROM `github_endpoints` WHERE `github_endpoints`.`name` = ? AND `github_endpoints`.`deleted_at` IS NULL")).
+ WithArgs(s.testCreds.Endpoint.Name).
+ WillReturnRows(sqlmock.NewRows([]string{"name"}).
+ AddRow(s.secondaryTestCreds.Endpoint.Name))
s.Fixtures.SQLMock.
ExpectExec(("UPDATE `organizations` SET")).
WillReturnError(fmt.Errorf("saving org mock error"))
s.Fixtures.SQLMock.ExpectRollback()
- _, err := s.StoreSQLMocked.UpdateOrganization(context.Background(), s.Fixtures.Orgs[0].ID, s.Fixtures.UpdateRepoParams)
+ _, err := s.StoreSQLMocked.UpdateOrganization(s.adminCtx, s.Fixtures.Orgs[0].ID, s.Fixtures.UpdateRepoParams)
- s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("saving org: saving org mock error", err.Error())
+ s.Require().Equal("error saving org: error saving org: saving org mock error", err.Error())
+ s.assertSQLMockExpectations()
}
func (s *OrgTestSuite) TestUpdateOrganizationDBDecryptingErr() {
- s.StoreSQLMocked.cfg.Passphrase = "wrong-passphrase"
- s.Fixtures.UpdateRepoParams.WebhookSecret = "webhook-secret"
+ s.StoreSQLMocked.cfg.Passphrase = wrongPassphrase
+ s.Fixtures.UpdateRepoParams.WebhookSecret = webhookSecret
+ s.Fixtures.SQLMock.ExpectBegin()
s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `organizations` WHERE id = ? AND `organizations`.`deleted_at` IS NULL ORDER BY `organizations`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Orgs[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Orgs[0].ID))
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `organizations` WHERE id = ? AND `organizations`.`deleted_at` IS NULL ORDER BY `organizations`.`id` LIMIT ?")).
+ WithArgs(s.Fixtures.Orgs[0].ID, 1).
+ WillReturnRows(sqlmock.NewRows([]string{"id", "endpoint_name"}).
+ AddRow(s.Fixtures.Orgs[0].ID, s.Fixtures.Orgs[0].Endpoint.Name))
+ s.Fixtures.SQLMock.
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `github_credentials` WHERE user_id = ? AND name = ? AND `github_credentials`.`deleted_at` IS NULL ORDER BY `github_credentials`.`id` LIMIT ?")).
+ WithArgs(s.adminUserID, s.secondaryTestCreds.Name, 1).
+ WillReturnRows(sqlmock.NewRows([]string{"id", "endpoint_name"}).
+ AddRow(s.secondaryTestCreds.ID, s.secondaryTestCreds.Endpoint.Name))
+ s.Fixtures.SQLMock.ExpectQuery(regexp.QuoteMeta("SELECT * FROM `github_endpoints` WHERE `github_endpoints`.`name` = ? AND `github_endpoints`.`deleted_at` IS NULL")).
+ WithArgs(s.testCreds.Endpoint.Name).
+ WillReturnRows(sqlmock.NewRows([]string{"name"}).
+ AddRow(s.secondaryTestCreds.Endpoint.Name))
+ s.Fixtures.SQLMock.ExpectRollback()
- _, err := s.StoreSQLMocked.UpdateOrganization(context.Background(), s.Fixtures.Orgs[0].ID, s.Fixtures.UpdateRepoParams)
+ _, err := s.StoreSQLMocked.UpdateOrganization(s.adminCtx, s.Fixtures.Orgs[0].ID, s.Fixtures.UpdateRepoParams)
- s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("saving org: failed to encrypt string: invalid passphrase length (expected length 32 characters)", err.Error())
+ s.Require().Equal("error saving org: saving org: failed to encrypt string: invalid passphrase length (expected length 32 characters)", err.Error())
+ s.assertSQLMockExpectations()
}
func (s *OrgTestSuite) TestGetOrganizationByID() {
- org, err := s.Store.GetOrganizationByID(context.Background(), s.Fixtures.Orgs[0].ID)
+ org, err := s.Store.GetOrganizationByID(s.adminCtx, s.Fixtures.Orgs[0].ID)
s.Require().Nil(err)
s.Require().Equal(s.Fixtures.Orgs[0].ID, org.ID)
}
func (s *OrgTestSuite) TestGetOrganizationByIDInvalidOrgID() {
- _, err := s.Store.GetOrganizationByID(context.Background(), "dummy-org-id")
+ _, err := s.Store.GetOrganizationByID(s.adminCtx, "dummy-org-id")
s.Require().NotNil(err)
- s.Require().Equal("fetching org: parsing id: invalid request", err.Error())
+ s.Require().Equal("error fetching org: error parsing id: invalid request", err.Error())
}
func (s *OrgTestSuite) TestGetOrganizationByIDDBDecryptingErr() {
s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `organizations` WHERE id = ? AND `organizations`.`deleted_at` IS NULL ORDER BY `organizations`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Orgs[0].ID).
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `organizations` WHERE id = ? AND `organizations`.`deleted_at` IS NULL ORDER BY `organizations`.`id` LIMIT ?")).
+ WithArgs(s.Fixtures.Orgs[0].ID, 1).
WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Orgs[0].ID))
+ s.Fixtures.SQLMock.
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `organization_events` WHERE `organization_events`.`org_id` = ? AND `organization_events`.`deleted_at` IS NULL")).
+ WithArgs(s.Fixtures.Orgs[0].ID).
+ WillReturnRows(sqlmock.NewRows([]string{"org_id"}).AddRow(s.Fixtures.Orgs[0].ID))
s.Fixtures.SQLMock.
ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE `pools`.`org_id` = ? AND `pools`.`deleted_at` IS NULL")).
WithArgs(s.Fixtures.Orgs[0].ID).
WillReturnRows(sqlmock.NewRows([]string{"org_id"}).AddRow(s.Fixtures.Orgs[0].ID))
- _, err := s.StoreSQLMocked.GetOrganizationByID(context.Background(), s.Fixtures.Orgs[0].ID)
+ _, err := s.StoreSQLMocked.GetOrganizationByID(s.adminCtx, s.Fixtures.Orgs[0].ID)
s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("fetching enterprise: missing secret", err.Error())
+ s.Require().Equal("error fetching org: missing secret", err.Error())
}
func (s *OrgTestSuite) TestCreateOrganizationPool() {
- pool, err := s.Store.CreateOrganizationPool(context.Background(), s.Fixtures.Orgs[0].ID, s.Fixtures.CreatePoolParams)
+ entity, err := s.Fixtures.Orgs[0].GetEntity()
+ s.Require().Nil(err)
+ pool, err := s.Store.CreateEntityPool(s.adminCtx, entity, s.Fixtures.CreatePoolParams)
s.Require().Nil(err)
- org, err := s.Store.GetOrganizationByID(context.Background(), s.Fixtures.Orgs[0].ID)
+ org, err := s.Store.GetOrganizationByID(s.adminCtx, s.Fixtures.Orgs[0].ID)
if err != nil {
s.FailNow(fmt.Sprintf("cannot get org by ID: %v", err))
}
@@ -418,216 +594,120 @@ func (s *OrgTestSuite) TestCreateOrganizationPool() {
func (s *OrgTestSuite) TestCreateOrganizationPoolMissingTags() {
s.Fixtures.CreatePoolParams.Tags = []string{}
-
- _, err := s.Store.CreateOrganizationPool(context.Background(), s.Fixtures.Orgs[0].ID, s.Fixtures.CreatePoolParams)
+ entity, err := s.Fixtures.Orgs[0].GetEntity()
+ s.Require().Nil(err)
+ _, err = s.Store.CreateEntityPool(s.adminCtx, entity, s.Fixtures.CreatePoolParams)
s.Require().NotNil(err)
s.Require().Equal("no tags specified", err.Error())
}
func (s *OrgTestSuite) TestCreateOrganizationPoolInvalidOrgID() {
- _, err := s.Store.CreateOrganizationPool(context.Background(), "dummy-org-id", s.Fixtures.CreatePoolParams)
+ entity := params.ForgeEntity{
+ ID: "dummy-org-id",
+ EntityType: params.ForgeEntityTypeOrganization,
+ }
+ _, err := s.Store.CreateEntityPool(s.adminCtx, entity, s.Fixtures.CreatePoolParams)
s.Require().NotNil(err)
- s.Require().Equal("fetching org: parsing id: invalid request", err.Error())
-}
-
-func (s *OrgTestSuite) TestCreateOrganizationPoolDBCreateErr() {
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `organizations` WHERE id = ? AND `organizations`.`deleted_at` IS NULL ORDER BY `organizations`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Orgs[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Orgs[0].ID))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `organizations` WHERE id = ? AND `organizations`.`deleted_at` IS NULL ORDER BY `organizations`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Orgs[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Orgs[0].ID))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE `pools`.`org_id` = ? AND (provider_name = ? and image = ? and flavor = ?) AND `pools`.`deleted_at` IS NULL")).
- WillReturnError(fmt.Errorf("mocked creating pool error"))
-
- _, err := s.StoreSQLMocked.CreateOrganizationPool(context.Background(), s.Fixtures.Orgs[0].ID, s.Fixtures.CreatePoolParams)
-
- s.assertSQLMockExpectations()
- s.Require().NotNil(err)
- s.Require().Equal("creating pool: fetching pool: mocked creating pool error", err.Error())
-}
-
-func (s *OrgTestSuite) TestCreateOrganizationDBPoolAlreadyExistErr() {
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `organizations` WHERE id = ? AND `organizations`.`deleted_at` IS NULL ORDER BY `organizations`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Orgs[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Orgs[0].ID))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `organizations` WHERE id = ? AND `organizations`.`deleted_at` IS NULL ORDER BY `organizations`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Orgs[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Orgs[0].ID))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE `pools`.`org_id` = ? AND (provider_name = ? and image = ? and flavor = ?) AND `pools`.`deleted_at` IS NULL")).
- WithArgs(
- s.Fixtures.Orgs[0].ID,
- s.Fixtures.CreatePoolParams.ProviderName,
- s.Fixtures.CreatePoolParams.Image,
- s.Fixtures.CreatePoolParams.Flavor).
- WillReturnRows(sqlmock.NewRows([]string{"org_id", "provider_name", "image", "flavor"}).
- AddRow(
- s.Fixtures.Orgs[0].ID,
- s.Fixtures.CreatePoolParams.ProviderName,
- s.Fixtures.CreatePoolParams.Image,
- s.Fixtures.CreatePoolParams.Flavor))
-
- _, err := s.StoreSQLMocked.CreateOrganizationPool(context.Background(), s.Fixtures.Orgs[0].ID, s.Fixtures.CreatePoolParams)
-
- s.assertSQLMockExpectations()
- s.Require().NotNil(err)
- s.Require().Equal(runnerErrors.NewConflictError("pool with the same image and flavor already exists on this provider"), err)
+ s.Require().Equal("error parsing id: invalid request", err.Error())
}
func (s *OrgTestSuite) TestCreateOrganizationPoolDBFetchTagErr() {
+ s.Fixtures.SQLMock.ExpectBegin()
s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `organizations` WHERE id = ? AND `organizations`.`deleted_at` IS NULL ORDER BY `organizations`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Orgs[0].ID).
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `organizations` WHERE id = ? AND `organizations`.`deleted_at` IS NULL ORDER BY `organizations`.`id` LIMIT ?")).
+ WithArgs(s.Fixtures.Orgs[0].ID, 1).
WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Orgs[0].ID))
s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `organizations` WHERE id = ? AND `organizations`.`deleted_at` IS NULL ORDER BY `organizations`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Orgs[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Orgs[0].ID))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE `pools`.`org_id` = ? AND (provider_name = ? and image = ? and flavor = ?) AND `pools`.`deleted_at` IS NULL")).
- WithArgs(
- s.Fixtures.Orgs[0].ID,
- s.Fixtures.CreatePoolParams.ProviderName,
- s.Fixtures.CreatePoolParams.Image,
- s.Fixtures.CreatePoolParams.Flavor).
- WillReturnRows(sqlmock.NewRows([]string{"org_id"}))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT 1")).
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? COLLATE NOCASE AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT ?")).
WillReturnError(fmt.Errorf("mocked fetching tag error"))
- _, err := s.StoreSQLMocked.CreateOrganizationPool(context.Background(), s.Fixtures.Orgs[0].ID, s.Fixtures.CreatePoolParams)
+ entity, err := s.Fixtures.Orgs[0].GetEntity()
+ s.Require().Nil(err)
+ _, err = s.StoreSQLMocked.CreateEntityPool(s.adminCtx, entity, s.Fixtures.CreatePoolParams)
- s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("fetching tag: fetching tag from database: mocked fetching tag error", err.Error())
+ s.Require().Equal("error creating tag: error fetching tag from database: mocked fetching tag error", err.Error())
+ s.assertSQLMockExpectations()
}
func (s *OrgTestSuite) TestCreateOrganizationPoolDBAddingPoolErr() {
s.Fixtures.CreatePoolParams.Tags = []string{"linux"}
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `organizations` WHERE id = ? AND `organizations`.`deleted_at` IS NULL ORDER BY `organizations`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Orgs[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Orgs[0].ID))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `organizations` WHERE id = ? AND `organizations`.`deleted_at` IS NULL ORDER BY `organizations`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Orgs[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Orgs[0].ID))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE `pools`.`org_id` = ? AND (provider_name = ? and image = ? and flavor = ?) AND `pools`.`deleted_at` IS NULL")).
- WithArgs(
- s.Fixtures.Orgs[0].ID,
- s.Fixtures.CreatePoolParams.ProviderName,
- s.Fixtures.CreatePoolParams.Image,
- s.Fixtures.CreatePoolParams.Flavor).
- WillReturnRows(sqlmock.NewRows([]string{"org_id"}))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT 1")).
- WillReturnRows(sqlmock.NewRows([]string{"linux"}))
s.Fixtures.SQLMock.ExpectBegin()
+ s.Fixtures.SQLMock.
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `organizations` WHERE id = ? AND `organizations`.`deleted_at` IS NULL ORDER BY `organizations`.`id` LIMIT ?")).
+ WithArgs(s.Fixtures.Orgs[0].ID, 1).
+ WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Orgs[0].ID))
+ s.Fixtures.SQLMock.
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? COLLATE NOCASE AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT ?")).
+ WillReturnRows(sqlmock.NewRows([]string{"linux"}))
s.Fixtures.SQLMock.
ExpectExec(regexp.QuoteMeta("INSERT INTO `tags`")).
WillReturnResult(sqlmock.NewResult(1, 1))
- s.Fixtures.SQLMock.ExpectCommit()
- s.Fixtures.SQLMock.ExpectBegin()
s.Fixtures.SQLMock.
ExpectExec(regexp.QuoteMeta("INSERT INTO `pools`")).
WillReturnError(fmt.Errorf("mocked adding pool error"))
s.Fixtures.SQLMock.ExpectRollback()
- _, err := s.StoreSQLMocked.CreateOrganizationPool(context.Background(), s.Fixtures.Orgs[0].ID, s.Fixtures.CreatePoolParams)
+ entity, err := s.Fixtures.Orgs[0].GetEntity()
+ s.Require().Nil(err)
+ _, err = s.StoreSQLMocked.CreateEntityPool(s.adminCtx, entity, s.Fixtures.CreatePoolParams)
- s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("adding pool: mocked adding pool error", err.Error())
+ s.Require().Equal("error creating pool: mocked adding pool error", err.Error())
+ s.assertSQLMockExpectations()
}
func (s *OrgTestSuite) TestCreateOrganizationPoolDBSaveTagErr() {
s.Fixtures.CreatePoolParams.Tags = []string{"linux"}
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `organizations` WHERE id = ? AND `organizations`.`deleted_at` IS NULL ORDER BY `organizations`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Orgs[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Orgs[0].ID))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `organizations` WHERE id = ? AND `organizations`.`deleted_at` IS NULL ORDER BY `organizations`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Orgs[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Orgs[0].ID))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE `pools`.`org_id` = ? AND (provider_name = ? and image = ? and flavor = ?) AND `pools`.`deleted_at` IS NULL")).
- WithArgs(
- s.Fixtures.Orgs[0].ID,
- s.Fixtures.CreatePoolParams.ProviderName,
- s.Fixtures.CreatePoolParams.Image,
- s.Fixtures.CreatePoolParams.Flavor).
- WillReturnRows(sqlmock.NewRows([]string{"org_id"}))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT 1")).
- WillReturnRows(sqlmock.NewRows([]string{"linux"}))
s.Fixtures.SQLMock.ExpectBegin()
+ s.Fixtures.SQLMock.
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `organizations` WHERE id = ? AND `organizations`.`deleted_at` IS NULL ORDER BY `organizations`.`id` LIMIT ?")).
+ WithArgs(s.Fixtures.Orgs[0].ID, 1).
+ WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Orgs[0].ID))
+ s.Fixtures.SQLMock.
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? COLLATE NOCASE AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT ?")).
+ WillReturnRows(sqlmock.NewRows([]string{"linux"}))
s.Fixtures.SQLMock.
ExpectExec(regexp.QuoteMeta("INSERT INTO `tags`")).
WillReturnResult(sqlmock.NewResult(1, 1))
- s.Fixtures.SQLMock.ExpectCommit()
- s.Fixtures.SQLMock.ExpectBegin()
s.Fixtures.SQLMock.
ExpectExec(regexp.QuoteMeta("INSERT INTO `pools`")).
WillReturnResult(sqlmock.NewResult(1, 1))
- s.Fixtures.SQLMock.ExpectCommit()
- s.Fixtures.SQLMock.ExpectBegin()
s.Fixtures.SQLMock.
ExpectExec(regexp.QuoteMeta("UPDATE `pools` SET")).
WillReturnError(fmt.Errorf("mocked saving tag error"))
s.Fixtures.SQLMock.ExpectRollback()
- _, err := s.StoreSQLMocked.CreateOrganizationPool(context.Background(), s.Fixtures.Orgs[0].ID, s.Fixtures.CreatePoolParams)
+ entity, err := s.Fixtures.Orgs[0].GetEntity()
+ s.Require().Nil(err)
+ _, err = s.StoreSQLMocked.CreateEntityPool(s.adminCtx, entity, s.Fixtures.CreatePoolParams)
- s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("saving tag: mocked saving tag error", err.Error())
+ s.Require().Equal("error associating tags: mocked saving tag error", err.Error())
+ s.assertSQLMockExpectations()
}
func (s *OrgTestSuite) TestCreateOrganizationPoolDBFetchPoolErr() {
s.Fixtures.CreatePoolParams.Tags = []string{"linux"}
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `organizations` WHERE id = ? AND `organizations`.`deleted_at` IS NULL ORDER BY `organizations`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Orgs[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Orgs[0].ID))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `organizations` WHERE id = ? AND `organizations`.`deleted_at` IS NULL ORDER BY `organizations`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Orgs[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Orgs[0].ID))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE `pools`.`org_id` = ? AND (provider_name = ? and image = ? and flavor = ?) AND `pools`.`deleted_at` IS NULL")).
- WithArgs(
- s.Fixtures.Orgs[0].ID,
- s.Fixtures.CreatePoolParams.ProviderName,
- s.Fixtures.CreatePoolParams.Image,
- s.Fixtures.CreatePoolParams.Flavor).
- WillReturnRows(sqlmock.NewRows([]string{"org_id"}))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT 1")).
- WillReturnRows(sqlmock.NewRows([]string{"linux"}))
s.Fixtures.SQLMock.ExpectBegin()
+ s.Fixtures.SQLMock.
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `organizations` WHERE id = ? AND `organizations`.`deleted_at` IS NULL ORDER BY `organizations`.`id` LIMIT ?")).
+ WithArgs(s.Fixtures.Orgs[0].ID, 1).
+ WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Orgs[0].ID))
+ s.Fixtures.SQLMock.
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? COLLATE NOCASE AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT ?")).
+ WillReturnRows(sqlmock.NewRows([]string{"linux"}))
s.Fixtures.SQLMock.
ExpectExec(regexp.QuoteMeta("INSERT INTO `tags`")).
WillReturnResult(sqlmock.NewResult(1, 1))
- s.Fixtures.SQLMock.ExpectCommit()
- s.Fixtures.SQLMock.ExpectBegin()
s.Fixtures.SQLMock.
ExpectExec(regexp.QuoteMeta("INSERT INTO `pools`")).
WillReturnResult(sqlmock.NewResult(1, 1))
- s.Fixtures.SQLMock.ExpectCommit()
- s.Fixtures.SQLMock.ExpectBegin()
s.Fixtures.SQLMock.
ExpectExec(regexp.QuoteMeta("UPDATE `pools` SET")).
WillReturnResult(sqlmock.NewResult(1, 1))
@@ -639,161 +719,165 @@ func (s *OrgTestSuite) TestCreateOrganizationPoolDBFetchPoolErr() {
WillReturnResult(sqlmock.NewResult(1, 1))
s.Fixtures.SQLMock.ExpectCommit()
s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE id = ? AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT 1")).
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE id = ? AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT ?")).
WillReturnRows(sqlmock.NewRows([]string{"id"}))
- _, err := s.StoreSQLMocked.CreateOrganizationPool(context.Background(), s.Fixtures.Orgs[0].ID, s.Fixtures.CreatePoolParams)
+ entity, err := s.Fixtures.Orgs[0].GetEntity()
+ s.Require().Nil(err)
+
+ _, err = s.StoreSQLMocked.CreateEntityPool(s.adminCtx, entity, s.Fixtures.CreatePoolParams)
- s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("fetching pool: not found", err.Error())
+ s.Require().Equal("error fetching pool by ID: not found", err.Error())
+ s.assertSQLMockExpectations()
}
func (s *OrgTestSuite) TestListOrgPools() {
orgPools := []params.Pool{}
+ entity, err := s.Fixtures.Orgs[0].GetEntity()
+ s.Require().Nil(err)
for i := 1; i <= 2; i++ {
s.Fixtures.CreatePoolParams.Flavor = fmt.Sprintf("test-flavor-%v", i)
- pool, err := s.Store.CreateOrganizationPool(context.Background(), s.Fixtures.Orgs[0].ID, s.Fixtures.CreatePoolParams)
+ pool, err := s.Store.CreateEntityPool(s.adminCtx, entity, s.Fixtures.CreatePoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create org pool: %v", err))
}
orgPools = append(orgPools, pool)
}
-
- pools, err := s.Store.ListOrgPools(context.Background(), s.Fixtures.Orgs[0].ID)
+ pools, err := s.Store.ListEntityPools(s.adminCtx, entity)
s.Require().Nil(err)
garmTesting.EqualDBEntityID(s.T(), orgPools, pools)
}
func (s *OrgTestSuite) TestListOrgPoolsInvalidOrgID() {
- _, err := s.Store.ListOrgPools(context.Background(), "dummy-org-id")
+ entity := params.ForgeEntity{
+ ID: "dummy-org-id",
+ EntityType: params.ForgeEntityTypeOrganization,
+ }
+ _, err := s.Store.ListEntityPools(s.adminCtx, entity)
s.Require().NotNil(err)
- s.Require().Equal("fetching pools: parsing id: invalid request", err.Error())
+ s.Require().Equal("error fetching pools: error parsing id: invalid request", err.Error())
}
func (s *OrgTestSuite) TestGetOrganizationPool() {
- pool, err := s.Store.CreateOrganizationPool(context.Background(), s.Fixtures.Orgs[0].ID, s.Fixtures.CreatePoolParams)
+ entity, err := s.Fixtures.Orgs[0].GetEntity()
+ s.Require().Nil(err)
+ pool, err := s.Store.CreateEntityPool(s.adminCtx, entity, s.Fixtures.CreatePoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create org pool: %v", err))
}
- orgPool, err := s.Store.GetOrganizationPool(context.Background(), s.Fixtures.Orgs[0].ID, pool.ID)
+ orgPool, err := s.Store.GetEntityPool(s.adminCtx, entity, pool.ID)
s.Require().Nil(err)
s.Require().Equal(orgPool.ID, pool.ID)
}
func (s *OrgTestSuite) TestGetOrganizationPoolInvalidOrgID() {
- _, err := s.Store.GetOrganizationPool(context.Background(), "dummy-org-id", "dummy-pool-id")
+ entity := params.ForgeEntity{
+ ID: "dummy-org-id",
+ EntityType: params.ForgeEntityTypeOrganization,
+ }
+ _, err := s.Store.GetEntityPool(s.adminCtx, entity, "dummy-pool-id")
s.Require().NotNil(err)
- s.Require().Equal("fetching pool: parsing id: invalid request", err.Error())
+ s.Require().Equal("fetching pool: error parsing id: invalid request", err.Error())
}
func (s *OrgTestSuite) TestDeleteOrganizationPool() {
- pool, err := s.Store.CreateOrganizationPool(context.Background(), s.Fixtures.Orgs[0].ID, s.Fixtures.CreatePoolParams)
+ entity, err := s.Fixtures.Orgs[0].GetEntity()
+ s.Require().Nil(err)
+ pool, err := s.Store.CreateEntityPool(s.adminCtx, entity, s.Fixtures.CreatePoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create org pool: %v", err))
}
- err = s.Store.DeleteOrganizationPool(context.Background(), s.Fixtures.Orgs[0].ID, pool.ID)
+ err = s.Store.DeleteEntityPool(s.adminCtx, entity, pool.ID)
s.Require().Nil(err)
- _, err = s.Store.GetOrganizationPool(context.Background(), s.Fixtures.Orgs[0].ID, pool.ID)
- s.Require().Equal("fetching pool: finding pool: not found", err.Error())
+ _, err = s.Store.GetEntityPool(s.adminCtx, entity, pool.ID)
+ s.Require().Equal("fetching pool: error finding pool: not found", err.Error())
}
func (s *OrgTestSuite) TestDeleteOrganizationPoolInvalidOrgID() {
- err := s.Store.DeleteOrganizationPool(context.Background(), "dummy-org-id", "dummy-pool-id")
+ entity := params.ForgeEntity{
+ ID: "dummy-org-id",
+ EntityType: params.ForgeEntityTypeOrganization,
+ }
+ err := s.Store.DeleteEntityPool(s.adminCtx, entity, "dummy-pool-id")
s.Require().NotNil(err)
- s.Require().Equal("looking up org pool: parsing id: invalid request", err.Error())
+ s.Require().Equal("error parsing id: invalid request", err.Error())
}
func (s *OrgTestSuite) TestDeleteOrganizationPoolDBDeleteErr() {
- pool, err := s.Store.CreateOrganizationPool(context.Background(), s.Fixtures.Orgs[0].ID, s.Fixtures.CreatePoolParams)
+ entity, err := s.Fixtures.Orgs[0].GetEntity()
+ s.Require().Nil(err)
+
+ pool, err := s.Store.CreateEntityPool(s.adminCtx, entity, s.Fixtures.CreatePoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create org pool: %v", err))
}
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE (id = ? and org_id = ?) AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT 1")).
- WithArgs(pool.ID, s.Fixtures.Orgs[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"org_id", "id"}).AddRow(s.Fixtures.Orgs[0].ID, pool.ID))
s.Fixtures.SQLMock.ExpectBegin()
s.Fixtures.SQLMock.
- ExpectExec(regexp.QuoteMeta("DELETE FROM `pools` WHERE `pools`.`id` = ?")).
- WithArgs(pool.ID).
+ ExpectExec(regexp.QuoteMeta("DELETE FROM `pools` WHERE id = ? and org_id = ?")).
+ WithArgs(pool.ID, s.Fixtures.Orgs[0].ID).
WillReturnError(fmt.Errorf("mocked deleting pool error"))
s.Fixtures.SQLMock.ExpectRollback()
- err = s.StoreSQLMocked.DeleteOrganizationPool(context.Background(), s.Fixtures.Orgs[0].ID, pool.ID)
+ err = s.StoreSQLMocked.DeleteEntityPool(s.adminCtx, entity, pool.ID)
+ s.Require().NotNil(err)
+ s.Require().Equal("error removing pool: mocked deleting pool error", err.Error())
s.assertSQLMockExpectations()
- s.Require().NotNil(err)
- s.Require().Equal("deleting pool: mocked deleting pool error", err.Error())
-}
-
-func (s *OrgTestSuite) TestFindOrganizationPoolByTags() {
- orgPool, err := s.Store.CreateOrganizationPool(context.Background(), s.Fixtures.Orgs[0].ID, s.Fixtures.CreatePoolParams)
- if err != nil {
- s.FailNow(fmt.Sprintf("cannot create org pool: %v", err))
- }
-
- pool, err := s.Store.FindOrganizationPoolByTags(context.Background(), s.Fixtures.Orgs[0].ID, s.Fixtures.CreatePoolParams.Tags)
-
- s.Require().Nil(err)
- s.Require().Equal(orgPool.ID, pool.ID)
- s.Require().Equal(orgPool.Image, pool.Image)
- s.Require().Equal(orgPool.Flavor, pool.Flavor)
-}
-
-func (s *OrgTestSuite) TestFindOrganizationPoolByTagsMissingTags() {
- tags := []string{}
-
- _, err := s.Store.FindOrganizationPoolByTags(context.Background(), s.Fixtures.Orgs[0].ID, tags)
-
- s.Require().NotNil(err)
- s.Require().Equal("fetching pool: missing tags", err.Error())
}
func (s *OrgTestSuite) TestListOrgInstances() {
- pool, err := s.Store.CreateOrganizationPool(context.Background(), s.Fixtures.Orgs[0].ID, s.Fixtures.CreatePoolParams)
+ entity, err := s.Fixtures.Orgs[0].GetEntity()
+ s.Require().Nil(err)
+ pool, err := s.Store.CreateEntityPool(s.adminCtx, entity, s.Fixtures.CreatePoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create org pool: %v", err))
}
poolInstances := []params.Instance{}
for i := 1; i <= 3; i++ {
s.Fixtures.CreateInstanceParams.Name = fmt.Sprintf("test-org-%v", i)
- instance, err := s.Store.CreateInstance(context.Background(), pool.ID, s.Fixtures.CreateInstanceParams)
+ instance, err := s.Store.CreateInstance(s.adminCtx, pool.ID, s.Fixtures.CreateInstanceParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create instance: %s", err))
}
poolInstances = append(poolInstances, instance)
}
- instances, err := s.Store.ListOrgInstances(context.Background(), s.Fixtures.Orgs[0].ID)
+ instances, err := s.Store.ListEntityInstances(s.adminCtx, entity)
s.Require().Nil(err)
s.equalInstancesByName(poolInstances, instances)
}
func (s *OrgTestSuite) TestListOrgInstancesInvalidOrgID() {
- _, err := s.Store.ListOrgInstances(context.Background(), "dummy-org-id")
+ entity := params.ForgeEntity{
+ ID: "dummy-org-id",
+ EntityType: params.ForgeEntityTypeOrganization,
+ }
+ _, err := s.Store.ListEntityInstances(s.adminCtx, entity)
s.Require().NotNil(err)
- s.Require().Equal("fetching org: parsing id: invalid request", err.Error())
+ s.Require().Equal("error fetching entity: error parsing id: invalid request", err.Error())
}
func (s *OrgTestSuite) TestUpdateOrganizationPool() {
- pool, err := s.Store.CreateOrganizationPool(context.Background(), s.Fixtures.Orgs[0].ID, s.Fixtures.CreatePoolParams)
+ entity, err := s.Fixtures.Orgs[0].GetEntity()
+ s.Require().Nil(err)
+ pool, err := s.Store.CreateEntityPool(s.adminCtx, entity, s.Fixtures.CreatePoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create org pool: %v", err))
}
- pool, err = s.Store.UpdateOrganizationPool(context.Background(), s.Fixtures.Orgs[0].ID, pool.ID, s.Fixtures.UpdatePoolParams)
+ pool, err = s.Store.UpdateEntityPool(s.adminCtx, entity, pool.ID, s.Fixtures.UpdatePoolParams)
s.Require().Nil(err)
s.Require().Equal(*s.Fixtures.UpdatePoolParams.MaxRunners, pool.MaxRunners)
@@ -802,14 +886,39 @@ func (s *OrgTestSuite) TestUpdateOrganizationPool() {
s.Require().Equal(s.Fixtures.UpdatePoolParams.Flavor, pool.Flavor)
}
+func (s *OrgTestSuite) TestAddOrgEntityEvent() {
+ org, err := s.Store.CreateOrganization(
+ s.adminCtx,
+ s.Fixtures.CreateOrgParams.Name,
+ s.testCreds,
+ s.Fixtures.CreateOrgParams.WebhookSecret,
+ params.PoolBalancerTypeRoundRobin)
+
+ s.Require().Nil(err)
+ entity, err := org.GetEntity()
+ s.Require().Nil(err)
+ err = s.Store.AddEntityEvent(s.adminCtx, entity, params.StatusEvent, params.EventInfo, "this is a test", 20)
+ s.Require().Nil(err)
+
+ org, err = s.Store.GetOrganizationByID(s.adminCtx, org.ID)
+ s.Require().Nil(err)
+ s.Require().Equal(1, len(org.Events))
+ s.Require().Equal(params.StatusEvent, org.Events[0].EventType)
+ s.Require().Equal(params.EventInfo, org.Events[0].EventLevel)
+ s.Require().Equal("this is a test", org.Events[0].Message)
+}
+
func (s *OrgTestSuite) TestUpdateOrganizationPoolInvalidOrgID() {
- _, err := s.Store.UpdateOrganizationPool(context.Background(), "dummy-org-id", "dummy-pool-id", s.Fixtures.UpdatePoolParams)
+ entity := params.ForgeEntity{
+ ID: "dummy-org-id",
+ EntityType: params.ForgeEntityTypeOrganization,
+ }
+ _, err := s.Store.UpdateEntityPool(s.adminCtx, entity, "dummy-pool-id", s.Fixtures.UpdatePoolParams)
s.Require().NotNil(err)
- s.Require().Equal("fetching pool: parsing id: invalid request", err.Error())
+ s.Require().Equal("error fetching pool: error parsing id: invalid request", err.Error())
}
func TestOrgTestSuite(t *testing.T) {
- t.Parallel()
suite.Run(t, new(OrgTestSuite))
}
diff --git a/database/sql/pools.go b/database/sql/pools.go
index 7990e9d3..e86087ad 100644
--- a/database/sql/pools.go
+++ b/database/sql/pools.go
@@ -16,179 +16,224 @@ package sql
import (
"context"
+ "errors"
"fmt"
- runnerErrors "github.com/cloudbase/garm-provider-common/errors"
- "github.com/cloudbase/garm/params"
-
"github.com/google/uuid"
- "github.com/pkg/errors"
+ "gorm.io/datatypes"
"gorm.io/gorm"
+
+ runnerErrors "github.com/cloudbase/garm-provider-common/errors"
+ "github.com/cloudbase/garm/database/common"
+ "github.com/cloudbase/garm/params"
)
-func (s *sqlDatabase) ListAllPools(ctx context.Context) ([]params.Pool, error) {
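+// Foreign key column names linking a pool to its owning repository, organization or enterprise.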
+const (
+ entityTypeEnterpriseName = "enterprise_id"
+ entityTypeOrgName = "org_id"
+ entityTypeRepoName = "repo_id"
+)
+
+func (s *sqlDatabase) ListAllPools(_ context.Context) ([]params.Pool, error) {
var pools []Pool
- q := s.conn.Model(&Pool{}).
+ q := s.conn.
Preload("Tags").
Preload("Organization").
+ Preload("Organization.Endpoint").
Preload("Repository").
+ Preload("Repository.Endpoint").
Preload("Enterprise").
+ Preload("Enterprise.Endpoint").
Omit("extra_specs").
Find(&pools)
if q.Error != nil {
- return nil, errors.Wrap(q.Error, "fetching all pools")
+ return nil, fmt.Errorf("error fetching all pools: %w", q.Error)
}
ret := make([]params.Pool, len(pools))
+ var err error
for idx, val := range pools {
- ret[idx] = s.sqlToCommonPool(val)
+ ret[idx], err = s.sqlToCommonPool(val)
+ if err != nil {
+ return nil, fmt.Errorf("error converting pool: %w", err)
+ }
}
return ret, nil
}
-func (s *sqlDatabase) GetPoolByID(ctx context.Context, poolID string) (params.Pool, error) {
- pool, err := s.getPoolByID(ctx, poolID, "Tags", "Instances", "Enterprise", "Organization", "Repository")
- if err != nil {
- return params.Pool{}, errors.Wrap(err, "fetching pool by ID")
+func (s *sqlDatabase) GetPoolByID(_ context.Context, poolID string) (params.Pool, error) {
+ preloadList := []string{
+ "Tags",
+ "Instances",
+ "Enterprise",
+ "Enterprise.Endpoint",
+ "Organization",
+ "Organization.Endpoint",
+ "Repository",
+ "Repository.Endpoint",
}
- return s.sqlToCommonPool(pool), nil
+ pool, err := s.getPoolByID(s.conn, poolID, preloadList...)
+ if err != nil {
+ return params.Pool{}, fmt.Errorf("error fetching pool by ID: %w", err)
+ }
+ return s.sqlToCommonPool(pool)
}
-func (s *sqlDatabase) DeletePoolByID(ctx context.Context, poolID string) error {
- pool, err := s.getPoolByID(ctx, poolID)
+func (s *sqlDatabase) DeletePoolByID(_ context.Context, poolID string) (err error) {
+ pool, err := s.getPoolByID(s.conn, poolID)
if err != nil {
- return errors.Wrap(err, "fetching pool by ID")
+ return fmt.Errorf("error fetching pool by ID: %w", err)
}
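+	// Notify the watcher about the removal only once the delete has actually succeeded.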
+ defer func() {
+ if err == nil {
+ s.sendNotify(common.PoolEntityType, common.DeleteOperation, params.Pool{ID: poolID})
+ }
+ }()
+
if q := s.conn.Unscoped().Delete(&pool); q.Error != nil {
- return errors.Wrap(q.Error, "removing pool")
+ return fmt.Errorf("error removing pool: %w", q.Error)
}
return nil
}
-func (s *sqlDatabase) getEntityPool(ctx context.Context, entityType params.PoolType, entityID, poolID string, preload ...string) (Pool, error) {
+func (s *sqlDatabase) getEntityPool(tx *gorm.DB, entityType params.ForgeEntityType, entityID, poolID string, preload ...string) (Pool, error) {
if entityID == "" {
- return Pool{}, errors.Wrap(runnerErrors.ErrBadRequest, "missing entity id")
+ return Pool{}, fmt.Errorf("error missing entity id: %w", runnerErrors.ErrBadRequest)
}
u, err := uuid.Parse(poolID)
if err != nil {
- return Pool{}, errors.Wrap(runnerErrors.ErrBadRequest, "parsing id")
+ return Pool{}, fmt.Errorf("error parsing id: %w", runnerErrors.ErrBadRequest)
}
- q := s.conn
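+	// Map the entity type to its foreign key column and to the association that should be preloaded.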
+ var fieldName string
+ var entityField string
+ switch entityType {
+ case params.ForgeEntityTypeRepository:
+ fieldName = entityTypeRepoName
+ entityField = repositoryFieldName
+ case params.ForgeEntityTypeOrganization:
+ fieldName = entityTypeOrgName
+ entityField = organizationFieldName
+ case params.ForgeEntityTypeEnterprise:
+ fieldName = entityTypeEnterpriseName
+ entityField = enterpriseFieldName
+ default:
+ return Pool{}, fmt.Errorf("invalid entityType: %v", entityType)
+ }
+
+ q := tx
+ q = q.Preload(entityField)
if len(preload) > 0 {
for _, item := range preload {
q = q.Preload(item)
}
}
- var fieldName string
- switch entityType {
- case params.RepositoryPool:
- fieldName = "repo_id"
- case params.OrganizationPool:
- fieldName = "org_id"
- case params.EnterprisePool:
- fieldName = "enterprise_id"
- default:
- return Pool{}, fmt.Errorf("invalid entityType: %v", entityType)
- }
-
var pool Pool
condition := fmt.Sprintf("id = ? and %s = ?", fieldName)
err = q.Model(&Pool{}).
Where(condition, u, entityID).
First(&pool).Error
-
if err != nil {
if errors.Is(err, gorm.ErrRecordNotFound) {
- return Pool{}, errors.Wrap(runnerErrors.ErrNotFound, "finding pool")
+ return Pool{}, fmt.Errorf("error finding pool: %w", runnerErrors.ErrNotFound)
}
- return Pool{}, errors.Wrap(err, "fetching pool")
+ return Pool{}, fmt.Errorf("error fetching pool: %w", err)
}
return pool, nil
}
-func (s *sqlDatabase) listEntityPools(ctx context.Context, entityType params.PoolType, entityID string, preload ...string) ([]Pool, error) {
+func (s *sqlDatabase) listEntityPools(tx *gorm.DB, entityType params.ForgeEntityType, entityID string, preload ...string) ([]Pool, error) {
if _, err := uuid.Parse(entityID); err != nil {
- return nil, errors.Wrap(runnerErrors.ErrBadRequest, "parsing id")
+ return nil, fmt.Errorf("error parsing id: %w", runnerErrors.ErrBadRequest)
}
- q := s.conn
+ if err := s.hasGithubEntity(tx, entityType, entityID); err != nil {
+ return nil, fmt.Errorf("error checking entity existence: %w", err)
+ }
+
+ var preloadEntity string
+ var fieldName string
+ switch entityType {
+ case params.ForgeEntityTypeRepository:
+ fieldName = entityTypeRepoName
+ preloadEntity = "Repository"
+ case params.ForgeEntityTypeOrganization:
+ fieldName = entityTypeOrgName
+ preloadEntity = "Organization"
+ case params.ForgeEntityTypeEnterprise:
+ fieldName = entityTypeEnterpriseName
+ preloadEntity = "Enterprise"
+ default:
+ return nil, fmt.Errorf("invalid entityType: %v", entityType)
+ }
+
+ q := tx
+ q = q.Preload(preloadEntity)
if len(preload) > 0 {
for _, item := range preload {
q = q.Preload(item)
}
}
- var fieldName string
- switch entityType {
- case params.RepositoryPool:
- fieldName = "repo_id"
- case params.OrganizationPool:
- fieldName = "org_id"
- case params.EnterprisePool:
- fieldName = "enterprise_id"
- default:
- return nil, fmt.Errorf("invalid entityType: %v", entityType)
- }
-
var pools []Pool
condition := fmt.Sprintf("%s = ?", fieldName)
err := q.Model(&Pool{}).
Where(condition, entityID).
Omit("extra_specs").
Find(&pools).Error
-
if err != nil {
if errors.Is(err, gorm.ErrRecordNotFound) {
return []Pool{}, nil
}
- return nil, errors.Wrap(err, "fetching pool")
+ return nil, fmt.Errorf("error fetching pool: %w", err)
}
return pools, nil
}
-func (s *sqlDatabase) findPoolByTags(id string, poolType params.PoolType, tags []string) ([]params.Pool, error) {
+func (s *sqlDatabase) findPoolByTags(id string, poolType params.ForgeEntityType, tags []string) ([]params.Pool, error) {
if len(tags) == 0 {
return nil, runnerErrors.NewBadRequestError("missing tags")
}
u, err := uuid.Parse(id)
if err != nil {
- return nil, errors.Wrap(runnerErrors.ErrBadRequest, "parsing id")
+ return nil, fmt.Errorf("error parsing id: %w", runnerErrors.ErrBadRequest)
}
var fieldName string
switch poolType {
- case params.RepositoryPool:
- fieldName = "repo_id"
- case params.OrganizationPool:
- fieldName = "org_id"
- case params.EnterprisePool:
- fieldName = "enterprise_id"
+ case params.ForgeEntityTypeRepository:
+ fieldName = entityTypeRepoName
+ case params.ForgeEntityTypeOrganization:
+ fieldName = entityTypeOrgName
+ case params.ForgeEntityTypeEnterprise:
+ fieldName = entityTypeEnterpriseName
default:
return nil, fmt.Errorf("invalid poolType: %v", poolType)
}
var pools []Pool
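+	// Select only pools that carry every requested tag (case-insensitive) and return higher-priority pools first.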
- where := fmt.Sprintf("tags.name in ? and %s = ? and enabled = true", fieldName)
+ where := fmt.Sprintf("tags.name COLLATE NOCASE in ? and %s = ? and enabled = true", fieldName)
q := s.conn.Joins("JOIN pool_tags on pool_tags.pool_id=pools.id").
Joins("JOIN tags on tags.id=pool_tags.tag_id").
Group("pools.id").
Preload("Tags").
Having("count(1) = ?", len(tags)).
- Where(where, tags, u).Find(&pools)
+ Where(where, tags, u).
+ Order("priority desc").
+ Find(&pools)
if q.Error != nil {
if errors.Is(q.Error, gorm.ErrRecordNotFound) {
return nil, runnerErrors.ErrNotFound
}
- return nil, errors.Wrap(q.Error, "fetching pool")
+ return nil, fmt.Errorf("error fetching pool: %w", q.Error)
}
if len(pools) == 0 {
@@ -197,13 +242,16 @@ func (s *sqlDatabase) findPoolByTags(id string, poolType params.PoolType, tags [
ret := make([]params.Pool, len(pools))
for idx, val := range pools {
- ret[idx] = s.sqlToCommonPool(val)
+ ret[idx], err = s.sqlToCommonPool(val)
+ if err != nil {
+ return nil, fmt.Errorf("error converting pool: %w", err)
+ }
}
return ret, nil
}
-func (s *sqlDatabase) FindPoolsMatchingAllTags(ctx context.Context, entityType params.PoolType, entityID string, tags []string) ([]params.Pool, error) {
+func (s *sqlDatabase) FindPoolsMatchingAllTags(_ context.Context, entityType params.ForgeEntityType, entityID string, tags []string) ([]params.Pool, error) {
if len(tags) == 0 {
return nil, runnerErrors.NewBadRequestError("missing tags")
}
@@ -213,8 +261,205 @@ func (s *sqlDatabase) FindPoolsMatchingAllTags(ctx context.Context, entityType p
if errors.Is(err, runnerErrors.ErrNotFound) {
return []params.Pool{}, nil
}
- return nil, errors.Wrap(err, "fetching pools")
+ return nil, fmt.Errorf("error fetching pools: %w", err)
}
return pools, nil
}
+
+func (s *sqlDatabase) CreateEntityPool(ctx context.Context, entity params.ForgeEntity, param params.CreatePoolParams) (pool params.Pool, err error) {
+ if len(param.Tags) == 0 {
+ return params.Pool{}, runnerErrors.NewBadRequestError("no tags specified")
+ }
+
+ defer func() {
+ if err == nil {
+ s.sendNotify(common.PoolEntityType, common.CreateOperation, pool)
+ }
+ }()
+
+ newPool := Pool{
+ ProviderName: param.ProviderName,
+ MaxRunners: param.MaxRunners,
+ MinIdleRunners: param.MinIdleRunners,
+ RunnerPrefix: param.GetRunnerPrefix(),
+ Image: param.Image,
+ Flavor: param.Flavor,
+ OSType: param.OSType,
+ OSArch: param.OSArch,
+ Enabled: param.Enabled,
+ RunnerBootstrapTimeout: param.RunnerBootstrapTimeout,
+ GitHubRunnerGroup: param.GitHubRunnerGroup,
+ Priority: param.Priority,
+ }
+ if len(param.ExtraSpecs) > 0 {
+ newPool.ExtraSpecs = datatypes.JSON(param.ExtraSpecs)
+ }
+
+ entityID, err := uuid.Parse(entity.ID)
+ if err != nil {
+ return params.Pool{}, fmt.Errorf("error parsing id: %w", runnerErrors.ErrBadRequest)
+ }
+
+ switch entity.EntityType {
+ case params.ForgeEntityTypeRepository:
+ newPool.RepoID = &entityID
+ case params.ForgeEntityTypeOrganization:
+ newPool.OrgID = &entityID
+ case params.ForgeEntityTypeEnterprise:
+ newPool.EnterpriseID = &entityID
+ }
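+	// Create the pool and its tag associations atomically; the parent entity is re-checked inside the transaction.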
+ err = s.conn.Transaction(func(tx *gorm.DB) error {
+ if err := s.hasGithubEntity(tx, entity.EntityType, entity.ID); err != nil {
+ return fmt.Errorf("error checking entity existence: %w", err)
+ }
+
+ tags := []Tag{}
+ for _, val := range param.Tags {
+ t, err := s.getOrCreateTag(tx, val)
+ if err != nil {
+ return fmt.Errorf("error creating tag: %w", err)
+ }
+ tags = append(tags, t)
+ }
+
+ q := tx.Create(&newPool)
+ if q.Error != nil {
+ return fmt.Errorf("error creating pool: %w", q.Error)
+ }
+
+ for i := range tags {
+ if err := tx.Model(&newPool).Association("Tags").Append(&tags[i]); err != nil {
+ return fmt.Errorf("error associating tags: %w", err)
+ }
+ }
+ return nil
+ })
+ if err != nil {
+ return params.Pool{}, err
+ }
+
+ return s.GetPoolByID(ctx, newPool.ID.String())
+}
+
+func (s *sqlDatabase) GetEntityPool(_ context.Context, entity params.ForgeEntity, poolID string) (params.Pool, error) {
+ preloadList := []string{
+ "Tags",
+ "Instances",
+ "Enterprise",
+ "Enterprise.Endpoint",
+ "Organization",
+ "Organization.Endpoint",
+ "Repository",
+ "Repository.Endpoint",
+ }
+ pool, err := s.getEntityPool(s.conn, entity.EntityType, entity.ID, poolID, preloadList...)
+ if err != nil {
+ return params.Pool{}, fmt.Errorf("fetching pool: %w", err)
+ }
+ return s.sqlToCommonPool(pool)
+}
+
+func (s *sqlDatabase) DeleteEntityPool(_ context.Context, entity params.ForgeEntity, poolID string) (err error) {
+ entityID, err := uuid.Parse(entity.ID)
+ if err != nil {
+ return fmt.Errorf("error parsing id: %w", runnerErrors.ErrBadRequest)
+ }
+
+ defer func() {
+ if err == nil {
+ pool := params.Pool{
+ ID: poolID,
+ }
+ s.sendNotify(common.PoolEntityType, common.DeleteOperation, pool)
+ }
+ }()
+
+ poolUUID, err := uuid.Parse(poolID)
+ if err != nil {
+ return fmt.Errorf("error parsing pool id: %w", runnerErrors.ErrBadRequest)
+ }
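+	// Scope the delete to the owning entity so a pool ID belonging to a different entity cannot be removed.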
+ var fieldName string
+ switch entity.EntityType {
+ case params.ForgeEntityTypeRepository:
+ fieldName = entityTypeRepoName
+ case params.ForgeEntityTypeOrganization:
+ fieldName = entityTypeOrgName
+ case params.ForgeEntityTypeEnterprise:
+ fieldName = entityTypeEnterpriseName
+ default:
+ return fmt.Errorf("invalid entityType: %v", entity.EntityType)
+ }
+ condition := fmt.Sprintf("id = ? and %s = ?", fieldName)
+ if err := s.conn.Unscoped().Where(condition, poolUUID, entityID).Delete(&Pool{}).Error; err != nil {
+ return fmt.Errorf("error removing pool: %w", err)
+ }
+ return nil
+}
+
+func (s *sqlDatabase) UpdateEntityPool(ctx context.Context, entity params.ForgeEntity, poolID string, param params.UpdatePoolParams) (updatedPool params.Pool, err error) {
+ defer func() {
+ if err == nil {
+ s.sendNotify(common.PoolEntityType, common.UpdateOperation, updatedPool)
+ }
+ }()
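+	// Fetch and update the pool in a single transaction, then re-read it so all associations are preloaded.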
+ err = s.conn.Transaction(func(tx *gorm.DB) error {
+ pool, err := s.getEntityPool(tx, entity.EntityType, entity.ID, poolID, "Tags", "Instances")
+ if err != nil {
+ return fmt.Errorf("error fetching pool: %w", err)
+ }
+
+ updatedPool, err = s.updatePool(tx, pool, param)
+ if err != nil {
+ return fmt.Errorf("error updating pool: %w", err)
+ }
+ return nil
+ })
+ if err != nil {
+ return params.Pool{}, err
+ }
+
+ updatedPool, err = s.GetPoolByID(ctx, poolID)
+ if err != nil {
+ return params.Pool{}, err
+ }
+ return updatedPool, nil
+}
+
+func (s *sqlDatabase) ListEntityPools(_ context.Context, entity params.ForgeEntity) ([]params.Pool, error) {
+ pools, err := s.listEntityPools(s.conn, entity.EntityType, entity.ID, "Tags")
+ if err != nil {
+ return nil, fmt.Errorf("error fetching pools: %w", err)
+ }
+
+ ret := make([]params.Pool, len(pools))
+ for idx, pool := range pools {
+ ret[idx], err = s.sqlToCommonPool(pool)
+ if err != nil {
+ return nil, fmt.Errorf("error fetching pool: %w", err)
+ }
+ }
+
+ return ret, nil
+}
+
+func (s *sqlDatabase) ListEntityInstances(_ context.Context, entity params.ForgeEntity) ([]params.Instance, error) {
+ pools, err := s.listEntityPools(s.conn, entity.EntityType, entity.ID, "Instances", "Instances.Job")
+ if err != nil {
+ return nil, fmt.Errorf("error fetching entity: %w", err)
+ }
+ ret := []params.Instance{}
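+	// Flatten the instances of all pools; each instance gets its owning pool attached, with the pool's own instance list detached so results stay small.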
+ for _, pool := range pools {
+ instances := pool.Instances
+ pool.Instances = nil
+ for _, instance := range instances {
+ instance.Pool = pool
+ paramsInstance, err := s.sqlToParamsInstance(instance)
+ if err != nil {
+ return nil, fmt.Errorf("error fetching instance: %w", err)
+ }
+ ret = append(ret, paramsInstance)
+ }
+ }
+ return ret, nil
+}
diff --git a/database/sql/pools_test.go b/database/sql/pools_test.go
index 277ff4c5..297f4cdf 100644
--- a/database/sql/pools_test.go
+++ b/database/sql/pools_test.go
@@ -16,20 +16,23 @@ package sql
import (
"context"
+ "encoding/json"
"flag"
"fmt"
"regexp"
"testing"
- dbCommon "github.com/cloudbase/garm/database/common"
- garmTesting "github.com/cloudbase/garm/internal/testing"
- "github.com/cloudbase/garm/params"
-
"github.com/stretchr/testify/suite"
"gopkg.in/DATA-DOG/go-sqlmock.v1"
"gorm.io/driver/mysql"
"gorm.io/gorm"
"gorm.io/gorm/logger"
+
+ commonParams "github.com/cloudbase/garm-provider-common/params"
+ dbCommon "github.com/cloudbase/garm/database/common"
+ "github.com/cloudbase/garm/database/watcher"
+ garmTesting "github.com/cloudbase/garm/internal/testing"
+ "github.com/cloudbase/garm/params"
)
type PoolsTestFixtures struct {
@@ -40,9 +43,12 @@ type PoolsTestFixtures struct {
type PoolsTestSuite struct {
suite.Suite
- Store dbCommon.Store
+ Store dbCommon.Store
+ ctx context.Context
+
StoreSQLMocked *sqlDatabase
Fixtures *PoolsTestFixtures
+ adminCtx context.Context
}
func (s *PoolsTestSuite) assertSQLMockExpectations() {
@@ -52,26 +58,42 @@ func (s *PoolsTestSuite) assertSQLMockExpectations() {
}
}
+func (s *PoolsTestSuite) TearDownTest() {
+ watcher.CloseWatcher()
+}
+
func (s *PoolsTestSuite) SetupTest() {
// create testing sqlite database
+ ctx := context.Background()
+ watcher.InitWatcher(ctx)
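+	// The store emits watcher notifications on create/update/delete, so each test runs with a live watcher (closed again in TearDownTest).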
+
db, err := NewSQLDatabase(context.Background(), garmTesting.GetTestSqliteDBConfig(s.T()))
if err != nil {
s.FailNow(fmt.Sprintf("failed to create db connection: %s", err))
}
s.Store = db
+ s.ctx = garmTesting.ImpersonateAdminContext(ctx, s.Store, s.T())
+
+ adminCtx := garmTesting.ImpersonateAdminContext(context.Background(), db, s.T())
+ s.adminCtx = adminCtx
+
+ githubEndpoint := garmTesting.CreateDefaultGithubEndpoint(adminCtx, db, s.T())
+ creds := garmTesting.CreateTestGithubCredentials(adminCtx, "new-creds", db, s.T(), githubEndpoint)
// create an organization for testing purposes
- org, err := s.Store.CreateOrganization(context.Background(), "test-org", "test-creds", "test-webhookSecret")
+ org, err := s.Store.CreateOrganization(s.adminCtx, "test-org", creds, "test-webhookSecret", params.PoolBalancerTypeRoundRobin)
if err != nil {
s.FailNow(fmt.Sprintf("failed to create org: %s", err))
}
+ entity, err := org.GetEntity()
+ s.Require().Nil(err)
// create some pool objects in the database, for testing purposes
orgPools := []params.Pool{}
for i := 1; i <= 3; i++ {
- pool, err := db.CreateOrganizationPool(
- context.Background(),
- org.ID,
+ pool, err := db.CreateEntityPool(
+ s.adminCtx,
+ entity,
params.CreatePoolParams{
ProviderName: "test-provider",
MaxRunners: 4,
@@ -79,7 +101,7 @@ func (s *PoolsTestSuite) SetupTest() {
Image: fmt.Sprintf("test-image-%d", i),
Flavor: "test-flavor",
OSType: "linux",
- Tags: []string{"self-hosted", "amd64", "linux"},
+ Tags: []string{"amd64-linux-runner"},
},
)
if err != nil {
@@ -99,7 +121,7 @@ func (s *PoolsTestSuite) SetupTest() {
SkipInitializeWithVersion: true,
}
gormConfig := &gorm.Config{}
- if flag.Lookup("test.v").Value.String() == "false" {
+ if flag.Lookup("test.v").Value.String() == falseString {
gormConfig.Logger = logger.Default.LogMode(logger.Silent)
}
gormConn, err := gorm.Open(mysql.New(mysqlConfig), gormConfig)
@@ -120,7 +142,7 @@ func (s *PoolsTestSuite) SetupTest() {
}
func (s *PoolsTestSuite) TestListAllPools() {
- pools, err := s.Store.ListAllPools(context.Background())
+ pools, err := s.Store.ListAllPools(s.adminCtx)
s.Require().Nil(err)
garmTesting.EqualDBEntityID(s.T(), s.Fixtures.Pools, pools)
@@ -128,49 +150,49 @@ func (s *PoolsTestSuite) TestListAllPools() {
func (s *PoolsTestSuite) TestListAllPoolsDBFetchErr() {
s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT `pools`.`id`,`pools`.`created_at`,`pools`.`updated_at`,`pools`.`deleted_at`,`pools`.`provider_name`,`pools`.`runner_prefix`,`pools`.`max_runners`,`pools`.`min_idle_runners`,`pools`.`runner_bootstrap_timeout`,`pools`.`image`,`pools`.`flavor`,`pools`.`os_type`,`pools`.`os_arch`,`pools`.`enabled`,`pools`.`git_hub_runner_group`,`pools`.`repo_id`,`pools`.`org_id`,`pools`.`enterprise_id` FROM `pools` WHERE `pools`.`deleted_at` IS NULL")).
+ ExpectQuery(regexp.QuoteMeta("SELECT `pools`.`id`,`pools`.`created_at`,`pools`.`updated_at`,`pools`.`deleted_at`,`pools`.`provider_name`,`pools`.`runner_prefix`,`pools`.`max_runners`,`pools`.`min_idle_runners`,`pools`.`runner_bootstrap_timeout`,`pools`.`image`,`pools`.`flavor`,`pools`.`os_type`,`pools`.`os_arch`,`pools`.`enabled`,`pools`.`git_hub_runner_group`,`pools`.`repo_id`,`pools`.`org_id`,`pools`.`enterprise_id`,`pools`.`priority` FROM `pools` WHERE `pools`.`deleted_at` IS NULL")).
WillReturnError(fmt.Errorf("mocked fetching all pools error"))
- _, err := s.StoreSQLMocked.ListAllPools(context.Background())
+ _, err := s.StoreSQLMocked.ListAllPools(s.adminCtx)
s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("fetching all pools: mocked fetching all pools error", err.Error())
+ s.Require().Equal("error fetching all pools: mocked fetching all pools error", err.Error())
}
func (s *PoolsTestSuite) TestGetPoolByID() {
- pool, err := s.Store.GetPoolByID(context.Background(), s.Fixtures.Pools[0].ID)
+ pool, err := s.Store.GetPoolByID(s.adminCtx, s.Fixtures.Pools[0].ID)
s.Require().Nil(err)
s.Require().Equal(s.Fixtures.Pools[0].ID, pool.ID)
}
func (s *PoolsTestSuite) TestGetPoolByIDInvalidPoolID() {
- _, err := s.Store.GetPoolByID(context.Background(), "dummy-pool-id")
+ _, err := s.Store.GetPoolByID(s.adminCtx, "dummy-pool-id")
s.Require().NotNil(err)
- s.Require().Equal("fetching pool by ID: parsing id: invalid request", err.Error())
+ s.Require().Equal("error fetching pool by ID: error parsing id: invalid request", err.Error())
}
func (s *PoolsTestSuite) TestDeletePoolByID() {
- err := s.Store.DeletePoolByID(context.Background(), s.Fixtures.Pools[0].ID)
+ err := s.Store.DeletePoolByID(s.adminCtx, s.Fixtures.Pools[0].ID)
s.Require().Nil(err)
- _, err = s.Store.GetPoolByID(context.Background(), s.Fixtures.Pools[0].ID)
- s.Require().Equal("fetching pool by ID: not found", err.Error())
+ _, err = s.Store.GetPoolByID(s.adminCtx, s.Fixtures.Pools[0].ID)
+ s.Require().Equal("error fetching pool by ID: not found", err.Error())
}
func (s *PoolsTestSuite) TestDeletePoolByIDInvalidPoolID() {
- err := s.Store.DeletePoolByID(context.Background(), "dummy-pool-id")
+ err := s.Store.DeletePoolByID(s.adminCtx, "dummy-pool-id")
s.Require().NotNil(err)
- s.Require().Equal("fetching pool by ID: parsing id: invalid request", err.Error())
+ s.Require().Equal("error fetching pool by ID: error parsing id: invalid request", err.Error())
}
func (s *PoolsTestSuite) TestDeletePoolByIDDBRemoveErr() {
s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE id = ? AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT 1 ")).
- WithArgs(s.Fixtures.Pools[0].ID).
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE id = ? AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT ?")).
+ WithArgs(s.Fixtures.Pools[0].ID, 1).
WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Pools[0].ID))
s.Fixtures.SQLMock.ExpectBegin()
s.Fixtures.SQLMock.
@@ -178,14 +200,139 @@ func (s *PoolsTestSuite) TestDeletePoolByIDDBRemoveErr() {
WillReturnError(fmt.Errorf("mocked removing pool error"))
s.Fixtures.SQLMock.ExpectRollback()
- err := s.StoreSQLMocked.DeletePoolByID(context.Background(), s.Fixtures.Pools[0].ID)
+ err := s.StoreSQLMocked.DeletePoolByID(s.adminCtx, s.Fixtures.Pools[0].ID)
s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("removing pool: mocked removing pool error", err.Error())
+ s.Require().Equal("error removing pool: mocked removing pool error", err.Error())
+}
+
+func (s *PoolsTestSuite) TestEntityPoolOperations() {
+ ep := garmTesting.CreateDefaultGithubEndpoint(s.ctx, s.Store, s.T())
+ creds := garmTesting.CreateTestGithubCredentials(s.ctx, "test-creds", s.Store, s.T(), ep)
+ s.T().Cleanup(func() { s.Store.DeleteGithubCredentials(s.ctx, creds.ID) })
+ repo, err := s.Store.CreateRepository(s.ctx, "test-owner", "test-repo", creds, "test-secret", params.PoolBalancerTypeRoundRobin)
+ s.Require().NoError(err)
+ s.Require().NotEmpty(repo.ID)
+ s.T().Cleanup(func() { s.Store.DeleteRepository(s.ctx, repo.ID) })
+
+ entity, err := repo.GetEntity()
+ s.Require().NoError(err)
+
+ createPoolParams := params.CreatePoolParams{
+ ProviderName: "test-provider",
+ Image: "test-image",
+ Flavor: "test-flavor",
+ OSType: commonParams.Linux,
+ OSArch: commonParams.Amd64,
+ Tags: []string{"test-tag"},
+ }
+
+ pool, err := s.Store.CreateEntityPool(s.ctx, entity, createPoolParams)
+ s.Require().NoError(err)
+ s.Require().NotEmpty(pool.ID)
+ s.T().Cleanup(func() { s.Store.DeleteEntityPool(s.ctx, entity, pool.ID) })
+
+ entityPool, err := s.Store.GetEntityPool(s.ctx, entity, pool.ID)
+ s.Require().NoError(err)
+ s.Require().Equal(pool.ID, entityPool.ID)
+ s.Require().Equal(pool.ProviderName, entityPool.ProviderName)
+
+ updatePoolParams := params.UpdatePoolParams{
+ Enabled: garmTesting.Ptr(true),
+ Flavor: "new-flavor",
+ Image: "new-image",
+ RunnerPrefix: params.RunnerPrefix{
+ Prefix: "new-prefix",
+ },
+ MaxRunners: garmTesting.Ptr(uint(100)),
+ MinIdleRunners: garmTesting.Ptr(uint(50)),
+ OSType: commonParams.Windows,
+ OSArch: commonParams.Amd64,
+ Tags: []string{"new-tag"},
+ RunnerBootstrapTimeout: garmTesting.Ptr(uint(10)),
+ ExtraSpecs: json.RawMessage(`{"extra": "specs"}`),
+ GitHubRunnerGroup: garmTesting.Ptr("new-group"),
+ Priority: garmTesting.Ptr(uint(1)),
+ }
+ pool, err = s.Store.UpdateEntityPool(s.ctx, entity, pool.ID, updatePoolParams)
+ s.Require().NoError(err)
+ s.Require().Equal(*updatePoolParams.Enabled, pool.Enabled)
+ s.Require().Equal(updatePoolParams.Flavor, pool.Flavor)
+ s.Require().Equal(updatePoolParams.Image, pool.Image)
+ s.Require().Equal(updatePoolParams.RunnerPrefix.Prefix, pool.RunnerPrefix.Prefix)
+ s.Require().Equal(*updatePoolParams.MaxRunners, pool.MaxRunners)
+ s.Require().Equal(*updatePoolParams.MinIdleRunners, pool.MinIdleRunners)
+ s.Require().Equal(updatePoolParams.OSType, pool.OSType)
+ s.Require().Equal(updatePoolParams.OSArch, pool.OSArch)
+ s.Require().Equal(*updatePoolParams.RunnerBootstrapTimeout, pool.RunnerBootstrapTimeout)
+ s.Require().Equal(updatePoolParams.ExtraSpecs, pool.ExtraSpecs)
+ s.Require().Equal(*updatePoolParams.GitHubRunnerGroup, pool.GitHubRunnerGroup)
+ s.Require().Equal(*updatePoolParams.Priority, pool.Priority)
+
+ entityPools, err := s.Store.ListEntityPools(s.ctx, entity)
+ s.Require().NoError(err)
+ s.Require().Len(entityPools, 1)
+ s.Require().Equal(pool.ID, entityPools[0].ID)
+
+ tagsToMatch := []string{"new-tag"}
+ pools, err := s.Store.FindPoolsMatchingAllTags(s.ctx, entity.EntityType, entity.ID, tagsToMatch)
+ s.Require().NoError(err)
+ s.Require().Len(pools, 1)
+ s.Require().Equal(pool.ID, pools[0].ID)
+
+ invalidTagsToMatch := []string{"invalid-tag"}
+ pools, err = s.Store.FindPoolsMatchingAllTags(s.ctx, entity.EntityType, entity.ID, invalidTagsToMatch)
+ s.Require().NoError(err)
+ s.Require().Len(pools, 0)
+}
+
+func (s *PoolsTestSuite) TestListEntityInstances() {
+ ep := garmTesting.CreateDefaultGithubEndpoint(s.ctx, s.Store, s.T())
+ creds := garmTesting.CreateTestGithubCredentials(s.ctx, "test-creds", s.Store, s.T(), ep)
+ s.T().Cleanup(func() { s.Store.DeleteGithubCredentials(s.ctx, creds.ID) })
+ repo, err := s.Store.CreateRepository(s.ctx, "test-owner", "test-repo", creds, "test-secret", params.PoolBalancerTypeRoundRobin)
+ s.Require().NoError(err)
+ s.Require().NotEmpty(repo.ID)
+ s.T().Cleanup(func() { s.Store.DeleteRepository(s.ctx, repo.ID) })
+
+ entity, err := repo.GetEntity()
+ s.Require().NoError(err)
+
+ createPoolParams := params.CreatePoolParams{
+ ProviderName: "test-provider",
+ Image: "test-image",
+ Flavor: "test-flavor",
+ OSType: commonParams.Linux,
+ OSArch: commonParams.Amd64,
+ Tags: []string{"test-tag"},
+ }
+
+ pool, err := s.Store.CreateEntityPool(s.ctx, entity, createPoolParams)
+ s.Require().NoError(err)
+ s.Require().NotEmpty(pool.ID)
+ s.T().Cleanup(func() { s.Store.DeleteEntityPool(s.ctx, entity, pool.ID) })
+
+ createInstanceParams := params.CreateInstanceParams{
+ Name: "test-instance",
+ OSType: commonParams.Linux,
+ OSArch: commonParams.Amd64,
+ Status: commonParams.InstanceCreating,
+ }
+ instance, err := s.Store.CreateInstance(s.ctx, pool.ID, createInstanceParams)
+ s.Require().NoError(err)
+ s.Require().NotEmpty(instance.ID)
+
+ s.T().Cleanup(func() { s.Store.DeleteInstance(s.ctx, pool.ID, instance.ID) })
+
+ instances, err := s.Store.ListEntityInstances(s.ctx, entity)
+ s.Require().NoError(err)
+ s.Require().Len(instances, 1)
+ s.Require().Equal(instance.ID, instances[0].ID)
+ s.Require().Equal(instance.Name, instances[0].Name)
+ s.Require().Equal(instance.ProviderName, pool.ProviderName)
}
func TestPoolsTestSuite(t *testing.T) {
- t.Parallel()
suite.Run(t, new(PoolsTestSuite))
}
diff --git a/database/sql/repositories.go b/database/sql/repositories.go
index 007a2f6e..72b535e8 100644
--- a/database/sql/repositories.go
+++ b/database/sql/repositories.go
@@ -16,271 +16,241 @@ package sql
import (
"context"
+ "errors"
"fmt"
+ "log/slog"
+
+ "github.com/google/uuid"
+ "gorm.io/gorm"
runnerErrors "github.com/cloudbase/garm-provider-common/errors"
"github.com/cloudbase/garm-provider-common/util"
+ "github.com/cloudbase/garm/database/common"
"github.com/cloudbase/garm/params"
-
- "github.com/google/uuid"
- "github.com/pkg/errors"
- "gorm.io/datatypes"
- "gorm.io/gorm"
)
-func (s *sqlDatabase) CreateRepository(ctx context.Context, owner, name, credentialsName, webhookSecret string) (params.Repository, error) {
+func (s *sqlDatabase) CreateRepository(ctx context.Context, owner, name string, credentials params.ForgeCredentials, webhookSecret string, poolBalancerType params.PoolBalancerType) (param params.Repository, err error) {
+ defer func() {
+ if err == nil {
+ s.sendNotify(common.RepositoryEntityType, common.CreateOperation, param)
+ }
+ }()
+
if webhookSecret == "" {
return params.Repository{}, errors.New("creating repo: missing secret")
}
- secret, err := util.Aes256EncodeString(webhookSecret, s.cfg.Passphrase)
+ secret, err := util.Seal([]byte(webhookSecret), []byte(s.cfg.Passphrase))
if err != nil {
return params.Repository{}, fmt.Errorf("failed to encrypt string")
}
+
newRepo := Repository{
- Name: name,
- Owner: owner,
- WebhookSecret: secret,
- CredentialsName: credentialsName,
+ Name: name,
+ Owner: owner,
+ WebhookSecret: secret,
+ PoolBalancerType: poolBalancerType,
}
+ err = s.conn.Transaction(func(tx *gorm.DB) error {
+ switch credentials.ForgeType {
+ case params.GithubEndpointType:
+ newRepo.CredentialsID = &credentials.ID
+ case params.GiteaEndpointType:
+ newRepo.GiteaCredentialsID = &credentials.ID
+ default:
+ return runnerErrors.NewBadRequestError("unsupported credentials type")
+ }
- q := s.conn.Create(&newRepo)
- if q.Error != nil {
- return params.Repository{}, errors.Wrap(q.Error, "creating repository")
- }
-
- param, err := s.sqlToCommonRepository(newRepo)
+ newRepo.EndpointName = &credentials.Endpoint.Name
+ q := tx.Create(&newRepo)
+ if q.Error != nil {
+ return fmt.Errorf("error creating repository: %w", q.Error)
+ }
+ return nil
+ })
if err != nil {
- return params.Repository{}, errors.Wrap(err, "creating repository")
+ return params.Repository{}, fmt.Errorf("error creating repository: %w", err)
+ }
+
+ ret, err := s.GetRepositoryByID(ctx, newRepo.ID.String())
+ if err != nil {
+ return params.Repository{}, fmt.Errorf("error creating repository: %w", err)
+ }
+
+ return ret, nil
+}
+
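+// GetRepository returns the repository matching owner, name and endpoint name,
+// all matched case insensitively.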
+func (s *sqlDatabase) GetRepository(ctx context.Context, owner, name, endpointName string) (params.Repository, error) {
+ repo, err := s.getRepo(ctx, owner, name, endpointName)
+ if err != nil {
+ return params.Repository{}, fmt.Errorf("error fetching repo: %w", err)
+ }
+
+ param, err := s.sqlToCommonRepository(repo, true)
+ if err != nil {
+ return params.Repository{}, fmt.Errorf("error fetching repo: %w", err)
}
return param, nil
}
-func (s *sqlDatabase) GetRepository(ctx context.Context, owner, name string) (params.Repository, error) {
- repo, err := s.getRepo(ctx, owner, name)
- if err != nil {
- return params.Repository{}, errors.Wrap(err, "fetching repo")
- }
-
- param, err := s.sqlToCommonRepository(repo)
- if err != nil {
- return params.Repository{}, errors.Wrap(err, "fetching repo")
- }
-
- return param, nil
-}
-
-func (s *sqlDatabase) ListRepositories(ctx context.Context) ([]params.Repository, error) {
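+// ListRepositories returns all repositories matching the optional owner, name
+// and endpoint filters. An empty filter returns everything.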
+func (s *sqlDatabase) ListRepositories(_ context.Context, filter params.RepositoryFilter) ([]params.Repository, error) {
var repos []Repository
- q := s.conn.Find(&repos)
+ q := s.conn.
+ Preload("Credentials").
+ Preload("GiteaCredentials").
+ Preload("Credentials.Endpoint").
+ Preload("GiteaCredentials.Endpoint").
+ Preload("Endpoint")
+ if filter.Owner != "" {
+ q = q.Where("owner = ?", filter.Owner)
+ }
+ if filter.Name != "" {
+ q = q.Where("name = ?", filter.Name)
+ }
+ if filter.Endpoint != "" {
+ q = q.Where("endpoint_name = ?", filter.Endpoint)
+ }
+ q = q.Find(&repos)
if q.Error != nil {
- return []params.Repository{}, errors.Wrap(q.Error, "fetching user from database")
+ return []params.Repository{}, fmt.Errorf("error fetching user from database: %w", q.Error)
}
ret := make([]params.Repository, len(repos))
for idx, val := range repos {
var err error
- ret[idx], err = s.sqlToCommonRepository(val)
+ ret[idx], err = s.sqlToCommonRepository(val, true)
if err != nil {
- return nil, errors.Wrap(err, "fetching repositories")
+ return nil, fmt.Errorf("error fetching repositories: %w", err)
}
}
return ret, nil
}
-func (s *sqlDatabase) DeleteRepository(ctx context.Context, repoID string) error {
- repo, err := s.getRepoByID(ctx, repoID)
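+// DeleteRepository removes a repository and notifies watchers once the delete
+// has gone through.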
+func (s *sqlDatabase) DeleteRepository(ctx context.Context, repoID string) (err error) {
+ repo, err := s.getRepoByID(ctx, s.conn, repoID, "Endpoint", "Credentials", "Credentials.Endpoint", "GiteaCredentials", "GiteaCredentials.Endpoint")
if err != nil {
- return errors.Wrap(err, "fetching repo")
+ return fmt.Errorf("error fetching repo: %w", err)
}
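+ // Capture repo by value so that, on success, the delete notification carries the record fetched above.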
+ defer func(repo Repository) {
+ if err == nil {
+ asParam, innerErr := s.sqlToCommonRepository(repo, true)
+ if innerErr == nil {
+ s.sendNotify(common.RepositoryEntityType, common.DeleteOperation, asParam)
+ } else {
+ slog.With(slog.Any("error", innerErr)).ErrorContext(ctx, "error sending delete notification", "repo", repoID)
+ }
+ }
+ }(repo)
+
q := s.conn.Unscoped().Delete(&repo)
if q.Error != nil && !errors.Is(q.Error, gorm.ErrRecordNotFound) {
- return errors.Wrap(q.Error, "deleting repo")
+ return fmt.Errorf("error deleting repo: %w", q.Error)
}
return nil
}
-func (s *sqlDatabase) UpdateRepository(ctx context.Context, repoID string, param params.UpdateEntityParams) (params.Repository, error) {
- repo, err := s.getRepoByID(ctx, repoID)
- if err != nil {
- return params.Repository{}, errors.Wrap(err, "fetching repo")
- }
-
- if param.CredentialsName != "" {
- repo.CredentialsName = param.CredentialsName
- }
-
- if param.WebhookSecret != "" {
- secret, err := util.Aes256EncodeString(param.WebhookSecret, s.cfg.Passphrase)
- if err != nil {
- return params.Repository{}, fmt.Errorf("saving repo: failed to encrypt string: %w", err)
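+// UpdateRepository updates a repository's credentials, webhook secret and pool
+// balancer type inside a single transaction, then notifies watchers.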
+func (s *sqlDatabase) UpdateRepository(ctx context.Context, repoID string, param params.UpdateEntityParams) (newParams params.Repository, err error) {
+ defer func() {
+ if err == nil {
+ s.sendNotify(common.RepositoryEntityType, common.UpdateOperation, newParams)
+ }
+ }()
+ var repo Repository
+ var creds GithubCredentials
+ err = s.conn.Transaction(func(tx *gorm.DB) error {
+ var err error
+ repo, err = s.getRepoByID(ctx, tx, repoID)
+ if err != nil {
+ return fmt.Errorf("error fetching repo: %w", err)
+ }
+ if repo.EndpointName == nil {
+ return runnerErrors.NewUnprocessableError("repository has no endpoint")
}
- repo.WebhookSecret = secret
- }
- q := s.conn.Save(&repo)
- if q.Error != nil {
- return params.Repository{}, errors.Wrap(q.Error, "saving repo")
- }
+ if param.CredentialsName != "" {
+ creds, err = s.getGithubCredentialsByName(ctx, tx, param.CredentialsName, false)
+ if err != nil {
+ return fmt.Errorf("error fetching credentials: %w", err)
+ }
+ if creds.EndpointName == nil {
+ return runnerErrors.NewUnprocessableError("credentials have no endpoint")
+ }
- newParams, err := s.sqlToCommonRepository(repo)
+ if *creds.EndpointName != *repo.EndpointName {
+ return runnerErrors.NewBadRequestError("endpoint mismatch")
+ }
+ repo.CredentialsID = &creds.ID
+ }
+
+ if param.WebhookSecret != "" {
+ secret, err := util.Seal([]byte(param.WebhookSecret), []byte(s.cfg.Passphrase))
+ if err != nil {
+ return fmt.Errorf("saving repo: failed to encrypt string: %w", err)
+ }
+ repo.WebhookSecret = secret
+ }
+
+ if param.PoolBalancerType != "" {
+ repo.PoolBalancerType = param.PoolBalancerType
+ }
+
+ q := tx.Save(&repo)
+ if q.Error != nil {
+ return fmt.Errorf("error saving repo: %w", q.Error)
+ }
+
+ return nil
+ })
if err != nil {
- return params.Repository{}, errors.Wrap(err, "saving repo")
+ return params.Repository{}, fmt.Errorf("error saving repo: %w", err)
+ }
+
+ repo, err = s.getRepoByID(ctx, s.conn, repoID, "Endpoint", "Credentials", "Credentials.Endpoint", "GiteaCredentials", "GiteaCredentials.Endpoint")
+ if err != nil {
+ return params.Repository{}, fmt.Errorf("error updating repository: %w", err)
+ }
+
+ newParams, err = s.sqlToCommonRepository(repo, true)
+ if err != nil {
+ return params.Repository{}, fmt.Errorf("error saving repo: %w", err)
}
return newParams, nil
}
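+// GetRepositoryByID returns a repository by its ID, with pools, credentials,
+// endpoint and event data preloaded.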
func (s *sqlDatabase) GetRepositoryByID(ctx context.Context, repoID string) (params.Repository, error) {
- repo, err := s.getRepoByID(ctx, repoID, "Pools")
+ preloadList := []string{
+ "Pools",
+ "Credentials",
+ "Endpoint",
+ "Credentials.Endpoint",
+ "GiteaCredentials",
+ "GiteaCredentials.Endpoint",
+ "Events",
+ }
+ repo, err := s.getRepoByID(ctx, s.conn, repoID, preloadList...)
if err != nil {
- return params.Repository{}, errors.Wrap(err, "fetching repo")
+ return params.Repository{}, fmt.Errorf("error fetching repo: %w", err)
}
- param, err := s.sqlToCommonRepository(repo)
+ param, err := s.sqlToCommonRepository(repo, true)
if err != nil {
- return params.Repository{}, errors.Wrap(err, "fetching repo")
+ return params.Repository{}, fmt.Errorf("error fetching repo: %w", err)
}
return param, nil
}
-func (s *sqlDatabase) CreateRepositoryPool(ctx context.Context, repoId string, param params.CreatePoolParams) (params.Pool, error) {
- if len(param.Tags) == 0 {
- return params.Pool{}, runnerErrors.NewBadRequestError("no tags specified")
- }
-
- repo, err := s.getRepoByID(ctx, repoId)
- if err != nil {
- return params.Pool{}, errors.Wrap(err, "fetching repo")
- }
-
- newPool := Pool{
- ProviderName: param.ProviderName,
- MaxRunners: param.MaxRunners,
- MinIdleRunners: param.MinIdleRunners,
- RunnerPrefix: param.GetRunnerPrefix(),
- Image: param.Image,
- Flavor: param.Flavor,
- OSType: param.OSType,
- OSArch: param.OSArch,
- RepoID: &repo.ID,
- Enabled: param.Enabled,
- RunnerBootstrapTimeout: param.RunnerBootstrapTimeout,
- }
-
- if len(param.ExtraSpecs) > 0 {
- newPool.ExtraSpecs = datatypes.JSON(param.ExtraSpecs)
- }
-
- _, err = s.getRepoPoolByUniqueFields(ctx, repoId, newPool.ProviderName, newPool.Image, newPool.Flavor)
- if err != nil {
- if !errors.Is(err, runnerErrors.ErrNotFound) {
- return params.Pool{}, errors.Wrap(err, "creating pool")
- }
- } else {
- return params.Pool{}, runnerErrors.NewConflictError("pool with the same image and flavor already exists on this provider")
- }
-
- tags := []Tag{}
- for _, val := range param.Tags {
- t, err := s.getOrCreateTag(val)
- if err != nil {
- return params.Pool{}, errors.Wrap(err, "fetching tag")
- }
- tags = append(tags, t)
- }
-
- q := s.conn.Create(&newPool)
- if q.Error != nil {
- return params.Pool{}, errors.Wrap(q.Error, "adding pool")
- }
-
- for _, tt := range tags {
- if err := s.conn.Model(&newPool).Association("Tags").Append(&tt); err != nil {
- return params.Pool{}, errors.Wrap(err, "saving tag")
- }
- }
-
- pool, err := s.getPoolByID(ctx, newPool.ID.String(), "Tags", "Instances", "Enterprise", "Organization", "Repository")
- if err != nil {
- return params.Pool{}, errors.Wrap(err, "fetching pool")
- }
-
- return s.sqlToCommonPool(pool), nil
-}
-
-func (s *sqlDatabase) ListRepoPools(ctx context.Context, repoID string) ([]params.Pool, error) {
- pools, err := s.listEntityPools(ctx, params.RepositoryPool, repoID, "Tags", "Instances")
- if err != nil {
- return nil, errors.Wrap(err, "fetching pools")
- }
-
- ret := make([]params.Pool, len(pools))
- for idx, pool := range pools {
- ret[idx] = s.sqlToCommonPool(pool)
- }
-
- return ret, nil
-}
-
-func (s *sqlDatabase) GetRepositoryPool(ctx context.Context, repoID, poolID string) (params.Pool, error) {
- pool, err := s.getEntityPool(ctx, params.RepositoryPool, repoID, poolID, "Tags", "Instances")
- if err != nil {
- return params.Pool{}, errors.Wrap(err, "fetching pool")
- }
- return s.sqlToCommonPool(pool), nil
-}
-
-func (s *sqlDatabase) DeleteRepositoryPool(ctx context.Context, repoID, poolID string) error {
- pool, err := s.getEntityPool(ctx, params.RepositoryPool, repoID, poolID)
- if err != nil {
- return errors.Wrap(err, "looking up repo pool")
- }
- q := s.conn.Unscoped().Delete(&pool)
- if q.Error != nil && !errors.Is(q.Error, gorm.ErrRecordNotFound) {
- return errors.Wrap(q.Error, "deleting pool")
- }
- return nil
-}
-
-func (s *sqlDatabase) FindRepositoryPoolByTags(ctx context.Context, repoID string, tags []string) (params.Pool, error) {
- pool, err := s.findPoolByTags(repoID, params.RepositoryPool, tags)
- if err != nil {
- return params.Pool{}, errors.Wrap(err, "fetching pool")
- }
- return pool[0], nil
-}
-
-func (s *sqlDatabase) ListRepoInstances(ctx context.Context, repoID string) ([]params.Instance, error) {
- pools, err := s.listEntityPools(ctx, params.RepositoryPool, repoID, "Tags", "Instances")
- if err != nil {
- return nil, errors.Wrap(err, "fetching repo")
- }
-
- ret := []params.Instance{}
- for _, pool := range pools {
- for _, instance := range pool.Instances {
- ret = append(ret, s.sqlToParamsInstance(instance))
- }
- }
- return ret, nil
-}
-
-func (s *sqlDatabase) UpdateRepositoryPool(ctx context.Context, repoID, poolID string, param params.UpdatePoolParams) (params.Pool, error) {
- pool, err := s.getEntityPool(ctx, params.RepositoryPool, repoID, poolID, "Tags", "Instances", "Enterprise", "Organization", "Repository")
- if err != nil {
- return params.Pool{}, errors.Wrap(err, "fetching pool")
- }
-
- return s.updatePool(pool, param)
-}
-
-func (s *sqlDatabase) getRepo(ctx context.Context, owner, name string) (Repository, error) {
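+// getRepo fetches the repository model matching owner, name and endpoint name
+// (case insensitive), preloading its credentials and endpoint relations.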
+func (s *sqlDatabase) getRepo(_ context.Context, owner, name, endpointName string) (Repository, error) {
var repo Repository
- q := s.conn.Where("name = ? COLLATE NOCASE and owner = ? COLLATE NOCASE", name, owner).
+ q := s.conn.Where("name = ? COLLATE NOCASE and owner = ? COLLATE NOCASE and endpoint_name = ? COLLATE NOCASE", name, owner, endpointName).
+ Preload("Credentials").
+ Preload("Credentials.Endpoint").
+ Preload("GiteaCredentials").
+ Preload("GiteaCredentials.Endpoint").
+ Preload("Endpoint").
First(&repo)
@@ -289,38 +259,19 @@ func (s *sqlDatabase) getRepo(ctx context.Context, owner, name string) (Reposito
if errors.Is(q.Error, gorm.ErrRecordNotFound) {
return Repository{}, runnerErrors.ErrNotFound
}
- return Repository{}, errors.Wrap(q.Error, "fetching repository from database")
+ return Repository{}, fmt.Errorf("error fetching repository from database: %w", q.Error)
}
return repo, nil
}
-func (s *sqlDatabase) getRepoPoolByUniqueFields(ctx context.Context, repoID string, provider, image, flavor string) (Pool, error) {
- repo, err := s.getRepoByID(ctx, repoID)
- if err != nil {
- return Pool{}, errors.Wrap(err, "fetching repo")
- }
-
- q := s.conn
- var pool []Pool
- err = q.Model(&repo).Association("Pools").Find(&pool, "provider_name = ? and image = ? and flavor = ?", provider, image, flavor)
- if err != nil {
- return Pool{}, errors.Wrap(err, "fetching pool")
- }
- if len(pool) == 0 {
- return Pool{}, runnerErrors.ErrNotFound
- }
-
- return pool[0], nil
-}
-
-func (s *sqlDatabase) getRepoByID(ctx context.Context, id string, preload ...string) (Repository, error) {
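+// getRepoByID looks up a repository by ID using the given transaction handle,
+// applying any requested preloads.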
+func (s *sqlDatabase) getRepoByID(_ context.Context, tx *gorm.DB, id string, preload ...string) (Repository, error) {
u, err := uuid.Parse(id)
if err != nil {
- return Repository{}, errors.Wrap(runnerErrors.ErrBadRequest, "parsing id")
+ return Repository{}, runnerErrors.NewBadRequestError("error parsing id: %s", err)
}
var repo Repository
- q := s.conn
+ q := tx
if len(preload) > 0 {
for _, field := range preload {
q = q.Preload(field)
@@ -332,7 +283,7 @@ func (s *sqlDatabase) getRepoByID(ctx context.Context, id string, preload ...str
if errors.Is(q.Error, gorm.ErrRecordNotFound) {
return Repository{}, runnerErrors.ErrNotFound
}
- return Repository{}, errors.Wrap(q.Error, "fetching repository from database")
+ return Repository{}, fmt.Errorf("error fetching repository from database: %w", q.Error)
}
return repo, nil
}
diff --git a/database/sql/repositories_test.go b/database/sql/repositories_test.go
index d87cc37b..b3c15eca 100644
--- a/database/sql/repositories_test.go
+++ b/database/sql/repositories_test.go
@@ -22,15 +22,17 @@ import (
"sort"
"testing"
- dbCommon "github.com/cloudbase/garm/database/common"
- garmTesting "github.com/cloudbase/garm/internal/testing"
- "github.com/cloudbase/garm/params"
-
"github.com/stretchr/testify/suite"
"gopkg.in/DATA-DOG/go-sqlmock.v1"
"gorm.io/driver/mysql"
"gorm.io/gorm"
"gorm.io/gorm/logger"
+
+ "github.com/cloudbase/garm/auth"
+ dbCommon "github.com/cloudbase/garm/database/common"
+ "github.com/cloudbase/garm/database/watcher"
+ garmTesting "github.com/cloudbase/garm/internal/testing"
+ "github.com/cloudbase/garm/params"
)
type RepoTestFixtures struct {
@@ -43,11 +45,24 @@ type RepoTestFixtures struct {
SQLMock sqlmock.Sqlmock
}
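+// Replace the package-level watcher with a mock before any test database is set up.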
+func init() {
+ watcher.SetWatcher(&garmTesting.MockWatcher{})
+}
+
type RepoTestSuite struct {
suite.Suite
Store dbCommon.Store
StoreSQLMocked *sqlDatabase
Fixtures *RepoTestFixtures
+
+ adminCtx context.Context
+ adminUserID string
+
+ testCreds params.ForgeCredentials
+ testCredsGitea params.ForgeCredentials
+ secondaryTestCreds params.ForgeCredentials
+ githubEndpoint params.ForgeEndpoint
+ giteaEndpoint params.ForgeEndpoint
}
func (s *RepoTestSuite) equalReposByName(expected, actual []params.Repository) {
@@ -81,21 +96,36 @@ func (s *RepoTestSuite) assertSQLMockExpectations() {
func (s *RepoTestSuite) SetupTest() {
// create testing sqlite database
+ ctx := context.Background()
+ watcher.InitWatcher(ctx)
+
db, err := NewSQLDatabase(context.Background(), garmTesting.GetTestSqliteDBConfig(s.T()))
if err != nil {
s.FailNow(fmt.Sprintf("failed to create db connection: %s", err))
}
s.Store = db
+ adminCtx := garmTesting.ImpersonateAdminContext(context.Background(), db, s.T())
+ s.adminCtx = adminCtx
+ s.adminUserID = auth.UserID(adminCtx)
+ s.Require().NotEmpty(s.adminUserID)
+
+ s.githubEndpoint = garmTesting.CreateDefaultGithubEndpoint(adminCtx, db, s.T())
+ s.giteaEndpoint = garmTesting.CreateDefaultGiteaEndpoint(adminCtx, db, s.T())
+ s.testCreds = garmTesting.CreateTestGithubCredentials(adminCtx, "new-creds", db, s.T(), s.githubEndpoint)
+ s.testCredsGitea = garmTesting.CreateTestGiteaCredentials(adminCtx, "new-creds", db, s.T(), s.giteaEndpoint)
+ s.secondaryTestCreds = garmTesting.CreateTestGithubCredentials(adminCtx, "secondary-creds", db, s.T(), s.githubEndpoint)
+
// create some repository objects in the database, for testing purposes
repos := []params.Repository{}
for i := 1; i <= 3; i++ {
repo, err := db.CreateRepository(
- context.Background(),
+ adminCtx,
fmt.Sprintf("test-owner-%d", i),
fmt.Sprintf("test-repo-%d", i),
- fmt.Sprintf("test-creds-%d", i),
+ s.testCreds,
fmt.Sprintf("test-webhook-secret-%d", i),
+ params.PoolBalancerTypeRoundRobin,
)
if err != nil {
s.FailNow(fmt.Sprintf("failed to create database object (test-repo-%d): %v", i, err))
@@ -115,7 +145,7 @@ func (s *RepoTestSuite) SetupTest() {
SkipInitializeWithVersion: true,
}
gormConfig := &gorm.Config{}
- if flag.Lookup("test.v").Value.String() == "false" {
+ if flag.Lookup("test.v").Value.String() == falseString {
gormConfig.Logger = logger.Default.LogMode(logger.Silent)
}
gormConn, err := gorm.Open(mysql.New(mysqlConfig), gormConfig)
@@ -135,7 +165,7 @@ func (s *RepoTestSuite) SetupTest() {
CreateRepoParams: params.CreateRepoParams{
Owner: "test-owner-repo",
Name: "test-repo",
- CredentialsName: "test-creds-repo",
+ CredentialsName: s.testCreds.Name,
WebhookSecret: "test-webhook-secret",
},
CreatePoolParams: params.CreatePoolParams{
@@ -147,14 +177,14 @@ func (s *RepoTestSuite) SetupTest() {
Flavor: "test-flavor",
OSType: "windows",
OSArch: "amd64",
- Tags: []string{"self-hosted", "arm64", "windows"},
+ Tags: []string{"arm64-windows-runner"},
},
CreateInstanceParams: params.CreateInstanceParams{
Name: "test-instance",
OSType: "linux",
},
UpdateRepoParams: params.UpdateEntityParams{
- CredentialsName: "test-update-creds",
+ CredentialsName: s.secondaryTestCreds.Name,
WebhookSecret: "test-update-webhook-secret",
},
UpdatePoolParams: params.UpdatePoolParams{
@@ -168,26 +198,93 @@ func (s *RepoTestSuite) SetupTest() {
s.Fixtures = fixtures
}
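+// TearDownTest closes the watcher opened in SetupTest.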
+func (s *RepoTestSuite) TearDownTest() {
+ watcher.CloseWatcher()
+}
+
func (s *RepoTestSuite) TestCreateRepository() {
// call tested function
repo, err := s.Store.CreateRepository(
- context.Background(),
+ s.adminCtx,
s.Fixtures.CreateRepoParams.Owner,
s.Fixtures.CreateRepoParams.Name,
- s.Fixtures.CreateRepoParams.CredentialsName,
+ s.testCreds,
s.Fixtures.CreateRepoParams.WebhookSecret,
+ params.PoolBalancerTypeRoundRobin,
)
// assertions
s.Require().Nil(err)
- storeRepo, err := s.Store.GetRepositoryByID(context.Background(), repo.ID)
+ storeRepo, err := s.Store.GetRepositoryByID(s.adminCtx, repo.ID)
if err != nil {
s.FailNow(fmt.Sprintf("failed to get repository by id: %v", err))
}
s.Require().Equal(storeRepo.Owner, repo.Owner)
s.Require().Equal(storeRepo.Name, repo.Name)
- s.Require().Equal(storeRepo.CredentialsName, repo.CredentialsName)
+ s.Require().Equal(storeRepo.Credentials.Name, repo.Credentials.Name)
s.Require().Equal(storeRepo.WebhookSecret, repo.WebhookSecret)
+
+ entity, err := repo.GetEntity()
+ s.Require().Nil(err)
+ s.Require().Equal(s.Fixtures.CreateRepoParams.Owner, entity.Owner)
+ s.Require().Equal(entity.EntityType, params.ForgeEntityTypeRepository)
+
+ forgeType, err := entity.GetForgeType()
+ s.Require().Nil(err)
+ s.Require().Equal(forgeType, params.GithubEndpointType)
+}
+
+func (s *RepoTestSuite) TestCreateRepositoryGitea() {
+ // call tested function
+ repo, err := s.Store.CreateRepository(
+ s.adminCtx,
+ s.Fixtures.CreateRepoParams.Owner,
+ s.Fixtures.CreateRepoParams.Name,
+ s.testCredsGitea,
+ s.Fixtures.CreateRepoParams.WebhookSecret,
+ params.PoolBalancerTypeRoundRobin,
+ )
+
+ // assertions
+ s.Require().Nil(err)
+ storeRepo, err := s.Store.GetRepositoryByID(s.adminCtx, repo.ID)
+ if err != nil {
+ s.FailNow(fmt.Sprintf("failed to get repository by id: %v", err))
+ }
+ s.Require().Equal(storeRepo.Owner, repo.Owner)
+ s.Require().Equal(storeRepo.Name, repo.Name)
+ s.Require().Equal(storeRepo.Credentials.Name, repo.Credentials.Name)
+ s.Require().Equal(storeRepo.WebhookSecret, repo.WebhookSecret)
+
+ entity, err := repo.GetEntity()
+ s.Require().Nil(err)
+ s.Require().Equal(repo.ID, entity.ID)
+ s.Require().Equal(entity.EntityType, params.ForgeEntityTypeRepository)
+
+ forgeType, err := entity.GetForgeType()
+ s.Require().Nil(err)
+ s.Require().Equal(forgeType, params.GiteaEndpointType)
+}
+
+func (s *RepoTestSuite) TestCreateRepositoryInvalidForgeType() {
+ // call tested function
+ _, err := s.Store.CreateRepository(
+ s.adminCtx,
+ s.Fixtures.CreateRepoParams.Owner,
+ s.Fixtures.CreateRepoParams.Name,
+ params.ForgeCredentials{
+ Name: "test-creds",
+ ForgeType: "invalid-forge-type",
+ Endpoint: params.ForgeEndpoint{
+ Name: "test-endpoint",
+ },
+ },
+ s.Fixtures.CreateRepoParams.WebhookSecret,
+ params.PoolBalancerTypeRoundRobin,
+ )
+
+ s.Require().NotNil(err)
+ s.Require().Equal("error creating repository: unsupported credentials type", err.Error())
}
func (s *RepoTestSuite) TestCreateRepositoryInvalidDBPassphrase() {
@@ -197,18 +294,19 @@ func (s *RepoTestSuite) TestCreateRepositoryInvalidDBPassphrase() {
s.FailNow(fmt.Sprintf("failed to create db connection: %s", err))
}
// make sure we use a 'sqlDatabase' struct with a wrong 'cfg.Passphrase'
- cfg.Passphrase = "wrong-passphrase" // it must have a size different than 32
+ cfg.Passphrase = wrongPassphrase // it must have a size different than 32
sqlDB := &sqlDatabase{
conn: conn,
cfg: cfg,
}
_, err = sqlDB.CreateRepository(
- context.Background(),
+ s.adminCtx,
s.Fixtures.CreateRepoParams.Owner,
s.Fixtures.CreateRepoParams.Name,
- s.Fixtures.CreateRepoParams.CredentialsName,
+ s.testCreds,
s.Fixtures.CreateRepoParams.WebhookSecret,
+ params.PoolBalancerTypeRoundRobin,
)
s.Require().NotNil(err)
@@ -223,20 +321,21 @@ func (s *RepoTestSuite) TestCreateRepositoryInvalidDBCreateErr() {
s.Fixtures.SQLMock.ExpectRollback()
_, err := s.StoreSQLMocked.CreateRepository(
- context.Background(),
+ s.adminCtx,
s.Fixtures.CreateRepoParams.Owner,
s.Fixtures.CreateRepoParams.Name,
- s.Fixtures.CreateRepoParams.CredentialsName,
+ s.testCreds,
s.Fixtures.CreateRepoParams.WebhookSecret,
+ params.PoolBalancerTypeRoundRobin,
)
- s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("creating repository: creating repo mock error", err.Error())
+ s.Require().Equal("error creating repository: error creating repository: creating repo mock error", err.Error())
+ s.assertSQLMockExpectations()
}
func (s *RepoTestSuite) TestGetRepository() {
- repo, err := s.Store.GetRepository(context.Background(), s.Fixtures.Repos[0].Owner, s.Fixtures.Repos[0].Name)
+ repo, err := s.Store.GetRepository(s.adminCtx, s.Fixtures.Repos[0].Owner, s.Fixtures.Repos[0].Name, s.Fixtures.Repos[0].Endpoint.Name)
s.Require().Nil(err)
s.Require().Equal(s.Fixtures.Repos[0].Owner, repo.Owner)
@@ -245,7 +344,7 @@ func (s *RepoTestSuite) TestGetRepository() {
}
func (s *RepoTestSuite) TestGetRepositoryCaseInsensitive() {
- repo, err := s.Store.GetRepository(context.Background(), "TeSt-oWnEr-1", "TeSt-rEpO-1")
+ repo, err := s.Store.GetRepository(s.adminCtx, "TeSt-oWnEr-1", "TeSt-rEpO-1", "github.com")
s.Require().Nil(err)
s.Require().Equal("test-owner-1", repo.Owner)
@@ -253,82 +352,163 @@ func (s *RepoTestSuite) TestGetRepositoryCaseInsensitive() {
}
func (s *RepoTestSuite) TestGetRepositoryNotFound() {
- _, err := s.Store.GetRepository(context.Background(), "dummy-owner", "dummy-name")
+ _, err := s.Store.GetRepository(s.adminCtx, "dummy-owner", "dummy-name", "github.com")
s.Require().NotNil(err)
- s.Require().Equal("fetching repo: not found", err.Error())
+ s.Require().Equal("error fetching repo: not found", err.Error())
}
func (s *RepoTestSuite) TestGetRepositoryDBDecryptingErr() {
s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE (name = ? COLLATE NOCASE and owner = ? COLLATE NOCASE) AND `repositories`.`deleted_at` IS NULL ORDER BY `repositories`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Repos[0].Name, s.Fixtures.Repos[0].Owner).
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE (name = ? COLLATE NOCASE and owner = ? COLLATE NOCASE and endpoint_name = ? COLLATE NOCASE) AND `repositories`.`deleted_at` IS NULL ORDER BY `repositories`.`id` LIMIT ?")).
+ WithArgs(s.Fixtures.Repos[0].Name, s.Fixtures.Repos[0].Owner, s.Fixtures.Repos[0].Endpoint.Name, 1).
WillReturnRows(sqlmock.NewRows([]string{"name", "owner"}).AddRow(s.Fixtures.Repos[0].Name, s.Fixtures.Repos[0].Owner))
s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE (name = ? COLLATE NOCASE and owner = ? COLLATE NOCASE) AND `repositories`.`deleted_at` IS NULL ORDER BY `repositories`.`id`,`repositories`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Repos[0].Name, s.Fixtures.Repos[0].Owner).
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE (name = ? COLLATE NOCASE and owner = ? COLLATE NOCASE and endpoint_name = ? COLLATE NOCASE) AND `repositories`.`deleted_at` IS NULL ORDER BY `repositories`.`id`,`repositories`.`id` LIMIT ?")).
+ WithArgs(s.Fixtures.Repos[0].Name, s.Fixtures.Repos[0].Owner, s.Fixtures.Repos[0].Endpoint.Name, 1).
WillReturnRows(sqlmock.NewRows([]string{"name", "owner"}).AddRow(s.Fixtures.Repos[0].Name, s.Fixtures.Repos[0].Owner))
- _, err := s.StoreSQLMocked.GetRepository(context.Background(), s.Fixtures.Repos[0].Owner, s.Fixtures.Repos[0].Name)
+ _, err := s.StoreSQLMocked.GetRepository(s.adminCtx, s.Fixtures.Repos[0].Owner, s.Fixtures.Repos[0].Name, s.Fixtures.Repos[0].Endpoint.Name)
- s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("fetching repo: missing secret", err.Error())
+ s.Require().Equal("error fetching repo: missing secret", err.Error())
+ s.assertSQLMockExpectations()
}
func (s *RepoTestSuite) TestListRepositories() {
- repos, err := s.Store.ListRepositories((context.Background()))
+ repos, err := s.Store.ListRepositories(s.adminCtx, params.RepositoryFilter{})
s.Require().Nil(err)
s.equalReposByName(s.Fixtures.Repos, repos)
}
+func (s *RepoTestSuite) TestListRepositoriesWithFilters() {
+ repo, err := s.Store.CreateRepository(
+ s.adminCtx,
+ "test-owner",
+ "test-repo",
+ s.testCreds,
+ "super secret",
+ params.PoolBalancerTypeRoundRobin,
+ )
+ s.Require().NoError(err)
+
+ repo2, err := s.Store.CreateRepository(
+ s.adminCtx,
+ "test-owner",
+ "test-repo",
+ s.testCredsGitea,
+ "super secret",
+ params.PoolBalancerTypeRoundRobin,
+ )
+ s.Require().NoError(err)
+
+ repo3, err := s.Store.CreateRepository(
+ s.adminCtx,
+ "test-owner",
+ "test-repo2",
+ s.testCreds,
+ "super secret",
+ params.PoolBalancerTypeRoundRobin,
+ )
+ s.Require().NoError(err)
+
+ repo4, err := s.Store.CreateRepository(
+ s.adminCtx,
+ "test-owner2",
+ "test-repo",
+ s.testCreds,
+ "super secret",
+ params.PoolBalancerTypeRoundRobin,
+ )
+ s.Require().NoError(err)
+
+ repos, err := s.Store.ListRepositories(
+ s.adminCtx,
+ params.RepositoryFilter{
+ Name: "test-repo",
+ })
+
+ s.Require().Nil(err)
+ s.equalReposByName([]params.Repository{repo, repo2, repo4}, repos)
+
+ repos, err = s.Store.ListRepositories(
+ s.adminCtx,
+ params.RepositoryFilter{
+ Name: "test-repo",
+ Owner: "test-owner",
+ })
+
+ s.Require().Nil(err)
+ s.equalReposByName([]params.Repository{repo, repo2}, repos)
+
+ repos, err = s.Store.ListRepositories(
+ s.adminCtx,
+ params.RepositoryFilter{
+ Name: "test-repo",
+ Owner: "test-owner",
+ Endpoint: s.giteaEndpoint.Name,
+ })
+
+ s.Require().Nil(err)
+ s.equalReposByName([]params.Repository{repo2}, repos)
+
+ repos, err = s.Store.ListRepositories(
+ s.adminCtx,
+ params.RepositoryFilter{
+ Name: "test-repo2",
+ })
+
+ s.Require().Nil(err)
+ s.equalReposByName([]params.Repository{repo3}, repos)
+}
+
func (s *RepoTestSuite) TestListRepositoriesDBFetchErr() {
s.Fixtures.SQLMock.
ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE `repositories`.`deleted_at` IS NULL")).
WillReturnError(fmt.Errorf("fetching user from database mock error"))
- _, err := s.StoreSQLMocked.ListRepositories(context.Background())
+ _, err := s.StoreSQLMocked.ListRepositories(s.adminCtx, params.RepositoryFilter{})
- s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("fetching user from database: fetching user from database mock error", err.Error())
+ s.Require().Equal("error fetching user from database: fetching user from database mock error", err.Error())
+ s.assertSQLMockExpectations()
}
func (s *RepoTestSuite) TestListRepositoriesDBDecryptingErr() {
- s.StoreSQLMocked.cfg.Passphrase = "wrong-passphrase"
+ s.StoreSQLMocked.cfg.Passphrase = wrongPassphrase
s.Fixtures.SQLMock.
ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE `repositories`.`deleted_at` IS NULL")).
WillReturnRows(sqlmock.NewRows([]string{"id", "webhook_secret"}).AddRow(s.Fixtures.Repos[0].ID, s.Fixtures.Repos[0].WebhookSecret))
- _, err := s.StoreSQLMocked.ListRepositories(context.Background())
+ _, err := s.StoreSQLMocked.ListRepositories(s.adminCtx, params.RepositoryFilter{})
- s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("fetching repositories: decrypting secret: invalid passphrase length (expected length 32 characters)", err.Error())
+ s.Require().Equal("error fetching repositories: error decrypting secret: invalid passphrase length (expected length 32 characters)", err.Error())
+ s.assertSQLMockExpectations()
}
func (s *RepoTestSuite) TestDeleteRepository() {
- err := s.Store.DeleteRepository(context.Background(), s.Fixtures.Repos[0].ID)
+ err := s.Store.DeleteRepository(s.adminCtx, s.Fixtures.Repos[0].ID)
s.Require().Nil(err)
- _, err = s.Store.GetRepositoryByID(context.Background(), s.Fixtures.Repos[0].ID)
+ _, err = s.Store.GetRepositoryByID(s.adminCtx, s.Fixtures.Repos[0].ID)
s.Require().NotNil(err)
- s.Require().Equal("fetching repo: not found", err.Error())
+ s.Require().Equal("error fetching repo: not found", err.Error())
}
func (s *RepoTestSuite) TestDeleteRepositoryInvalidRepoID() {
- err := s.Store.DeleteRepository(context.Background(), "dummy-repo-id")
+ err := s.Store.DeleteRepository(s.adminCtx, "dummy-repo-id")
s.Require().NotNil(err)
- s.Require().Equal("fetching repo: parsing id: invalid request", err.Error())
+ s.Require().Equal("error fetching repo: error parsing id: invalid UUID length: 13", err.Error())
}
func (s *RepoTestSuite) TestDeleteRepositoryDBRemoveErr() {
s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE id = ? AND `repositories`.`deleted_at` IS NULL ORDER BY `repositories`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Repos[0].ID).
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE id = ? AND `repositories`.`deleted_at` IS NULL ORDER BY `repositories`.`id` LIMIT ?")).
+ WithArgs(s.Fixtures.Repos[0].ID, 1).
WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Repos[0].ID))
s.Fixtures.SQLMock.ExpectBegin()
s.Fixtures.SQLMock.
@@ -337,112 +517,151 @@ func (s *RepoTestSuite) TestDeleteRepositoryDBRemoveErr() {
WillReturnError(fmt.Errorf("mocked deleting repo error"))
s.Fixtures.SQLMock.ExpectRollback()
- err := s.StoreSQLMocked.DeleteRepository(context.Background(), s.Fixtures.Repos[0].ID)
+ err := s.StoreSQLMocked.DeleteRepository(s.adminCtx, s.Fixtures.Repos[0].ID)
- s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("deleting repo: mocked deleting repo error", err.Error())
+ s.Require().Equal("error deleting repo: mocked deleting repo error", err.Error())
+ s.assertSQLMockExpectations()
}
func (s *RepoTestSuite) TestUpdateRepository() {
- repo, err := s.Store.UpdateRepository(context.Background(), s.Fixtures.Repos[0].ID, s.Fixtures.UpdateRepoParams)
+ repo, err := s.Store.UpdateRepository(s.adminCtx, s.Fixtures.Repos[0].ID, s.Fixtures.UpdateRepoParams)
s.Require().Nil(err)
- s.Require().Equal(s.Fixtures.UpdateRepoParams.CredentialsName, repo.CredentialsName)
+ s.Require().Equal(s.Fixtures.UpdateRepoParams.CredentialsName, repo.Credentials.Name)
s.Require().Equal(s.Fixtures.UpdateRepoParams.WebhookSecret, repo.WebhookSecret)
}
func (s *RepoTestSuite) TestUpdateRepositoryInvalidRepoID() {
- _, err := s.Store.UpdateRepository(context.Background(), "dummy-repo-id", s.Fixtures.UpdateRepoParams)
+ _, err := s.Store.UpdateRepository(s.adminCtx, "dummy-repo-id", s.Fixtures.UpdateRepoParams)
s.Require().NotNil(err)
- s.Require().Equal("fetching repo: parsing id: invalid request", err.Error())
+ s.Require().Equal("error saving repo: error fetching repo: error parsing id: invalid UUID length: 13", err.Error())
}
func (s *RepoTestSuite) TestUpdateRepositoryDBEncryptErr() {
- s.StoreSQLMocked.cfg.Passphrase = "wrong-passphrase"
-
+ s.StoreSQLMocked.cfg.Passphrase = wrongPassphrase
+ s.Fixtures.SQLMock.ExpectBegin()
s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE id = ? AND `repositories`.`deleted_at` IS NULL ORDER BY `repositories`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Repos[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Repos[0].ID))
- _, err := s.StoreSQLMocked.UpdateRepository(context.Background(), s.Fixtures.Repos[0].ID, s.Fixtures.UpdateRepoParams)
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE id = ? AND `repositories`.`deleted_at` IS NULL ORDER BY `repositories`.`id` LIMIT ?")).
+ WithArgs(s.Fixtures.Repos[0].ID, 1).
+ WillReturnRows(sqlmock.NewRows([]string{"id", "endpoint_name"}).
+ AddRow(s.Fixtures.Repos[0].ID, s.githubEndpoint.Name))
+ s.Fixtures.SQLMock.
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `github_credentials` WHERE user_id = ? AND name = ? AND `github_credentials`.`deleted_at` IS NULL ORDER BY `github_credentials`.`id` LIMIT ?")).
+ WithArgs(s.adminUserID, s.secondaryTestCreds.Name, 1).
+ WillReturnRows(sqlmock.NewRows([]string{"id", "endpoint_name"}).
+ AddRow(s.secondaryTestCreds.ID, s.secondaryTestCreds.Endpoint.Name))
+ s.Fixtures.SQLMock.ExpectQuery(regexp.QuoteMeta("SELECT * FROM `github_endpoints` WHERE `github_endpoints`.`name` = ? AND `github_endpoints`.`deleted_at` IS NULL")).
+ WithArgs(s.testCreds.Endpoint.Name).
+ WillReturnRows(sqlmock.NewRows([]string{"name"}).
+ AddRow(s.secondaryTestCreds.Endpoint.Name))
+ s.Fixtures.SQLMock.ExpectRollback()
+
+ _, err := s.StoreSQLMocked.UpdateRepository(s.adminCtx, s.Fixtures.Repos[0].ID, s.Fixtures.UpdateRepoParams)
- s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("saving repo: failed to encrypt string: invalid passphrase length (expected length 32 characters)", err.Error())
+ s.Require().Equal("error saving repo: saving repo: failed to encrypt string: invalid passphrase length (expected length 32 characters)", err.Error())
+ s.assertSQLMockExpectations()
}
func (s *RepoTestSuite) TestUpdateRepositoryDBSaveErr() {
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE id = ? AND `repositories`.`deleted_at` IS NULL ORDER BY `repositories`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Repos[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Repos[0].ID))
s.Fixtures.SQLMock.ExpectBegin()
+ s.Fixtures.SQLMock.
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE id = ? AND `repositories`.`deleted_at` IS NULL ORDER BY `repositories`.`id` LIMIT ?")).
+ WithArgs(s.Fixtures.Repos[0].ID, 1).
+ WillReturnRows(sqlmock.NewRows([]string{"id", "endpoint_name"}).
+ AddRow(s.Fixtures.Repos[0].ID, s.githubEndpoint.Name))
+ s.Fixtures.SQLMock.
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `github_credentials` WHERE user_id = ? AND name = ? AND `github_credentials`.`deleted_at` IS NULL ORDER BY `github_credentials`.`id` LIMIT ?")).
+ WithArgs(s.adminUserID, s.secondaryTestCreds.Name, 1).
+ WillReturnRows(sqlmock.NewRows([]string{"id", "endpoint_name"}).
+ AddRow(s.secondaryTestCreds.ID, s.secondaryTestCreds.Endpoint.Name))
+ s.Fixtures.SQLMock.ExpectQuery(regexp.QuoteMeta("SELECT * FROM `github_endpoints` WHERE `github_endpoints`.`name` = ? AND `github_endpoints`.`deleted_at` IS NULL")).
+ WithArgs(s.testCreds.Endpoint.Name).
+ WillReturnRows(sqlmock.NewRows([]string{"name"}).
+ AddRow(s.secondaryTestCreds.Endpoint.Name))
s.Fixtures.SQLMock.
ExpectExec(("UPDATE `repositories` SET")).
WillReturnError(fmt.Errorf("saving repo mock error"))
s.Fixtures.SQLMock.ExpectRollback()
- _, err := s.StoreSQLMocked.UpdateRepository(context.Background(), s.Fixtures.Repos[0].ID, s.Fixtures.UpdateRepoParams)
+ _, err := s.StoreSQLMocked.UpdateRepository(s.adminCtx, s.Fixtures.Repos[0].ID, s.Fixtures.UpdateRepoParams)
- s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("saving repo: saving repo mock error", err.Error())
+ s.Require().Equal("error saving repo: error saving repo: saving repo mock error", err.Error())
+ s.assertSQLMockExpectations()
}
func (s *RepoTestSuite) TestUpdateRepositoryDBDecryptingErr() {
- s.StoreSQLMocked.cfg.Passphrase = "wrong-passphrase"
- s.Fixtures.UpdateRepoParams.WebhookSecret = "webhook-secret"
-
+ s.StoreSQLMocked.cfg.Passphrase = wrongPassphrase
+ s.Fixtures.UpdateRepoParams.WebhookSecret = webhookSecret
+ s.Fixtures.SQLMock.ExpectBegin()
s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE id = ? AND `repositories`.`deleted_at` IS NULL ORDER BY `repositories`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Repos[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Repos[0].ID))
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE id = ? AND `repositories`.`deleted_at` IS NULL ORDER BY `repositories`.`id` LIMIT ?")).
+ WithArgs(s.Fixtures.Repos[0].ID, 1).
+ WillReturnRows(sqlmock.NewRows([]string{"id", "endpoint_name"}).
+ AddRow(s.Fixtures.Repos[0].ID, s.githubEndpoint.Name))
+ s.Fixtures.SQLMock.
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `github_credentials` WHERE user_id = ? AND name = ? AND `github_credentials`.`deleted_at` IS NULL ORDER BY `github_credentials`.`id` LIMIT ?")).
+ WithArgs(s.adminUserID, s.secondaryTestCreds.Name, 1).
+ WillReturnRows(sqlmock.NewRows([]string{"id", "endpoint_name"}).
+ AddRow(s.secondaryTestCreds.ID, s.secondaryTestCreds.Endpoint.Name))
+ s.Fixtures.SQLMock.ExpectQuery(regexp.QuoteMeta("SELECT * FROM `github_endpoints` WHERE `github_endpoints`.`name` = ? AND `github_endpoints`.`deleted_at` IS NULL")).
+ WithArgs(s.testCreds.Endpoint.Name).
+ WillReturnRows(sqlmock.NewRows([]string{"name"}).
+ AddRow(s.secondaryTestCreds.Endpoint.Name))
+ s.Fixtures.SQLMock.ExpectRollback()
- _, err := s.StoreSQLMocked.UpdateRepository(context.Background(), s.Fixtures.Repos[0].ID, s.Fixtures.UpdateRepoParams)
+ _, err := s.StoreSQLMocked.UpdateRepository(s.adminCtx, s.Fixtures.Repos[0].ID, s.Fixtures.UpdateRepoParams)
- s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("saving repo: failed to encrypt string: invalid passphrase length (expected length 32 characters)", err.Error())
+ s.Require().Equal("error saving repo: saving repo: failed to encrypt string: invalid passphrase length (expected length 32 characters)", err.Error())
+ s.assertSQLMockExpectations()
}
func (s *RepoTestSuite) TestGetRepositoryByID() {
- repo, err := s.Store.GetRepositoryByID(context.Background(), s.Fixtures.Repos[0].ID)
+ repo, err := s.Store.GetRepositoryByID(s.adminCtx, s.Fixtures.Repos[0].ID)
s.Require().Nil(err)
s.Require().Equal(s.Fixtures.Repos[0].ID, repo.ID)
}
func (s *RepoTestSuite) TestGetRepositoryByIDInvalidRepoID() {
- _, err := s.Store.GetRepositoryByID(context.Background(), "dummy-repo-id")
+ _, err := s.Store.GetRepositoryByID(s.adminCtx, "dummy-repo-id")
s.Require().NotNil(err)
- s.Require().Equal("fetching repo: parsing id: invalid request", err.Error())
+ s.Require().Equal("error fetching repo: error parsing id: invalid UUID length: 13", err.Error())
}
func (s *RepoTestSuite) TestGetRepositoryByIDDBDecryptingErr() {
s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE id = ? AND `repositories`.`deleted_at` IS NULL ORDER BY `repositories`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Repos[0].ID).
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE id = ? AND `repositories`.`deleted_at` IS NULL ORDER BY `repositories`.`id` LIMIT ?")).
+ WithArgs(s.Fixtures.Repos[0].ID, 1).
WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Repos[0].ID))
+ s.Fixtures.SQLMock.
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repository_events` WHERE `repository_events`.`repo_id` = ? AND `repository_events`.`deleted_at` IS NULL")).
+ WithArgs(s.Fixtures.Repos[0].ID).
+ WillReturnRows(sqlmock.NewRows([]string{"repo_id"}).AddRow(s.Fixtures.Repos[0].ID))
s.Fixtures.SQLMock.
ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE `pools`.`repo_id` = ? AND `pools`.`deleted_at` IS NULL")).
WithArgs(s.Fixtures.Repos[0].ID).
WillReturnRows(sqlmock.NewRows([]string{"repo_id"}).AddRow(s.Fixtures.Repos[0].ID))
- _, err := s.StoreSQLMocked.GetRepositoryByID(context.Background(), s.Fixtures.Repos[0].ID)
+ _, err := s.StoreSQLMocked.GetRepositoryByID(s.adminCtx, s.Fixtures.Repos[0].ID)
- s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("fetching repo: missing secret", err.Error())
+ s.Require().Equal("error fetching repo: missing secret", err.Error())
+ s.assertSQLMockExpectations()
}
func (s *RepoTestSuite) TestCreateRepositoryPool() {
- pool, err := s.Store.CreateRepositoryPool(context.Background(), s.Fixtures.Repos[0].ID, s.Fixtures.CreatePoolParams)
+ entity, err := s.Fixtures.Repos[0].GetEntity()
+ s.Require().Nil(err)
+ pool, err := s.Store.CreateEntityPool(s.adminCtx, entity, s.Fixtures.CreatePoolParams)
s.Require().Nil(err)
- repo, err := s.Store.GetRepositoryByID(context.Background(), s.Fixtures.Repos[0].ID)
+ repo, err := s.Store.GetRepositoryByID(s.adminCtx, s.Fixtures.Repos[0].ID)
if err != nil {
s.FailNow(fmt.Sprintf("cannot get repo by ID: %v", err))
}
@@ -455,216 +674,122 @@ func (s *RepoTestSuite) TestCreateRepositoryPool() {
func (s *RepoTestSuite) TestCreateRepositoryPoolMissingTags() {
s.Fixtures.CreatePoolParams.Tags = []string{}
-
- _, err := s.Store.CreateRepositoryPool(context.Background(), s.Fixtures.Repos[0].ID, s.Fixtures.CreatePoolParams)
+ entity, err := s.Fixtures.Repos[0].GetEntity()
+ s.Require().Nil(err)
+ _, err = s.Store.CreateEntityPool(s.adminCtx, entity, s.Fixtures.CreatePoolParams)
s.Require().NotNil(err)
s.Require().Equal("no tags specified", err.Error())
}
func (s *RepoTestSuite) TestCreateRepositoryPoolInvalidRepoID() {
- _, err := s.Store.CreateRepositoryPool(context.Background(), "dummy-repo-id", s.Fixtures.CreatePoolParams)
+ entity := params.ForgeEntity{
+ ID: "dummy-repo-id",
+ EntityType: params.ForgeEntityTypeRepository,
+ }
+ _, err := s.Store.CreateEntityPool(s.adminCtx, entity, s.Fixtures.CreatePoolParams)
s.Require().NotNil(err)
- s.Require().Equal("fetching repo: parsing id: invalid request", err.Error())
-}
-
-func (s *RepoTestSuite) TestCreateRepositoryPoolDBCreateErr() {
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE id = ? AND `repositories`.`deleted_at` IS NULL ORDER BY `repositories`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Repos[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Repos[0].ID))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE id = ? AND `repositories`.`deleted_at` IS NULL ORDER BY `repositories`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Repos[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Repos[0].ID))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE `pools`.`repo_id` = ? AND (provider_name = ? and image = ? and flavor = ?) AND `pools`.`deleted_at` IS NULL")).
- WillReturnError(fmt.Errorf("mocked creating pool error"))
-
- _, err := s.StoreSQLMocked.CreateRepositoryPool(context.Background(), s.Fixtures.Repos[0].ID, s.Fixtures.CreatePoolParams)
-
- s.assertSQLMockExpectations()
- s.Require().NotNil(err)
- s.Require().Equal("creating pool: fetching pool: mocked creating pool error", err.Error())
-}
-
-func (s *RepoTestSuite) TestCreateRepositoryPoolDBPoolAlreadyExistErr() {
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE id = ? AND `repositories`.`deleted_at` IS NULL ORDER BY `repositories`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Repos[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Repos[0].ID))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE id = ? AND `repositories`.`deleted_at` IS NULL ORDER BY `repositories`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Repos[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Repos[0].ID))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE `pools`.`repo_id` = ? AND (provider_name = ? and image = ? and flavor = ?) AND `pools`.`deleted_at` IS NULL")).
- WithArgs(
- s.Fixtures.Repos[0].ID,
- s.Fixtures.CreatePoolParams.ProviderName,
- s.Fixtures.CreatePoolParams.Image,
- s.Fixtures.CreatePoolParams.Flavor).
- WillReturnRows(sqlmock.NewRows([]string{"repo_id", "provider_name", "image", "flavor"}).
- AddRow(
- s.Fixtures.Repos[0].ID,
- s.Fixtures.CreatePoolParams.ProviderName,
- s.Fixtures.CreatePoolParams.Image,
- s.Fixtures.CreatePoolParams.Flavor))
-
- _, err := s.StoreSQLMocked.CreateRepositoryPool(context.Background(), s.Fixtures.Repos[0].ID, s.Fixtures.CreatePoolParams)
-
- s.assertSQLMockExpectations()
- s.Require().NotNil(err)
- s.Require().Equal("pool with the same image and flavor already exists on this provider", err.Error())
+ s.Require().Equal("error parsing id: invalid request", err.Error())
}
func (s *RepoTestSuite) TestCreateRepositoryPoolDBFetchTagErr() {
+ s.Fixtures.SQLMock.ExpectBegin()
s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE id = ? AND `repositories`.`deleted_at` IS NULL ORDER BY `repositories`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Repos[0].ID).
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE id = ? AND `repositories`.`deleted_at` IS NULL ORDER BY `repositories`.`id` LIMIT ?")).
+ WithArgs(s.Fixtures.Repos[0].ID, 1).
WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Repos[0].ID))
s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE id = ? AND `repositories`.`deleted_at` IS NULL ORDER BY `repositories`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Repos[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Repos[0].ID))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE `pools`.`repo_id` = ? AND (provider_name = ? and image = ? and flavor = ?) AND `pools`.`deleted_at` IS NULL")).
- WithArgs(
- s.Fixtures.Repos[0].ID,
- s.Fixtures.CreatePoolParams.ProviderName,
- s.Fixtures.CreatePoolParams.Image,
- s.Fixtures.CreatePoolParams.Flavor).
- WillReturnRows(sqlmock.NewRows([]string{"repo_id"}))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT 1")).
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? COLLATE NOCASE AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT ?")).
WillReturnError(fmt.Errorf("mocked fetching tag error"))
- _, err := s.StoreSQLMocked.CreateRepositoryPool(context.Background(), s.Fixtures.Repos[0].ID, s.Fixtures.CreatePoolParams)
+ entity, err := s.Fixtures.Repos[0].GetEntity()
+ s.Require().Nil(err)
+
+ _, err = s.StoreSQLMocked.CreateEntityPool(s.adminCtx, entity, s.Fixtures.CreatePoolParams)
- s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("fetching tag: fetching tag from database: mocked fetching tag error", err.Error())
+ s.Require().Equal("error creating tag: error fetching tag from database: mocked fetching tag error", err.Error())
+ s.assertSQLMockExpectations()
}
func (s *RepoTestSuite) TestCreateRepositoryPoolDBAddingPoolErr() {
s.Fixtures.CreatePoolParams.Tags = []string{"linux"}
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE id = ? AND `repositories`.`deleted_at` IS NULL ORDER BY `repositories`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Repos[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Repos[0].ID))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE id = ? AND `repositories`.`deleted_at` IS NULL ORDER BY `repositories`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Repos[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Repos[0].ID))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE `pools`.`repo_id` = ? AND (provider_name = ? and image = ? and flavor = ?) AND `pools`.`deleted_at` IS NULL")).
- WithArgs(
- s.Fixtures.Repos[0].ID,
- s.Fixtures.CreatePoolParams.ProviderName,
- s.Fixtures.CreatePoolParams.Image,
- s.Fixtures.CreatePoolParams.Flavor).
- WillReturnRows(sqlmock.NewRows([]string{"repo_id"}))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT 1")).
- WillReturnRows(sqlmock.NewRows([]string{"linux"}))
s.Fixtures.SQLMock.ExpectBegin()
+ s.Fixtures.SQLMock.
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE id = ? AND `repositories`.`deleted_at` IS NULL ORDER BY `repositories`.`id` LIMIT ?")).
+ WithArgs(s.Fixtures.Repos[0].ID, 1).
+ WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Repos[0].ID))
+ s.Fixtures.SQLMock.
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? COLLATE NOCASE AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT ?")).
+ WillReturnRows(sqlmock.NewRows([]string{"linux"}))
s.Fixtures.SQLMock.
ExpectExec(regexp.QuoteMeta("INSERT INTO `tags`")).
WillReturnResult(sqlmock.NewResult(1, 1))
- s.Fixtures.SQLMock.ExpectCommit()
- s.Fixtures.SQLMock.ExpectBegin()
s.Fixtures.SQLMock.
ExpectExec(regexp.QuoteMeta("INSERT INTO `pools`")).
WillReturnError(fmt.Errorf("mocked adding pool error"))
s.Fixtures.SQLMock.ExpectRollback()
- _, err := s.StoreSQLMocked.CreateRepositoryPool(context.Background(), s.Fixtures.Repos[0].ID, s.Fixtures.CreatePoolParams)
+ entity, err := s.Fixtures.Repos[0].GetEntity()
+ s.Require().Nil(err)
+
+ _, err = s.StoreSQLMocked.CreateEntityPool(s.adminCtx, entity, s.Fixtures.CreatePoolParams)
- s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("adding pool: mocked adding pool error", err.Error())
+ s.Require().Equal("error creating pool: mocked adding pool error", err.Error())
+ s.assertSQLMockExpectations()
}
func (s *RepoTestSuite) TestCreateRepositoryPoolDBSaveTagErr() {
s.Fixtures.CreatePoolParams.Tags = []string{"linux"}
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE id = ? AND `repositories`.`deleted_at` IS NULL ORDER BY `repositories`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Repos[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Repos[0].ID))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE id = ? AND `repositories`.`deleted_at` IS NULL ORDER BY `repositories`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Repos[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Repos[0].ID))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE `pools`.`repo_id` = ? AND (provider_name = ? and image = ? and flavor = ?) AND `pools`.`deleted_at` IS NULL")).
- WithArgs(
- s.Fixtures.Repos[0].ID,
- s.Fixtures.CreatePoolParams.ProviderName,
- s.Fixtures.CreatePoolParams.Image,
- s.Fixtures.CreatePoolParams.Flavor).
- WillReturnRows(sqlmock.NewRows([]string{"repo_id"}))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT 1")).
- WillReturnRows(sqlmock.NewRows([]string{"linux"}))
s.Fixtures.SQLMock.ExpectBegin()
+ s.Fixtures.SQLMock.
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE id = ? AND `repositories`.`deleted_at` IS NULL ORDER BY `repositories`.`id` LIMIT ?")).
+ WithArgs(s.Fixtures.Repos[0].ID, 1).
+ WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Repos[0].ID))
+ s.Fixtures.SQLMock.
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? COLLATE NOCASE AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT ?")).
+ WillReturnRows(sqlmock.NewRows([]string{"linux"}))
s.Fixtures.SQLMock.
ExpectExec(regexp.QuoteMeta("INSERT INTO `tags`")).
WillReturnResult(sqlmock.NewResult(1, 1))
- s.Fixtures.SQLMock.ExpectCommit()
- s.Fixtures.SQLMock.ExpectBegin()
s.Fixtures.SQLMock.
ExpectExec(regexp.QuoteMeta("INSERT INTO `pools`")).
WillReturnResult(sqlmock.NewResult(1, 1))
- s.Fixtures.SQLMock.ExpectCommit()
- s.Fixtures.SQLMock.ExpectBegin()
s.Fixtures.SQLMock.
ExpectExec(regexp.QuoteMeta("UPDATE `pools` SET")).
WillReturnError(fmt.Errorf("mocked saving tag error"))
s.Fixtures.SQLMock.ExpectRollback()
- _, err := s.StoreSQLMocked.CreateRepositoryPool(context.Background(), s.Fixtures.Repos[0].ID, s.Fixtures.CreatePoolParams)
+ entity, err := s.Fixtures.Repos[0].GetEntity()
+ s.Require().Nil(err)
- s.assertSQLMockExpectations()
+ _, err = s.StoreSQLMocked.CreateEntityPool(s.adminCtx, entity, s.Fixtures.CreatePoolParams)
s.Require().NotNil(err)
- s.Require().Equal("saving tag: mocked saving tag error", err.Error())
+ s.Require().Equal("error associating tags: mocked saving tag error", err.Error())
+ s.assertSQLMockExpectations()
}
func (s *RepoTestSuite) TestCreateRepositoryPoolDBFetchPoolErr() {
s.Fixtures.CreatePoolParams.Tags = []string{"linux"}
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE id = ? AND `repositories`.`deleted_at` IS NULL ORDER BY `repositories`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Repos[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Repos[0].ID))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE id = ? AND `repositories`.`deleted_at` IS NULL ORDER BY `repositories`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Repos[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Repos[0].ID))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE `pools`.`repo_id` = ? AND (provider_name = ? and image = ? and flavor = ?) AND `pools`.`deleted_at` IS NULL")).
- WithArgs(
- s.Fixtures.Repos[0].ID,
- s.Fixtures.CreatePoolParams.ProviderName,
- s.Fixtures.CreatePoolParams.Image,
- s.Fixtures.CreatePoolParams.Flavor).
- WillReturnRows(sqlmock.NewRows([]string{"repo_id"}))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT 1")).
- WillReturnRows(sqlmock.NewRows([]string{"linux"}))
s.Fixtures.SQLMock.ExpectBegin()
+ s.Fixtures.SQLMock.
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE id = ? AND `repositories`.`deleted_at` IS NULL ORDER BY `repositories`.`id` LIMIT ?")).
+ WithArgs(s.Fixtures.Repos[0].ID, 1).
+ WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Repos[0].ID))
+ s.Fixtures.SQLMock.
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? COLLATE NOCASE AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT ?")).
+ WillReturnRows(sqlmock.NewRows([]string{"linux"}))
s.Fixtures.SQLMock.
ExpectExec(regexp.QuoteMeta("INSERT INTO `tags`")).
WillReturnResult(sqlmock.NewResult(1, 1))
- s.Fixtures.SQLMock.ExpectCommit()
- s.Fixtures.SQLMock.ExpectBegin()
s.Fixtures.SQLMock.
ExpectExec(regexp.QuoteMeta("INSERT INTO `pools`")).
WillReturnResult(sqlmock.NewResult(1, 1))
- s.Fixtures.SQLMock.ExpectCommit()
- s.Fixtures.SQLMock.ExpectBegin()
s.Fixtures.SQLMock.
ExpectExec(regexp.QuoteMeta("UPDATE `pools` SET")).
WillReturnResult(sqlmock.NewResult(1, 1))
@@ -676,160 +801,165 @@ func (s *RepoTestSuite) TestCreateRepositoryPoolDBFetchPoolErr() {
WillReturnResult(sqlmock.NewResult(1, 1))
s.Fixtures.SQLMock.ExpectCommit()
s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE id = ? AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT 1")).
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE id = ? AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT ?")).
WillReturnRows(sqlmock.NewRows([]string{"id"}))
- _, err := s.StoreSQLMocked.CreateRepositoryPool(context.Background(), s.Fixtures.Repos[0].ID, s.Fixtures.CreatePoolParams)
+ entity, err := s.Fixtures.Repos[0].GetEntity()
+ s.Require().Nil(err)
+
+ _, err = s.StoreSQLMocked.CreateEntityPool(s.adminCtx, entity, s.Fixtures.CreatePoolParams)
- s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("fetching pool: not found", err.Error())
+ s.Require().Equal("error fetching pool by ID: not found", err.Error())
+ s.assertSQLMockExpectations()
}
func (s *RepoTestSuite) TestListRepoPools() {
+ entity, err := s.Fixtures.Repos[0].GetEntity()
+ s.Require().Nil(err)
repoPools := []params.Pool{}
for i := 1; i <= 2; i++ {
s.Fixtures.CreatePoolParams.Flavor = fmt.Sprintf("test-flavor-%d", i)
- pool, err := s.Store.CreateRepositoryPool(context.Background(), s.Fixtures.Repos[0].ID, s.Fixtures.CreatePoolParams)
+ pool, err := s.Store.CreateEntityPool(s.adminCtx, entity, s.Fixtures.CreatePoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create repo pool: %v", err))
}
repoPools = append(repoPools, pool)
}
- pools, err := s.Store.ListRepoPools(context.Background(), s.Fixtures.Repos[0].ID)
+ pools, err := s.Store.ListEntityPools(s.adminCtx, entity)
s.Require().Nil(err)
garmTesting.EqualDBEntityID(s.T(), repoPools, pools)
}
func (s *RepoTestSuite) TestListRepoPoolsInvalidRepoID() {
- _, err := s.Store.ListRepoPools(context.Background(), "dummy-repo-id")
+ entity := params.ForgeEntity{
+ ID: "dummy-repo-id",
+ EntityType: params.ForgeEntityTypeRepository,
+ }
+ _, err := s.Store.ListEntityPools(s.adminCtx, entity)
s.Require().NotNil(err)
- s.Require().Equal("fetching pools: parsing id: invalid request", err.Error())
+ s.Require().Equal("error fetching pools: error parsing id: invalid request", err.Error())
}
func (s *RepoTestSuite) TestGetRepositoryPool() {
- pool, err := s.Store.CreateRepositoryPool(context.Background(), s.Fixtures.Repos[0].ID, s.Fixtures.CreatePoolParams)
+ entity, err := s.Fixtures.Repos[0].GetEntity()
+ s.Require().Nil(err)
+ pool, err := s.Store.CreateEntityPool(s.adminCtx, entity, s.Fixtures.CreatePoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create repo pool: %v", err))
}
- repoPool, err := s.Store.GetRepositoryPool(context.Background(), s.Fixtures.Repos[0].ID, pool.ID)
+ repoPool, err := s.Store.GetEntityPool(s.adminCtx, entity, pool.ID)
s.Require().Nil(err)
s.Require().Equal(repoPool.ID, pool.ID)
}
func (s *RepoTestSuite) TestGetRepositoryPoolInvalidRepoID() {
- _, err := s.Store.GetRepositoryPool(context.Background(), "dummy-repo-id", "dummy-pool-id")
+ entity := params.ForgeEntity{
+ ID: "dummy-repo-id",
+ EntityType: params.ForgeEntityTypeRepository,
+ }
+ _, err := s.Store.GetEntityPool(s.adminCtx, entity, "dummy-pool-id")
s.Require().NotNil(err)
- s.Require().Equal("fetching pool: parsing id: invalid request", err.Error())
+ s.Require().Equal("fetching pool: error parsing id: invalid request", err.Error())
}
func (s *RepoTestSuite) TestDeleteRepositoryPool() {
- pool, err := s.Store.CreateRepositoryPool(context.Background(), s.Fixtures.Repos[0].ID, s.Fixtures.CreatePoolParams)
+ entity, err := s.Fixtures.Repos[0].GetEntity()
+ s.Require().Nil(err)
+ pool, err := s.Store.CreateEntityPool(s.adminCtx, entity, s.Fixtures.CreatePoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create repo pool: %v", err))
}
- err = s.Store.DeleteRepositoryPool(context.Background(), s.Fixtures.Repos[0].ID, pool.ID)
+ err = s.Store.DeleteEntityPool(s.adminCtx, entity, pool.ID)
s.Require().Nil(err)
- _, err = s.Store.GetOrganizationPool(context.Background(), s.Fixtures.Repos[0].ID, pool.ID)
- s.Require().Equal("fetching pool: finding pool: not found", err.Error())
+ _, err = s.Store.GetEntityPool(s.adminCtx, entity, pool.ID)
+ s.Require().Equal("fetching pool: error finding pool: not found", err.Error())
}
func (s *RepoTestSuite) TestDeleteRepositoryPoolInvalidRepoID() {
- err := s.Store.DeleteRepositoryPool(context.Background(), "dummy-repo-id", "dummy-pool-id")
+ entity := params.ForgeEntity{
+ ID: "dummy-repo-id",
+ EntityType: params.ForgeEntityTypeRepository,
+ }
+ err := s.Store.DeleteEntityPool(s.adminCtx, entity, "dummy-pool-id")
s.Require().NotNil(err)
- s.Require().Equal("looking up repo pool: parsing id: invalid request", err.Error())
+ s.Require().Equal("error parsing id: invalid request", err.Error())
}
func (s *RepoTestSuite) TestDeleteRepositoryPoolDBDeleteErr() {
- pool, err := s.Store.CreateRepositoryPool(context.Background(), s.Fixtures.Repos[0].ID, s.Fixtures.CreatePoolParams)
+ entity, err := s.Fixtures.Repos[0].GetEntity()
+ s.Require().Nil(err)
+
+ pool, err := s.Store.CreateEntityPool(s.adminCtx, entity, s.Fixtures.CreatePoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create repo pool: %v", err))
}
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE (id = ? and repo_id = ?) AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT 1")).
- WithArgs(pool.ID, s.Fixtures.Repos[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"repo_id", "id"}).AddRow(s.Fixtures.Repos[0].ID, pool.ID))
s.Fixtures.SQLMock.ExpectBegin()
s.Fixtures.SQLMock.
- ExpectExec(regexp.QuoteMeta("DELETE FROM `pools` WHERE `pools`.`id` = ?")).
- WithArgs(pool.ID).
+ ExpectExec(regexp.QuoteMeta("DELETE FROM `pools` WHERE id = ? and repo_id = ?")).
+ WithArgs(pool.ID, s.Fixtures.Repos[0].ID).
WillReturnError(fmt.Errorf("mocked deleting pool error"))
s.Fixtures.SQLMock.ExpectRollback()
- err = s.StoreSQLMocked.DeleteRepositoryPool(context.Background(), s.Fixtures.Repos[0].ID, pool.ID)
-
+ err = s.StoreSQLMocked.DeleteEntityPool(s.adminCtx, entity, pool.ID)
+ s.Require().NotNil(err)
+ s.Require().Equal("error removing pool: mocked deleting pool error", err.Error())
s.assertSQLMockExpectations()
- s.Require().NotNil(err)
- s.Require().Equal("deleting pool: mocked deleting pool error", err.Error())
-}
-
-func (s *RepoTestSuite) TestFindRepositoryPoolByTags() {
- repoPool, err := s.Store.CreateRepositoryPool(context.Background(), s.Fixtures.Repos[0].ID, s.Fixtures.CreatePoolParams)
- if err != nil {
- s.FailNow(fmt.Sprintf("cannot create repo pool: %v", err))
- }
-
- pool, err := s.Store.FindRepositoryPoolByTags(context.Background(), s.Fixtures.Repos[0].ID, s.Fixtures.CreatePoolParams.Tags)
- s.Require().Nil(err)
- s.Require().Equal(repoPool.ID, pool.ID)
- s.Require().Equal(repoPool.Image, pool.Image)
- s.Require().Equal(repoPool.Flavor, pool.Flavor)
-}
-
-func (s *RepoTestSuite) TestFindRepositoryPoolByTagsMissingTags() {
- tags := []string{}
-
- _, err := s.Store.FindRepositoryPoolByTags(context.Background(), s.Fixtures.Repos[0].ID, tags)
-
- s.Require().NotNil(err)
- s.Require().Equal("fetching pool: missing tags", err.Error())
}
func (s *RepoTestSuite) TestListRepoInstances() {
- pool, err := s.Store.CreateRepositoryPool(context.Background(), s.Fixtures.Repos[0].ID, s.Fixtures.CreatePoolParams)
+ entity, err := s.Fixtures.Repos[0].GetEntity()
+ s.Require().Nil(err)
+ pool, err := s.Store.CreateEntityPool(s.adminCtx, entity, s.Fixtures.CreatePoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create repo pool: %v", err))
}
poolInstances := []params.Instance{}
for i := 1; i <= 3; i++ {
s.Fixtures.CreateInstanceParams.Name = fmt.Sprintf("test-repo-%d", i)
- instance, err := s.Store.CreateInstance(context.Background(), pool.ID, s.Fixtures.CreateInstanceParams)
+ instance, err := s.Store.CreateInstance(s.adminCtx, pool.ID, s.Fixtures.CreateInstanceParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create instance: %s", err))
}
poolInstances = append(poolInstances, instance)
}
- instances, err := s.Store.ListRepoInstances(context.Background(), s.Fixtures.Repos[0].ID)
+ instances, err := s.Store.ListEntityInstances(s.adminCtx, entity)
s.Require().Nil(err)
s.equalInstancesByID(poolInstances, instances)
}
func (s *RepoTestSuite) TestListRepoInstancesInvalidRepoID() {
- _, err := s.Store.ListRepoInstances(context.Background(), "dummy-repo-id")
+ entity := params.ForgeEntity{
+ ID: "dummy-repo-id",
+ EntityType: params.ForgeEntityTypeRepository,
+ }
+ _, err := s.Store.ListEntityInstances(s.adminCtx, entity)
s.Require().NotNil(err)
- s.Require().Equal("fetching repo: parsing id: invalid request", err.Error())
+ s.Require().Equal("error fetching entity: error parsing id: invalid request", err.Error())
}
func (s *RepoTestSuite) TestUpdateRepositoryPool() {
- repoPool, err := s.Store.CreateRepositoryPool(context.Background(), s.Fixtures.Repos[0].ID, s.Fixtures.CreatePoolParams)
+ entity, err := s.Fixtures.Repos[0].GetEntity()
+ s.Require().Nil(err)
+ repoPool, err := s.Store.CreateEntityPool(s.adminCtx, entity, s.Fixtures.CreatePoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create repo pool: %v", err))
}
- pool, err := s.Store.UpdateRepositoryPool(context.Background(), s.Fixtures.Repos[0].ID, repoPool.ID, s.Fixtures.UpdatePoolParams)
+ pool, err := s.Store.UpdateEntityPool(s.adminCtx, entity, repoPool.ID, s.Fixtures.UpdatePoolParams)
s.Require().Nil(err)
s.Require().Equal(*s.Fixtures.UpdatePoolParams.MaxRunners, pool.MaxRunners)
@@ -839,13 +969,39 @@ func (s *RepoTestSuite) TestUpdateRepositoryPool() {
}
func (s *RepoTestSuite) TestUpdateRepositoryPoolInvalidRepoID() {
- _, err := s.Store.UpdateRepositoryPool(context.Background(), "dummy-org-id", "dummy-repo-id", s.Fixtures.UpdatePoolParams)
+ entity := params.ForgeEntity{
+ ID: "dummy-repo-id",
+ EntityType: params.ForgeEntityTypeRepository,
+ }
+ _, err := s.Store.UpdateEntityPool(s.adminCtx, entity, "dummy-repo-id", s.Fixtures.UpdatePoolParams)
s.Require().NotNil(err)
- s.Require().Equal("fetching pool: parsing id: invalid request", err.Error())
+ s.Require().Equal("error fetching pool: error parsing id: invalid request", err.Error())
+}
+
+func (s *RepoTestSuite) TestAddRepoEntityEvent() {
+ repo, err := s.Store.CreateRepository(
+ s.adminCtx,
+ s.Fixtures.CreateRepoParams.Owner,
+ s.Fixtures.CreateRepoParams.Name,
+ s.testCreds,
+ s.Fixtures.CreateRepoParams.WebhookSecret,
+ params.PoolBalancerTypeRoundRobin)
+
+ s.Require().Nil(err)
+ entity, err := repo.GetEntity()
+ s.Require().Nil(err)
+ err = s.Store.AddEntityEvent(s.adminCtx, entity, params.StatusEvent, params.EventInfo, "this is a test", 20)
+ s.Require().Nil(err)
+
+ repo, err = s.Store.GetRepositoryByID(s.adminCtx, repo.ID)
+ s.Require().Nil(err)
+ s.Require().Equal(1, len(repo.Events))
+ s.Require().Equal(params.StatusEvent, repo.Events[0].EventType)
+ s.Require().Equal(params.EventInfo, repo.Events[0].EventLevel)
+ s.Require().Equal("this is a test", repo.Events[0].Message)
}
func TestRepoTestSuite(t *testing.T) {
- t.Parallel()
suite.Run(t, new(RepoTestSuite))
}
diff --git a/database/sql/scaleset_instances.go b/database/sql/scaleset_instances.go
new file mode 100644
index 00000000..457c99b5
--- /dev/null
+++ b/database/sql/scaleset_instances.go
@@ -0,0 +1,86 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package sql
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/cloudbase/garm/database/common"
+ "github.com/cloudbase/garm/params"
+)
+
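+// CreateScaleSetInstance creates a new runner instance in the scale set
+// identified by scaleSetID. The JIT configuration, if any, is sealed before
+// being persisted. A create notification is emitted on success.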
+func (s *sqlDatabase) CreateScaleSetInstance(_ context.Context, scaleSetID uint, param params.CreateInstanceParams) (instance params.Instance, err error) {
+ scaleSet, err := s.getScaleSetByID(s.conn, scaleSetID)
+ if err != nil {
+ return params.Instance{}, fmt.Errorf("error fetching scale set: %w", err)
+ }
+
+ defer func() {
+ if err == nil {
+ s.sendNotify(common.InstanceEntityType, common.CreateOperation, instance)
+ }
+ }()
+
+ var secret []byte
+ if len(param.JitConfiguration) > 0 {
+ secret, err = s.marshalAndSeal(param.JitConfiguration)
+ if err != nil {
+ return params.Instance{}, fmt.Errorf("error marshalling jit config: %w", err)
+ }
+ }
+
+ newInstance := Instance{
+ ScaleSet: scaleSet,
+ Name: param.Name,
+ Status: param.Status,
+ RunnerStatus: param.RunnerStatus,
+ OSType: param.OSType,
+ OSArch: param.OSArch,
+ CallbackURL: param.CallbackURL,
+ MetadataURL: param.MetadataURL,
+ GitHubRunnerGroup: param.GitHubRunnerGroup,
+ JitConfiguration: secret,
+ AgentID: param.AgentID,
+ }
+ q := s.conn.Create(&newInstance)
+ if q.Error != nil {
+ return params.Instance{}, fmt.Errorf("error creating instance: %w", q.Error)
+ }
+
+ return s.sqlToParamsInstance(newInstance)
+}
+
+func (s *sqlDatabase) ListScaleSetInstances(_ context.Context, scalesetID uint) ([]params.Instance, error) {
+ var instances []Instance
+ query := s.conn.
+ Preload("ScaleSet").
+ Preload("Job").
+ Where("scale_set_fk_id = ?", scalesetID)
+
+ if q := query.Find(&instances); q.Error != nil {
+ return nil, fmt.Errorf("error fetching instances: %w", q.Error)
+ }
+
+ var err error
+ ret := make([]params.Instance, len(instances))
+ for idx, inst := range instances {
+ ret[idx], err = s.sqlToParamsInstance(inst)
+ if err != nil {
+ return nil, fmt.Errorf("error converting instance: %w", err)
+ }
+ }
+ return ret, nil
+}
diff --git a/database/sql/scalesets.go b/database/sql/scalesets.go
new file mode 100644
index 00000000..5877ad5c
--- /dev/null
+++ b/database/sql/scalesets.go
@@ -0,0 +1,458 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package sql
+
+import (
+ "context"
+ "errors"
+ "fmt"
+
+ "github.com/google/uuid"
+ "gorm.io/datatypes"
+ "gorm.io/gorm"
+
+ runnerErrors "github.com/cloudbase/garm-provider-common/errors"
+ "github.com/cloudbase/garm/database/common"
+ "github.com/cloudbase/garm/params"
+)
+
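+// ListAllScaleSets returns every scale set in the database, regardless of the
+// entity it belongs to. Extra specs and status messages are omitted from the
+// results.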
+func (s *sqlDatabase) ListAllScaleSets(_ context.Context) ([]params.ScaleSet, error) {
+ var scaleSets []ScaleSet
+
+ q := s.conn.Model(&ScaleSet{}).
+ Preload("Organization").
+ Preload("Organization.Endpoint").
+ Preload("Repository").
+ Preload("Repository.Endpoint").
+ Preload("Enterprise").
+ Preload("Enterprise.Endpoint").
+ Omit("extra_specs").
+ Omit("status_messages").
+ Find(&scaleSets)
+ if q.Error != nil {
+ return nil, fmt.Errorf("error fetching all scale sets: %w", q.Error)
+ }
+
+ ret := make([]params.ScaleSet, len(scaleSets))
+ var err error
+ for idx, val := range scaleSets {
+ ret[idx], err = s.sqlToCommonScaleSet(val)
+ if err != nil {
+ return nil, fmt.Errorf("error converting scale sets: %w", err)
+ }
+ }
+ return ret, nil
+}
+
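+// CreateEntityScaleSet creates a new scale set for the given entity
+// (repository, organization or enterprise). A create notification is emitted
+// on success.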
+func (s *sqlDatabase) CreateEntityScaleSet(_ context.Context, entity params.ForgeEntity, param params.CreateScaleSetParams) (scaleSet params.ScaleSet, err error) {
+ if err := param.Validate(); err != nil {
+ return params.ScaleSet{}, fmt.Errorf("failed to validate create params: %w", err)
+ }
+
+ defer func() {
+ if err == nil {
+ s.sendNotify(common.ScaleSetEntityType, common.CreateOperation, scaleSet)
+ }
+ }()
+
+ newScaleSet := ScaleSet{
+ Name: param.Name,
+ ScaleSetID: param.ScaleSetID,
+ DisableUpdate: param.DisableUpdate,
+ ProviderName: param.ProviderName,
+ RunnerPrefix: param.GetRunnerPrefix(),
+ MaxRunners: param.MaxRunners,
+ MinIdleRunners: param.MinIdleRunners,
+ RunnerBootstrapTimeout: param.RunnerBootstrapTimeout,
+ Image: param.Image,
+ Flavor: param.Flavor,
+ OSType: param.OSType,
+ OSArch: param.OSArch,
+ Enabled: param.Enabled,
+ GitHubRunnerGroup: param.GitHubRunnerGroup,
+ State: params.ScaleSetPendingCreate,
+ }
+
+ if len(param.ExtraSpecs) > 0 {
+ newScaleSet.ExtraSpecs = datatypes.JSON(param.ExtraSpecs)
+ }
+
+ entityID, err := uuid.Parse(entity.ID)
+ if err != nil {
+ return params.ScaleSet{}, fmt.Errorf("error parsing id: %w", runnerErrors.ErrBadRequest)
+ }
+
+ switch entity.EntityType {
+ case params.ForgeEntityTypeRepository:
+ newScaleSet.RepoID = &entityID
+ case params.ForgeEntityTypeOrganization:
+ newScaleSet.OrgID = &entityID
+ case params.ForgeEntityTypeEnterprise:
+ newScaleSet.EnterpriseID = &entityID
+ }
+ err = s.conn.Transaction(func(tx *gorm.DB) error {
+ if err := s.hasGithubEntity(tx, entity.EntityType, entity.ID); err != nil {
+ return fmt.Errorf("error checking entity existence: %w", err)
+ }
+
+ q := tx.Create(&newScaleSet)
+ if q.Error != nil {
+ return fmt.Errorf("error creating scale set: %w", q.Error)
+ }
+
+ return nil
+ })
+ if err != nil {
+ return params.ScaleSet{}, err
+ }
+
+ dbScaleSet, err := s.getScaleSetByID(s.conn, newScaleSet.ID, "Instances", "Enterprise", "Organization", "Repository")
+ if err != nil {
+ return params.ScaleSet{}, fmt.Errorf("error fetching scale set: %w", err)
+ }
+
+ return s.sqlToCommonScaleSet(dbScaleSet)
+}
+
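+// listEntityScaleSets returns the scale sets that belong to the given entity,
+// preloading any additional relations passed in.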
+func (s *sqlDatabase) listEntityScaleSets(tx *gorm.DB, entityType params.ForgeEntityType, entityID string, preload ...string) ([]ScaleSet, error) {
+ if _, err := uuid.Parse(entityID); err != nil {
+ return nil, fmt.Errorf("error parsing id: %w", runnerErrors.ErrBadRequest)
+ }
+
+ if err := s.hasGithubEntity(tx, entityType, entityID); err != nil {
+ return nil, fmt.Errorf("error checking entity existence: %w", err)
+ }
+
+ var preloadEntity string
+ var fieldName string
+ switch entityType {
+ case params.ForgeEntityTypeRepository:
+ fieldName = entityTypeRepoName
+ preloadEntity = repositoryFieldName
+ case params.ForgeEntityTypeOrganization:
+ fieldName = entityTypeOrgName
+ preloadEntity = organizationFieldName
+ case params.ForgeEntityTypeEnterprise:
+ fieldName = entityTypeEnterpriseName
+ preloadEntity = enterpriseFieldName
+ default:
+ return nil, fmt.Errorf("invalid entityType: %v", entityType)
+ }
+
+ q := tx
+ q = q.Preload(preloadEntity)
+ if len(preload) > 0 {
+ for _, item := range preload {
+ q = q.Preload(item)
+ }
+ }
+
+ var scaleSets []ScaleSet
+ condition := fmt.Sprintf("%s = ?", fieldName)
+ err := q.Model(&ScaleSet{}).
+ Where(condition, entityID).
+ Omit("extra_specs").
+ Omit("status_messages").
+ Find(&scaleSets).Error
+ if err != nil {
+ if errors.Is(err, gorm.ErrRecordNotFound) {
+ return []ScaleSet{}, nil
+ }
+ return nil, fmt.Errorf("error fetching scale sets: %w", err)
+ }
+
+ return scaleSets, nil
+}
+
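+// ListEntityScaleSets returns all scale sets defined for the given entity.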
+func (s *sqlDatabase) ListEntityScaleSets(_ context.Context, entity params.ForgeEntity) ([]params.ScaleSet, error) {
+ scaleSets, err := s.listEntityScaleSets(s.conn, entity.EntityType, entity.ID)
+ if err != nil {
+ return nil, fmt.Errorf("error fetching scale sets: %w", err)
+ }
+
+ ret := make([]params.ScaleSet, len(scaleSets))
+ for idx, set := range scaleSets {
+ ret[idx], err = s.sqlToCommonScaleSet(set)
+ if err != nil {
+ return nil, fmt.Errorf("error conbverting scale set: %w", err)
+ }
+ }
+
+ return ret, nil
+}
+
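+// UpdateEntityScaleSet updates the scale set belonging to the given entity.
+// When callback is non-nil, it is invoked inside the transaction with the old
+// and the updated scale set; an error returned by the callback rolls back the
+// update. An update notification is emitted on success.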
+func (s *sqlDatabase) UpdateEntityScaleSet(ctx context.Context, entity params.ForgeEntity, scaleSetID uint, param params.UpdateScaleSetParams, callback func(old, newSet params.ScaleSet) error) (updatedScaleSet params.ScaleSet, err error) {
+ defer func() {
+ if err == nil {
+ s.sendNotify(common.ScaleSetEntityType, common.UpdateOperation, updatedScaleSet)
+ }
+ }()
+ err = s.conn.Transaction(func(tx *gorm.DB) error {
+ scaleSet, err := s.getEntityScaleSet(tx, entity.EntityType, entity.ID, scaleSetID, "Instances")
+ if err != nil {
+ return fmt.Errorf("error fetching scale set: %w", err)
+ }
+
+ old, err := s.sqlToCommonScaleSet(scaleSet)
+ if err != nil {
+ return fmt.Errorf("error converting scale set: %w", err)
+ }
+
+ updatedScaleSet, err = s.updateScaleSet(tx, scaleSet, param)
+ if err != nil {
+ return fmt.Errorf("error updating scale set: %w", err)
+ }
+
+ if callback != nil {
+ if err := callback(old, updatedScaleSet); err != nil {
+ return fmt.Errorf("error executing update callback: %w", err)
+ }
+ }
+ return nil
+ })
+ if err != nil {
+ return params.ScaleSet{}, err
+ }
+
+ updatedScaleSet, err = s.GetScaleSetByID(ctx, scaleSetID)
+ if err != nil {
+ return params.ScaleSet{}, err
+ }
+ return updatedScaleSet, nil
+}
+
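+// getEntityScaleSet fetches a single scale set, scoped to the given entity.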
+func (s *sqlDatabase) getEntityScaleSet(tx *gorm.DB, entityType params.ForgeEntityType, entityID string, scaleSetID uint, preload ...string) (ScaleSet, error) {
+ if entityID == "" {
+ return ScaleSet{}, fmt.Errorf("error missing entity id: %w", runnerErrors.ErrBadRequest)
+ }
+
+ if scaleSetID == 0 {
+ return ScaleSet{}, fmt.Errorf("error missing scaleset id: %w", runnerErrors.ErrBadRequest)
+ }
+
+ var fieldName string
+ var entityField string
+ switch entityType {
+ case params.ForgeEntityTypeRepository:
+ fieldName = entityTypeRepoName
+ entityField = "Repository"
+ case params.ForgeEntityTypeOrganization:
+ fieldName = entityTypeOrgName
+ entityField = "Organization"
+ case params.ForgeEntityTypeEnterprise:
+ fieldName = entityTypeEnterpriseName
+ entityField = "Enterprise"
+ default:
+ return ScaleSet{}, fmt.Errorf("invalid entityType: %v", entityType)
+ }
+
+ q := tx
+ q = q.Preload(entityField)
+ if len(preload) > 0 {
+ for _, item := range preload {
+ q = q.Preload(item)
+ }
+ }
+
+ var scaleSet ScaleSet
+ condition := fmt.Sprintf("id = ? and %s = ?", fieldName)
+ err := q.Model(&ScaleSet{}).
+ Where(condition, scaleSetID, entityID).
+ First(&scaleSet).Error
+ if err != nil {
+ if errors.Is(err, gorm.ErrRecordNotFound) {
+ return ScaleSet{}, fmt.Errorf("error finding scale set: %w", runnerErrors.ErrNotFound)
+ }
+ return ScaleSet{}, fmt.Errorf("error fetching scale set: %w", err)
+ }
+
+ return scaleSet, nil
+}
+
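+// updateScaleSet applies the non-zero fields in param to the scale set and
+// saves the result.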
+func (s *sqlDatabase) updateScaleSet(tx *gorm.DB, scaleSet ScaleSet, param params.UpdateScaleSetParams) (params.ScaleSet, error) {
+ if param.Enabled != nil && scaleSet.Enabled != *param.Enabled {
+ scaleSet.Enabled = *param.Enabled
+ }
+
+ if param.State != nil && *param.State != scaleSet.State {
+ scaleSet.State = *param.State
+ }
+
+ if param.ExtendedState != nil && *param.ExtendedState != scaleSet.ExtendedState {
+ scaleSet.ExtendedState = *param.ExtendedState
+ }
+
+ if param.ScaleSetID != 0 {
+ scaleSet.ScaleSetID = param.ScaleSetID
+ }
+
+ if param.Name != "" {
+ scaleSet.Name = param.Name
+ }
+
+ if param.GitHubRunnerGroup != nil && *param.GitHubRunnerGroup != "" {
+ scaleSet.GitHubRunnerGroup = *param.GitHubRunnerGroup
+ }
+
+ if param.Flavor != "" {
+ scaleSet.Flavor = param.Flavor
+ }
+
+ if param.Image != "" {
+ scaleSet.Image = param.Image
+ }
+
+ if param.Prefix != "" {
+ scaleSet.RunnerPrefix = param.Prefix
+ }
+
+ if param.MaxRunners != nil {
+ scaleSet.MaxRunners = *param.MaxRunners
+ }
+
+ if param.MinIdleRunners != nil {
+ scaleSet.MinIdleRunners = *param.MinIdleRunners
+ }
+
+ if param.OSArch != "" {
+ scaleSet.OSArch = param.OSArch
+ }
+
+ if param.OSType != "" {
+ scaleSet.OSType = param.OSType
+ }
+
+ if param.ExtraSpecs != nil {
+ scaleSet.ExtraSpecs = datatypes.JSON(param.ExtraSpecs)
+ }
+
+ if param.RunnerBootstrapTimeout != nil && *param.RunnerBootstrapTimeout > 0 {
+ scaleSet.RunnerBootstrapTimeout = *param.RunnerBootstrapTimeout
+ }
+
+ if param.GitHubRunnerGroup != nil {
+ scaleSet.GitHubRunnerGroup = *param.GitHubRunnerGroup
+ }
+
+ if q := tx.Save(&scaleSet); q.Error != nil {
+ return params.ScaleSet{}, fmt.Errorf("error saving database entry: %w", q.Error)
+ }
+
+ return s.sqlToCommonScaleSet(scaleSet)
+}
+
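+// GetScaleSetByID returns the scale set with the given ID, with its instances
+// and parent entity preloaded.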
+func (s *sqlDatabase) GetScaleSetByID(_ context.Context, scaleSet uint) (params.ScaleSet, error) {
+ set, err := s.getScaleSetByID(
+ s.conn,
+ scaleSet,
+ "Instances",
+ "Enterprise",
+ "Enterprise.Endpoint",
+ "Organization",
+ "Organization.Endpoint",
+ "Repository",
+ "Repository.Endpoint",
+ )
+ if err != nil {
+ return params.ScaleSet{}, fmt.Errorf("error fetching scale set by ID: %w", err)
+ }
+ return s.sqlToCommonScaleSet(set)
+}
+
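+// DeleteScaleSetByID removes the scale set with the given ID. Scale sets that
+// still have runners cannot be deleted. A delete notification is emitted on
+// success.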
+func (s *sqlDatabase) DeleteScaleSetByID(_ context.Context, scaleSetID uint) (err error) {
+ var scaleSet params.ScaleSet
+ defer func() {
+ if err == nil && scaleSet.ID != 0 {
+ s.sendNotify(common.ScaleSetEntityType, common.DeleteOperation, scaleSet)
+ }
+ }()
+ err = s.conn.Transaction(func(tx *gorm.DB) error {
+ dbSet, err := s.getScaleSetByID(tx, scaleSetID, "Instances", "Enterprise", "Organization", "Repository")
+ if err != nil {
+ return fmt.Errorf("error fetching scale set: %w", err)
+ }
+
+ if len(dbSet.Instances) > 0 {
+ return runnerErrors.NewBadRequestError("cannot delete scaleset with runners")
+ }
+ scaleSet, err = s.sqlToCommonScaleSet(dbSet)
+ if err != nil {
+ return fmt.Errorf("error converting scale set: %w", err)
+ }
+
+ if q := tx.Unscoped().Delete(&dbSet); q.Error != nil {
+ return fmt.Errorf("error deleting scale set: %w", q.Error)
+ }
+ return nil
+ })
+ if err != nil {
+ return fmt.Errorf("error removing scale set: %w", err)
+ }
+ return nil
+}
+
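+// SetScaleSetLastMessageID records the ID of the last message processed for
+// this scale set.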
+func (s *sqlDatabase) SetScaleSetLastMessageID(_ context.Context, scaleSetID uint, lastMessageID int64) (err error) {
+ var scaleSet params.ScaleSet
+ defer func() {
+ if err == nil && scaleSet.ID != 0 {
+ s.sendNotify(common.ScaleSetEntityType, common.UpdateOperation, scaleSet)
+ }
+ }()
+ if err := s.conn.Transaction(func(tx *gorm.DB) error {
+ dbSet, err := s.getScaleSetByID(tx, scaleSetID, "Instances", "Enterprise", "Organization", "Repository")
+ if err != nil {
+ return fmt.Errorf("error fetching scale set: %w", err)
+ }
+ dbSet.LastMessageID = lastMessageID
+ if err := tx.Save(&dbSet).Error; err != nil {
+ return fmt.Errorf("error saving database entry: %w", err)
+ }
+ scaleSet, err = s.sqlToCommonScaleSet(dbSet)
+ if err != nil {
+ return fmt.Errorf("error converting scale set: %w", err)
+ }
+ return nil
+ }); err != nil {
+ return fmt.Errorf("error setting last message ID: %w", err)
+ }
+ return nil
+}
+
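+// SetScaleSetDesiredRunnerCount persists the desired runner count for the
+// scale set.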
+func (s *sqlDatabase) SetScaleSetDesiredRunnerCount(_ context.Context, scaleSetID uint, desiredRunnerCount int) (err error) {
+ var scaleSet params.ScaleSet
+ defer func() {
+ if err == nil && scaleSet.ID != 0 {
+ s.sendNotify(common.ScaleSetEntityType, common.UpdateOperation, scaleSet)
+ }
+ }()
+ if err := s.conn.Transaction(func(tx *gorm.DB) error {
+ dbSet, err := s.getScaleSetByID(tx, scaleSetID, "Instances", "Enterprise", "Organization", "Repository")
+ if err != nil {
+ return fmt.Errorf("error fetching scale set: %w", err)
+ }
+ dbSet.DesiredRunnerCount = desiredRunnerCount
+ if err := tx.Save(&dbSet).Error; err != nil {
+ return fmt.Errorf("error saving database entry: %w", err)
+ }
+ scaleSet, err = s.sqlToCommonScaleSet(dbSet)
+ if err != nil {
+ return fmt.Errorf("error converting scale set: %w", err)
+ }
+ return nil
+ }); err != nil {
+ return fmt.Errorf("error setting desired runner count: %w", err)
+ }
+ return nil
+}
diff --git a/database/sql/scalesets_test.go b/database/sql/scalesets_test.go
new file mode 100644
index 00000000..f1f9fbba
--- /dev/null
+++ b/database/sql/scalesets_test.go
@@ -0,0 +1,368 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package sql
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "testing"
+
+ "github.com/stretchr/testify/suite"
+
+ commonParams "github.com/cloudbase/garm-provider-common/params"
+ dbCommon "github.com/cloudbase/garm/database/common"
+ "github.com/cloudbase/garm/database/watcher"
+ garmTesting "github.com/cloudbase/garm/internal/testing"
+ "github.com/cloudbase/garm/params"
+)
+
+type ScaleSetsTestSuite struct {
+ suite.Suite
+ Store dbCommon.Store
+ adminCtx context.Context
+ creds params.ForgeCredentials
+
+ org params.Organization
+ repo params.Repository
+ enterprise params.Enterprise
+
+ orgEntity params.ForgeEntity
+ repoEntity params.ForgeEntity
+ enterpriseEntity params.ForgeEntity
+}
+
+func (s *ScaleSetsTestSuite) SetupTest() {
+ // create testing sqlite database
+ ctx := context.Background()
+ watcher.InitWatcher(ctx)
+
+ db, err := NewSQLDatabase(context.Background(), garmTesting.GetTestSqliteDBConfig(s.T()))
+ if err != nil {
+ s.FailNow(fmt.Sprintf("failed to create db connection: %s", err))
+ }
+ s.Store = db
+
+ adminCtx := garmTesting.ImpersonateAdminContext(ctx, db, s.T())
+ s.adminCtx = adminCtx
+
+ githubEndpoint := garmTesting.CreateDefaultGithubEndpoint(adminCtx, db, s.T())
+ s.creds = garmTesting.CreateTestGithubCredentials(adminCtx, "new-creds", db, s.T(), githubEndpoint)
+
+ // create an organization for testing purposes
+ s.org, err = s.Store.CreateOrganization(s.adminCtx, "test-org", s.creds, "test-webhookSecret", params.PoolBalancerTypeRoundRobin)
+ if err != nil {
+ s.FailNow(fmt.Sprintf("failed to create org: %s", err))
+ }
+
+ s.repo, err = s.Store.CreateRepository(s.adminCtx, "test-org", "test-repo", s.creds, "test-webhookSecret", params.PoolBalancerTypeRoundRobin)
+ if err != nil {
+ s.FailNow(fmt.Sprintf("failed to create repo: %s", err))
+ }
+
+ s.enterprise, err = s.Store.CreateEnterprise(s.adminCtx, "test-enterprise", s.creds, "test-webhookSecret", params.PoolBalancerTypeRoundRobin)
+ if err != nil {
+ s.FailNow(fmt.Sprintf("failed to create enterprise: %s", err))
+ }
+
+ s.orgEntity, err = s.org.GetEntity()
+ if err != nil {
+ s.FailNow(fmt.Sprintf("failed to get org entity: %s", err))
+ }
+
+ s.repoEntity, err = s.repo.GetEntity()
+ if err != nil {
+ s.FailNow(fmt.Sprintf("failed to get repo entity: %s", err))
+ }
+
+ s.enterpriseEntity, err = s.enterprise.GetEntity()
+ if err != nil {
+ s.FailNow(fmt.Sprintf("failed to get enterprise entity: %s", err))
+ }
+
+ s.T().Cleanup(func() {
+ err := s.Store.DeleteOrganization(s.adminCtx, s.org.ID)
+ if err != nil {
+ s.FailNow(fmt.Sprintf("failed to delete org: %s", err))
+ }
+ err = s.Store.DeleteRepository(s.adminCtx, s.repo.ID)
+ if err != nil {
+ s.FailNow(fmt.Sprintf("failed to delete repo: %s", err))
+ }
+ err = s.Store.DeleteEnterprise(s.adminCtx, s.enterprise.ID)
+ if err != nil {
+ s.FailNow(fmt.Sprintf("failed to delete enterprise: %s", err))
+ }
+ })
+}
+
+func (s *ScaleSetsTestSuite) TearDownTest() {
+ watcher.CloseWatcher()
+}
+
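+// callback asserts that the old and updated scale sets match the create and
+// update parameters used in TestScaleSetOperations.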
+func (s *ScaleSetsTestSuite) callback(old, newSet params.ScaleSet) error {
+ s.Require().Equal(old.Name, "test-scaleset")
+ s.Require().Equal(newSet.Name, "test-scaleset-updated")
+ s.Require().Equal(old.OSType, commonParams.Linux)
+ s.Require().Equal(newSet.OSType, commonParams.Windows)
+ s.Require().Equal(old.OSArch, commonParams.Amd64)
+ s.Require().Equal(newSet.OSArch, commonParams.Arm64)
+ s.Require().Equal(old.ExtraSpecs, json.RawMessage(`{"test": 1}`))
+ s.Require().Equal(newSet.ExtraSpecs, json.RawMessage(`{"test": 111}`))
+ s.Require().Equal(old.MaxRunners, uint(10))
+ s.Require().Equal(newSet.MaxRunners, uint(60))
+ s.Require().Equal(old.MinIdleRunners, uint(5))
+ s.Require().Equal(newSet.MinIdleRunners, uint(50))
+ s.Require().Equal(old.Image, "test-image")
+ s.Require().Equal(newSet.Image, "new-test-image")
+ s.Require().Equal(old.Flavor, "test-flavor")
+ s.Require().Equal(newSet.Flavor, "new-test-flavor")
+ s.Require().Equal(old.GitHubRunnerGroup, "test-group")
+ s.Require().Equal(newSet.GitHubRunnerGroup, "new-test-group")
+ s.Require().Equal(old.RunnerPrefix.Prefix, "garm")
+ s.Require().Equal(newSet.RunnerPrefix.Prefix, "test-prefix2")
+ s.Require().Equal(old.Enabled, false)
+ s.Require().Equal(newSet.Enabled, true)
+ return nil
+}
+
+func (s *ScaleSetsTestSuite) TestScaleSetOperations() {
+ // create a scale set for the organization
+ createScaleSetParams := params.CreateScaleSetParams{
+ Name: "test-scaleset",
+ ProviderName: "test-provider",
+ MaxRunners: 10,
+ MinIdleRunners: 5,
+ Image: "test-image",
+ Flavor: "test-flavor",
+ OSType: commonParams.Linux,
+ OSArch: commonParams.Amd64,
+ ExtraSpecs: json.RawMessage(`{"test": 1}`),
+ GitHubRunnerGroup: "test-group",
+ }
+
+ var orgScaleSet params.ScaleSet
+ var repoScaleSet params.ScaleSet
+ var enterpriseScaleSet params.ScaleSet
+ var err error
+
+ s.T().Run("create org scaleset", func(_ *testing.T) {
+ orgScaleSet, err = s.Store.CreateEntityScaleSet(s.adminCtx, s.orgEntity, createScaleSetParams)
+ s.Require().NoError(err)
+ s.Require().NotNil(orgScaleSet)
+ s.Require().Equal(orgScaleSet.Name, createScaleSetParams.Name)
+ s.T().Cleanup(func() {
+ err := s.Store.DeleteScaleSetByID(s.adminCtx, orgScaleSet.ID)
+ if err != nil {
+ s.FailNow(fmt.Sprintf("failed to delete scaleset: %s", err))
+ }
+ })
+ })
+
+ s.T().Run("create repo scaleset", func(_ *testing.T) {
+ repoScaleSet, err = s.Store.CreateEntityScaleSet(s.adminCtx, s.repoEntity, createScaleSetParams)
+ s.Require().NoError(err)
+ s.Require().NotNil(repoScaleSet)
+ s.Require().Equal(repoScaleSet.Name, createScaleSetParams.Name)
+ s.T().Cleanup(func() {
+ err := s.Store.DeleteScaleSetByID(s.adminCtx, repoScaleSet.ID)
+ if err != nil {
+ s.FailNow(fmt.Sprintf("failed to delete scaleset: %s", err))
+ }
+ })
+ })
+
+ s.T().Run("create enterprise scaleset", func(_ *testing.T) {
+ enterpriseScaleSet, err = s.Store.CreateEntityScaleSet(s.adminCtx, s.enterpriseEntity, createScaleSetParams)
+ s.Require().NoError(err)
+ s.Require().NotNil(enterpriseScaleSet)
+ s.Require().Equal(enterpriseScaleSet.Name, createScaleSetParams.Name)
+
+ s.T().Cleanup(func() {
+ err := s.Store.DeleteScaleSetByID(s.adminCtx, enterpriseScaleSet.ID)
+ if err != nil {
+ s.FailNow(fmt.Sprintf("failed to delete scaleset: %s", err))
+ }
+ })
+ })
+
+ s.T().Run("create list all scalesets", func(_ *testing.T) {
+ allScaleSets, err := s.Store.ListAllScaleSets(s.adminCtx)
+ s.Require().NoError(err)
+ s.Require().NotEmpty(allScaleSets)
+ s.Require().Len(allScaleSets, 3)
+ })
+
+ s.T().Run("list repo scalesets", func(_ *testing.T) {
+ repoScaleSets, err := s.Store.ListEntityScaleSets(s.adminCtx, s.repoEntity)
+ s.Require().NoError(err)
+ s.Require().NotEmpty(repoScaleSets)
+ s.Require().Len(repoScaleSets, 1)
+ })
+
+ s.T().Run("list org scalesets", func(_ *testing.T) {
+ orgScaleSets, err := s.Store.ListEntityScaleSets(s.adminCtx, s.orgEntity)
+ s.Require().NoError(err)
+ s.Require().NotEmpty(orgScaleSets)
+ s.Require().Len(orgScaleSets, 1)
+ })
+
+ s.T().Run("list enterprise scalesets", func(_ *testing.T) {
+ enterpriseScaleSets, err := s.Store.ListEntityScaleSets(s.adminCtx, s.enterpriseEntity)
+ s.Require().NoError(err)
+ s.Require().NotEmpty(enterpriseScaleSets)
+ s.Require().Len(enterpriseScaleSets, 1)
+ })
+
+ s.T().Run("get repo scaleset by ID", func(_ *testing.T) {
+ repoScaleSetByID, err := s.Store.GetScaleSetByID(s.adminCtx, repoScaleSet.ID)
+ s.Require().NoError(err)
+ s.Require().NotNil(repoScaleSetByID)
+ s.Require().Equal(repoScaleSetByID.ID, repoScaleSet.ID)
+ s.Require().Equal(repoScaleSetByID.Name, repoScaleSet.Name)
+ })
+
+ s.T().Run("get org scaleset by ID", func(_ *testing.T) {
+ orgScaleSetByID, err := s.Store.GetScaleSetByID(s.adminCtx, orgScaleSet.ID)
+ s.Require().NoError(err)
+ s.Require().NotNil(orgScaleSetByID)
+ s.Require().Equal(orgScaleSetByID.ID, orgScaleSet.ID)
+ s.Require().Equal(orgScaleSetByID.Name, orgScaleSet.Name)
+ })
+
+ s.T().Run("get enterprise scaleset by ID", func(_ *testing.T) {
+ enterpriseScaleSetByID, err := s.Store.GetScaleSetByID(s.adminCtx, enterpriseScaleSet.ID)
+ s.Require().NoError(err)
+ s.Require().NotNil(enterpriseScaleSetByID)
+ s.Require().Equal(enterpriseScaleSetByID.ID, enterpriseScaleSet.ID)
+ s.Require().Equal(enterpriseScaleSetByID.Name, enterpriseScaleSet.Name)
+ })
+
+ s.T().Run("get scaleset by ID not found", func(_ *testing.T) {
+ _, err = s.Store.GetScaleSetByID(s.adminCtx, 999)
+ s.Require().Error(err)
+ s.Require().Contains(err.Error(), "not found")
+ })
+
+ s.T().Run("Set scale set last message ID and desired count", func(_ *testing.T) {
+ err = s.Store.SetScaleSetLastMessageID(s.adminCtx, orgScaleSet.ID, 20)
+ s.Require().NoError(err)
+ err = s.Store.SetScaleSetDesiredRunnerCount(s.adminCtx, orgScaleSet.ID, 5)
+ s.Require().NoError(err)
+ orgScaleSetByID, err := s.Store.GetScaleSetByID(s.adminCtx, orgScaleSet.ID)
+ s.Require().NoError(err)
+ s.Require().NotNil(orgScaleSetByID)
+ s.Require().Equal(orgScaleSetByID.LastMessageID, int64(20))
+ s.Require().Equal(orgScaleSetByID.DesiredRunnerCount, 5)
+ })
+
+ updateParams := params.UpdateScaleSetParams{
+ Name: "test-scaleset-updated",
+ RunnerPrefix: params.RunnerPrefix{
+ Prefix: "test-prefix2",
+ },
+ OSType: commonParams.Windows,
+ OSArch: commonParams.Arm64,
+ ExtraSpecs: json.RawMessage(`{"test": 111}`),
+ Enabled: garmTesting.Ptr(true),
+ MaxRunners: garmTesting.Ptr(uint(60)),
+ MinIdleRunners: garmTesting.Ptr(uint(50)),
+ Image: "new-test-image",
+ Flavor: "new-test-flavor",
+ GitHubRunnerGroup: garmTesting.Ptr("new-test-group"),
+ }
+
+ s.T().Run("update repo scaleset", func(_ *testing.T) {
+ newRepoScaleSet, err := s.Store.UpdateEntityScaleSet(s.adminCtx, s.repoEntity, repoScaleSet.ID, updateParams, s.callback)
+ s.Require().NoError(err)
+ s.Require().NotNil(newRepoScaleSet)
+ s.Require().NoError(s.callback(repoScaleSet, newRepoScaleSet))
+ })
+
+ s.T().Run("update org scaleset", func(_ *testing.T) {
+ newOrgScaleSet, err := s.Store.UpdateEntityScaleSet(s.adminCtx, s.orgEntity, orgScaleSet.ID, updateParams, s.callback)
+ s.Require().NoError(err)
+ s.Require().NotNil(newOrgScaleSet)
+ s.Require().NoError(s.callback(orgScaleSet, newOrgScaleSet))
+ })
+
+ s.T().Run("update enterprise scaleset", func(_ *testing.T) {
+ newEnterpriseScaleSet, err := s.Store.UpdateEntityScaleSet(s.adminCtx, s.enterpriseEntity, enterpriseScaleSet.ID, updateParams, s.callback)
+ s.Require().NoError(err)
+ s.Require().NotNil(newEnterpriseScaleSet)
+ s.Require().NoError(s.callback(enterpriseScaleSet, newEnterpriseScaleSet))
+ })
+
+ s.T().Run("update scaleset not found", func(_ *testing.T) {
+ _, err = s.Store.UpdateEntityScaleSet(s.adminCtx, s.enterpriseEntity, 99999, updateParams, s.callback)
+ s.Require().Error(err)
+ s.Require().Contains(err.Error(), "not found")
+ })
+
+ s.T().Run("update scaleset with invalid entity", func(_ *testing.T) {
+ _, err = s.Store.UpdateEntityScaleSet(s.adminCtx, params.ForgeEntity{}, enterpriseScaleSet.ID, params.UpdateScaleSetParams{}, nil)
+ s.Require().Error(err)
+ s.Require().Contains(err.Error(), "missing entity id")
+ })
+
+ s.T().Run("Create repo scale set instance", func(_ *testing.T) {
+ param := params.CreateInstanceParams{
+ Name: "test-instance",
+ Status: commonParams.InstancePendingCreate,
+ RunnerStatus: params.RunnerPending,
+ OSType: commonParams.Linux,
+ OSArch: commonParams.Amd64,
+ CallbackURL: "http://localhost:8080/callback",
+ MetadataURL: "http://localhost:8080/metadata",
+ GitHubRunnerGroup: "test-group",
+ JitConfiguration: map[string]string{
+ "test": "test",
+ },
+ AgentID: 5,
+ }
+
+ instance, err := s.Store.CreateScaleSetInstance(s.adminCtx, repoScaleSet.ID, param)
+ s.Require().NoError(err)
+ s.Require().NotNil(instance)
+ s.Require().Equal(instance.Name, param.Name)
+ s.Require().Equal(instance.Status, param.Status)
+ s.Require().Equal(instance.RunnerStatus, param.RunnerStatus)
+ s.Require().Equal(instance.OSType, param.OSType)
+ s.Require().Equal(instance.OSArch, param.OSArch)
+ s.Require().Equal(instance.CallbackURL, param.CallbackURL)
+ s.Require().Equal(instance.MetadataURL, param.MetadataURL)
+ s.Require().Equal(instance.GitHubRunnerGroup, param.GitHubRunnerGroup)
+ s.Require().Equal(instance.JitConfiguration, param.JitConfiguration)
+ s.Require().Equal(instance.AgentID, param.AgentID)
+
+ s.T().Cleanup(func() {
+ err := s.Store.DeleteInstanceByName(s.adminCtx, instance.Name)
+ if err != nil {
+ s.FailNow(fmt.Sprintf("failed to delete scaleset instance: %s", err))
+ }
+ })
+ })
+
+ s.T().Run("List repo scale set instances", func(_ *testing.T) {
+ instances, err := s.Store.ListScaleSetInstances(s.adminCtx, repoScaleSet.ID)
+ s.Require().NoError(err)
+ s.Require().NotEmpty(instances)
+ s.Require().Len(instances, 1)
+ })
+}
+
+func TestScaleSetsTestSuite(t *testing.T) {
+ suite.Run(t, new(ScaleSetsTestSuite))
+}
diff --git a/database/sql/sql.go b/database/sql/sql.go
index ac3149e9..7d1fc96c 100644
--- a/database/sql/sql.go
+++ b/database/sql/sql.go
@@ -16,25 +16,37 @@ package sql
import (
"context"
+ "errors"
"fmt"
- "log"
+ "log/slog"
+ "net/url"
"strings"
- "github.com/pkg/errors"
"gorm.io/driver/mysql"
"gorm.io/driver/sqlite"
"gorm.io/gorm"
"gorm.io/gorm/logger"
+ runnerErrors "github.com/cloudbase/garm-provider-common/errors"
+ "github.com/cloudbase/garm/auth"
"github.com/cloudbase/garm/config"
"github.com/cloudbase/garm/database/common"
+ "github.com/cloudbase/garm/database/watcher"
+ "github.com/cloudbase/garm/params"
+ "github.com/cloudbase/garm/util/appdefaults"
+)
+
+const (
+ repositoryFieldName string = "Repository"
+ organizationFieldName string = "Organization"
+ enterpriseFieldName string = "Enterprise"
)
// newDBConn returns a new gorm db connection, given the config
func newDBConn(dbCfg config.Database) (conn *gorm.DB, err error) {
dbType, connURI, err := dbCfg.GormParams()
if err != nil {
- return nil, errors.Wrap(err, "getting DB URI string")
+ return nil, fmt.Errorf("error getting DB URI string: %w", err)
}
gormConfig := &gorm.Config{}
@@ -49,7 +61,7 @@ func newDBConn(dbCfg config.Database) (conn *gorm.DB, err error) {
conn, err = gorm.Open(sqlite.Open(connURI), gormConfig)
}
if err != nil {
- return nil, errors.Wrap(err, "connecting to database")
+ return nil, fmt.Errorf("error connecting to database: %w", err)
}
if dbCfg.Debug {
@@ -61,24 +73,30 @@ func newDBConn(dbCfg config.Database) (conn *gorm.DB, err error) {
func NewSQLDatabase(ctx context.Context, cfg config.Database) (common.Store, error) {
conn, err := newDBConn(cfg)
if err != nil {
- return nil, errors.Wrap(err, "creating DB connection")
+ return nil, fmt.Errorf("error creating DB connection: %w", err)
+ }
+ producer, err := watcher.RegisterProducer(ctx, "sql")
+ if err != nil {
+ return nil, fmt.Errorf("error registering producer: %w", err)
}
db := &sqlDatabase{
- conn: conn,
- ctx: ctx,
- cfg: cfg,
+ conn: conn,
+ ctx: ctx,
+ cfg: cfg,
+ producer: producer,
}
if err := db.migrateDB(); err != nil {
- return nil, errors.Wrap(err, "migrating database")
+ return nil, fmt.Errorf("error migrating database: %w", err)
}
return db, nil
}
type sqlDatabase struct {
- conn *gorm.DB
- ctx context.Context
- cfg config.Database
+ conn *gorm.DB
+ ctx context.Context
+ cfg config.Database
+ producer common.Producer
}
var renameTemplate = `
@@ -151,7 +169,7 @@ func (s *sqlDatabase) cascadeMigrationSQLite(model interface{}, name string, jus
if model != nil {
if err := s.conn.Migrator().AutoMigrate(model); err != nil {
if err := s.conn.Exec(fmt.Sprintf(restoreNameTemplate, name, name, name)).Error; err != nil {
- log.Printf("failed to restore table %s: %s", name, err)
+ slog.With(slog.Any("error", err)).Error("failed to restore table", "table", name)
}
return fmt.Errorf("failed to create table %s: %w", name, err)
}
@@ -190,52 +208,291 @@ func (s *sqlDatabase) cascadeMigration() error {
return nil
}
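+
+// ensureGithubEndpoint creates the default github.com endpoint if no endpoint
+// exists in the database yet.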
+func (s *sqlDatabase) ensureGithubEndpoint() error {
+ // Create the default Github endpoint.
+ createEndpointParams := params.CreateGithubEndpointParams{
+ Name: "github.com",
+ Description: "The github.com endpoint",
+ APIBaseURL: appdefaults.GithubDefaultBaseURL,
+ BaseURL: appdefaults.DefaultGithubURL,
+ UploadBaseURL: appdefaults.GithubDefaultUploadBaseURL,
+ }
+
+ var epCount int64
+ if err := s.conn.Model(&GithubEndpoint{}).Count(&epCount).Error; err != nil {
+ if !errors.Is(err, gorm.ErrRecordNotFound) {
+ return fmt.Errorf("error counting github endpoints: %w", err)
+ }
+ }
+
+ if epCount == 0 {
+ if _, err := s.CreateGithubEndpoint(context.Background(), createEndpointParams); err != nil {
+ if !errors.Is(err, runnerErrors.ErrDuplicateEntity) {
+ return fmt.Errorf("error creating default github endpoint: %w", err)
+ }
+ }
+ }
+
+ return nil
+}
+
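+// migrateCredentialsToDB imports the github credentials defined in the config
+// file into the database, creating endpoints as needed and re-linking existing
+// repositories, organizations and enterprises to the new credential records.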
+func (s *sqlDatabase) migrateCredentialsToDB() (err error) {
+ s.conn.Exec("PRAGMA foreign_keys = OFF")
+ defer s.conn.Exec("PRAGMA foreign_keys = ON")
+
+ adminUser, err := s.GetAdminUser(s.ctx)
+ if err != nil {
+ if errors.Is(err, runnerErrors.ErrNotFound) {
+ // Admin user doesn't exist. This is a new deploy. Nothing to migrate.
+ return nil
+ }
+ return fmt.Errorf("error getting admin user: %w", err)
+ }
+
+ // Impersonate the admin user. We're migrating from config credentials to
+ // database credentials. At this point, there is no other user than the admin
+ // user. GARM is not yet multi-user, so it's safe to assume we only have this
+ // one user.
+ adminCtx := context.Background()
+ adminCtx = auth.PopulateContext(adminCtx, adminUser, nil)
+
+ slog.Info("migrating credentials to DB")
+ slog.Info("creating github endpoints table")
+ if err := s.conn.AutoMigrate(&GithubEndpoint{}); err != nil {
+ return fmt.Errorf("error migrating github endpoints: %w", err)
+ }
+
+ defer func() {
+ if err != nil {
+ slog.With(slog.Any("error", err)).Error("rolling back github github endpoints table")
+ s.conn.Migrator().DropTable(&GithubEndpoint{})
+ }
+ }()
+
+ slog.Info("creating github credentials table")
+ if err := s.conn.AutoMigrate(&GithubCredentials{}); err != nil {
+ return fmt.Errorf("error migrating github credentials: %w", err)
+ }
+
+ defer func() {
+ if err != nil {
+ slog.With(slog.Any("error", err)).Error("rolling back github github credentials table")
+ s.conn.Migrator().DropTable(&GithubCredentials{})
+ }
+ }()
+
+ // Nothing to migrate.
+ if len(s.cfg.MigrateCredentials) == 0 {
+ return nil
+ }
+
+ slog.Info("importing credentials from config")
+ for _, cred := range s.cfg.MigrateCredentials {
+ slog.Info("importing credential", "name", cred.Name)
+ parsed, err := url.Parse(cred.BaseEndpoint())
+ if err != nil {
+ return fmt.Errorf("error parsing base URL: %w", err)
+ }
+
+ certBundle, err := cred.CACertBundle()
+ if err != nil {
+ return fmt.Errorf("error getting CA cert bundle: %w", err)
+ }
+ hostname := parsed.Hostname()
+ createParams := params.CreateGithubEndpointParams{
+ Name: hostname,
+ Description: fmt.Sprintf("Endpoint for %s", hostname),
+ APIBaseURL: cred.APIEndpoint(),
+ BaseURL: cred.BaseEndpoint(),
+ UploadBaseURL: cred.UploadEndpoint(),
+ CACertBundle: certBundle,
+ }
+
+ var endpoint params.ForgeEndpoint
+ endpoint, err = s.GetGithubEndpoint(adminCtx, hostname)
+ if err != nil {
+ if !errors.Is(err, runnerErrors.ErrNotFound) {
+ return fmt.Errorf("error getting github endpoint: %w", err)
+ }
+ endpoint, err = s.CreateGithubEndpoint(adminCtx, createParams)
+ if err != nil {
+ return fmt.Errorf("error creating default github endpoint: %w", err)
+ }
+ }
+
+ credParams := params.CreateGithubCredentialsParams{
+ Name: cred.Name,
+ Description: cred.Description,
+ Endpoint: endpoint.Name,
+ AuthType: params.ForgeAuthType(cred.GetAuthType()),
+ }
+ switch credParams.AuthType {
+ case params.ForgeAuthTypeApp:
+ keyBytes, err := cred.App.PrivateKeyBytes()
+ if err != nil {
+ return fmt.Errorf("error getting private key bytes: %w", err)
+ }
+ credParams.App = params.GithubApp{
+ AppID: cred.App.AppID,
+ InstallationID: cred.App.InstallationID,
+ PrivateKeyBytes: keyBytes,
+ }
+
+ if err := credParams.App.Validate(); err != nil {
+ return fmt.Errorf("error validating app credentials: %w", err)
+ }
+ case params.ForgeAuthTypePAT:
+ token := cred.PAT.OAuth2Token
+ if token == "" {
+ token = cred.OAuth2Token
+ }
+ if token == "" {
+ return errors.New("missing OAuth2 token")
+ }
+ credParams.PAT = params.GithubPAT{
+ OAuth2Token: token,
+ }
+ }
+
+ creds, err := s.CreateGithubCredentials(adminCtx, credParams)
+ if err != nil {
+ return fmt.Errorf("error creating github credentials: %w", err)
+ }
+
+ if err := s.conn.Exec("update repositories set credentials_id = ?,endpoint_name = ? where credentials_name = ?", creds.ID, creds.Endpoint.Name, creds.Name).Error; err != nil {
+ return fmt.Errorf("error updating repositories: %w", err)
+ }
+
+ if err := s.conn.Exec("update organizations set credentials_id = ?,endpoint_name = ? where credentials_name = ?", creds.ID, creds.Endpoint.Name, creds.Name).Error; err != nil {
+ return fmt.Errorf("error updating organizations: %w", err)
+ }
+
+ if err := s.conn.Exec("update enterprises set credentials_id = ?,endpoint_name = ? where credentials_name = ?", creds.ID, creds.Endpoint.Name, creds.Name).Error; err != nil {
+ return fmt.Errorf("error updating enterprises: %w", err)
+ }
+ }
+ return nil
+}
+
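+// migrateWorkflow removes workflow jobs that are no longer queued and drops
+// the obsolete runner_name column from the workflow_jobs table.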
+func (s *sqlDatabase) migrateWorkflow() error {
+ if s.conn.Migrator().HasTable(&WorkflowJob{}) {
+ if s.conn.Migrator().HasColumn(&WorkflowJob{}, "runner_name") {
+ // Remove jobs that are not in "queued" status. We really only care about queued jobs. Once they transition
+ // to something else, we don't really consume them anyway.
+ if err := s.conn.Exec("delete from workflow_jobs where status is not 'queued'").Error; err != nil {
+ return fmt.Errorf("error updating workflow_jobs: %w", err)
+ }
+ if err := s.conn.Migrator().DropColumn(&WorkflowJob{}, "runner_name"); err != nil {
+ return fmt.Errorf("error updating workflow_jobs: %w", err)
+ }
+ }
+ }
+ return nil
+}
+
func (s *sqlDatabase) migrateDB() error {
if s.conn.Migrator().HasIndex(&Organization{}, "idx_organizations_name") {
if err := s.conn.Migrator().DropIndex(&Organization{}, "idx_organizations_name"); err != nil {
- log.Printf("failed to drop index idx_organizations_name: %s", err)
+ slog.With(slog.Any("error", err)).Error("failed to drop index idx_organizations_name")
}
}
if s.conn.Migrator().HasIndex(&Repository{}, "idx_owner") {
if err := s.conn.Migrator().DropIndex(&Repository{}, "idx_owner"); err != nil {
- log.Printf("failed to drop index idx_owner: %s", err)
+ slog.With(slog.Any("error", err)).Error("failed to drop index idx_owner")
}
}
if err := s.cascadeMigration(); err != nil {
- return errors.Wrap(err, "running cascade migration")
+ return fmt.Errorf("error running cascade migration: %w", err)
}
if s.conn.Migrator().HasTable(&Pool{}) {
if err := s.conn.Exec("update pools set repo_id=NULL where repo_id='00000000-0000-0000-0000-000000000000'").Error; err != nil {
- return errors.Wrap(err, "updating pools")
+ return fmt.Errorf("error updating pools %w", err)
}
if err := s.conn.Exec("update pools set org_id=NULL where org_id='00000000-0000-0000-0000-000000000000'").Error; err != nil {
- return errors.Wrap(err, "updating pools")
+ return fmt.Errorf("error updating pools: %w", err)
}
if err := s.conn.Exec("update pools set enterprise_id=NULL where enterprise_id='00000000-0000-0000-0000-000000000000'").Error; err != nil {
- return errors.Wrap(err, "updating pools")
+ return fmt.Errorf("error updating pools: %w", err)
}
}
+ if err := s.migrateWorkflow(); err != nil {
+ return fmt.Errorf("error migrating workflows: %w", err)
+ }
+
+ if s.conn.Migrator().HasTable(&GithubEndpoint{}) {
+ if !s.conn.Migrator().HasColumn(&GithubEndpoint{}, "endpoint_type") {
+ if err := s.conn.Migrator().AutoMigrate(&GithubEndpoint{}); err != nil {
+ return fmt.Errorf("error migrating github endpoints: %w", err)
+ }
+ if err := s.conn.Exec("update github_endpoints set endpoint_type = 'github' where endpoint_type is null").Error; err != nil {
+ return fmt.Errorf("error updating github endpoints: %w", err)
+ }
+ }
+ }
+
+ var needsCredentialMigration bool
+ if !s.conn.Migrator().HasTable(&GithubCredentials{}) || !s.conn.Migrator().HasTable(&GithubEndpoint{}) {
+ needsCredentialMigration = true
+ }
+
+ var hasMinAgeField bool
+ if s.conn.Migrator().HasTable(&ControllerInfo{}) && s.conn.Migrator().HasColumn(&ControllerInfo{}, "minimum_job_age_backoff") {
+ hasMinAgeField = true
+ }
+
+ s.conn.Exec("PRAGMA foreign_keys = OFF")
if err := s.conn.AutoMigrate(
+ &User{},
+ &GithubEndpoint{},
+ &GithubCredentials{},
+ &GiteaCredentials{},
&Tag{},
&Pool{},
&Repository{},
&Organization{},
&Enterprise{},
+ &EnterpriseEvent{},
+ &OrganizationEvent{},
+ &RepositoryEvent{},
&Address{},
&InstanceStatusUpdate{},
&Instance{},
&ControllerInfo{},
- &User{},
&WorkflowJob{},
+ &ScaleSet{},
); err != nil {
- return errors.Wrap(err, "running auto migrate")
+ return fmt.Errorf("error running auto migrate: %w", err)
+ }
+ s.conn.Exec("PRAGMA foreign_keys = ON")
+
+ if !hasMinAgeField {
+ var controller ControllerInfo
+ if err := s.conn.First(&controller).Error; err != nil {
+ if !errors.Is(err, gorm.ErrRecordNotFound) {
+ return fmt.Errorf("error updating controller info: %w", err)
+ }
+ } else {
+ controller.MinimumJobAgeBackoff = 30
+ if err := s.conn.Save(&controller).Error; err != nil {
+ return fmt.Errorf("error updating controller info: %w", err)
+ }
+ }
}
+ if err := s.ensureGithubEndpoint(); err != nil {
+ return fmt.Errorf("error ensuring github endpoint: %w", err)
+ }
+
+ if needsCredentialMigration {
+ if err := s.migrateCredentialsToDB(); err != nil {
+ return fmt.Errorf("error migrating credentials: %w", err)
+ }
+ }
return nil
}
diff --git a/database/sql/users.go b/database/sql/users.go
index 78922b80..ca78c5e8 100644
--- a/database/sql/users.go
+++ b/database/sql/users.go
@@ -16,17 +16,17 @@ package sql
import (
"context"
+ "errors"
"fmt"
+ "gorm.io/gorm"
+
runnerErrors "github.com/cloudbase/garm-provider-common/errors"
"github.com/cloudbase/garm-provider-common/util"
"github.com/cloudbase/garm/params"
-
- "github.com/pkg/errors"
- "gorm.io/gorm"
)
-func (s *sqlDatabase) getUserByUsernameOrEmail(user string) (User, error) {
+func (s *sqlDatabase) getUserByUsernameOrEmail(tx *gorm.DB, user string) (User, error) {
field := "username"
if util.IsValidEmail(user) {
field = "email"
@@ -34,39 +34,32 @@ func (s *sqlDatabase) getUserByUsernameOrEmail(user string) (User, error) {
query := fmt.Sprintf("%s = ?", field)
var dbUser User
- q := s.conn.Model(&User{}).Where(query, user).First(&dbUser)
+ q := tx.Model(&User{}).Where(query, user).First(&dbUser)
if q.Error != nil {
if errors.Is(q.Error, gorm.ErrRecordNotFound) {
return User{}, runnerErrors.ErrNotFound
}
- return User{}, errors.Wrap(q.Error, "fetching user")
+ return User{}, fmt.Errorf("error fetching user: %w", q.Error)
}
return dbUser, nil
}
-func (s *sqlDatabase) getUserByID(userID string) (User, error) {
+func (s *sqlDatabase) getUserByID(tx *gorm.DB, userID string) (User, error) {
var dbUser User
- q := s.conn.Model(&User{}).Where("id = ?", userID).First(&dbUser)
+ q := tx.Model(&User{}).Where("id = ?", userID).First(&dbUser)
if q.Error != nil {
if errors.Is(q.Error, gorm.ErrRecordNotFound) {
return User{}, runnerErrors.ErrNotFound
}
- return User{}, errors.Wrap(q.Error, "fetching user")
+ return User{}, fmt.Errorf("error fetching user: %w", q.Error)
}
return dbUser, nil
}
-func (s *sqlDatabase) CreateUser(ctx context.Context, user params.NewUserParams) (params.User, error) {
- if user.Username == "" || user.Email == "" {
- return params.User{}, runnerErrors.NewBadRequestError("missing username or email")
+func (s *sqlDatabase) CreateUser(_ context.Context, user params.NewUserParams) (params.User, error) {
+ if user.Username == "" || user.Email == "" || user.Password == "" {
+ return params.User{}, runnerErrors.NewBadRequestError("missing username, password or email")
}
- if _, err := s.getUserByUsernameOrEmail(user.Username); err == nil || !errors.Is(err, runnerErrors.ErrNotFound) {
- return params.User{}, runnerErrors.NewConflictError("username already exists")
- }
- if _, err := s.getUserByUsernameOrEmail(user.Email); err == nil || !errors.Is(err, runnerErrors.ErrNotFound) {
- return params.User{}, runnerErrors.NewConflictError("email already exists")
- }
-
newUser := User{
Username: user.Username,
Password: user.Password,
@@ -75,57 +68,98 @@ func (s *sqlDatabase) CreateUser(ctx context.Context, user params.NewUserParams)
Email: user.Email,
IsAdmin: user.IsAdmin,
}
+ err := s.conn.Transaction(func(tx *gorm.DB) error {
+ if _, err := s.getUserByUsernameOrEmail(tx, user.Username); err == nil || !errors.Is(err, runnerErrors.ErrNotFound) {
+ return runnerErrors.NewConflictError("username already exists")
+ }
+ if _, err := s.getUserByUsernameOrEmail(tx, user.Email); err == nil || !errors.Is(err, runnerErrors.ErrNotFound) {
+ return runnerErrors.NewConflictError("email already exists")
+ }
- q := s.conn.Save(&newUser)
- if q.Error != nil {
- return params.User{}, errors.Wrap(q.Error, "creating user")
+ if s.hasAdmin(tx) && user.IsAdmin {
+ return runnerErrors.NewBadRequestError("admin user already exists")
+ }
+
+ q := tx.Save(&newUser)
+ if q.Error != nil {
+ return fmt.Errorf("error creating user: %w", q.Error)
+ }
+ return nil
+ })
+ if err != nil {
+ return params.User{}, fmt.Errorf("error creating user: %w", err)
}
return s.sqlToParamsUser(newUser), nil
}
-func (s *sqlDatabase) HasAdminUser(ctx context.Context) bool {
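+// hasAdmin reports whether an admin user already exists. It takes a *gorm.DB
+// so it can be called from inside a transaction.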
+func (s *sqlDatabase) hasAdmin(tx *gorm.DB) bool {
var user User
- q := s.conn.Model(&User{}).Where("is_admin = ?", true).First(&user)
+ q := tx.Model(&User{}).Where("is_admin = ?", true).First(&user)
return q.Error == nil
}
-func (s *sqlDatabase) GetUser(ctx context.Context, user string) (params.User, error) {
- dbUser, err := s.getUserByUsernameOrEmail(user)
+func (s *sqlDatabase) HasAdminUser(_ context.Context) bool {
+ return s.hasAdmin(s.conn)
+}
+
+func (s *sqlDatabase) GetUser(_ context.Context, user string) (params.User, error) {
+ dbUser, err := s.getUserByUsernameOrEmail(s.conn, user)
if err != nil {
- return params.User{}, errors.Wrap(err, "fetching user")
+ return params.User{}, fmt.Errorf("error fetching user: %w", err)
}
return s.sqlToParamsUser(dbUser), nil
}
-func (s *sqlDatabase) GetUserByID(ctx context.Context, userID string) (params.User, error) {
- dbUser, err := s.getUserByID(userID)
+func (s *sqlDatabase) GetUserByID(_ context.Context, userID string) (params.User, error) {
+ dbUser, err := s.getUserByID(s.conn, userID)
if err != nil {
- return params.User{}, errors.Wrap(err, "fetching user")
+ return params.User{}, fmt.Errorf("error fetching user: %w", err)
}
return s.sqlToParamsUser(dbUser), nil
}
-func (s *sqlDatabase) UpdateUser(ctx context.Context, user string, param params.UpdateUserParams) (params.User, error) {
- dbUser, err := s.getUserByUsernameOrEmail(user)
+func (s *sqlDatabase) UpdateUser(_ context.Context, user string, param params.UpdateUserParams) (params.User, error) {
+ var err error
+ var dbUser User
+ err = s.conn.Transaction(func(tx *gorm.DB) error {
+ dbUser, err = s.getUserByUsernameOrEmail(tx, user)
+ if err != nil {
+ return fmt.Errorf("error fetching user: %w", err)
+ }
+
+ if param.FullName != "" {
+ dbUser.FullName = param.FullName
+ }
+
+ if param.Enabled != nil {
+ dbUser.Enabled = *param.Enabled
+ }
+
+ if param.Password != "" {
+ dbUser.Password = param.Password
+ dbUser.Generation++
+ }
+
+ if q := tx.Save(&dbUser); q.Error != nil {
+ return fmt.Errorf("error saving user: %w", q.Error)
+ }
+ return nil
+ })
if err != nil {
- return params.User{}, errors.Wrap(err, "fetching user")
+ return params.User{}, fmt.Errorf("error updating user: %w", err)
}
-
- if param.FullName != "" {
- dbUser.FullName = param.FullName
- }
-
- if param.Enabled != nil {
- dbUser.Enabled = *param.Enabled
- }
-
- if param.Password != "" {
- dbUser.Password = param.Password
- }
-
- if q := s.conn.Save(&dbUser); q.Error != nil {
- return params.User{}, errors.Wrap(q.Error, "saving user")
- }
-
return s.sqlToParamsUser(dbUser), nil
}
+
+// GetAdminUser returns the system admin user. This is only for internal use.
+func (s *sqlDatabase) GetAdminUser(_ context.Context) (params.User, error) {
+ var user User
+ q := s.conn.Model(&User{}).Where("is_admin = ?", true).First(&user)
+ if q.Error != nil {
+ if errors.Is(q.Error, gorm.ErrRecordNotFound) {
+ return params.User{}, runnerErrors.ErrNotFound
+ }
+ return params.User{}, fmt.Errorf("error fetching admin user: %w", q.Error)
+ }
+ return s.sqlToParamsUser(user), nil
+}
diff --git a/database/sql/users_test.go b/database/sql/users_test.go
index 37105cb6..369abff3 100644
--- a/database/sql/users_test.go
+++ b/database/sql/users_test.go
@@ -21,14 +21,16 @@ import (
"regexp"
"testing"
- dbCommon "github.com/cloudbase/garm/database/common"
- garmTesting "github.com/cloudbase/garm/internal/testing"
- "github.com/cloudbase/garm/params"
"github.com/stretchr/testify/suite"
"gopkg.in/DATA-DOG/go-sqlmock.v1"
"gorm.io/driver/mysql"
"gorm.io/gorm"
"gorm.io/gorm/logger"
+
+ dbCommon "github.com/cloudbase/garm/database/common"
+ "github.com/cloudbase/garm/database/watcher"
+ garmTesting "github.com/cloudbase/garm/internal/testing"
+ "github.com/cloudbase/garm/params"
)
type UserTestFixtures struct {
@@ -52,7 +54,13 @@ func (s *UserTestSuite) assertSQLMockExpectations() {
}
}
+func (s *UserTestSuite) TearDownTest() {
+ watcher.CloseWatcher()
+}
+
func (s *UserTestSuite) SetupTest() {
+ ctx := context.Background()
+ watcher.InitWatcher(ctx)
// create testing sqlite database
db, err := NewSQLDatabase(context.Background(), garmTesting.GetTestSqliteDBConfig(s.T()))
if err != nil {
@@ -90,7 +98,7 @@ func (s *UserTestSuite) SetupTest() {
SkipInitializeWithVersion: true,
}
gormConfig := &gorm.Config{}
- if flag.Lookup("test.v").Value.String() == "false" {
+ if flag.Lookup("test.v").Value.String() == falseString {
gormConfig.Logger = logger.Default.LogMode(logger.Silent)
}
gormConn, err := gorm.Open(mysql.New(mysqlConfig), gormConfig)
@@ -144,7 +152,7 @@ func (s *UserTestSuite) TestCreateUserMissingUsernameEmail() {
_, err := s.Store.CreateUser(context.Background(), s.Fixtures.NewUserParams)
s.Require().NotNil(err)
- s.Require().Equal(("missing username or email"), err.Error())
+ s.Require().Equal(("missing username, password or email"), err.Error())
}
func (s *UserTestSuite) TestCreateUserUsernameAlreadyExist() {
@@ -153,7 +161,7 @@ func (s *UserTestSuite) TestCreateUserUsernameAlreadyExist() {
_, err := s.Store.CreateUser(context.Background(), s.Fixtures.NewUserParams)
s.Require().NotNil(err)
- s.Require().Equal(("username already exists"), err.Error())
+ s.Require().Equal(("error creating user: username already exists"), err.Error())
}
func (s *UserTestSuite) TestCreateUserEmailAlreadyExist() {
@@ -162,19 +170,19 @@ func (s *UserTestSuite) TestCreateUserEmailAlreadyExist() {
_, err := s.Store.CreateUser(context.Background(), s.Fixtures.NewUserParams)
s.Require().NotNil(err)
- s.Require().Equal(("email already exists"), err.Error())
+ s.Require().Equal(("error creating user: email already exists"), err.Error())
}
func (s *UserTestSuite) TestCreateUserDBCreateErr() {
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `users` WHERE username = ? AND `users`.`deleted_at` IS NULL ORDER BY `users`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.NewUserParams.Username).
- WillReturnRows(sqlmock.NewRows([]string{"id"}))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `users` WHERE email = ? AND `users`.`deleted_at` IS NULL ORDER BY `users`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.NewUserParams.Email).
- WillReturnRows(sqlmock.NewRows([]string{"id"}))
s.Fixtures.SQLMock.ExpectBegin()
+ s.Fixtures.SQLMock.
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `users` WHERE username = ? AND `users`.`deleted_at` IS NULL ORDER BY `users`.`id` LIMIT ?")).
+ WithArgs(s.Fixtures.NewUserParams.Username, 1).
+ WillReturnRows(sqlmock.NewRows([]string{"id"}))
+ s.Fixtures.SQLMock.
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `users` WHERE email = ? AND `users`.`deleted_at` IS NULL ORDER BY `users`.`id` LIMIT ?")).
+ WithArgs(s.Fixtures.NewUserParams.Email, 1).
+ WillReturnRows(sqlmock.NewRows([]string{"id"}))
s.Fixtures.SQLMock.
ExpectExec("INSERT INTO `users`").
WillReturnError(fmt.Errorf("creating user mock error"))
@@ -182,9 +190,9 @@ func (s *UserTestSuite) TestCreateUserDBCreateErr() {
_, err := s.StoreSQLMocked.CreateUser(context.Background(), s.Fixtures.NewUserParams)
- s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("creating user: creating user mock error", err.Error())
+ s.Require().Equal("error creating user: error creating user: creating user mock error", err.Error())
+ s.assertSQLMockExpectations()
}
func (s *UserTestSuite) TestHasAdminUserNoAdmin() {
@@ -222,7 +230,7 @@ func (s *UserTestSuite) TestGetUserNotFound() {
_, err := s.Store.GetUser(context.Background(), "dummy-user")
s.Require().NotNil(err)
- s.Require().Equal("fetching user: not found", err.Error())
+ s.Require().Equal("error fetching user: not found", err.Error())
}
func (s *UserTestSuite) TestGetUserByID() {
@@ -236,7 +244,7 @@ func (s *UserTestSuite) TestGetUserByIDNotFound() {
_, err := s.Store.GetUserByID(context.Background(), "dummy-user-id")
s.Require().NotNil(err)
- s.Require().Equal("fetching user: not found", err.Error())
+ s.Require().Equal("error fetching user: not found", err.Error())
}
func (s *UserTestSuite) TestUpdateUser() {
@@ -252,15 +260,15 @@ func (s *UserTestSuite) TestUpdateUserNotFound() {
_, err := s.Store.UpdateUser(context.Background(), "dummy-user", s.Fixtures.UpdateUserParams)
s.Require().NotNil(err)
- s.Require().Equal("fetching user: not found", err.Error())
+ s.Require().Equal("error updating user: error fetching user: not found", err.Error())
}
func (s *UserTestSuite) TestUpdateUserDBSaveErr() {
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `users` WHERE username = ? AND `users`.`deleted_at` IS NULL ORDER BY `users`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Users[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Users[0].ID))
s.Fixtures.SQLMock.ExpectBegin()
+ s.Fixtures.SQLMock.
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `users` WHERE username = ? AND `users`.`deleted_at` IS NULL ORDER BY `users`.`id` LIMIT ?")).
+ WithArgs(s.Fixtures.Users[0].ID, 1).
+ WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Users[0].ID))
s.Fixtures.SQLMock.
ExpectExec(("UPDATE `users` SET")).
WillReturnError(fmt.Errorf("saving user mock error"))
@@ -270,7 +278,7 @@ func (s *UserTestSuite) TestUpdateUserDBSaveErr() {
s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("saving user: saving user mock error", err.Error())
+ s.Require().Equal("error updating user: error saving user: saving user mock error", err.Error())
}
func TestUserTestSuite(t *testing.T) {
diff --git a/database/sql/util.go b/database/sql/util.go
index fa6706d2..9509aacf 100644
--- a/database/sql/util.go
+++ b/database/sql/util.go
@@ -15,27 +15,42 @@
package sql
import (
+ "context"
"encoding/json"
+ "errors"
"fmt"
- "github.com/cloudbase/garm-provider-common/util"
- "github.com/cloudbase/garm/params"
-
- "github.com/pkg/errors"
+ "github.com/google/uuid"
"gorm.io/datatypes"
"gorm.io/gorm"
+
+ runnerErrors "github.com/cloudbase/garm-provider-common/errors"
commonParams "github.com/cloudbase/garm-provider-common/params"
+ "github.com/cloudbase/garm-provider-common/util"
+ "github.com/cloudbase/garm/auth"
+ dbCommon "github.com/cloudbase/garm/database/common"
+ "github.com/cloudbase/garm/params"
)
-func (s *sqlDatabase) sqlToParamsInstance(instance Instance) params.Instance {
+func (s *sqlDatabase) sqlToParamsInstance(instance Instance) (params.Instance, error) {
var id string
if instance.ProviderID != nil {
id = *instance.ProviderID
}
var labels []string
- _ = json.Unmarshal(instance.AditionalLabels, &labels)
+ if len(instance.AditionalLabels) > 0 {
+ if err := json.Unmarshal(instance.AditionalLabels, &labels); err != nil {
+ return params.Instance{}, fmt.Errorf("error unmarshalling labels: %w", err)
+ }
+ }
+
+ var jitConfig map[string]string
+ if len(instance.JitConfiguration) > 0 {
+ if err := s.unsealAndUnmarshal(instance.JitConfiguration, &jitConfig); err != nil {
+ return params.Instance{}, fmt.Errorf("error unmarshalling jit configuration: %w", err)
+ }
+ }
ret := params.Instance{
ID: instance.ID.String(),
ProviderID: id,
@@ -47,17 +62,44 @@ func (s *sqlDatabase) sqlToParamsInstance(instance Instance) params.Instance {
OSArch: instance.OSArch,
Status: instance.Status,
RunnerStatus: instance.RunnerStatus,
- PoolID: instance.PoolID.String(),
CallbackURL: instance.CallbackURL,
MetadataURL: instance.MetadataURL,
StatusMessages: []params.StatusMessage{},
CreateAttempt: instance.CreateAttempt,
+ CreatedAt: instance.CreatedAt,
UpdatedAt: instance.UpdatedAt,
TokenFetched: instance.TokenFetched,
+ JitConfiguration: jitConfig,
GitHubRunnerGroup: instance.GitHubRunnerGroup,
AditionalLabels: labels,
}
+ if instance.ScaleSetFkID != nil {
+ ret.ScaleSetID = *instance.ScaleSetFkID
+ ret.ProviderName = instance.ScaleSet.ProviderName
+ }
+
+ if instance.PoolID != nil {
+ ret.PoolID = instance.PoolID.String()
+ ret.ProviderName = instance.Pool.ProviderName
+ }
+
+ if ret.ScaleSetID == 0 && ret.PoolID == "" {
+ return params.Instance{}, errors.New("missing pool or scale set id")
+ }
+
+ if ret.ScaleSetID != 0 && ret.PoolID != "" {
+ return params.Instance{}, errors.New("both pool and scale set ids are set")
+ }
+
+ if instance.Job != nil {
+ paramJob, err := sqlWorkflowJobToParamsJob(*instance.Job)
+ if err != nil {
+ return params.Instance{}, fmt.Errorf("error converting job: %w", err)
+ }
+ ret.Job = &paramJob
+ }
+
if len(instance.ProviderFault) > 0 {
ret.ProviderFault = instance.ProviderFault
}
@@ -74,7 +116,7 @@ func (s *sqlDatabase) sqlToParamsInstance(instance Instance) params.Instance {
EventLevel: msg.EventLevel,
})
}
- return ret
+ return ret, nil
}
func (s *sqlDatabase) sqlAddressToParamsAddress(addr Address) commonParams.Address {
@@ -84,55 +126,143 @@ func (s *sqlDatabase) sqlAddressToParamsAddress(addr Address) commonParams.Addre
}
}
-func (s *sqlDatabase) sqlToCommonOrganization(org Organization) (params.Organization, error) {
+func (s *sqlDatabase) sqlToCommonOrganization(org Organization, detailed bool) (params.Organization, error) {
if len(org.WebhookSecret) == 0 {
return params.Organization{}, errors.New("missing secret")
}
- secret, err := util.Aes256DecodeString(org.WebhookSecret, s.cfg.Passphrase)
+ secret, err := util.Unseal(org.WebhookSecret, []byte(s.cfg.Passphrase))
if err != nil {
- return params.Organization{}, errors.Wrap(err, "decrypting secret")
+ return params.Organization{}, fmt.Errorf("error decrypting secret: %w", err)
}
+ endpoint, err := s.sqlToCommonGithubEndpoint(org.Endpoint)
+ if err != nil {
+ return params.Organization{}, fmt.Errorf("error converting endpoint: %w", err)
+ }
ret := params.Organization{
- ID: org.ID.String(),
- Name: org.Name,
- CredentialsName: org.CredentialsName,
- Pools: make([]params.Pool, len(org.Pools)),
- WebhookSecret: secret,
+ ID: org.ID.String(),
+ Name: org.Name,
+ CredentialsName: org.Credentials.Name,
+ Pools: make([]params.Pool, len(org.Pools)),
+ WebhookSecret: string(secret),
+ PoolBalancerType: org.PoolBalancerType,
+ Endpoint: endpoint,
+ CreatedAt: org.CreatedAt,
+ UpdatedAt: org.UpdatedAt,
+ }
+
+ var forgeCreds params.ForgeCredentials
+ if org.CredentialsID != nil {
+ ret.CredentialsID = *org.CredentialsID
+ forgeCreds, err = s.sqlToCommonForgeCredentials(org.Credentials)
+ }
+
+ if org.GiteaCredentialsID != nil {
+ ret.CredentialsID = *org.GiteaCredentialsID
+ forgeCreds, err = s.sqlGiteaToCommonForgeCredentials(org.GiteaCredentials)
+ }
+
+ if err != nil {
+ return params.Organization{}, fmt.Errorf("error converting credentials: %w", err)
+ }
+
+ if len(org.Events) > 0 {
+ ret.Events = make([]params.EntityEvent, len(org.Events))
+ for idx, event := range org.Events {
+ ret.Events[idx] = params.EntityEvent{
+ ID: event.ID,
+ Message: event.Message,
+ EventType: event.EventType,
+ EventLevel: event.EventLevel,
+ CreatedAt: event.CreatedAt,
+ }
+ }
+ }
+
+ if detailed {
+ ret.Credentials = forgeCreds
+ ret.CredentialsName = forgeCreds.Name
+ }
+
+ if ret.PoolBalancerType == "" {
+ ret.PoolBalancerType = params.PoolBalancerTypeRoundRobin
}
for idx, pool := range org.Pools {
- ret.Pools[idx] = s.sqlToCommonPool(pool)
+ ret.Pools[idx], err = s.sqlToCommonPool(pool)
+ if err != nil {
+ return params.Organization{}, fmt.Errorf("error converting pool: %w", err)
+ }
}
return ret, nil
}
-func (s *sqlDatabase) sqlToCommonEnterprise(enterprise Enterprise) (params.Enterprise, error) {
+func (s *sqlDatabase) sqlToCommonEnterprise(enterprise Enterprise, detailed bool) (params.Enterprise, error) {
if len(enterprise.WebhookSecret) == 0 {
return params.Enterprise{}, errors.New("missing secret")
}
- secret, err := util.Aes256DecodeString(enterprise.WebhookSecret, s.cfg.Passphrase)
+ secret, err := util.Unseal(enterprise.WebhookSecret, []byte(s.cfg.Passphrase))
if err != nil {
- return params.Enterprise{}, errors.Wrap(err, "decrypting secret")
+ return params.Enterprise{}, fmt.Errorf("error decrypting secret: %w", err)
}
+ endpoint, err := s.sqlToCommonGithubEndpoint(enterprise.Endpoint)
+ if err != nil {
+ return params.Enterprise{}, fmt.Errorf("error converting endpoint: %w", err)
+ }
ret := params.Enterprise{
- ID: enterprise.ID.String(),
- Name: enterprise.Name,
- CredentialsName: enterprise.CredentialsName,
- Pools: make([]params.Pool, len(enterprise.Pools)),
- WebhookSecret: secret,
+ ID: enterprise.ID.String(),
+ Name: enterprise.Name,
+ CredentialsName: enterprise.Credentials.Name,
+ Pools: make([]params.Pool, len(enterprise.Pools)),
+ WebhookSecret: string(secret),
+ PoolBalancerType: enterprise.PoolBalancerType,
+ CreatedAt: enterprise.CreatedAt,
+ UpdatedAt: enterprise.UpdatedAt,
+ Endpoint: endpoint,
+ }
+
+ if enterprise.CredentialsID != nil {
+ ret.CredentialsID = *enterprise.CredentialsID
+ }
+
+ if len(enterprise.Events) > 0 {
+ ret.Events = make([]params.EntityEvent, len(enterprise.Events))
+ for idx, event := range enterprise.Events {
+ ret.Events[idx] = params.EntityEvent{
+ ID: event.ID,
+ Message: event.Message,
+ EventType: event.EventType,
+ EventLevel: event.EventLevel,
+ CreatedAt: event.CreatedAt,
+ }
+ }
+ }
+
+ if detailed {
+ creds, err := s.sqlToCommonForgeCredentials(enterprise.Credentials)
+ if err != nil {
+ return params.Enterprise{}, fmt.Errorf("error converting credentials: %w", err)
+ }
+ ret.Credentials = creds
+ }
+
+ if ret.PoolBalancerType == "" {
+ ret.PoolBalancerType = params.PoolBalancerTypeRoundRobin
}
for idx, pool := range enterprise.Pools {
- ret.Pools[idx] = s.sqlToCommonPool(pool)
+ ret.Pools[idx], err = s.sqlToCommonPool(pool)
+ if err != nil {
+ return params.Enterprise{}, fmt.Errorf("error converting pool: %w", err)
+ }
}
return ret, nil
}
-func (s *sqlDatabase) sqlToCommonPool(pool Pool) params.Pool {
+func (s *sqlDatabase) sqlToCommonPool(pool Pool) (params.Pool, error) {
ret := params.Pool{
ID: pool.ID.String(),
ProviderName: pool.ProviderName,
@@ -151,34 +281,117 @@ func (s *sqlDatabase) sqlToCommonPool(pool Pool) params.Pool {
RunnerBootstrapTimeout: pool.RunnerBootstrapTimeout,
ExtraSpecs: json.RawMessage(pool.ExtraSpecs),
GitHubRunnerGroup: pool.GitHubRunnerGroup,
+ Priority: pool.Priority,
+ CreatedAt: pool.CreatedAt,
+ UpdatedAt: pool.UpdatedAt,
}
+ var ep GithubEndpoint
if pool.RepoID != nil {
ret.RepoID = pool.RepoID.String()
if pool.Repository.Owner != "" && pool.Repository.Name != "" {
ret.RepoName = fmt.Sprintf("%s/%s", pool.Repository.Owner, pool.Repository.Name)
}
+ ep = pool.Repository.Endpoint
}
if pool.OrgID != nil && pool.Organization.Name != "" {
ret.OrgID = pool.OrgID.String()
ret.OrgName = pool.Organization.Name
+ ep = pool.Organization.Endpoint
}
if pool.EnterpriseID != nil && pool.Enterprise.Name != "" {
ret.EnterpriseID = pool.EnterpriseID.String()
ret.EnterpriseName = pool.Enterprise.Name
+ ep = pool.Enterprise.Endpoint
}
+ endpoint, err := s.sqlToCommonGithubEndpoint(ep)
+ if err != nil {
+ return params.Pool{}, fmt.Errorf("error converting endpoint: %w", err)
+ }
+ ret.Endpoint = endpoint
+
for idx, val := range pool.Tags {
ret.Tags[idx] = s.sqlToCommonTags(*val)
}
for idx, inst := range pool.Instances {
- ret.Instances[idx] = s.sqlToParamsInstance(inst)
+ ret.Instances[idx], err = s.sqlToParamsInstance(inst)
+ if err != nil {
+ return params.Pool{}, fmt.Errorf("error converting instance: %w", err)
+ }
}
- return ret
+ return ret, nil
+}
+
+func (s *sqlDatabase) sqlToCommonScaleSet(scaleSet ScaleSet) (params.ScaleSet, error) {
+ ret := params.ScaleSet{
+ ID: scaleSet.ID,
+ CreatedAt: scaleSet.CreatedAt,
+ UpdatedAt: scaleSet.UpdatedAt,
+ ScaleSetID: scaleSet.ScaleSetID,
+ Name: scaleSet.Name,
+ DisableUpdate: scaleSet.DisableUpdate,
+
+ ProviderName: scaleSet.ProviderName,
+ MaxRunners: scaleSet.MaxRunners,
+ MinIdleRunners: scaleSet.MinIdleRunners,
+ RunnerPrefix: params.RunnerPrefix{
+ Prefix: scaleSet.RunnerPrefix,
+ },
+ Image: scaleSet.Image,
+ Flavor: scaleSet.Flavor,
+ OSArch: scaleSet.OSArch,
+ OSType: scaleSet.OSType,
+ Enabled: scaleSet.Enabled,
+ Instances: make([]params.Instance, len(scaleSet.Instances)),
+ RunnerBootstrapTimeout: scaleSet.RunnerBootstrapTimeout,
+ ExtraSpecs: json.RawMessage(scaleSet.ExtraSpecs),
+ GitHubRunnerGroup: scaleSet.GitHubRunnerGroup,
+ State: scaleSet.State,
+ ExtendedState: scaleSet.ExtendedState,
+ LastMessageID: scaleSet.LastMessageID,
+ DesiredRunnerCount: scaleSet.DesiredRunnerCount,
+ }
+
+ var ep GithubEndpoint
+ if scaleSet.RepoID != nil {
+ ret.RepoID = scaleSet.RepoID.String()
+ if scaleSet.Repository.Owner != "" && scaleSet.Repository.Name != "" {
+ ret.RepoName = fmt.Sprintf("%s/%s", scaleSet.Repository.Owner, scaleSet.Repository.Name)
+ }
+ ep = scaleSet.Repository.Endpoint
+ }
+
+ if scaleSet.OrgID != nil {
+ ret.OrgID = scaleSet.OrgID.String()
+ ret.OrgName = scaleSet.Organization.Name
+ ep = scaleSet.Organization.Endpoint
+ }
+
+ if scaleSet.EnterpriseID != nil {
+ ret.EnterpriseID = scaleSet.EnterpriseID.String()
+ ret.EnterpriseName = scaleSet.Enterprise.Name
+ ep = scaleSet.Enterprise.Endpoint
+ }
+
+ endpoint, err := s.sqlToCommonGithubEndpoint(ep)
+ if err != nil {
+ return params.ScaleSet{}, fmt.Errorf("error converting endpoint: %w", err)
+ }
+ ret.Endpoint = endpoint
+
+ for idx, inst := range scaleSet.Instances {
+ ret.Instances[idx], err = s.sqlToParamsInstance(inst)
+ if err != nil {
+ return params.ScaleSet{}, fmt.Errorf("error converting instance: %w", err)
+ }
+ }
+
+ return ret, nil
}
func (s *sqlDatabase) sqlToCommonTags(tag Tag) params.Tag {
@@ -188,26 +401,77 @@ func (s *sqlDatabase) sqlToCommonTags(tag Tag) params.Tag {
}
}
-func (s *sqlDatabase) sqlToCommonRepository(repo Repository) (params.Repository, error) {
+func (s *sqlDatabase) sqlToCommonRepository(repo Repository, detailed bool) (params.Repository, error) {
if len(repo.WebhookSecret) == 0 {
return params.Repository{}, errors.New("missing secret")
}
- secret, err := util.Aes256DecodeString(repo.WebhookSecret, s.cfg.Passphrase)
+ secret, err := util.Unseal(repo.WebhookSecret, []byte(s.cfg.Passphrase))
if err != nil {
- return params.Repository{}, errors.Wrap(err, "decrypting secret")
+ return params.Repository{}, fmt.Errorf("error decrypting secret: %w", err)
+ }
+ endpoint, err := s.sqlToCommonGithubEndpoint(repo.Endpoint)
+ if err != nil {
+ return params.Repository{}, fmt.Errorf("error converting endpoint: %w", err)
+ }
+ ret := params.Repository{
+ ID: repo.ID.String(),
+ Name: repo.Name,
+ Owner: repo.Owner,
+ CredentialsName: repo.Credentials.Name,
+ Pools: make([]params.Pool, len(repo.Pools)),
+ WebhookSecret: string(secret),
+ PoolBalancerType: repo.PoolBalancerType,
+ CreatedAt: repo.CreatedAt,
+ UpdatedAt: repo.UpdatedAt,
+ Endpoint: endpoint,
}
- ret := params.Repository{
- ID: repo.ID.String(),
- Name: repo.Name,
- Owner: repo.Owner,
- CredentialsName: repo.CredentialsName,
- Pools: make([]params.Pool, len(repo.Pools)),
- WebhookSecret: secret,
+ if repo.CredentialsID != nil && repo.GiteaCredentialsID != nil {
+ return params.Repository{}, runnerErrors.NewConflictError("both gitea and github credentials are set for repo %s", repo.Name)
+ }
+
+ var forgeCreds params.ForgeCredentials
+ if repo.CredentialsID != nil {
+ ret.CredentialsID = *repo.CredentialsID
+ forgeCreds, err = s.sqlToCommonForgeCredentials(repo.Credentials)
+ }
+
+ if repo.GiteaCredentialsID != nil {
+ ret.CredentialsID = *repo.GiteaCredentialsID
+ forgeCreds, err = s.sqlGiteaToCommonForgeCredentials(repo.GiteaCredentials)
+ }
+
+ if err != nil {
+ return params.Repository{}, fmt.Errorf("error converting credentials: %w", err)
+ }
+
+ if len(repo.Events) > 0 {
+ ret.Events = make([]params.EntityEvent, len(repo.Events))
+ for idx, event := range repo.Events {
+ ret.Events[idx] = params.EntityEvent{
+ ID: event.ID,
+ Message: event.Message,
+ EventType: event.EventType,
+ EventLevel: event.EventLevel,
+ CreatedAt: event.CreatedAt,
+ }
+ }
+ }
+
+ if detailed {
+ ret.Credentials = forgeCreds
+ ret.CredentialsName = forgeCreds.Name
+ }
+
+ if ret.PoolBalancerType == "" {
+ ret.PoolBalancerType = params.PoolBalancerTypeRoundRobin
}
for idx, pool := range repo.Pools {
- ret.Pools[idx] = s.sqlToCommonPool(pool)
+ ret.Pools[idx], err = s.sqlToCommonPool(pool)
+ if err != nil {
+ return params.Repository{}, fmt.Errorf("error converting pool: %w", err)
+ }
}
return ret, nil
@@ -215,39 +479,39 @@ func (s *sqlDatabase) sqlToCommonRepository(repo Repository) (params.Repository,
func (s *sqlDatabase) sqlToParamsUser(user User) params.User {
return params.User{
- ID: user.ID.String(),
- CreatedAt: user.CreatedAt,
- UpdatedAt: user.UpdatedAt,
- Email: user.Email,
- Username: user.Username,
- FullName: user.FullName,
- Password: user.Password,
- Enabled: user.Enabled,
- IsAdmin: user.IsAdmin,
+ ID: user.ID.String(),
+ CreatedAt: user.CreatedAt,
+ UpdatedAt: user.UpdatedAt,
+ Email: user.Email,
+ Username: user.Username,
+ FullName: user.FullName,
+ Password: user.Password,
+ Enabled: user.Enabled,
+ IsAdmin: user.IsAdmin,
+ Generation: user.Generation,
}
}
-func (s *sqlDatabase) getOrCreateTag(tagName string) (Tag, error) {
+func (s *sqlDatabase) getOrCreateTag(tx *gorm.DB, tagName string) (Tag, error) {
var tag Tag
- q := s.conn.Where("name = ?", tagName).First(&tag)
+ q := tx.Where("name = ? COLLATE NOCASE", tagName).First(&tag)
if q.Error == nil {
return tag, nil
}
if !errors.Is(q.Error, gorm.ErrRecordNotFound) {
- return Tag{}, errors.Wrap(q.Error, "fetching tag from database")
+ return Tag{}, fmt.Errorf("error fetching tag from database: %w", q.Error)
}
newTag := Tag{
Name: tagName,
}
- q = s.conn.Create(&newTag)
- if q.Error != nil {
- return Tag{}, errors.Wrap(q.Error, "creating tag")
+ if err := tx.Create(&newTag).Error; err != nil {
+ return Tag{}, fmt.Errorf("error creating tag: %w", err)
}
return newTag, nil
}
-func (s *sqlDatabase) updatePool(pool Pool, param params.UpdatePoolParams) (params.Pool, error) {
+func (s *sqlDatabase) updatePool(tx *gorm.DB, pool Pool, param params.UpdatePoolParams) (params.Pool, error) {
if param.Enabled != nil && pool.Enabled != *param.Enabled {
pool.Enabled = *param.Enabled
}
@@ -292,24 +556,410 @@ func (s *sqlDatabase) updatePool(pool Pool, param params.UpdatePoolParams) (para
pool.GitHubRunnerGroup = *param.GitHubRunnerGroup
}
- if q := s.conn.Save(&pool); q.Error != nil {
- return params.Pool{}, errors.Wrap(q.Error, "saving database entry")
+ if param.Priority != nil {
+ pool.Priority = *param.Priority
+ }
+
+ if q := tx.Save(&pool); q.Error != nil {
+ return params.Pool{}, fmt.Errorf("error saving database entry: %w", q.Error)
}
tags := []Tag{}
- if param.Tags != nil && len(param.Tags) > 0 {
+ if len(param.Tags) > 0 {
for _, val := range param.Tags {
- t, err := s.getOrCreateTag(val)
+ t, err := s.getOrCreateTag(tx, val)
if err != nil {
- return params.Pool{}, errors.Wrap(err, "fetching tag")
+ return params.Pool{}, fmt.Errorf("error fetching tag: %w", err)
}
tags = append(tags, t)
}
- if err := s.conn.Model(&pool).Association("Tags").Replace(&tags); err != nil {
- return params.Pool{}, errors.Wrap(err, "replacing tags")
+ if err := tx.Model(&pool).Association("Tags").Replace(&tags); err != nil {
+ return params.Pool{}, fmt.Errorf("error replacing tags: %w", err)
}
}
- return s.sqlToCommonPool(pool), nil
+ return s.sqlToCommonPool(pool)
+}
+
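+// getPoolByID fetches a pool by its UUID, preloading any requested associations.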
+func (s *sqlDatabase) getPoolByID(tx *gorm.DB, poolID string, preload ...string) (Pool, error) {
+ u, err := uuid.Parse(poolID)
+ if err != nil {
+ return Pool{}, fmt.Errorf("error parsing id: %w", runnerErrors.ErrBadRequest)
+ }
+ var pool Pool
+ q := tx.Model(&Pool{})
+ if len(preload) > 0 {
+ for _, item := range preload {
+ q = q.Preload(item)
+ }
+ }
+
+ q = q.Where("id = ?", u).First(&pool)
+
+ if q.Error != nil {
+ if errors.Is(q.Error, gorm.ErrRecordNotFound) {
+ return Pool{}, runnerErrors.ErrNotFound
+ }
+ return Pool{}, fmt.Errorf("error fetching org from database: %w", q.Error)
+ }
+ return pool, nil
+}
+
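+// getScaleSetByID fetches a scale set by its numeric ID, preloading any requested associations.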
+func (s *sqlDatabase) getScaleSetByID(tx *gorm.DB, scaleSetID uint, preload ...string) (ScaleSet, error) {
+ var scaleSet ScaleSet
+ q := tx.Model(&ScaleSet{})
+ if len(preload) > 0 {
+ for _, item := range preload {
+ q = q.Preload(item)
+ }
+ }
+
+ q = q.Where("id = ?", scaleSetID).First(&scaleSet)
+
+ if q.Error != nil {
+ if errors.Is(q.Error, gorm.ErrRecordNotFound) {
+ return ScaleSet{}, runnerErrors.ErrNotFound
+ }
+ return ScaleSet{}, fmt.Errorf("error fetching scale set from database: %w", q.Error)
+ }
+ return scaleSet, nil
+}
+
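+// hasGithubEntity returns nil if an entity of the given type and ID exists in the database.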
+func (s *sqlDatabase) hasGithubEntity(tx *gorm.DB, entityType params.ForgeEntityType, entityID string) error {
+ u, err := uuid.Parse(entityID)
+ if err != nil {
+ return fmt.Errorf("error parsing id: %w", runnerErrors.ErrBadRequest)
+ }
+ var q *gorm.DB
+ switch entityType {
+ case params.ForgeEntityTypeRepository:
+ q = tx.Model(&Repository{}).Where("id = ?", u)
+ case params.ForgeEntityTypeOrganization:
+ q = tx.Model(&Organization{}).Where("id = ?", u)
+ case params.ForgeEntityTypeEnterprise:
+ q = tx.Model(&Enterprise{}).Where("id = ?", u)
+ default:
+ return fmt.Errorf("error invalid entity type: %w", runnerErrors.ErrBadRequest)
+ }
+
+ // First requires a non-nil destination; an empty map is enough for an existence check.
+ entity := map[string]interface{}{}
+ if err := q.First(&entity).Error; err != nil {
+ if errors.Is(err, gorm.ErrRecordNotFound) {
+ return fmt.Errorf("error entity not found: %w", runnerErrors.ErrNotFound)
+ }
+ return fmt.Errorf("error fetching entity from database: %w", err)
+ }
+ return nil
+}
+
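+// marshalAndSeal JSON-encodes data and encrypts the result with the configured passphrase.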
+func (s *sqlDatabase) marshalAndSeal(data interface{}) ([]byte, error) {
+ enc, err := json.Marshal(data)
+ if err != nil {
+ return nil, fmt.Errorf("error marshalling data: %w", err)
+ }
+ return util.Seal(enc, []byte(s.cfg.Passphrase))
+}
+
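+// unsealAndUnmarshal decrypts data with the configured passphrase and JSON-decodes it into target.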
+func (s *sqlDatabase) unsealAndUnmarshal(data []byte, target interface{}) error {
+ decrypted, err := util.Unseal(data, []byte(s.cfg.Passphrase))
+ if err != nil {
+ return fmt.Errorf("error decrypting data: %w", err)
+ }
+ if err := json.Unmarshal(decrypted, target); err != nil {
+ return fmt.Errorf("error unmarshalling data: %w", err)
+ }
+ return nil
+}
+
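+// sendNotify publishes a change payload to the registered producer. It is a
+// no-op when no producer is registered.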
+func (s *sqlDatabase) sendNotify(entityType dbCommon.DatabaseEntityType, op dbCommon.OperationType, payload interface{}) error {
+ if s.producer == nil {
+ // no producer was registered. Not sending notifications.
+ return nil
+ }
+ if payload == nil {
+ return errors.New("missing payload")
+ }
+ message := dbCommon.ChangePayload{
+ Operation: op,
+ Payload: payload,
+ EntityType: entityType,
+ }
+ return s.producer.Notify(message)
+}
+
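+// GetForgeEntity fetches the repository, organization or enterprise identified
+// by entityID and returns it as a generic params.ForgeEntity.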
+func (s *sqlDatabase) GetForgeEntity(_ context.Context, entityType params.ForgeEntityType, entityID string) (params.ForgeEntity, error) {
+ var ghEntity params.EntityGetter
+ var err error
+ switch entityType {
+ case params.ForgeEntityTypeEnterprise:
+ ghEntity, err = s.GetEnterpriseByID(s.ctx, entityID)
+ case params.ForgeEntityTypeOrganization:
+ ghEntity, err = s.GetOrganizationByID(s.ctx, entityID)
+ case params.ForgeEntityTypeRepository:
+ ghEntity, err = s.GetRepositoryByID(s.ctx, entityID)
+ default:
+ return params.ForgeEntity{}, fmt.Errorf("error invalid entity type: %w", runnerErrors.ErrBadRequest)
+ }
+ if err != nil {
+ return params.ForgeEntity{}, fmt.Errorf("error failed to get entity from db: %w", err)
+ }
+
+ entity, err := ghEntity.GetEntity()
+ if err != nil {
+ return params.ForgeEntity{}, fmt.Errorf("error failed to get entity: %w", err)
+ }
+ return entity, nil
+}
+
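+// addRepositoryEvent appends an event to a repository and prunes older
+// entries, keeping at most maxEvents events when maxEvents is greater than 0.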
+func (s *sqlDatabase) addRepositoryEvent(ctx context.Context, repoID string, event params.EventType, eventLevel params.EventLevel, statusMessage string, maxEvents int) error {
+ repo, err := s.getRepoByID(ctx, s.conn, repoID)
+ if err != nil {
+ return fmt.Errorf("error updating instance: %w", err)
+ }
+
+ msg := RepositoryEvent{
+ Message: statusMessage,
+ EventType: event,
+ EventLevel: eventLevel,
+ }
+
+ if err := s.conn.Model(&repo).Association("Events").Append(&msg); err != nil {
+ return fmt.Errorf("error adding status message: %w", err)
+ }
+
+ if maxEvents > 0 {
+ var latestEvents []RepositoryEvent
+ q := s.conn.Model(&RepositoryEvent{}).
+ Limit(maxEvents).Order("id desc").
+ Where("repo_id = ?", repo.ID).Find(&latestEvents)
+ if q.Error != nil {
+ return fmt.Errorf("error fetching latest events: %w", q.Error)
+ }
+ if len(latestEvents) == maxEvents {
+ lastInList := latestEvents[len(latestEvents)-1]
+ if err := s.conn.Where("repo_id = ? and id < ?", repo.ID, lastInList.ID).Unscoped().Delete(&RepositoryEvent{}).Error; err != nil {
+ return fmt.Errorf("error deleting old events: %w", err)
+ }
+ }
+ }
+ return nil
+}
+
+func (s *sqlDatabase) addOrgEvent(ctx context.Context, orgID string, event params.EventType, eventLevel params.EventLevel, statusMessage string, maxEvents int) error {
+ org, err := s.getOrgByID(ctx, s.conn, orgID)
+ if err != nil {
+ return fmt.Errorf("error updating instance: %w", err)
+ }
+
+ msg := OrganizationEvent{
+ Message: statusMessage,
+ EventType: event,
+ EventLevel: eventLevel,
+ }
+
+ if err := s.conn.Model(&org).Association("Events").Append(&msg); err != nil {
+ return fmt.Errorf("error adding status message: %w", err)
+ }
+
+ if maxEvents > 0 {
+ var latestEvents []OrganizationEvent
+ q := s.conn.Model(&OrganizationEvent{}).
+ Limit(maxEvents).Order("id desc").
+ Where("org_id = ?", org.ID).Find(&latestEvents)
+ if q.Error != nil {
+ return fmt.Errorf("error fetching latest events: %w", q.Error)
+ }
+ if len(latestEvents) == maxEvents {
+ lastInList := latestEvents[len(latestEvents)-1]
+ if err := s.conn.Where("org_id = ? and id < ?", org.ID, lastInList.ID).Unscoped().Delete(&OrganizationEvent{}).Error; err != nil {
+ return fmt.Errorf("error deleting old events: %w", err)
+ }
+ }
+ }
+ return nil
+}
+
+func (s *sqlDatabase) addEnterpriseEvent(ctx context.Context, entID string, event params.EventType, eventLevel params.EventLevel, statusMessage string, maxEvents int) error {
+ ent, err := s.getEnterpriseByID(ctx, s.conn, entID)
+ if err != nil {
+ return fmt.Errorf("error updating instance: %w", err)
+ }
+
+ msg := EnterpriseEvent{
+ Message: statusMessage,
+ EventType: event,
+ EventLevel: eventLevel,
+ }
+
+ if err := s.conn.Model(&ent).Association("Events").Append(&msg); err != nil {
+ return fmt.Errorf("error adding status message: %w", err)
+ }
+
+ if maxEvents > 0 {
+ var latestEvents []EnterpriseEvent
+ q := s.conn.Model(&EnterpriseEvent{}).
+ Limit(maxEvents).Order("id desc").
+ Where("enterprise_id = ?", ent.ID).Find(&latestEvents)
+ if q.Error != nil {
+ return fmt.Errorf("error fetching latest events: %w", q.Error)
+ }
+ if len(latestEvents) == maxEvents {
+ lastInList := latestEvents[len(latestEvents)-1]
+ if err := s.conn.Where("enterprise_id = ? and id < ?", ent.ID, lastInList.ID).Unscoped().Delete(&EnterpriseEvent{}).Error; err != nil {
+ return fmt.Errorf("error deleting old events: %w", err)
+ }
+ }
+ }
+
+ return nil
+}
+
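+// AddEntityEvent records an event for the given entity, keeping at most
+// maxEvents entries per entity.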
+func (s *sqlDatabase) AddEntityEvent(ctx context.Context, entity params.ForgeEntity, event params.EventType, eventLevel params.EventLevel, statusMessage string, maxEvents int) error {
+ if maxEvents == 0 {
+ return fmt.Errorf("max events cannot be 0: %w", runnerErrors.ErrBadRequest)
+ }
+
+ switch entity.EntityType {
+ case params.ForgeEntityTypeRepository:
+ return s.addRepositoryEvent(ctx, entity.ID, event, eventLevel, statusMessage, maxEvents)
+ case params.ForgeEntityTypeOrganization:
+ return s.addOrgEvent(ctx, entity.ID, event, eventLevel, statusMessage, maxEvents)
+ case params.ForgeEntityTypeEnterprise:
+ return s.addEnterpriseEvent(ctx, entity.ID, event, eventLevel, statusMessage, maxEvents)
+ default:
+ return fmt.Errorf("invalid entity type: %w", runnerErrors.ErrBadRequest)
+ }
+}
+
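+// sqlToCommonForgeCredentials converts the GithubCredentials DB model into
+// params.ForgeCredentials, decrypting the stored credentials payload.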
+func (s *sqlDatabase) sqlToCommonForgeCredentials(creds GithubCredentials) (params.ForgeCredentials, error) {
+ if len(creds.Payload) == 0 {
+ return params.ForgeCredentials{}, errors.New("empty credentials payload")
+ }
+ data, err := util.Unseal(creds.Payload, []byte(s.cfg.Passphrase))
+ if err != nil {
+ return params.ForgeCredentials{}, fmt.Errorf("error unsealing credentials: %w", err)
+ }
+
+ ep, err := s.sqlToCommonGithubEndpoint(creds.Endpoint)
+ if err != nil {
+ return params.ForgeCredentials{}, fmt.Errorf("error converting github endpoint: %w", err)
+ }
+
+ commonCreds := params.ForgeCredentials{
+ ID: creds.ID,
+ Name: creds.Name,
+ Description: creds.Description,
+ APIBaseURL: creds.Endpoint.APIBaseURL,
+ BaseURL: creds.Endpoint.BaseURL,
+ UploadBaseURL: creds.Endpoint.UploadBaseURL,
+ CABundle: creds.Endpoint.CACertBundle,
+ AuthType: creds.AuthType,
+ CreatedAt: creds.CreatedAt,
+ UpdatedAt: creds.UpdatedAt,
+ ForgeType: creds.Endpoint.EndpointType,
+ Endpoint: ep,
+ CredentialsPayload: data,
+ }
+
+ for _, repo := range creds.Repositories {
+ commonRepo, err := s.sqlToCommonRepository(repo, false)
+ if err != nil {
+ return params.ForgeCredentials{}, fmt.Errorf("error converting github repository: %w", err)
+ }
+ commonCreds.Repositories = append(commonCreds.Repositories, commonRepo)
+ }
+
+ for _, org := range creds.Organizations {
+ commonOrg, err := s.sqlToCommonOrganization(org, false)
+ if err != nil {
+ return params.ForgeCredentials{}, fmt.Errorf("error converting github organization: %w", err)
+ }
+ commonCreds.Organizations = append(commonCreds.Organizations, commonOrg)
+ }
+
+ for _, ent := range creds.Enterprises {
+ commonEnt, err := s.sqlToCommonEnterprise(ent, false)
+ if err != nil {
+ return params.ForgeCredentials{}, fmt.Errorf("error converting github enterprise %s: %w", ent.Name, err)
+ }
+ commonCreds.Enterprises = append(commonCreds.Enterprises, commonEnt)
+ }
+
+ return commonCreds, nil
+}
+
+func (s *sqlDatabase) sqlGiteaToCommonForgeCredentials(creds GiteaCredentials) (params.ForgeCredentials, error) {
+ if len(creds.Payload) == 0 {
+ return params.ForgeCredentials{}, errors.New("empty credentials payload")
+ }
+ data, err := util.Unseal(creds.Payload, []byte(s.cfg.Passphrase))
+ if err != nil {
+ return params.ForgeCredentials{}, fmt.Errorf("error unsealing credentials: %w", err)
+ }
+
+ ep, err := s.sqlToCommonGithubEndpoint(creds.Endpoint)
+ if err != nil {
+ return params.ForgeCredentials{}, fmt.Errorf("error converting github endpoint: %w", err)
+ }
+
+ commonCreds := params.ForgeCredentials{
+ ID: creds.ID,
+ Name: creds.Name,
+ Description: creds.Description,
+ APIBaseURL: creds.Endpoint.APIBaseURL,
+ BaseURL: creds.Endpoint.BaseURL,
+ CABundle: creds.Endpoint.CACertBundle,
+ AuthType: creds.AuthType,
+ CreatedAt: creds.CreatedAt,
+ UpdatedAt: creds.UpdatedAt,
+ ForgeType: creds.Endpoint.EndpointType,
+ Endpoint: ep,
+ CredentialsPayload: data,
+ }
+
+ for _, repo := range creds.Repositories {
+ commonRepo, err := s.sqlToCommonRepository(repo, false)
+ if err != nil {
+ return params.ForgeCredentials{}, fmt.Errorf("error converting github repository: %w", err)
+ }
+ commonCreds.Repositories = append(commonCreds.Repositories, commonRepo)
+ }
+
+ for _, org := range creds.Organizations {
+ commonOrg, err := s.sqlToCommonOrganization(org, false)
+ if err != nil {
+ return params.ForgeCredentials{}, fmt.Errorf("error converting github organization: %w", err)
+ }
+ commonCreds.Organizations = append(commonCreds.Organizations, commonOrg)
+ }
+
+ return commonCreds, nil
+}
+
+func (s *sqlDatabase) sqlToCommonGithubEndpoint(ep GithubEndpoint) (params.ForgeEndpoint, error) {
+ return params.ForgeEndpoint{
+ Name: ep.Name,
+ Description: ep.Description,
+ APIBaseURL: ep.APIBaseURL,
+ BaseURL: ep.BaseURL,
+ UploadBaseURL: ep.UploadBaseURL,
+ CACertBundle: ep.CACertBundle,
+ CreatedAt: ep.CreatedAt,
+ EndpointType: ep.EndpointType,
+ UpdatedAt: ep.UpdatedAt,
+ }, nil
+}
+
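+// getUIDFromContext extracts the authenticated user ID from the request
+// context and parses it as a UUID.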
+func getUIDFromContext(ctx context.Context) (uuid.UUID, error) {
+ userID := auth.UserID(ctx)
+ if userID == "" {
+ return uuid.Nil, fmt.Errorf("error getting UID from context: %w", runnerErrors.ErrUnauthorized)
+ }
+
+ asUUID, err := uuid.Parse(userID)
+ if err != nil {
+ return uuid.Nil, fmt.Errorf("error parsing UID from context: %w", runnerErrors.ErrUnauthorized)
+ }
+ return asUUID, nil
}
diff --git a/database/watcher/consumer.go b/database/watcher/consumer.go
new file mode 100644
index 00000000..ed0967e9
--- /dev/null
+++ b/database/watcher/consumer.go
@@ -0,0 +1,98 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package watcher
+
+import (
+ "context"
+ "log/slog"
+ "sync"
+ "time"
+
+ "github.com/cloudbase/garm/database/common"
+)
+
+type consumer struct {
+ messages chan common.ChangePayload
+ filters []common.PayloadFilterFunc
+ id string
+
+ mux sync.Mutex
+ closed bool
+ quit chan struct{}
+ ctx context.Context
+}
+
+func (w *consumer) SetFilters(filters ...common.PayloadFilterFunc) {
+ w.mux.Lock()
+ defer w.mux.Unlock()
+ w.filters = filters
+}
+
+func (w *consumer) Watch() <-chan common.ChangePayload {
+ return w.messages
+}
+
+func (w *consumer) Close() {
+ w.mux.Lock()
+ defer w.mux.Unlock()
+ if w.closed {
+ return
+ }
+ close(w.messages)
+ close(w.quit)
+ w.closed = true
+}
+
+func (w *consumer) IsClosed() bool {
+ w.mux.Lock()
+ defer w.mux.Unlock()
+ return w.closed
+}
+
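+// Send delivers payload to this consumer if it matches all configured filters.
+// Delivery is best effort: if the payload is not consumed within one second,
+// it is dropped.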
+func (w *consumer) Send(payload common.ChangePayload) {
+ w.mux.Lock()
+ defer w.mux.Unlock()
+
+ if w.closed {
+ return
+ }
+
+ if len(w.filters) > 0 {
+ shouldSend := true
+ for _, filter := range w.filters {
+ if !filter(payload) {
+ shouldSend = false
+ break
+ }
+ }
+
+ if !shouldSend {
+ return
+ }
+ }
+
+ timer := time.NewTimer(1 * time.Second)
+ defer timer.Stop()
+ slog.DebugContext(w.ctx, "sending payload")
+ select {
+ case <-w.quit:
+ slog.DebugContext(w.ctx, "consumer is closed")
+ case <-w.ctx.Done():
+ slog.DebugContext(w.ctx, "consumer is closed")
+ case <-timer.C:
+ slog.DebugContext(w.ctx, "timeout trying to send payload", "payload", payload)
+ case w.messages <- payload:
+ }
+}
diff --git a/database/watcher/filters.go b/database/watcher/filters.go
new file mode 100644
index 00000000..acf79ba8
--- /dev/null
+++ b/database/watcher/filters.go
@@ -0,0 +1,338 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package watcher
+
+import (
+ commonParams "github.com/cloudbase/garm-provider-common/params"
+ dbCommon "github.com/cloudbase/garm/database/common"
+ "github.com/cloudbase/garm/params"
+)
+
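+// IDGetter is implemented by change payloads that expose a string ID.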
+type IDGetter interface {
+ GetID() string
+}
+
+// WithAny returns a filter function that returns true if any of the provided filters return true.
+// This filter is useful if for example you want to watch for update operations on any of the supplied
+// entities.
+// Example:
+//
+// // Watch for any update operation on repositories or organizations
+// consumer.SetFilters(
+// watcher.WithOperationTypeFilter(common.UpdateOperation),
+// watcher.WithAny(
+// watcher.WithEntityTypeFilter(common.RepositoryEntityType),
+// watcher.WithEntityTypeFilter(common.OrganizationEntityType),
+// ))
+func WithAny(filters ...dbCommon.PayloadFilterFunc) dbCommon.PayloadFilterFunc {
+ return func(payload dbCommon.ChangePayload) bool {
+ for _, filter := range filters {
+ if filter(payload) {
+ return true
+ }
+ }
+ return false
+ }
+}
+
+// WithAll returns a filter function that returns true if all of the provided filters return true.
+func WithAll(filters ...dbCommon.PayloadFilterFunc) dbCommon.PayloadFilterFunc {
+ return func(payload dbCommon.ChangePayload) bool {
+ for _, filter := range filters {
+ if !filter(payload) {
+ return false
+ }
+ }
+ return true
+ }
+}
+
+// WithEntityTypeFilter returns a filter function that filters payloads by entity type.
+// The filter function returns true if the payload's entity type matches the provided entity type.
+func WithEntityTypeFilter(entityType dbCommon.DatabaseEntityType) dbCommon.PayloadFilterFunc {
+ return func(payload dbCommon.ChangePayload) bool {
+ return payload.EntityType == entityType
+ }
+}
+
+// WithOperationTypeFilter returns a filter function that filters payloads by operation type.
+func WithOperationTypeFilter(operationType dbCommon.OperationType) dbCommon.PayloadFilterFunc {
+ return func(payload dbCommon.ChangePayload) bool {
+ return payload.Operation == operationType
+ }
+}
+
+// WithEntityPoolFilter returns a filter function that returns true if the change
+// payload is a pool belonging to the supplied GitHub entity. This is useful when
+// an entity worker wants to watch for changes in pools that belong to it.
+func WithEntityPoolFilter(ghEntity params.ForgeEntity) dbCommon.PayloadFilterFunc {
+ return func(payload dbCommon.ChangePayload) bool {
+ switch payload.EntityType {
+ case dbCommon.PoolEntityType:
+ pool, ok := payload.Payload.(params.Pool)
+ if !ok {
+ return false
+ }
+ switch ghEntity.EntityType {
+ case params.ForgeEntityTypeRepository:
+ return pool.RepoID == ghEntity.ID
+ case params.ForgeEntityTypeOrganization:
+ return pool.OrgID == ghEntity.ID
+ case params.ForgeEntityTypeEnterprise:
+ return pool.EnterpriseID == ghEntity.ID
+ default:
+ return false
+ }
+ default:
+ return false
+ }
+ }
+}
+
+// WithEntityScaleSetFilter returns a filter function that returns true if the
+// change payload is a scale set belonging to the supplied GitHub entity.
+func WithEntityScaleSetFilter(ghEntity params.ForgeEntity) dbCommon.PayloadFilterFunc {
+ return func(payload dbCommon.ChangePayload) bool {
+ forgeType, err := ghEntity.GetForgeType()
+ if err != nil {
+ return false
+ }
+
+ // Gitea does not have scale sets.
+ if forgeType == params.GiteaEndpointType {
+ return false
+ }
+
+ switch payload.EntityType {
+ case dbCommon.ScaleSetEntityType:
+ scaleSet, ok := payload.Payload.(params.ScaleSet)
+ if !ok {
+ return false
+ }
+ switch ghEntity.EntityType {
+ case params.ForgeEntityTypeRepository:
+ return scaleSet.RepoID == ghEntity.ID
+ case params.ForgeEntityTypeOrganization:
+ return scaleSet.OrgID == ghEntity.ID
+ case params.ForgeEntityTypeEnterprise:
+ return scaleSet.EnterpriseID == ghEntity.ID
+ default:
+ return false
+ }
+ default:
+ return false
+ }
+ }
+}
+
+// WithEntityFilter returns a filter function that filters payloads by entity.
+// Change payloads that match the entity type and ID will return true.
+func WithEntityFilter(entity params.ForgeEntity) dbCommon.PayloadFilterFunc {
+ return func(payload dbCommon.ChangePayload) bool {
+ if params.ForgeEntityType(payload.EntityType) != entity.EntityType {
+ return false
+ }
+ var ent IDGetter
+ var ok bool
+ switch payload.EntityType {
+ case dbCommon.RepositoryEntityType:
+ if entity.EntityType != params.ForgeEntityTypeRepository {
+ return false
+ }
+ ent, ok = payload.Payload.(params.Repository)
+ case dbCommon.OrganizationEntityType:
+ if entity.EntityType != params.ForgeEntityTypeOrganization {
+ return false
+ }
+ ent, ok = payload.Payload.(params.Organization)
+ case dbCommon.EnterpriseEntityType:
+ if entity.EntityType != params.ForgeEntityTypeEnterprise {
+ return false
+ }
+ ent, ok = payload.Payload.(params.Enterprise)
+ default:
+ return false
+ }
+ if !ok {
+ return false
+ }
+ return ent.GetID() == entity.ID
+ }
+}
+
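+// WithEntityJobFilter returns a filter function that matches job payloads
+// relevant to the supplied GitHub entity.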
+func WithEntityJobFilter(ghEntity params.ForgeEntity) dbCommon.PayloadFilterFunc {
+ return func(payload dbCommon.ChangePayload) bool {
+ switch payload.EntityType {
+ case dbCommon.JobEntityType:
+ job, ok := payload.Payload.(params.Job)
+ if !ok {
+ return false
+ }
+
+ switch ghEntity.EntityType {
+ case params.ForgeEntityTypeRepository:
+ if job.RepoID != nil && job.RepoID.String() != ghEntity.ID {
+ return false
+ }
+ case params.ForgeEntityTypeOrganization:
+ if job.OrgID != nil && job.OrgID.String() != ghEntity.ID {
+ return false
+ }
+ case params.ForgeEntityTypeEnterprise:
+ if job.EnterpriseID != nil && job.EnterpriseID.String() != ghEntity.ID {
+ return false
+ }
+ default:
+ return false
+ }
+
+ return true
+ default:
+ return false
+ }
+ }
+}
+
+// WithForgeCredentialsFilter returns a filter function that filters payloads by Github or Gitea credentials.
+func WithForgeCredentialsFilter(creds params.ForgeCredentials) dbCommon.PayloadFilterFunc {
+ return func(payload dbCommon.ChangePayload) bool {
+ var forgeCreds params.ForgeCredentials
+ var ok bool
+ switch payload.EntityType {
+ case dbCommon.GithubCredentialsEntityType, dbCommon.GiteaCredentialsEntityType:
+ forgeCreds, ok = payload.Payload.(params.ForgeCredentials)
+ default:
+ return false
+ }
+ if !ok {
+ return false
+ }
+ // Gitea and Github creds have different models. The ID is uint, so we
+ // need to explicitly check their type, or risk a clash.
+ if forgeCreds.ForgeType != creds.ForgeType {
+ return false
+ }
+ return forgeCreds.GetID() == creds.GetID()
+ }
+}
+
+// WithUserIDFilter returns a filter function that filters payloads by user ID.
+func WithUserIDFilter(userID string) dbCommon.PayloadFilterFunc {
+ return func(payload dbCommon.ChangePayload) bool {
+ if payload.EntityType != dbCommon.UserEntityType {
+ return false
+ }
+ userPayload, ok := payload.Payload.(params.User)
+ if !ok {
+ return false
+ }
+ return userPayload.ID == userID
+ }
+}
+
+// WithNone returns a filter function that always returns false.
+func WithNone() dbCommon.PayloadFilterFunc {
+ return func(_ dbCommon.ChangePayload) bool {
+ return false
+ }
+}
+
+// WithEverything returns a filter function that always returns true.
+func WithEverything() dbCommon.PayloadFilterFunc {
+ return func(_ dbCommon.ChangePayload) bool {
+ return true
+ }
+}
+
+// WithExcludeEntityTypeFilter returns a filter function that filters payloads by excluding
+// the provided entity type.
+func WithExcludeEntityTypeFilter(entityType dbCommon.DatabaseEntityType) dbCommon.PayloadFilterFunc {
+ return func(payload dbCommon.ChangePayload) bool {
+ return payload.EntityType != entityType
+ }
+}
+
+// WithScaleSetFilter returns a filter function that matches a particular scale set.
+func WithScaleSetFilter(scaleset params.ScaleSet) dbCommon.PayloadFilterFunc {
+ return func(payload dbCommon.ChangePayload) bool {
+ if payload.EntityType != dbCommon.ScaleSetEntityType {
+ return false
+ }
+
+ ss, ok := payload.Payload.(params.ScaleSet)
+ if !ok {
+ return false
+ }
+
+ return ss.ID == scaleset.ID
+ }
+}
+
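+// WithScaleSetInstanceFilter returns a filter function that matches instances
+// belonging to the supplied scale set.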
+func WithScaleSetInstanceFilter(scaleset params.ScaleSet) dbCommon.PayloadFilterFunc {
+ return func(payload dbCommon.ChangePayload) bool {
+ if payload.EntityType != dbCommon.InstanceEntityType {
+ return false
+ }
+
+ instance, ok := payload.Payload.(params.Instance)
+ if !ok || instance.ScaleSetID == 0 {
+ return false
+ }
+
+ return instance.ScaleSetID == scaleset.ID
+ }
+}
+
+// EntityTypeCallbackFilter is a callback function that takes a ChangePayload and returns a boolean.
+// This callback type is used in the WithEntityTypeAndCallbackFilter (and potentially others) when
+// a filter needs to delegate logic to a specific callback function.
+type EntityTypeCallbackFilter func(payload dbCommon.ChangePayload) (bool, error)
+
+// WithEntityTypeAndCallbackFilter returns a filter function that filters payloads by entity type and the
+// result of a callback function.
+func WithEntityTypeAndCallbackFilter(entityType dbCommon.DatabaseEntityType, callback EntityTypeCallbackFilter) dbCommon.PayloadFilterFunc {
+ return func(payload dbCommon.ChangePayload) bool {
+ if payload.EntityType != entityType {
+ return false
+ }
+
+ ok, err := callback(payload)
+ if err != nil {
+ return false
+ }
+ return ok
+ }
+}
+
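+// WithInstanceStatusFilter returns a filter function that matches instances
+// whose status is one of the supplied statuses.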
+func WithInstanceStatusFilter(statuses ...commonParams.InstanceStatus) dbCommon.PayloadFilterFunc {
+ return func(payload dbCommon.ChangePayload) bool {
+ if payload.EntityType != dbCommon.InstanceEntityType {
+ return false
+ }
+ instance, ok := payload.Payload.(params.Instance)
+ if !ok {
+ return false
+ }
+ if len(statuses) == 0 {
+ return false
+ }
+ for _, status := range statuses {
+ if instance.Status == status {
+ return true
+ }
+ }
+ return false
+ }
+}
diff --git a/database/watcher/producer.go b/database/watcher/producer.go
new file mode 100644
index 00000000..927aada0
--- /dev/null
+++ b/database/watcher/producer.go
@@ -0,0 +1,72 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package watcher
+
+import (
+ "context"
+ "sync"
+ "time"
+
+ "github.com/cloudbase/garm/database/common"
+)
+
+type producer struct {
+ closed bool
+ mux sync.Mutex
+ id string
+
+ messages chan common.ChangePayload
+ quit chan struct{}
+ ctx context.Context
+}
+
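+// Notify publishes payload to the watcher. It returns ErrProducerClosed if the
+// producer or its context is closed, and ErrProducerTimeoutErr if the payload
+// cannot be enqueued within one second.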
+func (w *producer) Notify(payload common.ChangePayload) error {
+ w.mux.Lock()
+ defer w.mux.Unlock()
+
+ if w.closed {
+ return common.ErrProducerClosed
+ }
+
+ timer := time.NewTimer(1 * time.Second)
+ defer timer.Stop()
+ select {
+ case <-w.quit:
+ return common.ErrProducerClosed
+ case <-w.ctx.Done():
+ return common.ErrProducerClosed
+ case <-timer.C:
+ return common.ErrProducerTimeoutErr
+ case w.messages <- payload:
+ }
+ return nil
+}
+
+func (w *producer) Close() {
+ w.mux.Lock()
+ defer w.mux.Unlock()
+ if w.closed {
+ return
+ }
+ w.closed = true
+ close(w.messages)
+ close(w.quit)
+}
+
+func (w *producer) IsClosed() bool {
+ w.mux.Lock()
+ defer w.mux.Unlock()
+ return w.closed
+}
diff --git a/database/watcher/test_export.go b/database/watcher/test_export.go
new file mode 100644
index 00000000..eb3d38b6
--- /dev/null
+++ b/database/watcher/test_export.go
@@ -0,0 +1,30 @@
+//go:build testing
+// +build testing
+
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+package watcher
+
+import "github.com/cloudbase/garm/database/common"
+
+// SetWatcher sets the watcher to be used by the database package.
+// This function is intended for use in tests only.
+func SetWatcher(w common.Watcher) {
+ databaseWatcher = w
+}
+
+// GetWatcher returns the current watcher.
+func GetWatcher() common.Watcher {
+ return databaseWatcher
+}
diff --git a/database/watcher/util_test.go b/database/watcher/util_test.go
new file mode 100644
index 00000000..82b94491
--- /dev/null
+++ b/database/watcher/util_test.go
@@ -0,0 +1,16 @@
+package watcher_test
+
+import (
+ "time"
+
+ "github.com/cloudbase/garm/database/common"
+)
+
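+// waitForPayload returns the next payload received on ch, or nil if none
+// arrives within timeout.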
+func waitForPayload(ch <-chan common.ChangePayload, timeout time.Duration) *common.ChangePayload {
+ select {
+ case payload := <-ch:
+ return &payload
+ case <-time.After(timeout):
+ return nil
+ }
+}
diff --git a/database/watcher/watcher.go b/database/watcher/watcher.go
new file mode 100644
index 00000000..804dec70
--- /dev/null
+++ b/database/watcher/watcher.go
@@ -0,0 +1,204 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package watcher
+
+import (
+ "context"
+ "fmt"
+ "log/slog"
+ "sync"
+
+ "github.com/cloudbase/garm/database/common"
+ garmUtil "github.com/cloudbase/garm/util"
+)
+
+var databaseWatcher common.Watcher
+
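+// InitWatcher initializes the package level database watcher and starts its
+// event loop. Calling it again while a watcher already exists is a no-op.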
+func InitWatcher(ctx context.Context) {
+ if databaseWatcher != nil {
+ return
+ }
+ ctx = garmUtil.WithSlogContext(ctx, slog.Any("watcher", "database"))
+ w := &watcher{
+ producers: make(map[string]*producer),
+ consumers: make(map[string]*consumer),
+ quit: make(chan struct{}),
+ ctx: ctx,
+ }
+
+ go w.loop()
+ databaseWatcher = w
+}
+
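+// CloseWatcher closes the package level watcher, if one was initialized,
+// and resets it.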
+func CloseWatcher() error {
+ if databaseWatcher == nil {
+ return nil
+ }
+ databaseWatcher.Close()
+ databaseWatcher = nil
+ return nil
+}
+
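+// RegisterProducer registers a new producer with the package level watcher,
+// using id as a unique identifier. It errors if the watcher is not
+// initialized or if the id is already taken.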
+func RegisterProducer(ctx context.Context, id string) (common.Producer, error) {
+ if databaseWatcher == nil {
+ return nil, common.ErrWatcherNotInitialized
+ }
+ ctx = garmUtil.WithSlogContext(ctx, slog.Any("producer_id", id))
+ return databaseWatcher.RegisterProducer(ctx, id)
+}
+
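+// RegisterConsumer registers a new consumer with the package level watcher,
+// using id as a unique identifier. The optional filters determine which
+// change payloads the consumer receives. A typical registration
+// (illustrative sketch only; the consumer id is hypothetical) combines an
+// entity filter with an operation filter:
+//
+//	consumer, err := watcher.RegisterConsumer(
+//		ctx, "pool-manager",
+//		watcher.WithEntityTypeFilter(common.PoolEntityType),
+//		watcher.WithOperationTypeFilter(common.UpdateOperation),
+//	)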
+func RegisterConsumer(ctx context.Context, id string, filters ...common.PayloadFilterFunc) (common.Consumer, error) {
+ if databaseWatcher == nil {
+ return nil, common.ErrWatcherNotInitialized
+ }
+ ctx = garmUtil.WithSlogContext(ctx, slog.Any("consumer_id", id))
+ return databaseWatcher.RegisterConsumer(ctx, id, filters...)
+}
+
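+// watcher fans out change payloads from registered producers to all
+// registered consumers and owns the lifecycle of both.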
+type watcher struct {
+ producers map[string]*producer
+ consumers map[string]*consumer
+
+ mux sync.Mutex
+ closed bool
+ quit chan struct{}
+ ctx context.Context
+}
+
+func (w *watcher) RegisterProducer(ctx context.Context, id string) (common.Producer, error) {
+ w.mux.Lock()
+ defer w.mux.Unlock()
+
+ if _, ok := w.producers[id]; ok {
+ return nil, fmt.Errorf("producer_id %s: %w", id, common.ErrProducerAlreadyRegistered)
+ }
+ p := &producer{
+ id: id,
+ messages: make(chan common.ChangePayload, 1),
+ quit: make(chan struct{}),
+ ctx: ctx,
+ }
+ w.producers[id] = p
+ go w.serviceProducer(p)
+ return p, nil
+}
+
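+// serviceProducer forwards messages from a single producer to every
+// registered consumer until the producer, the watcher, or one of their
+// contexts signals shutdown, at which point the producer is deregistered.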
+func (w *watcher) serviceProducer(prod *producer) {
+ defer func() {
+ w.mux.Lock()
+ defer w.mux.Unlock()
+ prod.Close()
+ slog.InfoContext(w.ctx, "removing producer from watcher", "consumer_id", prod.id)
+ delete(w.producers, prod.id)
+ }()
+ for {
+ select {
+ case <-w.quit:
+ slog.InfoContext(w.ctx, "shutting down watcher")
+ return
+ case <-w.ctx.Done():
+ slog.InfoContext(w.ctx, "shutting down watcher")
+ return
+ case <-prod.quit:
+ slog.InfoContext(w.ctx, "closing producer")
+ return
+ case <-prod.ctx.Done():
+ slog.InfoContext(w.ctx, "closing producer")
+ return
+ case payload := <-prod.messages:
+ w.mux.Lock()
+ for _, c := range w.consumers {
+ go c.Send(payload)
+ }
+ w.mux.Unlock()
+ }
+ }
+}
+
+func (w *watcher) RegisterConsumer(ctx context.Context, id string, filters ...common.PayloadFilterFunc) (common.Consumer, error) {
+ w.mux.Lock()
+ defer w.mux.Unlock()
+ if _, ok := w.consumers[id]; ok {
+ return nil, common.ErrConsumerAlreadyRegistered
+ }
+ c := &consumer{
+ messages: make(chan common.ChangePayload, 1),
+ filters: filters,
+ quit: make(chan struct{}),
+ id: id,
+ ctx: ctx,
+ }
+ w.consumers[id] = c
+ go w.serviceConsumer(c)
+ return c, nil
+}
+
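+// serviceConsumer does not deliver messages itself; delivery happens in
+// serviceProducer via consumer.Send(). It merely blocks until the consumer
+// or the watcher shuts down, then closes and deregisters the consumer.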
+func (w *watcher) serviceConsumer(consumer *consumer) {
+ defer func() {
+ w.mux.Lock()
+ defer w.mux.Unlock()
+ consumer.Close()
+ slog.InfoContext(w.ctx, "removing consumer from watcher", "consumer_id", consumer.id)
+ delete(w.consumers, consumer.id)
+ }()
+ slog.InfoContext(w.ctx, "starting consumer", "consumer_id", consumer.id)
+ for {
+ select {
+ case <-consumer.quit:
+ return
+ case <-consumer.ctx.Done():
+ return
+ case <-w.quit:
+ return
+ case <-w.ctx.Done():
+ return
+ }
+ }
+}
+
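+// Close shuts down the watcher together with all registered producers and
+// consumers, and resets the package level watcher.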
+func (w *watcher) Close() {
+ w.mux.Lock()
+ defer w.mux.Unlock()
+ if w.closed {
+ return
+ }
+
+ close(w.quit)
+ w.closed = true
+
+ for _, p := range w.producers {
+ p.Close()
+ }
+
+ for _, c := range w.consumers {
+ c.Close()
+ }
+
+ databaseWatcher = nil
+}
+
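+// loop blocks until the watcher is closed or its context is canceled, then
+// tears everything down via Close.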
+func (w *watcher) loop() {
+ defer func() {
+ w.Close()
+ }()
+ for {
+ select {
+ case <-w.quit:
+ return
+ case <-w.ctx.Done():
+ return
+ }
+ }
+}
diff --git a/database/watcher/watcher_store_test.go b/database/watcher/watcher_store_test.go
new file mode 100644
index 00000000..97fc8a9d
--- /dev/null
+++ b/database/watcher/watcher_store_test.go
@@ -0,0 +1,1105 @@
+//go:build testing
+
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package watcher_test
+
+import (
+ "context"
+ "time"
+
+ "github.com/google/uuid"
+ "github.com/stretchr/testify/suite"
+
+ commonParams "github.com/cloudbase/garm-provider-common/params"
+ "github.com/cloudbase/garm/database/common"
+ "github.com/cloudbase/garm/database/watcher"
+ garmTesting "github.com/cloudbase/garm/internal/testing"
+ "github.com/cloudbase/garm/params"
+)
+
+type WatcherStoreTestSuite struct {
+ suite.Suite
+
+ store common.Store
+ ctx context.Context
+}
+
+func (s *WatcherStoreTestSuite) TestJobWatcher() {
+ consumer, err := watcher.RegisterConsumer(
+ s.ctx, "job-test",
+ watcher.WithEntityTypeFilter(common.JobEntityType),
+ watcher.WithAny(
+ watcher.WithOperationTypeFilter(common.CreateOperation),
+ watcher.WithOperationTypeFilter(common.UpdateOperation),
+ watcher.WithOperationTypeFilter(common.DeleteOperation)),
+ )
+ s.Require().NoError(err)
+ s.Require().NotNil(consumer)
+ s.T().Cleanup(func() { consumer.Close() })
+ consumeEvents(consumer)
+
+ jobParams := params.Job{
+ WorkflowJobID: 2,
+ RunID: 2,
+ Action: "test-action",
+ Conclusion: "started",
+ Status: "in_progress",
+ Name: "test-job",
+ }
+
+ job, err := s.store.CreateOrUpdateJob(s.ctx, jobParams)
+ s.Require().NoError(err)
+
+ select {
+ case event := <-consumer.Watch():
+ s.Require().Equal(common.ChangePayload{
+ EntityType: common.JobEntityType,
+ Operation: common.CreateOperation,
+ Payload: job,
+ }, event)
+ asJob, ok := event.Payload.(params.Job)
+ s.Require().True(ok)
+ s.Require().Equal(job.ID, int64(1))
+ s.Require().Equal(asJob.ID, int64(1))
+ case <-time.After(1 * time.Second):
+ s.T().Fatal("expected payload not received")
+ }
+
+ job.Conclusion = "success"
+ updatedJob, err := s.store.CreateOrUpdateJob(s.ctx, job)
+ s.Require().NoError(err)
+
+ select {
+ case event := <-consumer.Watch():
+ s.Require().Equal(common.ChangePayload{
+ EntityType: common.JobEntityType,
+ Operation: common.UpdateOperation,
+ Payload: updatedJob,
+ }, event)
+ case <-time.After(1 * time.Second):
+ s.T().Fatal("expected payload not received")
+ }
+
+ entityID, err := uuid.NewUUID()
+ s.Require().NoError(err)
+
+ err = s.store.LockJob(s.ctx, updatedJob.WorkflowJobID, entityID.String())
+ s.Require().NoError(err)
+
+ select {
+ case event := <-consumer.Watch():
+ s.Require().Equal(event.Operation, common.UpdateOperation)
+ s.Require().Equal(event.EntityType, common.JobEntityType)
+
+ job, ok := event.Payload.(params.Job)
+ s.Require().True(ok)
+ s.Require().Equal(job.ID, updatedJob.ID)
+ s.Require().Equal(job.LockedBy, entityID)
+ case <-time.After(1 * time.Second):
+ s.T().Fatal("expected payload not received")
+ }
+
+ err = s.store.UnlockJob(s.ctx, updatedJob.WorkflowJobID, entityID.String())
+ s.Require().NoError(err)
+
+ select {
+ case event := <-consumer.Watch():
+ s.Require().Equal(event.Operation, common.UpdateOperation)
+ s.Require().Equal(event.EntityType, common.JobEntityType)
+
+ job, ok := event.Payload.(params.Job)
+ s.Require().True(ok)
+ s.Require().Equal(job.ID, updatedJob.ID)
+ s.Require().Equal(job.LockedBy, uuid.Nil)
+ case <-time.After(1 * time.Second):
+ s.T().Fatal("expected payload not received")
+ }
+
+ jobParams.Status = "queued"
+ jobParams.LockedBy = entityID
+
+ updatedJob, err = s.store.CreateOrUpdateJob(s.ctx, jobParams)
+ s.Require().NoError(err)
+ // We don't care about the update event here.
+ consumeEvents(consumer)
+
+ err = s.store.BreakLockJobIsQueued(s.ctx, updatedJob.WorkflowJobID)
+ s.Require().NoError(err)
+
+ select {
+ case event := <-consumer.Watch():
+ s.Require().Equal(event.Operation, common.UpdateOperation)
+ s.Require().Equal(event.EntityType, common.JobEntityType)
+
+ job, ok := event.Payload.(params.Job)
+ s.Require().True(ok)
+ s.Require().Equal(job.ID, updatedJob.ID)
+ s.Require().Equal(uuid.Nil, job.LockedBy)
+ case <-time.After(1 * time.Second):
+ s.T().Fatal("expected payload not received")
+ }
+}
+
+func (s *WatcherStoreTestSuite) TestInstanceWatcher() {
+ consumer, err := watcher.RegisterConsumer(
+ s.ctx, "instance-test",
+ watcher.WithEntityTypeFilter(common.InstanceEntityType),
+ watcher.WithAny(
+ watcher.WithOperationTypeFilter(common.CreateOperation),
+ watcher.WithOperationTypeFilter(common.UpdateOperation),
+ watcher.WithOperationTypeFilter(common.DeleteOperation)),
+ )
+ s.Require().NoError(err)
+ s.Require().NotNil(consumer)
+ s.T().Cleanup(func() { consumer.Close() })
+ consumeEvents(consumer)
+
+ ep := garmTesting.CreateDefaultGithubEndpoint(s.ctx, s.store, s.T())
+ creds := garmTesting.CreateTestGithubCredentials(s.ctx, "test-creds", s.store, s.T(), ep)
+ s.T().Cleanup(func() { s.store.DeleteGithubCredentials(s.ctx, creds.ID) })
+
+ repo, err := s.store.CreateRepository(s.ctx, "test-owner", "test-repo", creds, "test-secret", params.PoolBalancerTypeRoundRobin)
+ s.Require().NoError(err)
+ s.Require().NotEmpty(repo.ID)
+ s.T().Cleanup(func() { s.store.DeleteRepository(s.ctx, repo.ID) })
+
+ entity, err := repo.GetEntity()
+ s.Require().NoError(err)
+
+ createPoolParams := params.CreatePoolParams{
+ ProviderName: "test-provider",
+ Image: "test-image",
+ Flavor: "test-flavor",
+ OSType: commonParams.Linux,
+ OSArch: commonParams.Amd64,
+ Tags: []string{"test-tag"},
+ }
+
+ pool, err := s.store.CreateEntityPool(s.ctx, entity, createPoolParams)
+ s.Require().NoError(err)
+ s.Require().NotEmpty(pool.ID)
+ s.T().Cleanup(func() { s.store.DeleteEntityPool(s.ctx, entity, pool.ID) })
+
+ createInstanceParams := params.CreateInstanceParams{
+ Name: "test-instance",
+ OSType: commonParams.Linux,
+ OSArch: commonParams.Amd64,
+ Status: commonParams.InstanceCreating,
+ }
+ instance, err := s.store.CreateInstance(s.ctx, pool.ID, createInstanceParams)
+ s.Require().NoError(err)
+ s.Require().NotEmpty(instance.ID)
+
+ select {
+ case event := <-consumer.Watch():
+ s.Require().Equal(common.ChangePayload{
+ EntityType: common.InstanceEntityType,
+ Operation: common.CreateOperation,
+ Payload: instance,
+ }, event)
+ asInstance, ok := event.Payload.(params.Instance)
+ s.Require().True(ok)
+ s.Require().Equal(instance.Name, "test-instance")
+ s.Require().Equal(asInstance.Name, "test-instance")
+ case <-time.After(1 * time.Second):
+ s.T().Fatal("expected payload not received")
+ }
+
+ updateParams := params.UpdateInstanceParams{
+ RunnerStatus: params.RunnerActive,
+ }
+
+ updatedInstance, err := s.store.UpdateInstance(s.ctx, instance.Name, updateParams)
+ s.Require().NoError(err)
+
+ select {
+ case event := <-consumer.Watch():
+ s.Require().Equal(common.ChangePayload{
+ EntityType: common.InstanceEntityType,
+ Operation: common.UpdateOperation,
+ Payload: updatedInstance,
+ }, event)
+ case <-time.After(1 * time.Second):
+ s.T().Fatal("expected payload not received")
+ }
+
+ err = s.store.DeleteInstance(s.ctx, pool.ID, updatedInstance.Name)
+ s.Require().NoError(err)
+
+ select {
+ case event := <-consumer.Watch():
+ s.Require().Equal(common.ChangePayload{
+ EntityType: common.InstanceEntityType,
+ Operation: common.DeleteOperation,
+ Payload: params.Instance{
+ ID: updatedInstance.ID,
+ Name: updatedInstance.Name,
+ ProviderID: updatedInstance.ProviderID,
+ AgentID: updatedInstance.AgentID,
+ PoolID: updatedInstance.PoolID,
+ },
+ }, event)
+ case <-time.After(1 * time.Second):
+ s.T().Fatal("expected payload not received")
+ }
+}
+
+func (s *WatcherStoreTestSuite) TestScaleSetInstanceWatcher() {
+ consumer, err := watcher.RegisterConsumer(
+ s.ctx, "instance-test",
+ watcher.WithEntityTypeFilter(common.InstanceEntityType),
+ watcher.WithAny(
+ watcher.WithOperationTypeFilter(common.CreateOperation),
+ watcher.WithOperationTypeFilter(common.UpdateOperation),
+ watcher.WithOperationTypeFilter(common.DeleteOperation)),
+ )
+ s.Require().NoError(err)
+ s.Require().NotNil(consumer)
+ s.T().Cleanup(func() { consumer.Close() })
+ consumeEvents(consumer)
+
+ ep := garmTesting.CreateDefaultGithubEndpoint(s.ctx, s.store, s.T())
+ creds := garmTesting.CreateTestGithubCredentials(s.ctx, "test-creds", s.store, s.T(), ep)
+ s.T().Cleanup(func() { s.store.DeleteGithubCredentials(s.ctx, creds.ID) })
+
+ repo, err := s.store.CreateRepository(s.ctx, "test-owner", "test-repo", creds, "test-secret", params.PoolBalancerTypeRoundRobin)
+ s.Require().NoError(err)
+ s.Require().NotEmpty(repo.ID)
+ s.T().Cleanup(func() { s.store.DeleteRepository(s.ctx, repo.ID) })
+
+ entity, err := repo.GetEntity()
+ s.Require().NoError(err)
+
+ createScaleSetParams := params.CreateScaleSetParams{
+ ProviderName: "test-provider",
+ Name: "test-scaleset",
+ Image: "test-image",
+ Flavor: "test-flavor",
+ MinIdleRunners: 0,
+ MaxRunners: 1,
+ OSType: commonParams.Linux,
+ OSArch: commonParams.Amd64,
+ }
+
+ scaleSet, err := s.store.CreateEntityScaleSet(s.ctx, entity, createScaleSetParams)
+ s.Require().NoError(err)
+ s.Require().NotEmpty(scaleSet.ID)
+ s.T().Cleanup(func() { s.store.DeleteScaleSetByID(s.ctx, scaleSet.ID) })
+
+ createInstanceParams := params.CreateInstanceParams{
+ Name: "test-instance",
+ OSType: commonParams.Linux,
+ OSArch: commonParams.Amd64,
+ Status: commonParams.InstanceCreating,
+ }
+ instance, err := s.store.CreateScaleSetInstance(s.ctx, scaleSet.ID, createInstanceParams)
+ s.Require().NoError(err)
+ s.Require().NotEmpty(instance.ID)
+
+ select {
+ case event := <-consumer.Watch():
+ s.Require().Equal(common.ChangePayload{
+ EntityType: common.InstanceEntityType,
+ Operation: common.CreateOperation,
+ Payload: instance,
+ }, event)
+ asInstance, ok := event.Payload.(params.Instance)
+ s.Require().True(ok)
+ s.Require().Equal(instance.Name, "test-instance")
+ s.Require().Equal(asInstance.Name, "test-instance")
+ case <-time.After(1 * time.Second):
+ s.T().Fatal("expected payload not received")
+ }
+
+ updateParams := params.UpdateInstanceParams{
+ RunnerStatus: params.RunnerActive,
+ }
+
+ updatedInstance, err := s.store.UpdateInstance(s.ctx, instance.Name, updateParams)
+ s.Require().NoError(err)
+
+ select {
+ case event := <-consumer.Watch():
+ s.Require().Equal(common.ChangePayload{
+ EntityType: common.InstanceEntityType,
+ Operation: common.UpdateOperation,
+ Payload: updatedInstance,
+ }, event)
+ case <-time.After(1 * time.Second):
+ s.T().Fatal("expected payload not received")
+ }
+
+ err = s.store.DeleteInstanceByName(s.ctx, updatedInstance.Name)
+ s.Require().NoError(err)
+
+ select {
+ case event := <-consumer.Watch():
+ s.Require().Equal(common.ChangePayload{
+ EntityType: common.InstanceEntityType,
+ Operation: common.DeleteOperation,
+ Payload: params.Instance{
+ ID: updatedInstance.ID,
+ Name: updatedInstance.Name,
+ ProviderID: updatedInstance.ProviderID,
+ AgentID: updatedInstance.AgentID,
+ ScaleSetID: updatedInstance.ScaleSetID,
+ },
+ }, event)
+ case <-time.After(1 * time.Second):
+ s.T().Fatal("expected payload not received")
+ }
+}
+
+func (s *WatcherStoreTestSuite) TestPoolWatcher() {
+ consumer, err := watcher.RegisterConsumer(
+ s.ctx, "pool-test",
+ watcher.WithEntityTypeFilter(common.PoolEntityType),
+ watcher.WithAny(
+ watcher.WithOperationTypeFilter(common.CreateOperation),
+ watcher.WithOperationTypeFilter(common.UpdateOperation),
+ watcher.WithOperationTypeFilter(common.DeleteOperation)),
+ )
+ s.Require().NoError(err)
+ s.Require().NotNil(consumer)
+ s.T().Cleanup(func() { consumer.Close() })
+ consumeEvents(consumer)
+
+ ep := garmTesting.CreateDefaultGithubEndpoint(s.ctx, s.store, s.T())
+ creds := garmTesting.CreateTestGithubCredentials(s.ctx, "test-creds", s.store, s.T(), ep)
+ s.T().Cleanup(func() {
+ if err := s.store.DeleteGithubCredentials(s.ctx, creds.ID); err != nil {
+ s.T().Logf("failed to delete Github credentials: %v", err)
+ }
+ })
+
+ repo, err := s.store.CreateRepository(s.ctx, "test-owner", "test-repo", creds, "test-secret", params.PoolBalancerTypeRoundRobin)
+ s.Require().NoError(err)
+ s.Require().NotEmpty(repo.ID)
+ s.T().Cleanup(func() { s.store.DeleteRepository(s.ctx, repo.ID) })
+
+ entity, err := repo.GetEntity()
+ s.Require().NoError(err)
+
+ createPoolParams := params.CreatePoolParams{
+ ProviderName: "test-provider",
+ Image: "test-image",
+ Flavor: "test-flavor",
+ OSType: commonParams.Linux,
+ OSArch: commonParams.Amd64,
+ Tags: []string{"test-tag"},
+ }
+ pool, err := s.store.CreateEntityPool(s.ctx, entity, createPoolParams)
+ s.Require().NoError(err)
+ s.Require().NotEmpty(pool.ID)
+
+ select {
+ case event := <-consumer.Watch():
+ s.Require().Equal(common.ChangePayload{
+ EntityType: common.PoolEntityType,
+ Operation: common.CreateOperation,
+ Payload: pool,
+ }, event)
+ asPool, ok := event.Payload.(params.Pool)
+ s.Require().True(ok)
+ s.Require().Equal(pool.Image, "test-image")
+ s.Require().Equal(asPool.Image, "test-image")
+ case <-time.After(1 * time.Second):
+ s.T().Fatal("expected payload not received")
+ }
+
+ updateParams := params.UpdatePoolParams{
+ Tags: []string{"updated-tag"},
+ }
+
+ updatedPool, err := s.store.UpdateEntityPool(s.ctx, entity, pool.ID, updateParams)
+ s.Require().NoError(err)
+
+ select {
+ case event := <-consumer.Watch():
+ s.Require().Equal(common.ChangePayload{
+ EntityType: common.PoolEntityType,
+ Operation: common.UpdateOperation,
+ Payload: updatedPool,
+ }, event)
+ case <-time.After(1 * time.Second):
+ s.T().Fatal("expected payload not received")
+ }
+
+ err = s.store.DeleteEntityPool(s.ctx, entity, pool.ID)
+ s.Require().NoError(err)
+
+ select {
+ case event := <-consumer.Watch():
+ s.Require().Equal(common.ChangePayload{
+ EntityType: common.PoolEntityType,
+ Operation: common.DeleteOperation,
+ Payload: params.Pool{ID: pool.ID},
+ }, event)
+ case <-time.After(1 * time.Second):
+ s.T().Fatal("expected payload not received")
+ }
+
+ // Also test DeletePoolByID
+ pool, err = s.store.CreateEntityPool(s.ctx, entity, createPoolParams)
+ s.Require().NoError(err)
+ s.Require().NotEmpty(pool.ID)
+
+ // Consume the create event
+ select {
+ case event := <-consumer.Watch():
+ s.Require().Equal(common.ChangePayload{
+ EntityType: common.PoolEntityType,
+ Operation: common.CreateOperation,
+ Payload: pool,
+ }, event)
+ case <-time.After(1 * time.Second):
+ s.T().Fatal("expected payload not received")
+ }
+
+ err = s.store.DeletePoolByID(s.ctx, pool.ID)
+ s.Require().NoError(err)
+
+ select {
+ case event := <-consumer.Watch():
+ s.Require().Equal(common.ChangePayload{
+ EntityType: common.PoolEntityType,
+ Operation: common.DeleteOperation,
+ Payload: params.Pool{ID: pool.ID},
+ }, event)
+ case <-time.After(1 * time.Second):
+ s.T().Fatal("expected payload not received")
+ }
+}
+
+func (s *WatcherStoreTestSuite) TestScaleSetWatcher() {
+ consumer, err := watcher.RegisterConsumer(
+ s.ctx, "scaleset-test",
+ watcher.WithEntityTypeFilter(common.ScaleSetEntityType),
+ watcher.WithAny(
+ watcher.WithOperationTypeFilter(common.CreateOperation),
+ watcher.WithOperationTypeFilter(common.UpdateOperation),
+ watcher.WithOperationTypeFilter(common.DeleteOperation)),
+ )
+ s.Require().NoError(err)
+ s.Require().NotNil(consumer)
+ s.T().Cleanup(func() { consumer.Close() })
+ consumeEvents(consumer)
+
+ ep := garmTesting.CreateDefaultGithubEndpoint(s.ctx, s.store, s.T())
+ creds := garmTesting.CreateTestGithubCredentials(s.ctx, "test-creds", s.store, s.T(), ep)
+ s.T().Cleanup(func() {
+ if err := s.store.DeleteGithubCredentials(s.ctx, creds.ID); err != nil {
+ s.T().Logf("failed to delete Github credentials: %v", err)
+ }
+ })
+
+ repo, err := s.store.CreateRepository(s.ctx, "test-owner", "test-repo", creds, "test-secret", params.PoolBalancerTypeRoundRobin)
+ s.Require().NoError(err)
+ s.Require().NotEmpty(repo.ID)
+ s.T().Cleanup(func() { s.store.DeleteRepository(s.ctx, repo.ID) })
+
+ entity, err := repo.GetEntity()
+ s.Require().NoError(err)
+
+ createScaleSetParams := params.CreateScaleSetParams{
+ ProviderName: "test-provider",
+ Name: "test-scaleset",
+ Image: "test-image",
+ Flavor: "test-flavor",
+ MinIdleRunners: 0,
+ MaxRunners: 1,
+ OSType: commonParams.Linux,
+ OSArch: commonParams.Amd64,
+ Tags: []string{"test-tag"},
+ }
+ scaleSet, err := s.store.CreateEntityScaleSet(s.ctx, entity, createScaleSetParams)
+ s.Require().NoError(err)
+ s.Require().NotEmpty(scaleSet.ID)
+
+ select {
+ case event := <-consumer.Watch():
+ s.Require().Equal(common.ChangePayload{
+ EntityType: common.ScaleSetEntityType,
+ Operation: common.CreateOperation,
+ Payload: scaleSet,
+ }, event)
+ asScaleSet, ok := event.Payload.(params.ScaleSet)
+ s.Require().True(ok)
+ s.Require().Equal(scaleSet.Image, "test-image")
+ s.Require().Equal(asScaleSet.Image, "test-image")
+ case <-time.After(1 * time.Second):
+ s.T().Fatal("expected payload not received")
+ }
+
+ updateParams := params.UpdateScaleSetParams{
+ Flavor: "updated-flavor",
+ }
+
+ callbackFn := func(old, newScaleSet params.ScaleSet) error {
+ s.Require().Equal(old.ID, newScaleSet.ID)
+ s.Require().Equal(old.Flavor, "test-flavor")
+ s.Require().Equal(newScaleSet.Flavor, "updated-flavor")
+ return nil
+ }
+ updatedScaleSet, err := s.store.UpdateEntityScaleSet(s.ctx, entity, scaleSet.ID, updateParams, callbackFn)
+ s.Require().NoError(err)
+
+ select {
+ case event := <-consumer.Watch():
+ s.Require().Equal(common.ChangePayload{
+ EntityType: common.ScaleSetEntityType,
+ Operation: common.UpdateOperation,
+ Payload: updatedScaleSet,
+ }, event)
+ case <-time.After(1 * time.Second):
+ s.T().Fatal("expected payload not received")
+ }
+
+ err = s.store.SetScaleSetLastMessageID(s.ctx, updatedScaleSet.ID, 99)
+ s.Require().NoError(err)
+
+ select {
+ case event := <-consumer.Watch():
+ asScaleSet, ok := event.Payload.(params.ScaleSet)
+ s.Require().True(ok)
+ s.Require().Equal(asScaleSet.ID, updatedScaleSet.ID)
+ s.Require().Equal(asScaleSet.LastMessageID, int64(99))
+ case <-time.After(1 * time.Second):
+ s.T().Fatal("expected payload not received")
+ }
+
+ err = s.store.SetScaleSetDesiredRunnerCount(s.ctx, updatedScaleSet.ID, 5)
+ s.Require().NoError(err)
+
+ select {
+ case event := <-consumer.Watch():
+ asScaleSet, ok := event.Payload.(params.ScaleSet)
+ s.Require().True(ok)
+ s.Require().Equal(asScaleSet.ID, updatedScaleSet.ID)
+ s.Require().Equal(asScaleSet.DesiredRunnerCount, 5)
+ case <-time.After(1 * time.Second):
+ s.T().Fatal("expected payload not received")
+ }
+
+ err = s.store.DeleteScaleSetByID(s.ctx, scaleSet.ID)
+ s.Require().NoError(err)
+
+ select {
+ case event := <-consumer.Watch():
+ // We updated last message ID and desired runner count above.
+ updatedScaleSet.DesiredRunnerCount = 5
+ updatedScaleSet.LastMessageID = 99
+ payloadFromEvent, ok := event.Payload.(params.ScaleSet)
+ s.Require().True(ok)
+ updatedScaleSet.UpdatedAt = payloadFromEvent.UpdatedAt
+ updatedScaleSet.CreatedAt = payloadFromEvent.CreatedAt
+ updatedScaleSet.Endpoint = params.ForgeEndpoint{}
+ s.Require().Equal(common.ChangePayload{
+ EntityType: common.ScaleSetEntityType,
+ Operation: common.DeleteOperation,
+ Payload: updatedScaleSet,
+ }, event)
+ case <-time.After(1 * time.Second):
+ s.T().Fatal("expected payload not received")
+ }
+}
+
+func (s *WatcherStoreTestSuite) TestControllerWatcher() {
+ consumer, err := watcher.RegisterConsumer(
+ s.ctx, "controller-test",
+ watcher.WithEntityTypeFilter(common.ControllerEntityType),
+ watcher.WithOperationTypeFilter(common.UpdateOperation),
+ )
+ s.Require().NoError(err)
+ s.Require().NotNil(consumer)
+ s.T().Cleanup(func() { consumer.Close() })
+ consumeEvents(consumer)
+
+ metadataURL := "http://metadata.example.com"
+ updateParams := params.UpdateControllerParams{
+ MetadataURL: &metadataURL,
+ }
+
+ controller, err := s.store.UpdateController(updateParams)
+ s.Require().NoError(err)
+ s.Require().Equal(metadataURL, controller.MetadataURL)
+
+ select {
+ case event := <-consumer.Watch():
+ s.Require().Equal(common.ChangePayload{
+ EntityType: common.ControllerEntityType,
+ Operation: common.UpdateOperation,
+ Payload: controller,
+ }, event)
+ case <-time.After(1 * time.Second):
+ s.T().Fatal("expected payload not received")
+ }
+}
+
+func (s *WatcherStoreTestSuite) TestEnterpriseWatcher() {
+ consumer, err := watcher.RegisterConsumer(
+ s.ctx, "enterprise-test",
+ watcher.WithEntityTypeFilter(common.EnterpriseEntityType),
+ watcher.WithAny(
+ watcher.WithOperationTypeFilter(common.CreateOperation),
+ watcher.WithOperationTypeFilter(common.UpdateOperation),
+ watcher.WithOperationTypeFilter(common.DeleteOperation)),
+ )
+ s.Require().NoError(err)
+ s.Require().NotNil(consumer)
+ s.T().Cleanup(func() { consumer.Close() })
+ consumeEvents(consumer)
+
+ ep := garmTesting.CreateDefaultGithubEndpoint(s.ctx, s.store, s.T())
+ creds := garmTesting.CreateTestGithubCredentials(s.ctx, "test-creds", s.store, s.T(), ep)
+ s.T().Cleanup(func() { s.store.DeleteGithubCredentials(s.ctx, creds.ID) })
+
+ ent, err := s.store.CreateEnterprise(s.ctx, "test-enterprise", creds, "test-secret", params.PoolBalancerTypeRoundRobin)
+ s.Require().NoError(err)
+ s.Require().NotEmpty(ent.ID)
+
+ select {
+ case event := <-consumer.Watch():
+ s.Require().Equal(common.ChangePayload{
+ EntityType: common.EnterpriseEntityType,
+ Operation: common.CreateOperation,
+ Payload: ent,
+ }, event)
+ case <-time.After(1 * time.Second):
+ s.T().Fatal("expected payload not received")
+ }
+
+ updateParams := params.UpdateEntityParams{
+ WebhookSecret: "updated",
+ }
+
+ updatedEnt, err := s.store.UpdateEnterprise(s.ctx, ent.ID, updateParams)
+ s.Require().NoError(err)
+ s.Require().Equal("updated", updatedEnt.WebhookSecret)
+
+ select {
+ case event := <-consumer.Watch():
+ s.Require().Equal(common.ChangePayload{
+ EntityType: common.EnterpriseEntityType,
+ Operation: common.UpdateOperation,
+ Payload: updatedEnt,
+ }, event)
+ case <-time.After(1 * time.Second):
+ s.T().Fatal("expected payload not received")
+ }
+
+ err = s.store.DeleteEnterprise(s.ctx, ent.ID)
+ s.Require().NoError(err)
+
+ select {
+ case event := <-consumer.Watch():
+ s.Require().Equal(common.ChangePayload{
+ EntityType: common.EnterpriseEntityType,
+ Operation: common.DeleteOperation,
+ Payload: updatedEnt,
+ }, event)
+ case <-time.After(1 * time.Second):
+ s.T().Fatal("expected payload not received")
+ }
+}
+
+func (s *WatcherStoreTestSuite) TestOrgWatcher() {
+ consumer, err := watcher.RegisterConsumer(
+ s.ctx, "org-test",
+ watcher.WithEntityTypeFilter(common.OrganizationEntityType),
+ watcher.WithAny(
+ watcher.WithOperationTypeFilter(common.CreateOperation),
+ watcher.WithOperationTypeFilter(common.UpdateOperation),
+ watcher.WithOperationTypeFilter(common.DeleteOperation)),
+ )
+ s.Require().NoError(err)
+ s.Require().NotNil(consumer)
+ s.T().Cleanup(func() { consumer.Close() })
+ consumeEvents(consumer)
+
+ ep := garmTesting.CreateDefaultGithubEndpoint(s.ctx, s.store, s.T())
+ creds := garmTesting.CreateTestGithubCredentials(s.ctx, "test-creds", s.store, s.T(), ep)
+ s.T().Cleanup(func() { s.store.DeleteGithubCredentials(s.ctx, creds.ID) })
+
+ org, err := s.store.CreateOrganization(s.ctx, "test-org", creds, "test-secret", params.PoolBalancerTypeRoundRobin)
+ s.Require().NoError(err)
+ s.Require().NotEmpty(org.ID)
+
+ select {
+ case event := <-consumer.Watch():
+ s.Require().Equal(common.ChangePayload{
+ EntityType: common.OrganizationEntityType,
+ Operation: common.CreateOperation,
+ Payload: org,
+ }, event)
+ case <-time.After(1 * time.Second):
+ s.T().Fatal("expected payload not received")
+ }
+
+ updateParams := params.UpdateEntityParams{
+ WebhookSecret: "updated",
+ }
+
+ updatedOrg, err := s.store.UpdateOrganization(s.ctx, org.ID, updateParams)
+ s.Require().NoError(err)
+ s.Require().Equal("updated", updatedOrg.WebhookSecret)
+
+ select {
+ case event := <-consumer.Watch():
+ s.Require().Equal(common.ChangePayload{
+ EntityType: common.OrganizationEntityType,
+ Operation: common.UpdateOperation,
+ Payload: updatedOrg,
+ }, event)
+ case <-time.After(1 * time.Second):
+ s.T().Fatal("expected payload not received")
+ }
+
+ err = s.store.DeleteOrganization(s.ctx, org.ID)
+ s.Require().NoError(err)
+
+ select {
+ case event := <-consumer.Watch():
+ s.Require().Equal(common.ChangePayload{
+ EntityType: common.OrganizationEntityType,
+ Operation: common.DeleteOperation,
+ Payload: updatedOrg,
+ }, event)
+ case <-time.After(1 * time.Second):
+ s.T().Fatal("expected payload not received")
+ }
+}
+
+func (s *WatcherStoreTestSuite) TestRepoWatcher() {
+ consumer, err := watcher.RegisterConsumer(
+ s.ctx, "repo-test",
+ watcher.WithEntityTypeFilter(common.RepositoryEntityType),
+ watcher.WithAny(
+ watcher.WithOperationTypeFilter(common.CreateOperation),
+ watcher.WithOperationTypeFilter(common.UpdateOperation),
+ watcher.WithOperationTypeFilter(common.DeleteOperation)),
+ )
+ s.Require().NoError(err)
+ s.Require().NotNil(consumer)
+ s.T().Cleanup(func() { consumer.Close() })
+ consumeEvents(consumer)
+
+ ep := garmTesting.CreateDefaultGithubEndpoint(s.ctx, s.store, s.T())
+ creds := garmTesting.CreateTestGithubCredentials(s.ctx, "test-creds", s.store, s.T(), ep)
+ s.T().Cleanup(func() { s.store.DeleteGithubCredentials(s.ctx, creds.ID) })
+
+ repo, err := s.store.CreateRepository(s.ctx, "test-owner", "test-repo", creds, "test-secret", params.PoolBalancerTypeRoundRobin)
+ s.Require().NoError(err)
+ s.Require().NotEmpty(repo.ID)
+
+ select {
+ case event := <-consumer.Watch():
+ s.Require().Equal(common.ChangePayload{
+ EntityType: common.RepositoryEntityType,
+ Operation: common.CreateOperation,
+ Payload: repo,
+ }, event)
+ case <-time.After(1 * time.Second):
+ s.T().Fatal("expected payload not received")
+ }
+
+ newSecret := "updated"
+ updateParams := params.UpdateEntityParams{
+ WebhookSecret: newSecret,
+ }
+
+ updatedRepo, err := s.store.UpdateRepository(s.ctx, repo.ID, updateParams)
+ s.Require().NoError(err)
+ s.Require().Equal(newSecret, updatedRepo.WebhookSecret)
+
+ select {
+ case event := <-consumer.Watch():
+ s.Require().Equal(common.ChangePayload{
+ EntityType: common.RepositoryEntityType,
+ Operation: common.UpdateOperation,
+ Payload: updatedRepo,
+ }, event)
+ case <-time.After(1 * time.Second):
+ s.T().Fatal("expected payload not received")
+ }
+
+ err = s.store.DeleteRepository(s.ctx, repo.ID)
+ s.Require().NoError(err)
+
+ select {
+ case event := <-consumer.Watch():
+ s.Require().Equal(common.ChangePayload{
+ EntityType: common.RepositoryEntityType,
+ Operation: common.DeleteOperation,
+ Payload: updatedRepo,
+ }, event)
+ case <-time.After(1 * time.Second):
+ s.T().Fatal("expected payload not received")
+ }
+}
+
+func (s *WatcherStoreTestSuite) TestGithubCredentialsWatcher() {
+ consumer, err := watcher.RegisterConsumer(
+ s.ctx, "gh-cred-test",
+ watcher.WithEntityTypeFilter(common.GithubCredentialsEntityType),
+ watcher.WithAny(
+ watcher.WithOperationTypeFilter(common.CreateOperation),
+ watcher.WithOperationTypeFilter(common.UpdateOperation),
+ watcher.WithOperationTypeFilter(common.DeleteOperation)),
+ )
+ s.Require().NoError(err)
+ s.Require().NotNil(consumer)
+ s.T().Cleanup(func() { consumer.Close() })
+ consumeEvents(consumer)
+
+ ghCredParams := params.CreateGithubCredentialsParams{
+ Name: "test-creds",
+ Description: "test credentials",
+ Endpoint: "github.com",
+ AuthType: params.ForgeAuthTypePAT,
+ PAT: params.GithubPAT{
+ OAuth2Token: "bogus",
+ },
+ }
+
+ ghCred, err := s.store.CreateGithubCredentials(s.ctx, ghCredParams)
+ s.Require().NoError(err)
+ s.Require().NotEmpty(ghCred.ID)
+
+ select {
+ case event := <-consumer.Watch():
+ s.Require().Equal(common.ChangePayload{
+ EntityType: common.GithubCredentialsEntityType,
+ Operation: common.CreateOperation,
+ Payload: ghCred,
+ }, event)
+ case <-time.After(1 * time.Second):
+ s.T().Fatal("expected payload not received")
+ }
+
+ newDesc := "updated description"
+ updateParams := params.UpdateGithubCredentialsParams{
+ Description: &newDesc,
+ }
+
+ updatedGhCred, err := s.store.UpdateGithubCredentials(s.ctx, ghCred.ID, updateParams)
+ s.Require().NoError(err)
+ s.Require().Equal(newDesc, updatedGhCred.Description)
+
+ select {
+ case event := <-consumer.Watch():
+ s.Require().Equal(common.ChangePayload{
+ EntityType: common.GithubCredentialsEntityType,
+ Operation: common.UpdateOperation,
+ Payload: updatedGhCred,
+ }, event)
+ case <-time.After(1 * time.Second):
+ s.T().Fatal("expected payload not received")
+ }
+
+ err = s.store.DeleteGithubCredentials(s.ctx, ghCred.ID)
+ s.Require().NoError(err)
+
+ select {
+ case event := <-consumer.Watch():
+ s.Require().Equal(common.ChangePayload{
+ EntityType: common.GithubCredentialsEntityType,
+ Operation: common.DeleteOperation,
+ // We only get the ID and Name of the deleted entity
+ Payload: params.ForgeCredentials{ID: ghCred.ID, Name: ghCred.Name},
+ }, event)
+ case <-time.After(1 * time.Second):
+ s.T().Fatal("expected payload not received")
+ }
+}
+
+func (s *WatcherStoreTestSuite) TestGiteaCredentialsWatcher() {
+ consumer, err := watcher.RegisterConsumer(
+ s.ctx, "gitea-cred-test",
+ watcher.WithEntityTypeFilter(common.GiteaCredentialsEntityType),
+ watcher.WithAny(
+ watcher.WithOperationTypeFilter(common.CreateOperation),
+ watcher.WithOperationTypeFilter(common.UpdateOperation),
+ watcher.WithOperationTypeFilter(common.DeleteOperation)),
+ )
+ s.Require().NoError(err)
+ s.Require().NotNil(consumer)
+ s.T().Cleanup(func() { consumer.Close() })
+ consumeEvents(consumer)
+
+ testEndpointParams := params.CreateGiteaEndpointParams{
+ Name: "test",
+ Description: "test endpoint",
+ APIBaseURL: "https://api.gitea.example.com",
+ BaseURL: "https://gitea.example.com",
+ }
+
+ testEndpoint, err := s.store.CreateGiteaEndpoint(s.ctx, testEndpointParams)
+ s.Require().NoError(err)
+ s.Require().NotEmpty(testEndpoint.Name)
+
+ s.T().Cleanup(func() {
+ if err := s.store.DeleteGiteaEndpoint(s.ctx, testEndpoint.Name); err != nil {
+ s.T().Logf("failed to delete Gitea endpoint: %v", err)
+ }
+ consumeEvents(consumer)
+ })
+
+ giteaCredParams := params.CreateGiteaCredentialsParams{
+ Name: "test-creds",
+ Description: "test credentials",
+ Endpoint: testEndpoint.Name,
+ AuthType: params.ForgeAuthTypePAT,
+ PAT: params.GithubPAT{
+ OAuth2Token: "bogus",
+ },
+ }
+
+ giteaCred, err := s.store.CreateGiteaCredentials(s.ctx, giteaCredParams)
+ s.Require().NoError(err)
+ s.Require().NotEmpty(giteaCred.ID)
+
+ select {
+ case event := <-consumer.Watch():
+ s.Require().Equal(common.ChangePayload{
+ EntityType: common.GiteaCredentialsEntityType,
+ Operation: common.CreateOperation,
+ Payload: giteaCred,
+ }, event)
+ case <-time.After(1 * time.Second):
+ s.T().Fatal("expected payload not received")
+ }
+
+ newDesc := "updated test description"
+ updateParams := params.UpdateGiteaCredentialsParams{
+ Description: &newDesc,
+ }
+
+ updatedGiteaCred, err := s.store.UpdateGiteaCredentials(s.ctx, giteaCred.ID, updateParams)
+ s.Require().NoError(err)
+ s.Require().Equal(newDesc, updatedGiteaCred.Description)
+
+ select {
+ case event := <-consumer.Watch():
+ s.Require().Equal(common.ChangePayload{
+ EntityType: common.GiteaCredentialsEntityType,
+ Operation: common.UpdateOperation,
+ Payload: updatedGiteaCred,
+ }, event)
+ case <-time.After(1 * time.Second):
+ s.T().Fatal("expected payload not received")
+ }
+
+ err = s.store.DeleteGiteaCredentials(s.ctx, giteaCred.ID)
+ s.Require().NoError(err)
+
+ select {
+ case event := <-consumer.Watch():
+ asCreds, ok := event.Payload.(params.ForgeCredentials)
+ s.Require().True(ok)
+ s.Require().Equal(event.Operation, common.DeleteOperation)
+ s.Require().Equal(event.EntityType, common.GiteaCredentialsEntityType)
+ s.Require().Equal(asCreds.ID, updatedGiteaCred.ID)
+ case <-time.After(1 * time.Second):
+ s.T().Fatal("expected payload not received")
+ }
+}
+
+func (s *WatcherStoreTestSuite) TestGithubEndpointWatcher() {
+ consumer, err := watcher.RegisterConsumer(
+ s.ctx, "gh-ep-test",
+ watcher.WithEntityTypeFilter(common.GithubEndpointEntityType),
+ watcher.WithAny(
+ watcher.WithOperationTypeFilter(common.CreateOperation),
+ watcher.WithOperationTypeFilter(common.UpdateOperation),
+ watcher.WithOperationTypeFilter(common.DeleteOperation)),
+ )
+ s.Require().NoError(err)
+ s.Require().NotNil(consumer)
+ s.T().Cleanup(func() { consumer.Close() })
+ consumeEvents(consumer)
+
+ ghEpParams := params.CreateGithubEndpointParams{
+ Name: "test",
+ Description: "test endpoint",
+ APIBaseURL: "https://api.ghes.example.com",
+ UploadBaseURL: "https://upload.ghes.example.com",
+ BaseURL: "https://ghes.example.com",
+ }
+
+ ghEp, err := s.store.CreateGithubEndpoint(s.ctx, ghEpParams)
+ s.Require().NoError(err)
+ s.Require().NotEmpty(ghEp.Name)
+
+ select {
+ case event := <-consumer.Watch():
+ s.Require().Equal(common.ChangePayload{
+ EntityType: common.GithubEndpointEntityType,
+ Operation: common.CreateOperation,
+ Payload: ghEp,
+ }, event)
+ case <-time.After(1 * time.Second):
+ s.T().Fatal("expected payload not received")
+ }
+
+ newDesc := "updated description"
+ updateParams := params.UpdateGithubEndpointParams{
+ Description: &newDesc,
+ }
+
+ updatedGhEp, err := s.store.UpdateGithubEndpoint(s.ctx, ghEp.Name, updateParams)
+ s.Require().NoError(err)
+ s.Require().Equal(newDesc, updatedGhEp.Description)
+
+ select {
+ case event := <-consumer.Watch():
+ s.Require().Equal(common.ChangePayload{
+ EntityType: common.GithubEndpointEntityType,
+ Operation: common.UpdateOperation,
+ Payload: updatedGhEp,
+ }, event)
+ case <-time.After(1 * time.Second):
+ s.T().Fatal("expected payload not received")
+ }
+
+ err = s.store.DeleteGithubEndpoint(s.ctx, ghEp.Name)
+ s.Require().NoError(err)
+
+ select {
+ case event := <-consumer.Watch():
+ s.Require().Equal(common.ChangePayload{
+ EntityType: common.GithubEndpointEntityType,
+ Operation: common.DeleteOperation,
+ // We only get the name of the deleted entity
+ Payload: params.ForgeEndpoint{Name: ghEp.Name},
+ }, event)
+ case <-time.After(1 * time.Second):
+ s.T().Fatal("expected payload not received")
+ }
+}
+
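+// consumeEvents drains any buffered events from the consumer, returning
+// once no new event arrives for 20 milliseconds or the channel is closed.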
+func consumeEvents(consumer common.Consumer) {
+consume:
+ for {
+ select {
+ case _, ok := <-consumer.Watch():
+ // throw away event.
+ if !ok {
+ return
+ }
+ case <-time.After(20 * time.Millisecond):
+ break consume
+ }
+ }
+}
diff --git a/database/watcher/watcher_test.go b/database/watcher/watcher_test.go
new file mode 100644
index 00000000..fcbcc4eb
--- /dev/null
+++ b/database/watcher/watcher_test.go
@@ -0,0 +1,1488 @@
+//go:build testing
+
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package watcher_test
+
+import (
+ "context"
+ "fmt"
+ "testing"
+ "time"
+
+ "github.com/google/uuid"
+ "github.com/stretchr/testify/suite"
+
+ commonParams "github.com/cloudbase/garm-provider-common/params"
+ "github.com/cloudbase/garm/database"
+ "github.com/cloudbase/garm/database/common"
+ "github.com/cloudbase/garm/database/watcher"
+ garmTesting "github.com/cloudbase/garm/internal/testing"
+ "github.com/cloudbase/garm/params"
+)
+
+type WatcherTestSuite struct {
+ suite.Suite
+ store common.Store
+ ctx context.Context
+}
+
+func (s *WatcherTestSuite) SetupTest() {
+ ctx := context.TODO()
+ watcher.InitWatcher(ctx)
+ store, err := database.NewDatabase(ctx, garmTesting.GetTestSqliteDBConfig(s.T()))
+ if err != nil {
+ s.T().Fatalf("failed to create db connection: %s", err)
+ }
+ s.store = store
+}
+
+func (s *WatcherTestSuite) TearDownTest() {
+ s.store = nil
+ currentWatcher := watcher.GetWatcher()
+ if currentWatcher != nil {
+ currentWatcher.Close()
+ watcher.SetWatcher(nil)
+ }
+}
+
+func (s *WatcherTestSuite) TestRegisterConsumerTwiceWillError() {
+ consumer, err := watcher.RegisterConsumer(s.ctx, "test")
+ s.Require().NoError(err)
+ s.Require().NotNil(consumer)
+ consumeEvents(consumer)
+
+ consumer, err = watcher.RegisterConsumer(s.ctx, "test")
+ s.Require().ErrorIs(err, common.ErrConsumerAlreadyRegistered)
+ s.Require().Nil(consumer)
+}
+
+func (s *WatcherTestSuite) TestRegisterProducerTwiceWillError() {
+ producer, err := watcher.RegisterProducer(s.ctx, "test")
+ s.Require().NoError(err)
+ s.Require().NotNil(producer)
+
+ producer, err = watcher.RegisterProducer(s.ctx, "test")
+ s.Require().ErrorIs(err, common.ErrProducerAlreadyRegistered)
+ s.Require().Nil(producer)
+}
+
+func (s *WatcherTestSuite) TestInitWatcherRanTwiceDoesNotReplaceWatcher() {
+ ctx := context.TODO()
+ currentWatcher := watcher.GetWatcher()
+ s.Require().NotNil(currentWatcher)
+ watcher.InitWatcher(ctx)
+ newWatcher := watcher.GetWatcher()
+ s.Require().Equal(currentWatcher, newWatcher)
+}
+
+func (s *WatcherTestSuite) TestRegisterConsumerFailsIfWatcherIsNotInitialized() {
+ s.store = nil
+ currentWatcher := watcher.GetWatcher()
+ currentWatcher.Close()
+
+ consumer, err := watcher.RegisterConsumer(s.ctx, "test")
+ s.Require().Nil(consumer)
+ s.Require().ErrorIs(err, common.ErrWatcherNotInitialized)
+}
+
+func (s *WatcherTestSuite) TestRegisterProducerFailsIfWatcherIsNotInitialized() {
+ s.store = nil
+ currentWatcher := watcher.GetWatcher()
+ currentWatcher.Close()
+
+ producer, err := watcher.RegisterProducer(s.ctx, "test")
+ s.Require().Nil(producer)
+ s.Require().ErrorIs(err, common.ErrWatcherNotInitialized)
+}
+
+func (s *WatcherTestSuite) TestProducerAndConsumer() {
+ producer, err := watcher.RegisterProducer(s.ctx, "test-producer")
+ s.Require().NoError(err)
+ s.Require().NotNil(producer)
+
+ consumer, err := watcher.RegisterConsumer(
+ s.ctx, "test-consumer",
+ watcher.WithEntityTypeFilter(common.ControllerEntityType),
+ watcher.WithOperationTypeFilter(common.UpdateOperation))
+ s.Require().NoError(err)
+ s.Require().NotNil(consumer)
+ consumeEvents(consumer)
+
+ payload := common.ChangePayload{
+ EntityType: common.ControllerEntityType,
+ Operation: common.UpdateOperation,
+ Payload: "test",
+ }
+ err = producer.Notify(payload)
+ s.Require().NoError(err)
+
+ receivedPayload := <-consumer.Watch()
+ s.Require().Equal(payload, receivedPayload)
+}
+
+func (s *WatcherTestSuite) TestConsumeWithFilter() {
+ producer, err := watcher.RegisterProducer(s.ctx, "test-producer")
+ s.Require().NoError(err)
+ s.Require().NotNil(producer)
+
+ consumer, err := watcher.RegisterConsumer(
+ s.ctx, "test-consumer",
+ watcher.WithEntityTypeFilter(common.ControllerEntityType),
+ watcher.WithOperationTypeFilter(common.UpdateOperation))
+ s.Require().NoError(err)
+ s.Require().NotNil(consumer)
+ consumeEvents(consumer)
+
+ payload := common.ChangePayload{
+ EntityType: common.ControllerEntityType,
+ Operation: common.UpdateOperation,
+ Payload: "test",
+ }
+ err = producer.Notify(payload)
+ s.Require().NoError(err)
+
+ receivedPayload := waitForPayload(consumer.Watch(), 100*time.Millisecond)
+ s.Require().NotNil(receivedPayload)
+ s.Require().Equal(payload, *receivedPayload)
+
+ payload = common.ChangePayload{
+ EntityType: common.ControllerEntityType,
+ Operation: common.CreateOperation,
+ Payload: "test",
+ }
+ err = producer.Notify(payload)
+ s.Require().NoError(err)
+
+ receivedPayload = waitForPayload(consumer.Watch(), 100*time.Millisecond)
+ s.Require().Nil(receivedPayload)
+}
+
+func (s *WatcherTestSuite) TestWithAnyFilter() {
+ producer, err := watcher.RegisterProducer(s.ctx, "test-producer")
+ s.Require().NoError(err)
+ s.Require().NotNil(producer)
+
+ consumer, err := watcher.RegisterConsumer(
+ s.ctx, "test-consumer",
+ watcher.WithAny(
+ watcher.WithEntityTypeFilter(common.ControllerEntityType),
+ watcher.WithEntityFilter(params.ForgeEntity{
+ EntityType: params.ForgeEntityTypeRepository,
+ Owner: "test",
+ Name: "test",
+ ID: "test",
+ }),
+ ))
+ s.Require().NoError(err)
+ s.Require().NotNil(consumer)
+ consumeEvents(consumer)
+
+ payload := common.ChangePayload{
+ EntityType: common.ControllerEntityType,
+ Operation: common.UpdateOperation,
+ Payload: "test",
+ }
+ err = producer.Notify(payload)
+ s.Require().NoError(err)
+
+ receivedPayload := waitForPayload(consumer.Watch(), 100*time.Millisecond)
+ s.Require().NotNil(receivedPayload)
+ s.Require().Equal(payload, *receivedPayload)
+
+ payload = common.ChangePayload{
+ EntityType: common.RepositoryEntityType,
+ Operation: common.UpdateOperation,
+ Payload: params.Repository{
+ Owner: "test",
+ Name: "test",
+ ID: "test",
+ },
+ }
+
+ err = producer.Notify(payload)
+ s.Require().NoError(err)
+
+ receivedPayload = waitForPayload(consumer.Watch(), 100*time.Millisecond)
+ s.Require().NotNil(receivedPayload)
+ s.Require().Equal(payload, *receivedPayload)
+
+ // We're not watching for this repo
+ payload = common.ChangePayload{
+ EntityType: common.RepositoryEntityType,
+ Operation: common.UpdateOperation,
+ Payload: params.Repository{
+ Owner: "test",
+ Name: "test",
+ ID: "test2",
+ },
+ }
+
+ err = producer.Notify(payload)
+ s.Require().NoError(err)
+ receivedPayload = waitForPayload(consumer.Watch(), 100*time.Millisecond)
+ s.Require().Nil(receivedPayload)
+
+ // We're not watching for orgs
+ payload = common.ChangePayload{
+ EntityType: common.OrganizationEntityType,
+ Operation: common.UpdateOperation,
+ Payload: params.Repository{
+ Owner: "test",
+ Name: "test",
+ ID: "test2",
+ },
+ }
+
+ err = producer.Notify(payload)
+ s.Require().NoError(err)
+ receivedPayload = waitForPayload(consumer.Watch(), 100*time.Millisecond)
+ s.Require().Nil(receivedPayload)
+}
+
+func (s *WatcherTestSuite) TestWithAllFilter() {
+ producer, err := watcher.RegisterProducer(s.ctx, "test-producer")
+ s.Require().NoError(err)
+ s.Require().NotNil(producer)
+
+ consumer, err := watcher.RegisterConsumer(
+ s.ctx, "test-consumer",
+ watcher.WithAll(
+ watcher.WithEntityFilter(params.ForgeEntity{
+ EntityType: params.ForgeEntityTypeRepository,
+ Owner: "test",
+ Name: "test",
+ ID: "test",
+ }),
+ watcher.WithOperationTypeFilter(common.CreateOperation),
+ ))
+ s.Require().NoError(err)
+ s.Require().NotNil(consumer)
+ consumeEvents(consumer)
+
+ payload := common.ChangePayload{
+ EntityType: common.RepositoryEntityType,
+ Operation: common.CreateOperation,
+ Payload: params.Repository{
+ Owner: "test",
+ Name: "test",
+ ID: "test",
+ },
+ }
+ err = producer.Notify(payload)
+ s.Require().NoError(err)
+
+ receivedPayload := waitForPayload(consumer.Watch(), 100*time.Millisecond)
+ s.Require().NotNil(receivedPayload)
+ s.Require().Equal(payload, *receivedPayload)
+
+ payload = common.ChangePayload{
+ EntityType: common.RepositoryEntityType,
+ Operation: common.UpdateOperation,
+ Payload: params.Repository{
+ Owner: "test",
+ Name: "test",
+ ID: "test",
+ },
+ }
+
+ err = producer.Notify(payload)
+ s.Require().NoError(err)
+
+ receivedPayload = waitForPayload(consumer.Watch(), 100*time.Millisecond)
+ s.Require().Nil(receivedPayload)
+}
+
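+// maybeInitController ensures controller info exists in the test database,
+// initializing it on first use.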
+func maybeInitController(db common.Store) error {
+ if _, err := db.ControllerInfo(); err == nil {
+ return nil
+ }
+
+ if _, err := db.InitController(); err != nil {
+ return fmt.Errorf("error initializing controller: %w", err)
+ }
+
+ return nil
+}
+
+func (s *WatcherTestSuite) TestWithEntityPoolFilterRepository() {
+ producer, err := watcher.RegisterProducer(s.ctx, "test-producer")
+ s.Require().NoError(err)
+ s.Require().NotNil(producer)
+
+ entity := params.ForgeEntity{
+ EntityType: params.ForgeEntityTypeRepository,
+ Owner: "test",
+ Name: "test",
+ ID: "test",
+ }
+
+ consumer, err := watcher.RegisterConsumer(
+ s.ctx, "test-consumer",
+ watcher.WithEntityPoolFilter(entity),
+ )
+ s.Require().NoError(err)
+ s.Require().NotNil(consumer)
+ consumeEvents(consumer)
+
+ payload := common.ChangePayload{
+ EntityType: common.PoolEntityType,
+ Operation: common.UpdateOperation,
+ Payload: params.Pool{
+ ID: "test",
+ RepoID: "test",
+ },
+ }
+ err = producer.Notify(payload)
+ s.Require().NoError(err)
+
+ receivedPayload := waitForPayload(consumer.Watch(), 100*time.Millisecond)
+ s.Require().NotNil(receivedPayload)
+ s.Require().Equal(payload, *receivedPayload)
+
+ payload = common.ChangePayload{
+ EntityType: common.PoolEntityType,
+ Operation: common.UpdateOperation,
+ Payload: params.Pool{
+ ID: "test",
+ RepoID: "test2",
+ },
+ }
+
+ err = producer.Notify(payload)
+ s.Require().NoError(err)
+ receivedPayload = waitForPayload(consumer.Watch(), 100*time.Millisecond)
+ s.Require().Nil(receivedPayload)
+}
+
+func (s *WatcherTestSuite) TestWithEntityPoolFilterOrg() {
+ producer, err := watcher.RegisterProducer(s.ctx, "test-producer")
+ s.Require().NoError(err)
+ s.Require().NotNil(producer)
+
+ entity := params.ForgeEntity{
+ EntityType: params.ForgeEntityTypeOrganization,
+ Name: "test",
+ ID: "test",
+ }
+
+ consumer, err := watcher.RegisterConsumer(
+ s.ctx, "test-consumer",
+ watcher.WithEntityPoolFilter(entity),
+ )
+ s.Require().NoError(err)
+ s.Require().NotNil(consumer)
+ consumeEvents(consumer)
+
+ payload := common.ChangePayload{
+ EntityType: common.PoolEntityType,
+ Operation: common.UpdateOperation,
+ Payload: params.Pool{
+ ID: "test",
+ OrgID: "test",
+ },
+ }
+ err = producer.Notify(payload)
+ s.Require().NoError(err)
+
+ receivedPayload := waitForPayload(consumer.Watch(), 100*time.Millisecond)
+ s.Require().NotNil(receivedPayload)
+ s.Require().Equal(payload, *receivedPayload)
+
+ payload = common.ChangePayload{
+ EntityType: common.PoolEntityType,
+ Operation: common.UpdateOperation,
+ Payload: params.Pool{
+ ID: "test",
+ OrgID: "test2",
+ },
+ }
+
+ err = producer.Notify(payload)
+ s.Require().NoError(err)
+ receivedPayload = waitForPayload(consumer.Watch(), 100*time.Millisecond)
+ s.Require().Nil(receivedPayload)
+}
+
+func (s *WatcherTestSuite) TestWithEntityPoolFilterEnterprise() {
+ producer, err := watcher.RegisterProducer(s.ctx, "test-producer")
+ s.Require().NoError(err)
+ s.Require().NotNil(producer)
+
+ entity := params.ForgeEntity{
+ EntityType: params.ForgeEntityTypeEnterprise,
+ Name: "test",
+ ID: "test",
+ }
+
+ consumer, err := watcher.RegisterConsumer(
+ s.ctx, "test-consumer",
+ watcher.WithEntityPoolFilter(entity),
+ )
+ s.Require().NoError(err)
+ s.Require().NotNil(consumer)
+ consumeEvents(consumer)
+
+ payload := common.ChangePayload{
+ EntityType: common.PoolEntityType,
+ Operation: common.UpdateOperation,
+ Payload: params.Pool{
+ ID: "test",
+ EnterpriseID: "test",
+ },
+ }
+ err = producer.Notify(payload)
+ s.Require().NoError(err)
+
+ receivedPayload := waitForPayload(consumer.Watch(), 100*time.Millisecond)
+ s.Require().NotNil(receivedPayload)
+ s.Require().Equal(payload, *receivedPayload)
+
+ payload = common.ChangePayload{
+ EntityType: common.PoolEntityType,
+ Operation: common.UpdateOperation,
+ Payload: params.Pool{
+ ID: "test",
+ EnterpriseID: "test2",
+ },
+ }
+
+ err = producer.Notify(payload)
+ s.Require().NoError(err)
+ receivedPayload = waitForPayload(consumer.Watch(), 100*time.Millisecond)
+ s.Require().Nil(receivedPayload)
+
+ // Invalid payload for declared entity type
+ payload = common.ChangePayload{
+ EntityType: common.PoolEntityType,
+ Operation: common.UpdateOperation,
+ Payload: params.ScaleSet{
+ ID: 1,
+ EnterpriseID: "test2",
+ },
+ }
+
+ err = producer.Notify(payload)
+ s.Require().NoError(err)
+ receivedPayload = waitForPayload(consumer.Watch(), 100*time.Millisecond)
+ s.Require().Nil(receivedPayload)
+}
+
+func (s *WatcherTestSuite) TestWithEntityPoolFilterBogusEntityType() {
+ producer, err := watcher.RegisterProducer(s.ctx, "test-producer")
+ s.Require().NoError(err)
+ s.Require().NotNil(producer)
+
+ entity := params.ForgeEntity{
+ // This should trigger the default branch in the filter and
+ // return false
+ EntityType: params.ForgeEntityType("bogus"),
+ Name: "test",
+ ID: "test",
+ }
+
+ consumer, err := watcher.RegisterConsumer(
+ s.ctx, "test-consumer",
+ watcher.WithEntityPoolFilter(entity),
+ )
+ s.Require().NoError(err)
+ s.Require().NotNil(consumer)
+ consumeEvents(consumer)
+
+ payload := common.ChangePayload{
+ EntityType: common.PoolEntityType,
+ Operation: common.UpdateOperation,
+ Payload: params.Pool{
+ ID: "test",
+ EnterpriseID: "test",
+ },
+ }
+ err = producer.Notify(payload)
+ s.Require().NoError(err)
+
+ receivedPayload := waitForPayload(consumer.Watch(), 100*time.Millisecond)
+ s.Require().Nil(receivedPayload)
+
+ payload = common.ChangePayload{
+ EntityType: common.PoolEntityType,
+ Operation: common.UpdateOperation,
+ Payload: params.Pool{
+ ID: "test",
+ EnterpriseID: "test2",
+ },
+ }
+
+ err = producer.Notify(payload)
+ s.Require().NoError(err)
+ receivedPayload = waitForPayload(consumer.Watch(), 100*time.Millisecond)
+ s.Require().Nil(receivedPayload)
+}
+
+func (s *WatcherTestSuite) TestWithEntityScaleSetFilterRepository() {
+ producer, err := watcher.RegisterProducer(s.ctx, "test-producer")
+ s.Require().NoError(err)
+ s.Require().NotNil(producer)
+
+ entity := params.ForgeEntity{
+ EntityType: params.ForgeEntityTypeRepository,
+ Owner: "test",
+ Name: "test",
+ ID: "test",
+ Credentials: params.ForgeCredentials{
+ ForgeType: params.GithubEndpointType,
+ },
+ }
+
+ consumer, err := watcher.RegisterConsumer(
+ s.ctx, "test-consumer",
+ watcher.WithEntityScaleSetFilter(entity),
+ )
+ s.Require().NoError(err)
+ s.Require().NotNil(consumer)
+ consumeEvents(consumer)
+
+ payload := common.ChangePayload{
+ EntityType: common.ScaleSetEntityType,
+ Operation: common.UpdateOperation,
+ Payload: params.ScaleSet{
+ ID: 1,
+ RepoID: "test",
+ },
+ }
+ err = producer.Notify(payload)
+ s.Require().NoError(err)
+
+ receivedPayload := waitForPayload(consumer.Watch(), 100*time.Millisecond)
+ s.Require().NotNil(receivedPayload)
+ s.Require().Equal(payload, *receivedPayload)
+
+ payload = common.ChangePayload{
+ EntityType: common.ScaleSetEntityType,
+ Operation: common.UpdateOperation,
+ Payload: params.ScaleSet{
+ ID: 1,
+ RepoID: "test2",
+ },
+ }
+
+ err = producer.Notify(payload)
+ s.Require().NoError(err)
+ receivedPayload = waitForPayload(consumer.Watch(), 100*time.Millisecond)
+ s.Require().Nil(receivedPayload)
+}
+
+func (s *WatcherTestSuite) TestWithEntityScaleSetFilterOrg() {
+ producer, err := watcher.RegisterProducer(s.ctx, "test-producer")
+ s.Require().NoError(err)
+ s.Require().NotNil(producer)
+
+ entity := params.ForgeEntity{
+ EntityType: params.ForgeEntityTypeOrganization,
+ Name: "test",
+ ID: "test",
+ Credentials: params.ForgeCredentials{
+ ForgeType: params.GithubEndpointType,
+ },
+ }
+
+ consumer, err := watcher.RegisterConsumer(
+ s.ctx, "test-consumer",
+ watcher.WithEntityScaleSetFilter(entity),
+ )
+ s.Require().NoError(err)
+ s.Require().NotNil(consumer)
+ consumeEvents(consumer)
+
+ payload := common.ChangePayload{
+ EntityType: common.ScaleSetEntityType,
+ Operation: common.UpdateOperation,
+ Payload: params.ScaleSet{
+ ID: 1,
+ OrgID: "test",
+ },
+ }
+ err = producer.Notify(payload)
+ s.Require().NoError(err)
+
+ receivedPayload := waitForPayload(consumer.Watch(), 100*time.Millisecond)
+ s.Require().NotNil(receivedPayload)
+ s.Require().Equal(payload, *receivedPayload)
+
+ payload = common.ChangePayload{
+ EntityType: common.ScaleSetEntityType,
+ Operation: common.UpdateOperation,
+ Payload: params.ScaleSet{
+ ID: 1,
+ OrgID: "test2",
+ },
+ }
+
+ err = producer.Notify(payload)
+ s.Require().NoError(err)
+ receivedPayload = waitForPayload(consumer.Watch(), 100*time.Millisecond)
+ s.Require().Nil(receivedPayload)
+}
+
+func (s *WatcherTestSuite) TestWithEntityScaleSetFilterEnterprise() {
+ producer, err := watcher.RegisterProducer(s.ctx, "test-producer")
+ s.Require().NoError(err)
+ s.Require().NotNil(producer)
+
+ entity := params.ForgeEntity{
+ EntityType: params.ForgeEntityTypeEnterprise,
+ Name: "test",
+ ID: "test",
+ Credentials: params.ForgeCredentials{
+ ForgeType: params.GithubEndpointType,
+ },
+ }
+
+ consumer, err := watcher.RegisterConsumer(
+ s.ctx, "test-consumer",
+ watcher.WithEntityScaleSetFilter(entity),
+ )
+ s.Require().NoError(err)
+ s.Require().NotNil(consumer)
+ consumeEvents(consumer)
+
+ payload := common.ChangePayload{
+ EntityType: common.ScaleSetEntityType,
+ Operation: common.UpdateOperation,
+ Payload: params.ScaleSet{
+ ID: 1,
+ EnterpriseID: "test",
+ },
+ }
+ err = producer.Notify(payload)
+ s.Require().NoError(err)
+
+ receivedPayload := waitForPayload(consumer.Watch(), 100*time.Millisecond)
+ s.Require().NotNil(receivedPayload)
+ s.Require().Equal(payload, *receivedPayload)
+
+ payload = common.ChangePayload{
+ EntityType: common.ScaleSetEntityType,
+ Operation: common.UpdateOperation,
+ Payload: params.ScaleSet{
+ ID: 1,
+ EnterpriseID: "test2",
+ },
+ }
+
+ err = producer.Notify(payload)
+ s.Require().NoError(err)
+ receivedPayload = waitForPayload(consumer.Watch(), 100*time.Millisecond)
+ s.Require().Nil(receivedPayload)
+}
+
+func (s *WatcherTestSuite) TestWithEntityScaleSetFilterBogusEntityType() {
+ producer, err := watcher.RegisterProducer(s.ctx, "test-producer")
+ s.Require().NoError(err)
+ s.Require().NotNil(producer)
+
+ entity := params.ForgeEntity{
+ // This should trigger the default branch in the filter and
+ // return false
+ EntityType: params.ForgeEntityType("bogus"),
+ Name: "test",
+ ID: "test",
+ Credentials: params.ForgeCredentials{
+ ForgeType: params.GithubEndpointType,
+ },
+ }
+
+ consumer, err := watcher.RegisterConsumer(
+ s.ctx, "test-consumer",
+ watcher.WithEntityScaleSetFilter(entity),
+ )
+ s.Require().NoError(err)
+ s.Require().NotNil(consumer)
+ consumeEvents(consumer)
+
+ payload := common.ChangePayload{
+ EntityType: common.ScaleSetEntityType,
+ Operation: common.UpdateOperation,
+ Payload: params.ScaleSet{
+ ID: 1,
+ EnterpriseID: "test",
+ },
+ }
+ err = producer.Notify(payload)
+ s.Require().NoError(err)
+
+ receivedPayload := waitForPayload(consumer.Watch(), 100*time.Millisecond)
+ s.Require().Nil(receivedPayload)
+
+ payload = common.ChangePayload{
+ EntityType: common.ScaleSetEntityType,
+ Operation: common.UpdateOperation,
+ Payload: params.ScaleSet{
+ ID: 1,
+ EnterpriseID: "test2",
+ },
+ }
+
+ err = producer.Notify(payload)
+ s.Require().NoError(err)
+ receivedPayload = waitForPayload(consumer.Watch(), 100*time.Millisecond)
+ s.Require().Nil(receivedPayload)
+}
+
+func (s *WatcherTestSuite) TestWithEntityScaleSetFilterReturnsFalseForGiteaEndpoints() {
+ producer, err := watcher.RegisterProducer(s.ctx, "test-producer")
+ s.Require().NoError(err)
+ s.Require().NotNil(producer)
+
+ entity := params.ForgeEntity{
+ EntityType: params.ForgeEntityTypeRepository,
+ Owner: "test",
+ Name: "test",
+ ID: "test",
+ Credentials: params.ForgeCredentials{
+ ForgeType: params.GiteaEndpointType,
+ },
+ }
+
+ consumer, err := watcher.RegisterConsumer(
+ s.ctx, "test-consumer",
+ watcher.WithEntityScaleSetFilter(entity),
+ )
+ s.Require().NoError(err)
+ s.Require().NotNil(consumer)
+ consumeEvents(consumer)
+
+ payload := common.ChangePayload{
+ EntityType: common.ScaleSetEntityType,
+ Operation: common.UpdateOperation,
+ Payload: params.ScaleSet{
+ ID: 1,
+ RepoID: "test",
+ },
+ }
+ err = producer.Notify(payload)
+ s.Require().NoError(err)
+
+ receivedPayload := waitForPayload(consumer.Watch(), 100*time.Millisecond)
+ s.Require().Nil(receivedPayload)
+}
+
+func (s *WatcherTestSuite) TestWithEntityFilterRepository() {
+ producer, err := watcher.RegisterProducer(s.ctx, "test-producer")
+ s.Require().NoError(err)
+ s.Require().NotNil(producer)
+
+ entity := params.ForgeEntity{
+ EntityType: params.ForgeEntityTypeRepository,
+ Owner: "test",
+ Name: "test",
+ ID: "test",
+ }
+
+ consumer, err := watcher.RegisterConsumer(
+ s.ctx, "test-consumer",
+ watcher.WithEntityFilter(entity),
+ )
+ s.Require().NoError(err)
+ s.Require().NotNil(consumer)
+ consumeEvents(consumer)
+
+ payload := common.ChangePayload{
+ EntityType: common.RepositoryEntityType,
+ Operation: common.UpdateOperation,
+ Payload: params.Repository{
+ ID: "test",
+ Name: "test",
+ Owner: "test",
+ },
+ }
+ err = producer.Notify(payload)
+ s.Require().NoError(err)
+
+ receivedPayload := waitForPayload(consumer.Watch(), 100*time.Millisecond)
+ s.Require().NotNil(receivedPayload)
+ s.Require().Equal(payload, *receivedPayload)
+
+ payload = common.ChangePayload{
+ EntityType: common.RepositoryEntityType,
+ Operation: common.UpdateOperation,
+ Payload: params.Repository{
+ ID: "test2",
+ Name: "test",
+ Owner: "test",
+ },
+ }
+
+ err = producer.Notify(payload)
+ s.Require().NoError(err)
+ receivedPayload = waitForPayload(consumer.Watch(), 100*time.Millisecond)
+ s.Require().Nil(receivedPayload)
+}
+
+func (s *WatcherTestSuite) TestWithEntityFilterOrg() {
+ producer, err := watcher.RegisterProducer(s.ctx, "test-producer")
+ s.Require().NoError(err)
+ s.Require().NotNil(producer)
+
+ entity := params.ForgeEntity{
+ EntityType: params.ForgeEntityTypeOrganization,
+ Name: "test",
+ ID: "test",
+ }
+
+ consumer, err := watcher.RegisterConsumer(
+ s.ctx, "test-consumer",
+ watcher.WithEntityFilter(entity),
+ )
+ s.Require().NoError(err)
+ s.Require().NotNil(consumer)
+ consumeEvents(consumer)
+
+ payload := common.ChangePayload{
+ EntityType: common.OrganizationEntityType,
+ Operation: common.UpdateOperation,
+ Payload: params.Organization{
+ ID: "test",
+ Name: "test",
+ },
+ }
+ err = producer.Notify(payload)
+ s.Require().NoError(err)
+
+ receivedPayload := waitForPayload(consumer.Watch(), 100*time.Millisecond)
+ s.Require().NotNil(receivedPayload)
+ s.Require().Equal(payload, *receivedPayload)
+
+ payload = common.ChangePayload{
+ EntityType: common.OrganizationEntityType,
+ Operation: common.UpdateOperation,
+ Payload: params.Organization{
+ ID: "test2",
+ Name: "test",
+ },
+ }
+
+ err = producer.Notify(payload)
+ s.Require().NoError(err)
+ receivedPayload = waitForPayload(consumer.Watch(), 100*time.Millisecond)
+ s.Require().Nil(receivedPayload)
+}
+
+func (s *WatcherTestSuite) TestWithEntityFilterEnterprise() {
+ producer, err := watcher.RegisterProducer(s.ctx, "test-producer")
+ s.Require().NoError(err)
+ s.Require().NotNil(producer)
+
+ entity := params.ForgeEntity{
+ EntityType: params.ForgeEntityTypeEnterprise,
+ Name: "test",
+ ID: "test",
+ }
+
+ consumer, err := watcher.RegisterConsumer(
+ s.ctx, "test-consumer",
+ watcher.WithEntityFilter(entity),
+ )
+ s.Require().NoError(err)
+ s.Require().NotNil(consumer)
+ consumeEvents(consumer)
+
+ payload := common.ChangePayload{
+ EntityType: common.EnterpriseEntityType,
+ Operation: common.UpdateOperation,
+ Payload: params.Enterprise{
+ ID: "test",
+ Name: "test",
+ },
+ }
+ err = producer.Notify(payload)
+ s.Require().NoError(err)
+
+ receivedPayload := waitForPayload(consumer.Watch(), 100*time.Millisecond)
+ s.Require().NotNil(receivedPayload)
+ s.Require().Equal(payload, *receivedPayload)
+
+ payload = common.ChangePayload{
+ EntityType: common.EnterpriseEntityType,
+ Operation: common.UpdateOperation,
+ Payload: params.Enterprise{
+ ID: "test2",
+ Name: "test",
+ },
+ }
+
+ err = producer.Notify(payload)
+ s.Require().NoError(err)
+ receivedPayload = waitForPayload(consumer.Watch(), 100*time.Millisecond)
+ s.Require().Nil(receivedPayload)
+}
+
+func (s *WatcherTestSuite) TestWithEntityJobFilterRepository() {
+ repoUUID, err := uuid.NewUUID()
+ s.Require().NoError(err)
+
+ repoUUID2, err := uuid.NewUUID()
+ s.Require().NoError(err)
+ producer, err := watcher.RegisterProducer(s.ctx, "test-producer")
+ s.Require().NoError(err)
+ s.Require().NotNil(producer)
+
+ entity := params.ForgeEntity{
+ EntityType: params.ForgeEntityTypeRepository,
+ Owner: "test",
+ Name: "test",
+ ID: repoUUID.String(),
+ }
+
+ consumer, err := watcher.RegisterConsumer(
+ s.ctx, "test-consumer",
+ watcher.WithEntityJobFilter(entity),
+ )
+ s.Require().NoError(err)
+ s.Require().NotNil(consumer)
+ consumeEvents(consumer)
+
+ payload := common.ChangePayload{
+ EntityType: common.JobEntityType,
+ Operation: common.UpdateOperation,
+ Payload: params.Job{
+ ID: 1,
+ Name: "test",
+ RepoID: &repoUUID,
+ },
+ }
+ err = producer.Notify(payload)
+ s.Require().NoError(err)
+
+ receivedPayload := waitForPayload(consumer.Watch(), 100*time.Millisecond)
+ s.Require().NotNil(receivedPayload)
+ s.Require().Equal(payload, *receivedPayload)
+
+ payload = common.ChangePayload{
+ EntityType: common.JobEntityType,
+ Operation: common.UpdateOperation,
+ Payload: params.Job{
+ ID: 1,
+ Name: "test",
+ RepoID: &repoUUID2,
+ },
+ }
+
+ err = producer.Notify(payload)
+ s.Require().NoError(err)
+ receivedPayload = waitForPayload(consumer.Watch(), 100*time.Millisecond)
+ s.Require().Nil(receivedPayload)
+}
+
+func (s *WatcherTestSuite) TestWithEntityJobFilterOrg() {
+ orgUUID, err := uuid.NewUUID()
+ s.Require().NoError(err)
+
+ orgUUID2, err := uuid.NewUUID()
+ s.Require().NoError(err)
+
+ producer, err := watcher.RegisterProducer(s.ctx, "test-producer")
+ s.Require().NoError(err)
+ s.Require().NotNil(producer)
+
+ entity := params.ForgeEntity{
+ EntityType: params.ForgeEntityTypeOrganization,
+ Name: "test",
+ ID: orgUUID.String(),
+ }
+
+ consumer, err := watcher.RegisterConsumer(
+ s.ctx, "test-consumer",
+ watcher.WithEntityJobFilter(entity),
+ )
+ s.Require().NoError(err)
+ s.Require().NotNil(consumer)
+ consumeEvents(consumer)
+
+ payload := common.ChangePayload{
+ EntityType: common.JobEntityType,
+ Operation: common.UpdateOperation,
+ Payload: params.Job{
+ ID: 1,
+ Name: "test",
+ OrgID: &orgUUID,
+ },
+ }
+ err = producer.Notify(payload)
+ s.Require().NoError(err)
+
+ receivedPayload := waitForPayload(consumer.Watch(), 100*time.Millisecond)
+ s.Require().NotNil(receivedPayload)
+ s.Require().Equal(payload, *receivedPayload)
+
+ payload = common.ChangePayload{
+ EntityType: common.JobEntityType,
+ Operation: common.UpdateOperation,
+ Payload: params.Job{
+ ID: 1,
+ Name: "test",
+ OrgID: &orgUUID2,
+ },
+ }
+
+ err = producer.Notify(payload)
+ s.Require().NoError(err)
+ receivedPayload = waitForPayload(consumer.Watch(), 100*time.Millisecond)
+ s.Require().Nil(receivedPayload)
+}
+
+func (s *WatcherTestSuite) TestWithEntityJobFilterEnterprise() {
+ entUUID, err := uuid.NewUUID()
+ s.Require().NoError(err)
+
+ entUUID2, err := uuid.NewUUID()
+ s.Require().NoError(err)
+
+ producer, err := watcher.RegisterProducer(s.ctx, "test-producer")
+ s.Require().NoError(err)
+ s.Require().NotNil(producer)
+
+ entity := params.ForgeEntity{
+ EntityType: params.ForgeEntityTypeEnterprise,
+ Name: "test",
+ ID: entUUID.String(),
+ }
+
+ consumer, err := watcher.RegisterConsumer(
+ s.ctx, "test-consumer",
+ watcher.WithEntityJobFilter(entity),
+ )
+ s.Require().NoError(err)
+ s.Require().NotNil(consumer)
+ consumeEvents(consumer)
+
+ payload := common.ChangePayload{
+ EntityType: common.JobEntityType,
+ Operation: common.UpdateOperation,
+ Payload: params.Job{
+ ID: 1,
+ Name: "test",
+ EnterpriseID: &entUUID,
+ },
+ }
+ err = producer.Notify(payload)
+ s.Require().NoError(err)
+
+ receivedPayload := waitForPayload(consumer.Watch(), 100*time.Millisecond)
+ s.Require().NotNil(receivedPayload)
+ s.Require().Equal(payload, *receivedPayload)
+
+ payload = common.ChangePayload{
+ EntityType: common.JobEntityType,
+ Operation: common.UpdateOperation,
+ Payload: params.Job{
+ ID: 1,
+ Name: "test",
+ EnterpriseID: &entUUID2,
+ },
+ }
+
+ err = producer.Notify(payload)
+ s.Require().NoError(err)
+ receivedPayload = waitForPayload(consumer.Watch(), 100*time.Millisecond)
+ s.Require().Nil(receivedPayload)
+}
+
+func (s *WatcherTestSuite) TestWithEntityJobFilterBogusEntityType() {
+ producer, err := watcher.RegisterProducer(s.ctx, "test-producer")
+ s.Require().NoError(err)
+ s.Require().NotNil(producer)
+
+ entity := params.ForgeEntity{
+ // This should trigger the default branch in the filter and
+ // return false
+ EntityType: params.ForgeEntityType("bogus"),
+ Name: "test",
+ ID: "test",
+ }
+
+ consumer, err := watcher.RegisterConsumer(
+ s.ctx, "test-consumer",
+ watcher.WithEntityJobFilter(entity),
+ )
+ s.Require().NoError(err)
+ s.Require().NotNil(consumer)
+ consumeEvents(consumer)
+
+ payload := common.ChangePayload{
+ EntityType: common.JobEntityType,
+ Operation: common.UpdateOperation,
+ Payload: params.Job{
+ ID: 1,
+ Name: "test",
+ EnterpriseID: nil,
+ },
+ }
+ err = producer.Notify(payload)
+ s.Require().NoError(err)
+
+ receivedPayload := waitForPayload(consumer.Watch(), 100*time.Millisecond)
+ s.Require().Nil(receivedPayload)
+
+ payload = common.ChangePayload{
+ EntityType: common.JobEntityType,
+ Operation: common.UpdateOperation,
+ Payload: params.Job{
+ ID: 1,
+ Name: "test",
+ EnterpriseID: nil,
+ },
+ }
+
+ err = producer.Notify(payload)
+ s.Require().NoError(err)
+ receivedPayload = waitForPayload(consumer.Watch(), 100*time.Millisecond)
+ s.Require().Nil(receivedPayload)
+}
+
+func (s *WatcherTestSuite) TestWithNone() {
+ producer, err := watcher.RegisterProducer(s.ctx, "test-producer")
+ s.Require().NoError(err)
+ s.Require().NotNil(producer)
+
+ consumer, err := watcher.RegisterConsumer(
+ s.ctx, "test-consumer",
+ watcher.WithNone(),
+ )
+ s.Require().NoError(err)
+ s.Require().NotNil(consumer)
+ consumeEvents(consumer)
+
+ payload := common.ChangePayload{
+ EntityType: common.RepositoryEntityType,
+ Operation: common.UpdateOperation,
+ Payload: params.Repository{
+ ID: "test",
+ Name: "test",
+ Owner: "test",
+ },
+ }
+ err = producer.Notify(payload)
+ s.Require().NoError(err)
+
+ receivedPayload := waitForPayload(consumer.Watch(), 100*time.Millisecond)
+ s.Require().Nil(receivedPayload)
+}
+
+func (s *WatcherTestSuite) TestWithUserIDFilter() {
+ producer, err := watcher.RegisterProducer(s.ctx, "test-producer")
+ s.Require().NoError(err)
+ s.Require().NotNil(producer)
+
+ userID, err := uuid.NewUUID()
+ s.Require().NoError(err)
+
+ userID2, err := uuid.NewUUID()
+ s.Require().NoError(err)
+
+ consumer, err := watcher.RegisterConsumer(
+ s.ctx, "test-consumer",
+ watcher.WithUserIDFilter(userID.String()),
+ )
+ s.Require().NoError(err)
+ s.Require().NotNil(consumer)
+ consumeEvents(consumer)
+
+ payload := common.ChangePayload{
+ EntityType: common.UserEntityType,
+ Operation: common.UpdateOperation,
+ Payload: params.User{
+ ID: userID.String(),
+ },
+ }
+ err = producer.Notify(payload)
+ s.Require().NoError(err)
+
+ receivedPayload := waitForPayload(consumer.Watch(), 100*time.Millisecond)
+ s.Require().NotNil(receivedPayload)
+ s.Require().Equal(payload, *receivedPayload)
+
+ payload = common.ChangePayload{
+ EntityType: common.UserEntityType,
+ Operation: common.UpdateOperation,
+ Payload: params.User{
+ ID: userID2.String(),
+ },
+ }
+
+ err = producer.Notify(payload)
+ s.Require().NoError(err)
+ receivedPayload = waitForPayload(consumer.Watch(), 100*time.Millisecond)
+ s.Require().Nil(receivedPayload)
+
+ payload = common.ChangePayload{
+ EntityType: common.UserEntityType,
+ Operation: common.UpdateOperation,
+ // Declare as user, but payload is a pool. Filter should return false.
+ Payload: params.Pool{},
+ }
+
+ err = producer.Notify(payload)
+ s.Require().NoError(err)
+ receivedPayload = waitForPayload(consumer.Watch(), 100*time.Millisecond)
+ s.Require().Nil(receivedPayload)
+}
+
+func (s *WatcherTestSuite) TestWithForgeCredentialsGithub() {
+ producer, err := watcher.RegisterProducer(s.ctx, "test-producer")
+ s.Require().NoError(err)
+ s.Require().NotNil(producer)
+
+ creds := params.ForgeCredentials{
+ ForgeType: params.GithubEndpointType,
+ ID: 1,
+ }
+
+ consumer, err := watcher.RegisterConsumer(
+ s.ctx, "test-consumer",
+ watcher.WithForgeCredentialsFilter(creds),
+ )
+ s.Require().NoError(err)
+ s.Require().NotNil(consumer)
+ consumeEvents(consumer)
+
+ payload := common.ChangePayload{
+ EntityType: common.GithubCredentialsEntityType,
+ Operation: common.UpdateOperation,
+ Payload: params.ForgeCredentials{
+ ForgeType: params.GithubEndpointType,
+ ID: 1,
+ },
+ }
+ err = producer.Notify(payload)
+ s.Require().NoError(err)
+
+ receivedPayload := waitForPayload(consumer.Watch(), 100*time.Millisecond)
+ s.Require().NotNil(receivedPayload)
+ s.Require().Equal(payload, *receivedPayload)
+
+ payload = common.ChangePayload{
+ EntityType: common.GiteaCredentialsEntityType,
+ Operation: common.UpdateOperation,
+ Payload: params.ForgeCredentials{
+ ForgeType: params.GiteaEndpointType,
+ ID: 1,
+ },
+ }
+
+ err = producer.Notify(payload)
+ s.Require().NoError(err)
+ receivedPayload = waitForPayload(consumer.Watch(), 100*time.Millisecond)
+ s.Require().Nil(receivedPayload)
+
+ payload = common.ChangePayload{
+ EntityType: common.GiteaCredentialsEntityType,
+ Operation: common.UpdateOperation,
+ Payload: params.Pool{},
+ }
+
+ err = producer.Notify(payload)
+ s.Require().NoError(err)
+ receivedPayload = waitForPayload(consumer.Watch(), 100*time.Millisecond)
+ s.Require().Nil(receivedPayload)
+}
+
+func (s *WatcherTestSuite) TestWithScaleSetFilter() {
+ producer, err := watcher.RegisterProducer(s.ctx, "test-producer")
+ s.Require().NoError(err)
+ s.Require().NotNil(producer)
+
+ scaleSet := params.ScaleSet{
+ ID: 1,
+ }
+
+ consumer, err := watcher.RegisterConsumer(
+ s.ctx, "test-consumer",
+ watcher.WithScaleSetFilter(scaleSet),
+ )
+ s.Require().NoError(err)
+ s.Require().NotNil(consumer)
+ consumeEvents(consumer)
+
+ payload := common.ChangePayload{
+ EntityType: common.ScaleSetEntityType,
+ Operation: common.UpdateOperation,
+ Payload: params.ScaleSet{
+ ID: 1,
+ Name: "test",
+ },
+ }
+ err = producer.Notify(payload)
+ s.Require().NoError(err)
+
+ receivedPayload := waitForPayload(consumer.Watch(), 100*time.Millisecond)
+ s.Require().NotNil(receivedPayload)
+ s.Require().Equal(payload, *receivedPayload)
+
+ payload = common.ChangePayload{
+ EntityType: common.ScaleSetEntityType,
+ Operation: common.UpdateOperation,
+ Payload: params.ScaleSet{
+ ID: 2,
+ Name: "test",
+ },
+ }
+
+ err = producer.Notify(payload)
+ s.Require().NoError(err)
+ receivedPayload = waitForPayload(consumer.Watch(), 100*time.Millisecond)
+ s.Require().Nil(receivedPayload)
+
+ payload = common.ChangePayload{
+ EntityType: common.ScaleSetEntityType,
+ Operation: common.UpdateOperation,
+ Payload: params.Pool{},
+ }
+
+ err = producer.Notify(payload)
+ s.Require().NoError(err)
+ receivedPayload = waitForPayload(consumer.Watch(), 100*time.Millisecond)
+ s.Require().Nil(receivedPayload)
+}
+
+func (s *WatcherTestSuite) TestWithExcludeEntityTypeFilter() {
+ producer, err := watcher.RegisterProducer(s.ctx, "test-producer")
+ s.Require().NoError(err)
+ s.Require().NotNil(producer)
+
+ consumer, err := watcher.RegisterConsumer(
+ s.ctx, "test-consumer",
+ watcher.WithExcludeEntityTypeFilter(common.RepositoryEntityType),
+ )
+ s.Require().NoError(err)
+ s.Require().NotNil(consumer)
+ consumeEvents(consumer)
+
+ payload := common.ChangePayload{
+ EntityType: common.RepositoryEntityType,
+ Operation: common.UpdateOperation,
+ Payload: params.Repository{
+ ID: "test",
+ Name: "test",
+ Owner: "test",
+ },
+ }
+ err = producer.Notify(payload)
+ s.Require().NoError(err)
+
+ receivedPayload := waitForPayload(consumer.Watch(), 100*time.Millisecond)
+ s.Require().Nil(receivedPayload)
+
+ payload = common.ChangePayload{
+ EntityType: common.OrganizationEntityType,
+ Operation: common.UpdateOperation,
+ Payload: params.Repository{
+ ID: "test",
+ Name: "test",
+ },
+ }
+ err = producer.Notify(payload)
+ s.Require().NoError(err)
+
+ receivedPayload = waitForPayload(consumer.Watch(), 100*time.Millisecond)
+ s.Require().NotNil(receivedPayload)
+ s.Require().Equal(payload, *receivedPayload)
+}
+
+func (s *WatcherTestSuite) TestWithInstanceStatusFilter() {
+ producer, err := watcher.RegisterProducer(s.ctx, "test-producer")
+ s.Require().NoError(err)
+ s.Require().NotNil(producer)
+
+ consumer, err := watcher.RegisterConsumer(
+ s.ctx, "test-consumer",
+ watcher.WithInstanceStatusFilter(
+ commonParams.InstanceCreating,
+ commonParams.InstanceDeleting),
+ )
+ s.Require().NoError(err)
+ s.Require().NotNil(consumer)
+ consumeEvents(consumer)
+
+ payload := common.ChangePayload{
+ EntityType: common.InstanceEntityType,
+ Operation: common.UpdateOperation,
+ Payload: params.Instance{
+ ID: "test-instance",
+ Status: commonParams.InstanceCreating,
+ },
+ }
+ err = producer.Notify(payload)
+ s.Require().NoError(err)
+
+ receivedPayload := waitForPayload(consumer.Watch(), 100*time.Millisecond)
+ s.Require().NotNil(receivedPayload)
+ s.Require().Equal(payload, *receivedPayload)
+
+ payload = common.ChangePayload{
+ EntityType: common.InstanceEntityType,
+ Operation: common.UpdateOperation,
+ Payload: params.Instance{
+ ID: "test-instance",
+ Status: commonParams.InstanceDeleted,
+ },
+ }
+
+ err = producer.Notify(payload)
+ s.Require().NoError(err)
+ receivedPayload = waitForPayload(consumer.Watch(), 100*time.Millisecond)
+ s.Require().Nil(receivedPayload)
+
+ payload = common.ChangePayload{
+ EntityType: common.InstanceEntityType,
+ Operation: common.UpdateOperation,
+ Payload: params.Instance{
+ ID: "test-instance",
+ Status: commonParams.InstanceDeleting,
+ },
+ }
+
+ err = producer.Notify(payload)
+ s.Require().NoError(err)
+ receivedPayload = waitForPayload(consumer.Watch(), 100*time.Millisecond)
+ s.Require().NotNil(receivedPayload)
+ s.Require().Equal(payload, *receivedPayload)
+}
+
+func TestWatcherTestSuite(t *testing.T) {
+	// Run the in-memory watcher tests (producers, consumers and filters).
+ watcherSuite := &WatcherTestSuite{
+ ctx: context.TODO(),
+ }
+ suite.Run(t, watcherSuite)
+
+	// The store-backed suite below requires an initialized global watcher
+	// and a real database connection.
+	ctx := context.Background()
+	watcher.InitWatcher(ctx)
+
+ store, err := database.NewDatabase(ctx, garmTesting.GetTestSqliteDBConfig(t))
+ if err != nil {
+ t.Fatalf("failed to create db connection: %s", err)
+ }
+
+ err = maybeInitController(store)
+ if err != nil {
+ t.Fatalf("failed to init controller: %s", err)
+ }
+
+ adminCtx := garmTesting.ImpersonateAdminContext(ctx, store, t)
+ watcherStoreSuite := &WatcherStoreTestSuite{
+ ctx: adminCtx,
+ store: store,
+ }
+ suite.Run(t, watcherStoreSuite)
+}
diff --git a/doc/building_from_source.md b/doc/building_from_source.md
index 9058820e..e5d2d0fd 100644
--- a/doc/building_from_source.md
+++ b/doc/building_from_source.md
@@ -6,12 +6,13 @@ First, clone the repository:
```bash
git clone https://github.com/cloudbase/garm
+cd garm
```
Then build garm:
```bash
-make
+make build
```
You should now have both `garm` and `garm-cli` available in the `./bin` folder.
@@ -22,4 +23,65 @@ If you have docker/podman installed, you can also build a static binary against
make build-static
```
-This command will also build for both AMD64 and ARM64. Resulting binaries will be in the `./bin` folder.
\ No newline at end of file
+This command will also build for both AMD64 and ARM64. Resulting binaries will be in the `./bin` folder.
+
+## Hacking
+
+If you're hacking on GARM and want to override the default version GARM injects, you can run the following command:
+
+```bash
+VERSION=v1.0.0 make build
+```
+
+> [!IMPORTANT]
+> This only works for `make build`. The `make build-static` command does not support version overrides.
+
+## The Web UI SPA
+
+GARM now ships with a single-page application (SPA). The application is written in Svelte and Tailwind CSS. To rebuild it or hack on it, you will need a number of dependencies installed and available in your `$PATH`.
+
+### Prerequisites
+
+- **Node.js 24+** and **npm**
+- **Go 1.21+** (for building the GARM backend)
+- **openapi-generator-cli** in your PATH (for API client generation)
+
+### Installing openapi-generator-cli
+
+**Option 1: NPM Global Install**
+```bash
+npm install -g @openapitools/openapi-generator-cli
+```
+
+**Option 2: Manual Install**
+Download from [OpenAPI Generator releases](https://github.com/OpenAPITools/openapi-generator/releases) and add to your PATH.
+
+**Verify Installation:**
+
+```bash
+openapi-generator-cli version
+```
+
+### Hacking on the Web UI
+
+If you need to change something in the `webapp/src` folder, make sure to rebuild the webapp before rebuilding GARM:
+
+```bash
+make build-webui
+make build
+```
+
+> [!IMPORTANT]
+> The Web UI that GARM ships with has `go generate` stanzas that require `@openapitools/openapi-generator-cli` and `tailwindcss` to be installed. You will also have to make sure that the Web UI still works if you change API models, as adding new fields or changing the JSON tags of existing fields will change accessors in the generated client code.
+
+### Changing API models
+
+If you need to change the models in the `params/` package, you will also need to regenerate the client both for garm-cli and for the web application we ship with GARM. To do this, you can run:
+
+```bash
+make generate
+```
+
+You will also need to make sure that the web app still works.
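+
+One way to verify is to chain the regeneration and the Web UI rebuild in one go (these are simply the make targets already described above):
+
+```bash
+make generate
+make build-webui
+make build
+```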
diff --git a/doc/config.md b/doc/config.md
new file mode 100644
index 00000000..3c67e1b4
--- /dev/null
+++ b/doc/config.md
@@ -0,0 +1,482 @@
+# Configuration
+
+The ```GARM``` configuration is a simple ```toml``` file. The sample config file in [the testdata folder](/testdata/config.toml) is fairly well commented and should be enough to get you started. The configuration file is split into several sections, each of which is documented below. The sections are:
+
+
+
+- [Configuration](#configuration)
+ - [The default config section](#the-default-config-section)
+ - [The callback_url option](#the-callback_url-option)
+ - [The metadata_url option](#the-metadata_url-option)
+ - [The debug_server option](#the-debug_server-option)
+ - [The log_file option](#the-log_file-option)
+ - [Rotating log files](#rotating-log-files)
+ - [The enable_log_streamer option](#the-enable_log_streamer-option)
+ - [The logging section](#the-logging-section)
+ - [Database configuration](#database-configuration)
+ - [Provider configuration](#provider-configuration)
+ - [Providers](#providers)
+ - [Available external providers](#available-external-providers)
+ - [The metrics section](#the-metrics-section)
+ - [Common metrics](#common-metrics)
+ - [Enterprise metrics](#enterprise-metrics)
+ - [Organization metrics](#organization-metrics)
+ - [Repository metrics](#repository-metrics)
+ - [Provider metrics](#provider-metrics)
+ - [Pool metrics](#pool-metrics)
+ - [Runner metrics](#runner-metrics)
+ - [Github metrics](#github-metrics)
+ - [Enabling metrics](#enabling-metrics)
+ - [Configuring prometheus](#configuring-prometheus)
+ - [The JWT authentication config section](#the-jwt-authentication-config-section)
+ - [The API server config section](#the-api-server-config-section)
+
+
+
+## The default config section
+
+The `default` config section holds configuration options that don't need a category of their own, but are essential to the operation of the service. In this section we will detail each of the options available in the `default` section.
+
+```toml
+[default]
+# Uncomment this line if you'd like to log to a file instead of standard output.
+# log_file = "/tmp/runner-manager.log"
+
+# Enable streaming logs via web sockets. Use garm-cli debug-log.
+enable_log_streamer = false
+
+# Enable the golang debug server. See the documentation in the "doc" folder for more information.
+debug_server = false
+```
+
+### The callback_url option
+
+Your runners will call back home with status updates as they install. Once they are set up, they will also send the GitHub agent ID they were allocated. You will need to configure the ```callback_url``` option in the ```garm``` server config. This URL needs to point to the following API endpoint:
+
+ ```txt
+ POST /api/v1/callbacks/status
+ ```
+
+Example of a runner sending status updates:
+
+ ```bash
+ garm-cli runner show garm-DvxiVAlfHeE7
+ +-----------------+------------------------------------------------------------------------------------+
+ | FIELD | VALUE |
+ +-----------------+------------------------------------------------------------------------------------+
+ | ID | 16b96ba2-d406-45b8-ab66-b70be6237b4e |
+ | Provider ID | garm-DvxiVAlfHeE7 |
+ | Name | garm-DvxiVAlfHeE7 |
+ | OS Type | linux |
+ | OS Architecture | amd64 |
+ | OS Name | ubuntu |
+ | OS Version | jammy |
+ | Status | running |
+ | Runner Status | idle |
+ | Pool ID | 8ec34c1f-b053-4a5d-80d6-40afdfb389f9 |
+ | Addresses | 10.198.117.120 |
+ | Status Updates | 2023-07-08T06:26:46: runner registration token was retrieved |
+ | | 2023-07-08T06:26:46: using cached runner found in /opt/cache/actions-runner/latest |
+ | | 2023-07-08T06:26:50: configuring runner |
+ | | 2023-07-08T06:26:56: runner successfully configured after 1 attempt(s) |
+ | | 2023-07-08T06:26:56: installing runner service |
+ | | 2023-07-08T06:26:56: starting service |
+ | | 2023-07-08T06:26:57: runner successfully installed |
+ +-----------------+------------------------------------------------------------------------------------+
+
+ ```
+
+This URL must be set and must be accessible by the instance. If you wish to restrict access to it, a reverse proxy can be configured to accept requests only from the networks in which the runners ```garm``` manages will be spun up. This URL doesn't need to be globally accessible; it just needs to be reachable by the instances.
+
+For example, in a scenario where you expose the API endpoint directly, this setting could look like the following:
+
+ ```toml
+ callback_url = "https://garm.example.com/api/v1/callbacks"
+ ```
+
+Authentication is done using a short-lived JWT token that gets generated for each instance we spin up. That JWT token only grants the instance access to update its own status and to fetch its own metadata. No other API endpoints will work with that JWT token. The validity of the token is equal to the pool bootstrap timeout value (default 20 minutes) plus the garm polling interval (5 minutes).
+
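+For illustration only, a status update from an instance is simply an authenticated POST to this endpoint. The sketch below assumes the instance's short-lived JWT is in `INSTANCE_JWT`; the JSON fields shown are illustrative, not the exact schema:
+
+```bash
+curl -X POST \
+    -H "Authorization: Bearer ${INSTANCE_JWT}" \
+    -H "Content-Type: application/json" \
+    -d '{"status": "installing", "message": "configuring runner"}' \
+    https://garm.example.com/api/v1/callbacks/status
+```
+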
+There is a sample ```nginx``` config [in the testdata folder](/testdata/nginx-server.conf). Feel free to customize it in any way you see fit.
+
+### The metadata_url option
+
+The metadata URL is the base URL for any information an instance may need to fetch in order to finish setting itself up. As this URL may be placed behind a reverse proxy, you'll need to configure it in the ```garm``` config file. Ultimately this URL will need to point to the following ```garm``` API endpoint:
+
+ ```bash
+ GET /api/v1/metadata
+ ```
+
+This URL needs to be accessible only by the instances ```garm``` sets up. This URL will not be used by anyone else. To configure it in ```garm``` add the following line in the ```[default]``` section of your ```garm``` config:
+
+ ```toml
+ metadata_url = "https://garm.example.com/api/v1/metadata"
+ ```
+
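+As with the callback URL, instances authenticate against this endpoint using their short-lived JWT. A quick reachability check from within the runner network might look like this (a sketch; `INSTANCE_JWT` stands in for the token issued to the instance):
+
+```bash
+curl -H "Authorization: Bearer ${INSTANCE_JWT}" \
+    https://garm.example.com/api/v1/metadata
+```
+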
+### The debug_server option
+
+GARM can optionally enable the golang profiling server. This is useful if you suspect garm may have a bottleneck somewhere. To enable the profiling server, add the following section to the garm config:
+
+```toml
+[default]
+
+debug_server = true
+```
+
+And restart garm. You can then use the following command to start profiling:
+
+```bash
+go tool pprof http://127.0.0.1:9997/debug/pprof/profile?seconds=120
+```
+
+> **IMPORTANT NOTE on profiling when behind a reverse proxy**: The above command will hang for a fairly long time. Most reverse proxies will time out after about 60 seconds. To avoid this, you should only profile on localhost by connecting directly to garm.
+
+It's also advisable to exclude the debug server URLs from your reverse proxy and only make them available locally.
+
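+The profiling server exposes the standard Go `net/http/pprof` handlers, so other profile types are available at the same root (a sketch, assuming you connect directly to garm on localhost):
+
+```bash
+# Heap profile (in-use memory allocations)
+go tool pprof http://127.0.0.1:9997/debug/pprof/heap
+
+# Dump all goroutine stack traces to stdout
+curl "http://127.0.0.1:9997/debug/pprof/goroutine?debug=2"
+```
+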
+Now that the debug server is enabled, here is a blog post on how to profile golang applications: https://blog.golang.org/profiling-go-programs
+
+
+### The log_file option
+
+By default, GARM logs everything to standard output.
+
+You can optionally log to file by adding the following to your config file:
+
+```toml
+[default]
+# Use this if you'd like to log to a file instead of standard output.
+log_file = "/tmp/runner-manager.log"
+```
+
+#### Rotating log files
+
+GARM automatically rotates the log file when it reaches 500 MB in size or 28 days of age, whichever comes first.
+
+However, if you want to manually rotate the log file, you can send a `SIGHUP` signal to the GARM process.
+
+You can add the following to your systemd unit file to enable `reload`:
+
+```ini
+[Service]
+ExecReload=/bin/kill -HUP $MAINPID
+```
+
+Then you can simply:
+
+```bash
+systemctl reload garm
+```
+
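+If garm is not running under systemd, you can send the signal directly (assuming a single garm process is running):
+
+```bash
+kill -HUP "$(pidof garm)"
+```
+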
+### The enable_log_streamer option
+
+This option allows you to stream garm logs directly to your terminal. Set this option to true, then you can use the following command to stream logs:
+
+```bash
+garm-cli debug-log
+```
+
+An important note if you enable this option behind a reverse proxy: the log streamer uses websockets to stream logs to you, so you will need to configure your reverse proxy to allow websocket connections. If you're using nginx, you will need to add the following to your nginx `server` config:
+
+```nginx
+location /api/v1/ws {
+ proxy_pass http://garm_backend;
+ proxy_http_version 1.1;
+ proxy_set_header Upgrade $http_upgrade;
+ proxy_set_header Connection "Upgrade";
+ proxy_set_header Host $host;
+}
+```
+
+## The logging section
+
+GARM has switched to the `slog` package, which adds structured logging. As such, we added a dedicated `logging` section to the config to tweak the logging settings. We moved the `enable_log_streamer` and `log_file` options from the `default` section to the `logging` section. They are still available in the `default` section for backwards compatibility, but they are deprecated and will be removed in a future release.
+
+An example of the new `logging` section:
+
+```toml
+[logging]
+# Uncomment this line if you'd like to log to a file instead of standard output.
+# log_file = "/tmp/runner-manager.log"
+
+# enable_log_streamer enables streaming the logs over websockets
+enable_log_streamer = true
+# log_format is the output format of the logs. GARM uses structured logging and can
+# output as "text" or "json"
+log_format = "text"
+# log_level is the logging level GARM will output. Available log levels are:
+# * debug
+# * info
+# * warn
+# * error
+log_level = "debug"
+# log_source will output information about the function that generated the log line.
+log_source = false
+```
+
+By default GARM logs everything to standard output. You can optionally log to file by adding the `log_file` option to the `logging` section. The `enable_log_streamer` option allows you to stream GARM logs directly to your terminal. Set this option to `true`, then you can use the following command to stream logs:
+
+```bash
+garm-cli debug-log
+```
+
+The `log_format`, `log_level` and `log_source` options allow you to tweak the logging output. The `log_format` option can be set to `text` or `json`. The `log_level` option can be set to `debug`, `info`, `warn` or `error`. The `log_source` option will output information about the function that generated the log line. All these options influence how the structured logging is output.
+
+This will allow you to ingest GARM logs in a central location such as an ELK stack or similar.
+
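+For example, with `log_format = "json"` you can filter the structured output locally before shipping it to a log aggregator (a sketch, assuming garm runs under a systemd unit named `garm` and that the default `slog` JSON keys are used):
+
+```bash
+journalctl -u garm -o cat -f | jq 'select(.level == "ERROR")'
+```
+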
+## Database configuration
+
+GARM currently supports SQLite3. Support for other stores will be added in the future.
+
+```toml
+[database]
+ # Turn on/off debugging for database queries.
+ debug = false
+ # Database backend to use. Currently supported backends are:
+ # * sqlite3
+ backend = "sqlite3"
+ # the passphrase option is a temporary measure by which we encrypt the webhook
+ # secret that gets saved to the database, using AES256. In the future, secrets
+ # will be saved to something like Barbican or Vault, eliminating the need for
+ # this. This string needs to be 32 characters in size.
+ passphrase = "shreotsinWadquidAitNefayctowUrph"
+ [database.sqlite3]
+ # Path on disk to the sqlite3 database file.
+ db_file = "/home/runner/garm.db"
+```
+
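+The passphrase must be exactly 32 characters long. One way to generate a suitable value (an illustrative one-liner; any method that yields 32 random characters works):
+
+```bash
+tr -dc 'A-Za-z0-9' < /dev/urandom | head -c 32; echo
+```
+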
+## Provider configuration
+
+GARM was designed to be extensible. Providers can be written as external executables which implement the needed interface to create/delete/list compute systems that are used by ```GARM``` to create runners.
+
+### Providers
+
+GARM delegates the functionality needed to create the runners to external executables. These executables can be either binaries or scripts. As long as they adhere to the needed interface, they can be used to create runners in any target IaaS. You might find this behavior familiar if you've ever had to deal with installing `CNIs` in `containerd`. The principle is the same.
+
+The configuration for an external provider is quite simple:
+
+```toml
+# This is an example external provider. External providers are executables that
+# implement the needed interface to create/delete/list compute systems that are used
+# by GARM to create runners.
+[[provider]]
+name = "openstack_external"
+description = "external openstack provider"
+provider_type = "external"
+ [provider.external]
+ # config file passed to the executable via GARM_PROVIDER_CONFIG_FILE environment variable
+ config_file = "/etc/garm/providers.d/openstack/keystonerc"
+ # Absolute path to an executable that implements the provider logic. This executable can be
+ # anything (bash, a binary, python, etc). See documentation in this repo on how to write an
+ # external provider.
+ provider_executable = "/etc/garm/providers.d/openstack/garm-external-provider"
+ # This option will pass all environment variables that start with AWS_ to the provider.
+ # To pass in individual variables, you can add the entire name to the list.
+ environment_variables = ["AWS_"]
+```
+
+The external provider has three options:
+
+* `provider_executable`
+* `config_file`
+* `environment_variables`
+
+The ```provider_executable``` option is the absolute path to an executable that implements the provider logic. GARM will delegate all provider operations to this executable. This executable can be anything (bash, python, perl, go, etc). See [Writing an external provider](./external_provider.md) for more details.
+
+The ```config_file``` option is a path on disk to an arbitrary file, that is passed to the external executable via the environment variable ```GARM_PROVIDER_CONFIG_FILE```. This file is only relevant to the external provider. GARM itself does not read it. Let's take the [OpenStack provider](https://github.com/cloudbase/garm-provider-openstack) as an example. The [config file](https://github.com/cloudbase/garm-provider-openstack/blob/ac46d4d5a542bca96cd0309c89437d3382c3ea26/testdata/config.toml) contains access information for an OpenStack cloud as well as some provider specific options like whether or not to boot from volume and which tenant network to use.
+
+The `environment_variables` option is a list of environment variables that will be passed to the external provider. By default GARM will pass a clean env to providers, consisting only of variables that the [provider interface](./external_provider.md) expects. However, in some situations, providers may need access to certain environment variables set in the environment of GARM itself. This might be needed to enable access to IAM roles (EC2) or managed identities (Azure). This option takes a list of environment variables or prefixes of environment variables that will be passed to the provider. For example, if you want to pass all environment variables that start with `AWS_` to the provider, you can set this option to `["AWS_"]`.
+
+If you want to implement an external provider, you can use this file for anything you need to pass into the binary when ```GARM``` calls it to execute a particular operation.
+
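+To make the contract more concrete, here is a minimal skeleton of what such an executable could look like. This is only a sketch; the exact environment variables (such as `GARM_COMMAND`) and the payloads exchanged on stdin/stdout are defined in [Writing an external provider](./external_provider.md) and should be checked there:
+
+```bash
+#!/bin/bash
+set -e
+
+# GARM_PROVIDER_CONFIG_FILE points at the config_file defined in the
+# [provider.external] section. Source it for credentials and options.
+source "$GARM_PROVIDER_CONFIG_FILE"
+
+# The requested operation is passed in via an environment variable.
+case "$GARM_COMMAND" in
+    CreateInstance)
+        # Bootstrap parameters arrive as JSON on stdin; create the compute
+        # resource and print the resulting instance as JSON on stdout.
+        ;;
+    DeleteInstance)
+        # Tear down the instance GARM asked us to remove.
+        ;;
+    *)
+        echo "unknown command: $GARM_COMMAND" >&2
+        exit 1
+        ;;
+esac
+```
+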
+#### Available external providers
+
+For non-testing purposes, these are the external providers currently available:
+
+* [OpenStack](https://github.com/cloudbase/garm-provider-openstack)
+* [Azure](https://github.com/cloudbase/garm-provider-azure)
+* [Kubernetes](https://github.com/mercedes-benz/garm-provider-k8s) - Thanks to the amazing folks at @mercedes-benz for sharing their awesome provider!
+* [LXD](https://github.com/cloudbase/garm-provider-lxd)
+* [Incus](https://github.com/cloudbase/garm-provider-incus)
+* [Equinix Metal](https://github.com/cloudbase/garm-provider-equinix)
+* [Amazon EC2](https://github.com/cloudbase/garm-provider-aws)
+* [Google Cloud Platform (GCP)](https://github.com/cloudbase/garm-provider-gcp)
+* [Oracle Cloud Infrastructure (OCI)](https://github.com/cloudbase/garm-provider-oci)
+
+Details on how to install and configure them are available in their respective repositories.
+
+If you wrote a provider and would like to add it to the above list, feel free to open a PR.
+
+
+## The metrics section
+
+This is one of the features in GARM that I really love having. For one thing, it's community contributed and for another, it really adds value to the project. It allows us to create some pretty nice visualizations of what is happening with GARM.
+
+### Common metrics
+
+| Metric name | Type | Labels | Description |
+|--------------------------|---------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------|
+| `garm_health` | Gauge | `controller_id`=<controller id> `callback_url`=<callback url> `controller_webhook_url`=<controller webhook url> `metadata_url`=<metadata url> `webhook_url`=<webhook url> `name`=<hostname> | This is a gauge that is set to 1 if GARM is healthy and 0 if it is not. This is useful for alerting. |
+| `garm_webhooks_received` | Counter | `valid`=<valid request> `reason`=<reason for invalid requests> | This is a counter that increments every time GARM receives a webhook from GitHub. |
+
+### Enterprise metrics
+
+| Metric name | Type | Labels | Description |
+|---------------------------------------|-------|-------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------|
+| `garm_enterprise_info`                | Gauge | `id`=&lt;enterprise id&gt; `name`=&lt;enterprise name&gt; | This is a gauge that is set to 1 and exposes enterprise information |
+| `garm_enterprise_pool_manager_status` | Gauge | `id`=<enterprise id> `name`=<enterprise name> `running`=<true\|false> | This is a gauge that is set to 1 if the enterprise pool manager is running and set to 0 if not |
+
+### Organization metrics
+
+| Metric name | Type | Labels | Description |
+|-----------------------------------------|-------|-----------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------|
+| `garm_organization_info`                | Gauge | `id`=&lt;organization id&gt; `name`=&lt;organization name&gt; | This is a gauge that is set to 1 and exposes organization information |
+| `garm_organization_pool_manager_status` | Gauge | `id`=<organization id> `name`=<organization name> `running`=<true\|false> | This is a gauge that is set to 1 if the organization pool manager is running and set to 0 if not |
+
+### Repository metrics
+
+| Metric name | Type | Labels | Description |
+|---------------------------------------|-------|-------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------|
+| `garm_repository_info`                | Gauge | `id`=&lt;repository id&gt; `name`=&lt;repository name&gt; | This is a gauge that is set to 1 and exposes repository information |
+| `garm_repository_pool_manager_status` | Gauge | `id`=<repository id> `name`=<repository name> `running`=<true\|false> | This is a gauge that is set to 1 if the repository pool manager is running and set to 0 if not |
+
+### Provider metrics
+
+| Metric name | Type | Labels | Description |
+|----------------------|-------|-------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------|
+| `garm_provider_info` | Gauge | `description`=&lt;provider description&gt; `name`=&lt;provider name&gt; `type`=&lt;internal\|external&gt; | This is a gauge that is set to 1 and exposes provider information |
+
+### Pool metrics
+
+| Metric name | Type | Labels | Description |
+|-------------------------------|-------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------|
+| `garm_pool_info` | Gauge | `flavor`=&lt;flavor&gt; `id`=&lt;pool id&gt; `image`=&lt;image name&gt; `os_arch`=&lt;defined OS arch&gt; `os_type`=&lt;defined OS name&gt; `pool_owner`=&lt;owner name&gt; `pool_type`=&lt;repository\|organization\|enterprise&gt; `prefix`=&lt;prefix&gt; `provider`=&lt;provider name&gt; `tags`=&lt;concatenated list of pool tags&gt; | This is a gauge that is set to 1 and exposes pool information |
+| `garm_pool_status` | Gauge | `enabled`=<true\|false> `id`=<pool id> | This is a gauge that is set to 1 if the pool is enabled and set to 0 if not |
+| `garm_pool_bootstrap_timeout` | Gauge | `id`=<pool id> | This is a gauge that is set to the pool bootstrap timeout |
+| `garm_pool_max_runners` | Gauge | `id`=<pool id> | This is a gauge that is set to the pool max runners |
+| `garm_pool_min_idle_runners` | Gauge | `id`=<pool id> | This is a gauge that is set to the pool min idle runners |
+
+### Runner metrics
+
+| Metric name | Type | Labels | Description |
+|--------------------------------|---------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------|
+| `garm_runner_status` | Gauge | `name`=<runner name> `pool_owner`=<owner name> `pool_type`=<repository\|organization\|enterprise> `provider`=<provider name> `runner_status`=<running\|stopped\|error\|pending_delete\|deleting\|pending_create\|creating\|unknown> `status`=<idle\|pending\|terminated\|installing\|failed\|active> | This is a gauge value that gives us details about the runners garm spawns |
+| `garm_runner_operations_total` | Counter | `provider`=&lt;provider name&gt; `operation`=&lt;CreateInstance\|DeleteInstance\|GetInstance\|ListInstances\|RemoveAllInstances\|Start\|Stop&gt; | This is a counter that increments every time a runner operation is performed |
+| `garm_runner_errors_total`     | Counter | `provider`=&lt;provider name&gt; `operation`=&lt;CreateInstance\|DeleteInstance\|GetInstance\|ListInstances\|RemoveAllInstances\|Start\|Stop&gt; | This is a counter that increments every time a runner operation errored |
+
+### Github metrics
+
+| Metric name | Type | Labels | Description |
+|--------------------------------|---------|------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------|
+| `garm_github_operations_total` | Counter | `operation`=<ListRunners\|CreateRegistrationToken\|...> `scope`=<Organization\|Repository\|Enterprise> | This is a counter that increments every time a github operation is performed |
+| `garm_github_errors_total` | Counter | `operation`=<ListRunners\|CreateRegistrationToken\|...> `scope`=<Organization\|Repository\|Enterprise> | This is a counter that increments every time a github operation errored |
+
+### Enabling metrics
+
+Metrics are disabled by default. To enable them, add the following to your config file:
+
+```toml
+[metrics]
+
+# Toggle to disable authentication (not recommended) on the metrics endpoint.
+# If you do disable authentication, I encourage you to put a reverse proxy in front
+# of garm and limit which systems can access that particular endpoint. Ideally, you
+# would enable some kind of authentication using the reverse proxy, if the built-in auth
+# is not sufficient for your needs.
+#
+# Default: false
+disable_auth = true
+
+# Toggle metrics. If set to false, the API endpoint for metrics collection will
+# be disabled.
+#
+# Default: false
+enable = true
+
+# period is the time interval when the /metrics endpoint will update internal metrics about
+# controller specific objects (e.g. runners, pools, etc.)
+#
+# Default: "60s"
+period = "30s"
+```
+
+You can choose to disable authentication if you wish; however, it's not terribly difficult to set up, so I generally advise against disabling it.
+
+### Configuring prometheus
+
+The following section assumes that your garm instance is running at `garm.example.com` and has TLS enabled.
+
+First, generate a new JWT token valid only for the metrics endpoint:
+
+```bash
+garm-cli metrics-token create
+```
+
+Note: The token validity is equal to the TTL you set in the [JWT config section](#the-jwt-authentication-config-section).
+
+Copy the resulting token, and add it to your prometheus config file. The following is an example of how to add garm as a target in your prometheus config file:
+
+```yaml
+scrape_configs:
+ - job_name: "garm"
+ # Connect over https. If you don't have TLS enabled, change this to http.
+ scheme: https
+ static_configs:
+ - targets: ["garm.example.com"]
+ authorization:
+ credentials: "superSecretTokenYouGeneratedEarlier"
+```
+
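+Before wiring up prometheus, you can verify the token works with a plain request (assuming metrics are served on the default `/metrics` path used by the scrape config above):
+
+```bash
+curl -H "Authorization: Bearer superSecretTokenYouGeneratedEarlier" \
+    https://garm.example.com/metrics
+```
+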
+## The JWT authentication config section
+
+This section configures the JWT authentication used by the API server. GARM is currently a single user system and that user has the right to do anything and everything GARM is capable of. As a result, the JWT auth we have does not include a refresh token. The token is valid for the duration of the time to live (TTL) set in the config file. Once the token expires, you will need to log in again.
+
+It is recommended that the secret be a long, randomly generated string. Changing the secret at any time will invalidate all existing tokens.
+
+```toml
+[jwt_auth]
+# A JWT token secret used to sign tokens. Obviously, this needs to be changed :).
+secret = ")9gk_4A6KrXz9D2u`0@MPea*sd6W`%@5MAWpWWJ3P3EqW~qB!!(Vd$FhNc*eU4vG"
+
+# Time to live for tokens. Both the instances and you will use JWT tokens to
+# authenticate against the API. However, this TTL is applied only to tokens you
+# get when logging into the API. The tokens issued to the instances we manage,
+# have a TTL based on the runner bootstrap timeout set on each pool. The minimum
+# TTL for this token is 24h.
+time_to_live = "8760h"
+```
+
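+One way to generate a strong secret (illustrative; any sufficiently long random string will do):
+
+```bash
+openssl rand -base64 64 | tr -d '\n'; echo
+```
+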
+## The API server config section
+
+This section allows you to configure the GARM API server. The API server is responsible for serving all the API endpoints used by the `garm-cli`, by the runners that phone home their status, and by GitHub when it sends us webhooks.
+
+The config options are fairly straightforward.
+
+```toml
+[apiserver]
+ # Bind the API to this IP
+ bind = "0.0.0.0"
+ # Bind the API to this port
+ port = 9997
+ # Whether or not to set up TLS for the API endpoint. If this is set to true,
+ # you must have a valid apiserver.tls section.
+ use_tls = false
+ # Set a list of allowed origins
+ # By default, if this option is omitted or empty, we will check
+ # only that the origin is the same as the originating server.
+ # A literal of "*" will allow any origin
+ cors_origins = ["*"]
+ [apiserver.tls]
+ # Path on disk to a x509 certificate bundle.
+ # NOTE: if your certificate is signed by an intermediary CA, this file
+ # must contain the entire certificate bundle needed for clients to validate
+ # the certificate. This usually means concatenating the certificate and the
+ # CA bundle you received.
+ certificate = ""
+ # The path on disk to the corresponding private key for the certificate.
+ key = ""
+ [apiserver.webui]
+ enable = true
+```
+
+The GARM API server has the option to enable TLS, but I suggest you use a reverse proxy and enable TLS termination in that reverse proxy. There is an `nginx` sample in this repository with TLS termination enabled.
+
+You can of course enable TLS in both garm and the reverse proxy. The choice is yours.
\ No newline at end of file
diff --git a/doc/config_api_server.md b/doc/config_api_server.md
deleted file mode 100644
index 33f0f4ea..00000000
--- a/doc/config_api_server.md
+++ /dev/null
@@ -1,34 +0,0 @@
-# The API server config section
-
-This section allows you to configure the GARM API server. The API server is responsible for serving all the API endpoints used by the `garm-cli`, the runners that phone home their status and by GitHub when it sends us webhooks.
-
-The config options are fairly straight forward.
-
-```toml
-[apiserver]
- # Bind the API to this IP
- bind = "0.0.0.0"
- # Bind the API to this port
- port = 9997
- # Whether or not to set up TLS for the API endpoint. If this is set to true,
- # you must have a valid apiserver.tls section.
- use_tls = false
- # Set a list of allowed origins
- # By default, if this option is ommited or empty, we will check
- # only that the origin is the same as the originating server.
- # A literal of "*" will allow any origin
- cors_origins = ["*"]
- [apiserver.tls]
- # Path on disk to a x509 certificate bundle.
- # NOTE: if your certificate is signed by an intermediary CA, this file
- # must contain the entire certificate bundle needed for clients to validate
- # the certificate. This usually means concatenating the certificate and the
- # CA bundle you received.
- certificate = ""
- # The path on disk to the corresponding private key for the certificate.
- key = ""
-```
-
-The GARM API server has the option to enable TLS, but I suggest you use a reverse proxy and enable TLS termination in that reverse proxy. There is an `nginx` sample in this repository with TLS termination enabled.
-
-You can of course enable TLS in both garm and the reverse proxy. The choice is yours.
\ No newline at end of file
diff --git a/doc/config_default.md b/doc/config_default.md
deleted file mode 100644
index ea95132e..00000000
--- a/doc/config_default.md
+++ /dev/null
@@ -1,169 +0,0 @@
-# The default config section
-
-The `default` config section holds configuration options that don't need a category of their own, but are essential to the operation of the service. In this section we will detail each of the options available in the `default` section.
-
-```toml
-[default]
-# This URL is used by instances to send back status messages as they install
-# the github actions runner. Status messages can be seen by querying the
-# runner status in garm.
-# Note: If you're using a reverse proxy in front of your garm installation,
-# this URL needs to point to the address of the reverse proxy. Using TLS is
-# highly encouraged.
-callback_url = "https://garm.example.com/api/v1/callbacks/status"
-
-# This URL is used by instances to retrieve information they need to set themselves
-# up. Access to this URL is granted using the same JWT token used to send back
-# status updates. Once the instance transitions to "installed" or "failed" state,
-# access to both the status and metadata endpoints is disabled.
-# Note: If you're using a reverse proxy in front of your garm installation,
-# this URL needs to point to the address of the reverse proxy. Using TLS is
-# highly encouraged.
-metadata_url = "https://garm.example.com/api/v1/metadata"
-
-# Uncomment this line if you'd like to log to a file instead of standard output.
-# log_file = "/tmp/runner-manager.log"
-
-# Enable streaming logs via web sockets. Use garm-cli debug-log.
-enable_log_streamer = false
-
-# Enable the golang debug server. See the documentation in the "doc" folder for more information.
-debug_server = false
-```
-
-## The callback_url option
-
-Your runners will call back home with status updates as they install. Once they are set up, they will also send the GitHub agent ID they were allocated. You will need to configure the ```callback_url``` option in the ```garm``` server config. This URL needs to point to the following API endpoint:
-
- ```txt
- POST /api/v1/callbacks/status
- ```
-
-Example of a runner sending status updates:
-
- ```bash
- garm-cli runner show garm-DvxiVAlfHeE7
- +-----------------+------------------------------------------------------------------------------------+
- | FIELD | VALUE |
- +-----------------+------------------------------------------------------------------------------------+
- | ID | 16b96ba2-d406-45b8-ab66-b70be6237b4e |
- | Provider ID | garm-DvxiVAlfHeE7 |
- | Name | garm-DvxiVAlfHeE7 |
- | OS Type | linux |
- | OS Architecture | amd64 |
- | OS Name | ubuntu |
- | OS Version | jammy |
- | Status | running |
- | Runner Status | idle |
- | Pool ID | 8ec34c1f-b053-4a5d-80d6-40afdfb389f9 |
- | Addresses | 10.198.117.120 |
- | Status Updates | 2023-07-08T06:26:46: runner registration token was retrieved |
- | | 2023-07-08T06:26:46: using cached runner found in /opt/cache/actions-runner/latest |
- | | 2023-07-08T06:26:50: configuring runner |
- | | 2023-07-08T06:26:56: runner successfully configured after 1 attempt(s) |
- | | 2023-07-08T06:26:56: installing runner service |
- | | 2023-07-08T06:26:56: starting service |
- | | 2023-07-08T06:26:57: runner successfully installed |
- +-----------------+------------------------------------------------------------------------------------+
-
- ```
-
-This URL must be set and must be accessible by the instance. If you wish to restrict access to it, a reverse proxy can be configured to accept requests only from networks in which the runners ```garm``` manages will be spun up. This URL doesn't need to be globally accessible, it just needs to be accessible by the instances.
-
-For example, in a scenario where you expose the API endpoint directly, this setting could look like the following:
-
- ```toml
- callback_url = "https://garm.example.com/api/v1/callbacks/status"
- ```
-
-Authentication is done using a short-lived JWT token that gets generated for a particular instance that we are spinning up. That JWT token grants the instance access only to update its own status and to fetch metadata for itself. No other API endpoints will work with that JWT token. The validity of the token is equal to the pool bootstrap timeout value (default 20 minutes) plus the garm polling interval (5 minutes).
-
-There is a sample ```nginx``` config [in the testdata folder](/testdata/nginx-server.conf). Feel free to customize it whichever way you see fit.
-
-## The metadata_url option
-
-The metadata URL is the base URL for any information an instance may need to fetch in order to finish setting itself up. As this URL may be placed behind a reverse proxy, you'll need to configure it in the ```garm``` config file. Ultimately this URL will need to point to the following ```garm``` API endpoint:
-
- ```bash
- GET /api/v1/metadata
- ```
-
-This URL needs to be accessible only by the instances ```garm``` sets up. This URL will not be used by anyone else. To configure it in ```garm``` add the following line in the ```[default]``` section of your ```garm``` config:
-
- ```toml
- metadata_url = "https://garm.example.com/api/v1/metadata"
- ```
-
-## The debug_server option
-
-GARM can optionally enable the golang profiling server. This is useful if you suspect garm may be bottlenecking in any way. To enable the profiling server, add the following section to the garm config:
-
-```toml
-[default]
-
-debug_server = true
-```
-
-And restart garm. You can then use the following command to start profiling:
-
-```bash
-go tool pprof http://127.0.0.1:9997/debug/pprof/profile?seconds=120
-```
-
-Important note on profiling when behind a reverse proxy. The above command will hang for a fairly long time. Most reverse proxies will timeout after about 60 seconds. To avoid this, you should only profile on localhost by connecting directly to garm.
-
-It's also advisable to exclude the debug server URLs from your reverse proxy and only make them available locally.
-
-Now that the debug server is enabled, here is a blog post on how to profile golang applications: https://blog.golang.org/profiling-go-programs
-
-
-## The log_file option
-
-By default, GARM logs everything to standard output.
-
-You can optionally log to file by adding the following to your config file:
-
-```toml
-[default]
-# Use this if you'd like to log to a file instead of standard output.
-log_file = "/tmp/runner-manager.log"
-```
-
-### Rotating log files
-
-GARM automatically rotates the log if it reaches 500 MB in size or 28 days, whichever comes first.
-
-However, if you want to manually rotate the log file, you can send a `SIGHUP` signal to the GARM process.
-
-You can add the following to your systemd unit file to enable `reload`:
-
-```ini
-[Service]
-ExecReload=/bin/kill -HUP $MAINPID
-```
-
-Then you can simply:
-
-```bash
-systemctl reload garm
-```
-
-## The enable_log_streamer option
-
-This option allows you to stream garm logs directly to your terminal. Set this option to true, then you can use the following command to stream logs:
-
-```bash
-garm-cli debug-log
-```
-
-An important note on enabling this option when behind a reverse proxy. The log streamer uses websockets to stream logs to you. You will need to configure your reverse proxy to allow websocket connections. If you're using nginx, you will need to add the following to your nginx `server` config:
-
-```nginx
-location /api/v1/ws {
- proxy_pass http://garm_backend;
- proxy_http_version 1.1;
- proxy_set_header Upgrade $http_upgrade;
- proxy_set_header Connection "Upgrade";
- proxy_set_header Host $host;
-}
-```
\ No newline at end of file
diff --git a/doc/config_jwt_auth.md b/doc/config_jwt_auth.md
deleted file mode 100644
index 7f07d311..00000000
--- a/doc/config_jwt_auth.md
+++ /dev/null
@@ -1,18 +0,0 @@
-# The JWT authentication config section
-
-This section configures the JWT authentication used by the API server. GARM is currently a single user system and that user has the right to do anything and everything GARM is capable of. As a result, the JWT auth we have does not include a refresh token. The token is valid for the duration of the time to live (TTL) set in the config file. Once the token expires, you will need to log in again.
-
-It is recommended that the secret be a long, randomly generated string. Changing the secret at any time will invalidate all existing tokens.
-
-```toml
-[jwt_auth]
-# A JWT token secret used to sign tokens. Obviously, this needs to be changed :).
-secret = ")9gk_4A6KrXz9D2u`0@MPea*sd6W`%@5MAWpWWJ3P3EqW~qB!!(Vd$FhNc*eU4vG"
-
-# Time to live for tokens. Both the instances and you will use JWT tokens to
-# authenticate against the API. However, this TTL is applied only to tokens you
-# get when logging into the API. The tokens issued to the instances we manage,
-# have a TTL based on the runner bootstrap timeout set on each pool. The minimum
-# TTL for this token is 24h.
-time_to_live = "8760h"
-```
\ No newline at end of file
diff --git a/doc/config_metrics.md b/doc/config_metrics.md
deleted file mode 100644
index caa50b1b..00000000
--- a/doc/config_metrics.md
+++ /dev/null
@@ -1,55 +0,0 @@
-# The metrics section
-
-This is one of the features in GARM that I really love having. For one thing, it's community contributed and for another, it really adds value to the project. It allows us to create some pretty nice visualizations of what is happening with GARM.
-
-At the moment there are only three meaningful metrics being collected, besides the default ones that the prometheus golang package enables by default. These are:
-
-* `garm_health` - This is a gauge that is set to 1 if GARM is healthy and 0 if it is not. This is useful for alerting.
-* `garm_runner_status` - This is a gauge value that gives us details about the runners garm spawns
-* `garm_webhooks_received` - This is a counter that increments every time GARM receives a webhook from GitHub.
-
-More metrics will be added in the future.
-
-## Enabling metrics
-
-Metrics are disabled by default. To enable them, add the following to your config file:
-
-```toml
-[metrics]
-# Toggle metrics. If set to false, the API endpoint for metrics collection will
-# be disabled.
-enable = true
-# Toggle to disable authentication (not recommended) on the metrics endpoint.
-# If you do disable authentication, I encourage you to put a reverse proxy in front
-# of garm and limit which systems can access that particular endpoint. Ideally, you
-# would enable some kind of authentication using the reverse proxy, if the built-in auth
-# is not sufficient for your needs.
-disable_auth = false
-```
-
-You can choose to disable authentication if you wish, however it's not terribly difficult to set up, so I generally advise against disabling it.
-
-## Configuring prometheus
-
-The following section assumes that your garm instance is running at `garm.example.com` and has TLS enabled.
-
-First, generate a new JWT token valid only for the metrics endpoint:
-
-```bash
-garm-cli metrics-token create
-```
-
-Note: The token validity is equal to the TTL you set in the [JWT config section](/doc/config_jwt_auth.md).
-
-Copy the resulting token, and add it to your prometheus config file. The following is an example of how to add garm as a target in your prometheus config file:
-
-```yaml
-scrape_configs:
- - job_name: "garm"
- # Connect over https. If you don't have TLS enabled, change this to http.
- scheme: https
- static_configs:
- - targets: ["garm.example.com"]
- authorization:
- credentials: "superSecretTokenYouGeneratedEarlier"
-```
\ No newline at end of file
diff --git a/doc/database.md b/doc/database.md
deleted file mode 100644
index c3e1edc6..00000000
--- a/doc/database.md
+++ /dev/null
@@ -1,20 +0,0 @@
-# Database configuration
-
-GARM currently supports SQLite3. Support for other stores will be added in the future.
-
-```toml
-[database]
- # Turn on/off debugging for database queries.
- debug = false
- # Database backend to use. Currently supported backends are:
- # * sqlite3
- backend = "sqlite3"
- # the passphrase option is a temporary measure by which we encrypt the webhook
- # secret that gets saved to the database, using AES256. In the future, secrets
- # will be saved to something like Barbican or Vault, eliminating the need for
- # this.
- passphrase = "n<$n&P#L*TWqOh95_bN5J1r4mhxY7R84HZ%pvM#1vxJ<7~q%YVsCwU@Z60;7~Djo"
- [database.sqlite3]
- # Path on disk to the sqlite3 database file.
- db_file = "/home/runner/garm.db"
-```
diff --git a/doc/events.md b/doc/events.md
new file mode 100644
index 00000000..e643a5c2
--- /dev/null
+++ b/doc/events.md
@@ -0,0 +1,256 @@
+# GARM database events
+
+Starting with GARM version `v0.1.5`, we now have a new websocket endpoint that allows us to subscribe to some events that are emitted by the database watcher. Whenever a database entity is created, updated or deleted, the database watcher will notify all interested consumers that an event has occurred and as part of that event, we get a copy of the database entity that was affected.
+
+For example, if a new runner is created, the watcher will emit a `Create` event for the `Instances` entity and in the `Payload` field, we will have a copy of the `Instance` entity that was created. Internally, this will be a golang struct, but when exported via the websocket endpoint, it will be a JSON object, with all sensitive info (passwords, keys, secrets in general) stripped out.
+
+This document will focus on the websocket endpoint and the events that are exported by it.
+
+# Entities and operations
+
+Virtually all database entities are exposed through the events endpoint. These entities are defined in the [database common package](https://github.com/cloudbase/garm/blob/56b0e6065a993fd89c74a8b4ab7de3487544e4e0/database/common/watcher.go#L12-L21). Each of the entity types represents a database table in GARM.
+
+Those entities are:
+
+* `repository` - represents a repository in the database
+* `organization` - represents an organization in the database
+* `enterprise` - represents an enterprise in the database
+* `pool` - represents a pool in the database
+* `user` - represents a user in the database. Currently GARM is not multi-tenant, so we just have the "admin" user
+* `instance` - represents a runner instance in the database
+* `job` - represents a recorded github workflow job in the database
+* `controller` - represents a controller in the database. This is the GARM controller.
+* `github_credentials` - represents a github credential in the database (PAT, Apps, etc). No sensitive info (token, keys, etc) is ever returned by the events endpoint.
+* `github_endpoint` - represents a github endpoint in the database. This holds the github.com default endpoint and any GHES you may add.
+
+The operations hooked up to the events endpoint and the database watcher are:
+
+* `create` - emitted when a new entity is created
+* `update` - emitted when an entity is updated
+* `delete` - emitted when an entity is deleted
+
+# Event structure
+
+The event structure is defined in the [database common package](https://github.com/cloudbase/garm/blob/56b0e6065a993fd89c74a8b4ab7de3487544e4e0/database/common/watcher.go#L30-L34). The structure for a change payload is marshaled into a JSON object as follows:
+
+```json
+{
+ "entity-type": "repository",
+ "operation": "create"
+ "payload": [object]
+}
+```
+
+Where the `payload` will be a JSON representation of one of the entities defined above. Essentially, you can expect to receive a JSON identical to the one you would get if you made an API call to the GARM REST API for that particular entity.
+
+Note that in some cases the `delete` operation will return the full object as it was prior to deletion, while in other cases it will only return the `ID` of the entity. This will probably be changed in future releases so that `delete` returns only the `ID` for all entities, and you should operate under that assumption.
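+
+If you consume `delete` events, the safest approach is therefore to extract just the `ID`, whatever shape arrives. A small sketch using `jq`, assuming `$event` holds one JSON message read off the socket:
+
+```bash
+# Works whether the payload is a full object or just the entity ID.
+entity_id=$(echo "$event" | jq -r '.payload | if type == "object" then .id else . end')
+```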
+
+# Subscribing to events
+
+By default the events endpoint returns no events. All events are filtered by default. To start receiving events, you need to emit a message on the websocket connection indicating the entities and/or operations you're interested in.
+
+This gives you the option to get fine grained control over what you receive at any given point in time. Of course, you can opt to receive everything and deal with the potential deluge (depends on how busy your GARM instance is) on your own.
+
+## The filter message
+
+The filter is defined as a JSON message that you write over the websocket connection. That JSON must adhere to the following schema:
+
+```json
+{
+ "$schema": "https://json-schema.org/draft/2020-12/schema",
+ "$id": "https://github.com/cloudbase/garm/apiserver/events/options",
+ "$ref": "#/$defs/Options",
+ "$defs": {
+ "Filter": {
+ "properties": {
+ "operations": {
+ "items": {
+ "type": "string",
+ "enum": [
+ "create",
+ "update",
+ "delete"
+ ]
+ },
+ "type": "array",
+ "title": "operations",
+ "description": "A list of operations to filter on"
+ },
+ "entity-type": {
+ "type": "string",
+ "enum": [
+ "repository",
+ "organization",
+ "enterprise",
+ "pool",
+ "user",
+ "instance",
+ "job",
+ "controller",
+ "github_credentials",
+ "gitea_credentials",
+ "github_endpoint",
+ "scaleset"
+ ],
+ "title": "entity type",
+ "description": "The type of entity to filter on",
+ "default": "repository"
+ }
+ },
+ "additionalProperties": false,
+ "type": "object"
+ },
+ "Options": {
+ "properties": {
+ "send-everything": {
+ "type": "boolean",
+ "title": "send everything",
+ "default": false
+ },
+ "filters": {
+ "items": {
+ "$ref": "#/$defs/Filter"
+ },
+ "type": "array",
+ "title": "filters",
+ "description": "A list of filters to apply to the events. This is ignored when send-everything is true"
+ }
+ },
+ "additionalProperties": false,
+ "type": "object"
+ }
+ }
+}
+```
+
+But I realize a JSON schema is not the best way to explain how to use the filter. The following examples should give you a better idea of how to use the filter.
+
+### Example 1: Send all events
+
+```json
+{
+ "send-everything": true
+}
+```
+
+### Example 2: Send only `create` events for `repository` entities
+
+```json
+{
+ "send-everything": false,
+ "filters": [
+ {
+ "entity-type": "repository",
+ "operations": ["create"]
+ }
+ ]
+}
+```
+
+### Example 3: Send `create` and `update` for repositories and `delete` for instances
+
+```json
+{
+ "send-everything": false,
+ "filters": [
+ {
+ "entity-type": "repository",
+ "operations": ["create", "update"]
+ },
+ {
+ "entity-type": "instance",
+ "operations": ["delete"]
+ }
+ ]
+}
+```
+
+## Connecting to the events endpoint
+
+You can use any websocket client, written in any programming language, to interact with the events endpoint. In the following example I'll show you how to do it from Go.
+
+Before we start, we'll need a JWT token to access the events endpoint. Normally, if you use the CLI, you should have it in your `~/.local/share/garm-cli` folder. But if you know your username and password, we can fetch a fresh one using `curl`:
+
+```bash
+# Read the password from the terminal
+read -s PASSWD
+
+# Get the token
+curl -s -X POST -d '{"username": "admin", "password": "'$PASSWD'"}' \
+ https://garm.example.com/api/v1/auth/login | jq -r .token
+```
+
+Save the token; we'll need it later.
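+
+Before writing any code, you can poke at the endpoint with any generic websocket client. Here is a sketch using `websocat` (not part of GARM; install it separately), assuming the endpoint honors the same `Authorization: Bearer` header as the rest of the API:
+
+```bash
+TOKEN="superSecretJWTToken"
+# Keep stdin open after sending the filter, so we keep receiving events.
+( echo '{"send-everything": true}'; cat ) | \
+    websocat -H "Authorization: Bearer ${TOKEN}" \
+    wss://garm.example.com/api/v1/ws/events
+```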
+
+Now, let's write a simple Go program that connects to the events endpoint and subscribes to all events. We'll use the reader that was added to [`garm-provider-common`](https://github.com/cloudbase/garm-provider-common) in version `v0.1.3` to make this easier:
+
+```go
+package main
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "os/signal"
+ "syscall"
+
+ garmWs "github.com/cloudbase/garm-provider-common/util/websocket"
+ "github.com/gorilla/websocket"
+)
+
+// List of signals to interrupt the program
+var signals = []os.Signal{
+ os.Interrupt,
+ syscall.SIGTERM,
+}
+
+// printToConsoleHandler is a simple function that prints the message to the console.
+// In a real world implementation, you can use this function to decide how to properly
+// handle the events.
+func printToConsoleHandler(_ int, msg []byte) error {
+ fmt.Println(string(msg))
+ return nil
+}
+
+func main() {
+ // Set up the context to listen for signals.
+ ctx, stop := signal.NotifyContext(context.Background(), signals...)
+ defer stop()
+
+ // This is the JWT token you got from the curl command above.
+ token := "superSecretJWTToken"
+ // The base URL of your GARM server
+ baseURL := "https://garm.example.com"
+ // This is the path to the events endpoint
+ pth := "/api/v1/ws/events"
+
+ // Instantiate the websocket reader
+ reader, err := garmWs.NewReader(ctx, baseURL, pth, token, printToConsoleHandler)
+ if err != nil {
+ fmt.Println(err)
+ return
+ }
+
+ // Start the loop.
+ if err := reader.Start(); err != nil {
+ fmt.Println(err)
+ return
+ }
+
+ // Set the filter to receive all events. You can use a more fine grained filter if you wish.
+ reader.WriteMessage(websocket.TextMessage, []byte(`{"send-everything":true}`))
+
+ fmt.Println("Listening for events. Press Ctrl+C to stop.")
+ // Wait for the context to be done.
+ <-ctx.Done()
+}
+```
+
+If you run this program and change something in the GARM database, you should see the event being printed to the console:
+
+```bash
+gabriel@rossak:/tmp/ex$ go run ./main.go
+{"entity-type":"pool","operation":"update","payload":{"runner_prefix":"garm","id":"8ec34c1f-b053-4a5d-80d6-40afdfb389f9","provider_name":"lxd","max_runners":10,"min_idle_runners":0,"image":"ubuntu:22.04","flavor":"default","os_type":"linux","os_arch":"amd64","tags":[{"id":"76781c93-e354-402e-907a-785caab36207","name":"self-hosted"},{"id":"2ff4a89e-e3b4-4e78-b977-6c21e83cca3d","name":"x64"},{"id":"5b3ffec6-0402-4322-b2a9-fa7f692bbc00","name":"Linux"},{"id":"e95e106d-1a3d-11ee-bd1d-00163e1f621a","name":"ubuntu"},{"id":"3b54ae6c-5e9b-4a81-8e6c-0f78a7b37b04","name":"repo"}],"enabled":true,"instances":[],"repo_id":"70227434-e7c0-4db1-8c17-e9ae3683f61e","repo_name":"gsamfira/scripts","runner_bootstrap_timeout":20,"extra_specs":{"disable_updates":true,"enable_boot_debug":true},"github-runner-group":"","priority":10}}
+```
+
+In the above example, you can see an `update` event on a `pool` entity. The `payload` field contains the full, updated `pool` entity.
diff --git a/doc/external_provider.md b/doc/external_provider.md
index 84d524d9..70b0374d 100644
--- a/doc/external_provider.md
+++ b/doc/external_provider.md
@@ -1,31 +1,31 @@
# Writing an external provider
-External provider enables you to write a fully functional provider, using any scripting or programming language. Garm will call your executable to manage the lifecycle of the instances hosting the runners. This document describes the API that an executable needs to implement to be usable by ```garm```.
+External provider enables you to write a fully functional provider, using any scripting or programming language. Garm will call your executable to manage the lifecycle of the instances hosting the runners. This document describes the API that an executable needs to implement to be usable by `garm`.
## Environment variables
-When ```garm``` calls your executable, a number of environment variables are set, depending on the operation. There are three environment variables that will always be set regardless of operation. Those variables are:
+When `garm` calls your executable, a number of environment variables are set, depending on the operation. There are three environment variables that will always be set regardless of operation. Those variables are:
-* ```GARM_COMMAND```
-* ```GARM_PROVIDER_CONFIG_FILE```
-* ```GARM_CONTROLLER_ID```
+* `GARM_COMMAND`
+* `GARM_PROVIDER_CONFIG_FILE`
+* `GARM_CONTROLLER_ID`
The following are variables that are specific to some operations:
-* ```GARM_POOL_ID```
-* ```GARM_INSTANCE_ID```
+* `GARM_POOL_ID`
+* `GARM_INSTANCE_ID`
### The GARM_COMMAND variable
-The ```GARM_COMMAND``` environment variable will be set to one of the operations defined in the interface. When your executable is called, you'll need to inspect this variable to know which operation you need to execute.
+The `GARM_COMMAND` environment variable will be set to one of the operations defined in the interface. When your executable is called, you'll need to inspect this variable to know which operation you need to execute.
### The GARM_PROVIDER_CONFIG_FILE variable
-The ```GARM_PROVIDER_CONFIG_FILE``` variable will contain a path on disk to a file that can contain whatever configuration your executable needs. For example, in the case of the [sample OpenStack external provider](../contrib/providers.d/openstack/keystonerc), this file contains variables that you would normally find in a ```keystonerc``` file, used to access an OpenStack cloud. But you can use it to add any extra configuration you need.
+The `GARM_PROVIDER_CONFIG_FILE` variable will contain a path on disk to a file that can contain whatever configuration your executable needs. For example, in the case of the [OpenStack external provider](https://github.com/cloudbase/garm-provider-openstack), this file is a toml which contains provider specific configuration options. The provider author decides what this file needs to contain for the provider to function properly.
-The config is opaque to ```garm``` itself. It only has meaning for your external provider.
+GARM does not read this file in any way. It is simply passed to the executable via the environment variable.
-In your executable, you could implement something like this:
+The OpenStack provider mentioned above is written in Go, but it doesn't need to be. For example, if your provider is written in BASH, handling the config file could look something like this:
```bash
if [ -f "${GARM_PROVIDER_CONFIG_FILE}" ];then
@@ -54,37 +54,37 @@ esac
### The GARM_CONTROLLER_ID variable
-The ```GARM_CONTROLLER_ID``` variable is set for all operations.
+The `GARM_CONTROLLER_ID` variable is set for all operations.
When garm first starts up, it generates a unique ID that identifies it as an instance. This ID is passed to the provider and should always be used to tag resources in whichever cloud you write your provider for. This ensures that if you have multiple garm installations, one particular deployment of garm will never touch any resources it did not create.
-In most clouds you can attach ```tags``` to resources. You can use the controller ID as one of the tags during the ```CreateInstance``` operation.
+In most clouds you can attach `tags` to resources. You can use the controller ID as one of the tags during the `CreateInstance` operation.
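+
+For example, a provider wrapping a hypothetical `my_cloud` CLI might tag the new VM during `CreateInstance` like this (a sketch; the tagging mechanism and flag names differ per cloud):
+
+```bash
+# Tag the instance so this particular GARM deployment can always identify
+# the resources it created. INSTANCE_NAME is assumed to have been parsed
+# from the bootstrap JSON sent on standard input.
+my_cloud create "${INSTANCE_NAME}" \
+    --tag "garm-controller-id=${GARM_CONTROLLER_ID}" \
+    --tag "garm-pool-id=${GARM_POOL_ID}"
+```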
### The GARM_POOL_ID variable
-The ```GARM_POOL_ID``` environment variable is a ```UUID4``` describing the pool in which a runner is created. This variable is set in two operations:
+The `GARM_POOL_ID` environment variable is a `UUID4` describing the pool in which a runner is created. This variable is set in two operations:
* CreateInstance
* ListInstances
-As with the ```GARM_CONTROLLER_ID```, this ID **must** also be attached as a tag or whichever mechanism your target cloud supports, to identify the pool to which the resources (in most cases the VMs) belong to.
+As with the `GARM_CONTROLLER_ID`, this ID **must** also be attached as a tag, or via whichever mechanism your target cloud supports, to identify the pool to which the resources (in most cases the VMs) belong.
### The GARM_INSTANCE_ID variable
-The ```GARM_INSTANCE_ID``` environment variable is used in four operations:
+The `GARM_INSTANCE_ID` environment variable is used in four operations:
* GetInstance
* DeleteInstance
* Start
* Stop
-It contains the ```provider_id``` of the instance. The ```provider_id``` is a unique identifier, specific to the IaaS in which the compute resource was created. In OpenStack, it's an ```UUID4```, while in LXD, it's the virtual machine's name.
+It contains the `provider_id` of the instance. The `provider_id` is a unique identifier, specific to the IaaS in which the compute resource was created. In OpenStack, it's a `UUID4`, while in LXD, it's the virtual machine's name.
We need this ID whenever we need to execute an operation that targets one specific runner.
## Operations
-The operations that a provider must implement are described in the ```Provider``` [interface available here](https://github.com/cloudbase/garm/blob/223477c4ddfb6b6f9079c444d2f301ef587f048b/runner/providers/external/execution/interface.go#L9-L27). The external provider implements this interface, and delegates each operation to your external executable. [These operations are](https://github.com/cloudbase/garm/blob/223477c4ddfb6b6f9079c444d2f301ef587f048b/runner/providers/external/execution/commands.go#L5-L13):
+The operations that a provider must implement are described in the `Provider` [interface available here](https://github.com/cloudbase/garm/blob/223477c4ddfb6b6f9079c444d2f301ef587f048b/runner/providers/external/execution/interface.go#L9-L27). The external provider implements this interface, and delegates each operation to your external executable. [These operations are](https://github.com/cloudbase/garm/blob/223477c4ddfb6b6f9079c444d2f301ef587f048b/runner/providers/external/execution/commands.go#L5-L13):
* CreateInstance
* DeleteInstance
@@ -96,30 +96,30 @@ The operations that a provider must implement are described in the ```Provider``
## CreateInstance
-The ```CreateInstance``` command has the most moving parts. The ideal external provider is one that will create all required resources for a fully functional instance, will start the instance. Waiting for the instance to start is not necessary. If the instance can reach the ```callback_url``` configured in ```garm```, it will update it's own status when it starts running the userdata script.
+The `CreateInstance` command has the most moving parts. The ideal external provider is one that will create all required resources for a fully functional instance and then start it. Waiting for the instance to start is not necessary. If the instance can reach the `callback_url` configured in `garm`, it will update its own status when it starts running the userdata script.
But aside from creating resources, the ideal external provider is also idempotent, and will clean up after itself in case of failure. If for any reason the executable fails to create the instance, any dependency it has created up to the point of failure should be cleaned up before returning an error code.
-At the very least, it must be able to clean up those resources, if it is called with the ```DeleteInstance``` command by ```garm```. Garm will retry creating a failed instance. Before it tries again, it will attempt to run a ```DeleteInstance``` using the ```provider_id``` returned by your executable.
+At the very least, it must be able to clean up those resources, if it is called with the `DeleteInstance` command by `garm`. Garm will retry creating a failed instance. Before it tries again, it will attempt to run a `DeleteInstance` using the `provider_id` returned by your executable.
-If your executable failed before a ```provider_id``` could be supplied, ```garm``` will send the name of the instance as a ```GARM_INSTANCE_ID``` environment variable.
+If your executable failed before a `provider_id` could be supplied, `garm` will send the name of the instance as a `GARM_INSTANCE_ID` environment variable.
-Your external provider will need to be able to handle both. The instance name generated by ```garm``` will be unique, so it's fairly safe to use when deleting instances.
+Your external provider will need to be able to handle both. The instance name generated by `garm` will be unique, so it's fairly safe to use when deleting instances.
### CreateInstance inputs
-The ```CreateInstance``` command is the only command that needs to handle standard input. Garm will send the runner bootstrap information in stdin. The environment variables set for this command are:
+The `CreateInstance` command is the only command that needs to handle standard input. Garm will send the runner bootstrap information in stdin. The environment variables set for this command are:
* GARM_PROVIDER_CONFIG_FILE - Config file specific to your executable
* GARM_COMMAND - the command we need to run
-* GARM_CONTROLLER_ID - The unique ID of the ```garm``` installation
+* GARM_CONTROLLER_ID - The unique ID of the `garm` installation
* GARM_POOL_ID - The unique ID of the pool this node is a part of
-The information sent in via standard input is a ```json``` serialized instance of the [BootstrapInstance structure](https://github.com/cloudbase/garm/blob/6b3ea50ca54501595e541adde106703d289bb804/params/params.go#L164-L217)
+The information sent in via standard input is a `json` serialized instance of the [BootstrapInstance structure](https://github.com/cloudbase/garm/blob/6b3ea50ca54501595e541adde106703d289bb804/params/params.go#L164-L217)
Here is a sample of that:
- ```json
+```json
{
"name": "garm-ny9HeeQYw2rl",
"tools": [
@@ -174,7 +174,7 @@ Here is a sample of that:
}
],
"repo_url": "https://github.com/gabriel-samfira/scripts",
- "callback-url": "https://garm.example.com/api/v1/callbacks/status",
+ "callback-url": "https://garm.example.com/api/v1/callbacks",
"metadata-url": "https://garm.example.com/api/v1/metadata",
"instance-token": "super secret JWT token",
"extra_specs": {
@@ -188,46 +188,43 @@ Here is a sample of that:
"image": "8ed8a690-69b6-49eb-982f-dcb466895e2d",
"labels": [
"ubuntu",
- "self-hosted",
- "x64",
- "linux",
"openstack",
"runner-controller-id:f9286791-1589-4f39-a106-5b68c2a18af4",
"runner-pool-id:9dcf590a-1192-4a9c-b3e4-e0902974c2c0"
],
"pool_id": "9dcf590a-1192-4a9c-b3e4-e0902974c2c0"
}
- ```
+```
In your executable you can read in this blob, by using something like this:
- ```bash
+```bash
# Test if the stdin file descriptor is opened
if [ ! -t 0 ]
then
# Read in the information from standard in
INPUT=$(cat -)
fi
- ```
+```
-Then you can easily parse it. If you're using ```bash```, you can use the amazing [jq json processor](https://stedolan.github.io/jq/). Other programming languages have suitable libraries that can handle ```json```.
+Then you can easily parse it. If you're using `bash`, you can use the amazing [jq json processor](https://stedolan.github.io/jq/). Other programming languages have suitable libraries that can handle `json`.
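+
+A sketch of pulling a few fields out of the bootstrap JSON with `jq` (the field names follow the sample shown above):
+
+```bash
+INSTANCE_NAME=$(echo "${INPUT}" | jq -r .name)
+IMAGE=$(echo "${INPUT}" | jq -r .image)
+POOL_ID=$(echo "${INPUT}" | jq -r .pool_id)
+CALLBACK_URL=$(echo "${INPUT}" | jq -r '."callback-url"')
+```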
You will have to parse the bootstrap params, verify that the requested image exists, gather operating system information, CPU architecture information and using that information, you will need to select the appropriate tools for the arch/OS combination you are deploying.
Refer to the OpenStack or Azure providers available in the [providers.d](../contrib/providers.d/) folder. Of particular interest are the [cloudconfig folders](../contrib/providers.d/openstack/cloudconfig/), where the instance user data templates are stored. These templates are used to generate the needed automation for the instances to download the github runner agent, send back status updates (including the final github runner agent ID), and download the github runner registration token from garm.
-Examples of external providers written in Go can be found at the followinf locations:
+Examples of external providers written in Go can be found at the following locations:
*
*
### CreateInstance outputs
-On success, your executable is expected to print to standard output a json that can be deserialized into an ```Instance{}``` structure [defined here](https://github.com/cloudbase/garm/blob/6b3ea50ca54501595e541adde106703d289bb804/params/params.go#L90-L154).
+On success, your executable is expected to print to standard output a json that can be deserialized into an `Instance{}` structure [defined here](https://github.com/cloudbase/garm/blob/6b3ea50ca54501595e541adde106703d289bb804/params/params.go#L90-L154).
Not all fields are expected to be populated by the provider. The ones that should be set are:
- ```json
+```json
{
"provider_id": "88818ff3-1fca-4cb5-9b37-84bfc3511ea6",
"name": "garm-ny9HeeQYw2rl",
@@ -239,17 +236,17 @@ Not all fields are expected to be populated by the provider. The ones that shoul
"pool_id": "41c4a43a-acee-493a-965b-cf340b2c775d",
"provider_fault": ""
}
- ```
+```
-In case of error, ```garm``` expects at the very least to see a non-zero exit code. If possible, your executable should return as much information as possible via the above ```json```, with the ```status``` field set to ```error``` and the ```provider_fault``` set to a meaningful error message describing what has happened. That information will be visible when doing a:
+In case of error, `garm` expects at the very least to see a non-zero exit code. If possible, your executable should return as much information as possible via the above `json`, with the `status` field set to `error` and the `provider_fault` set to a meaningful error message describing what has happened. That information will be visible when doing a:
- ```bash
+```bash
garm-cli runner show
- ```
+```
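+
+A provider written in bash could surface such failures with a small helper. A sketch, assuming `jq` is installed on the system running the provider:
+
+```bash
+# Print an Instance{} JSON with an error status and a human readable fault
+# message, then exit non-zero so garm knows the operation failed.
+fail() {
+    jq -n --arg msg "$1" '{status: "error", provider_fault: $msg}'
+    exit 1
+}
+
+fail "failed to create server: quota exceeded"
+```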
## DeleteInstance
-The ```DeleteInstance``` command will permanently remove an instance from the cloud provider.
+The `DeleteInstance` command will permanently remove an instance from the cloud provider.
The environment variables set for this command are:
@@ -258,13 +255,13 @@ The environment variables set for this command are:
* GARM_INSTANCE_ID
* GARM_PROVIDER_CONFIG_FILE
-This command is not expected to output anything. On success it should simply ```exit 0```.
+This command is not expected to output anything. On success it should simply `exit 0`.
If the target instance does not exist in the provider, this command is expected to be a no-op.
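+
+A minimal sketch, again using a hypothetical `my_cloud` CLI:
+
+```bash
+# If the instance is already gone, treat the delete as a successful no-op.
+if ! my_cloud show "${GARM_INSTANCE_ID}" >/dev/null 2>&1; then
+    exit 0
+fi
+my_cloud delete "${GARM_INSTANCE_ID}"
+```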
## GetInstance
-The ```GetInstance``` command will return details about the instance, as seen by the provider.
+The `GetInstance` command will return details about the instance, as seen by the provider.
The environment variables set for this command are:
@@ -273,13 +270,13 @@ The environment variables set for this command are:
* GARM_INSTANCE_ID
* GARM_PROVIDER_CONFIG_FILE
-On success, this command is expected to return a valid ```json``` that can be deserialized into an ```Instance{}``` structure (see CreateInstance). If possible, IP addresses allocated to the VM should be returned in addition to the sample ```json``` printed above.
+On success, this command is expected to return a valid `json` that can be deserialized into an `Instance{}` structure (see CreateInstance). If possible, IP addresses allocated to the VM should be returned in addition to the sample `json` printed above.
On failure, this command is expected to return a non-zero exit code.
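+
+A sketch of translating a hypothetical `my_cloud show --json` response into the `Instance{}` fields garm expects (the accepted status values are documented in the structure linked above):
+
+```bash
+my_cloud show --json "${GARM_INSTANCE_ID}" | jq '{
+    provider_id: .id,
+    name: .name,
+    status: (if .power_state == "running" then "running" else "stopped" end)
+}'
+```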
## ListInstances
-The ```ListInstances``` command will print to standard output, a json that is deserializable into an **array** of ```Instance{}```.
+The `ListInstances` command will print to standard output a json that is deserializable into an **array** of `Instance{}`.
The environment variables set for this command are:
@@ -288,15 +285,15 @@ The environment variables set for this command are:
* GARM_PROVIDER_CONFIG_FILE
* GARM_POOL_ID
-This command must list all instances that have been tagged with the value in ```GARM_POOL_ID```.
+This command must list all instances that have been tagged with the value in `GARM_POOL_ID`.
-On success, a ```json``` is expected on standard output.
+On success, a `json` is expected on standard output.
On failure, a non-zero exit code is expected.
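+
+A sketch, again assuming a hypothetical `my_cloud` CLI that can filter by tag and print JSON:
+
+```bash
+# Emit a JSON array of Instance{} objects for everything tagged with this pool.
+my_cloud list --tag "garm-pool-id=${GARM_POOL_ID}" --json | \
+    jq '[ .[] | {provider_id: .id, name: .name, pool_id: env.GARM_POOL_ID} ]'
+```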
## RemoveAllInstances
-The ```RemoveAllInstances``` operation will remove all resources created in a cloud that have been tagged with the ```GARM_CONTROLLER_ID```. External providers should tag all resources they create with the garm controller ID. That tag can then be used to identify all resources when attempting to delete all instances.
+The `RemoveAllInstances` operation will remove all resources created in a cloud that have been tagged with the `GARM_CONTROLLER_ID`. External providers should tag all resources they create with the garm controller ID. That tag can then be used to identify all resources when attempting to delete all instances.
The environment variables set for this command are:
@@ -312,7 +309,7 @@ Note: This command is currently not used by garm.
## Start
-The ```Start``` operation will start the virtual machine in the selected cloud.
+The `Start` operation will start the virtual machine in the selected cloud.
The environment variables set for this command are:
@@ -327,9 +324,9 @@ On failure, a non-zero exit code is expected.
## Stop
-NOTE: This operation is currently not use by ```garm```, but should be implemented.
+NOTE: This operation is currently not used by `garm`, but should be implemented.
-The ```Stop``` operation will stop the virtual machine in the selected cloud.
+The `Stop` operation will stop the virtual machine in the selected cloud.
Available environment variables:
diff --git a/doc/extra_specs.md b/doc/extra_specs.md
index 8e55d11e..859b1fbd 100644
--- a/doc/extra_specs.md
+++ b/doc/extra_specs.md
@@ -2,7 +2,7 @@
ExtraSpecs is an opaque raw json that gets sent to the provider as part of the bootstrap params for instances. It can contain any kind of data needed by providers. The contents of this field means nothing to garm itself. We don't act on the information in this field at all. We only validate that it's a proper json.
-However, during the installation phase of the runners, GARM providers can leverage the information set in this field to augment the process in many ways. This can be used for anything rangin from overriding provider config values, to supplying a different runner install template, to passing in information that is relevant only to specific providers.
+However, during the installation phase of the runners, GARM providers can leverage the information set in this field to augment the process in many ways. This can be used for anything ranging from overriding provider config values, to supplying a different runner install template, to passing in information that is relevant only to specific providers.
For example, the [external OpenStack provider](https://github.com/cloudbase/garm-provider-openstack) uses this to [override](https://github.com/cloudbase/garm-provider-openstack#tweaking-the-provider) things like `security groups`, `storage backends`, `network ids`, etc.
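+
+Extra specs can be attached when creating or updating a pool. A hedged example (the pool ID is illustrative, and the keys shown are ones the LXD provider understands; check `garm-cli pool update --help` for the exact flag in your version):
+
+```bash
+garm-cli pool update 8ec34c1f-b053-4a5d-80d6-40afdfb389f9 \
+    --extra-specs='{"disable_updates": true, "enable_boot_debug": true}'
+```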
diff --git a/doc/gitea.md b/doc/gitea.md
new file mode 100644
index 00000000..72d3a202
--- /dev/null
+++ b/doc/gitea.md
@@ -0,0 +1,358 @@
+# Using GARM with Gitea
+
+Starting with Gitea 1.24 and the latest version of GARM (upcoming v0.2.0, currently `main`), GARM supports Gitea as a forge, side by side with GitHub/GHES. A new endpoint type has been added to represent Gitea instances, which you can configure and use alongside your GitHub runners.
+
+You can essentially create runners for both GitHub and Gitea using the same GARM instance, the same CLI and the same API. It's simply a matter of adding an endpoint and credentials. The rest is the same as for GitHub.
+
+## Quickstart
+
+This is for testing purposes only. We'll assume you're running on an Ubuntu 24.04 VM or server. You can use anything you'd like, but this quickstart is tailored to get you up and running with the LXD provider. So we'll:
+
+* Initialize LXD
+* Create a docker compose yaml
+* Deploy Gitea and GARM
+* Configure GARM to use Gitea
+
+You will have to install Docker-CE yourself.
+
+### Initialize LXD
+
+If you already have LXD initialized, you can skip this step. Otherwise, simply run:
+
+```bash
+sudo lxd init --auto
+```
+
+This should set up LXD with default settings that should work on any system.
+
+LXD and Docker sometimes have issues with networking due to some conflicting iptables rules. In most cases, if you have docker installed and notice that you don't have access to the outside world from the containers, run the following command:
+
+```bash
+sudo iptables -I DOCKER-USER -j ACCEPT
+```
+
+### Create the docker compose
+
+Create a docker compose file in `$HOME/compose.yaml`. This docker compose will deploy both Gitea and GARM. If you already have Gitea >= 1.24.0, you can edit this docker compose to deploy only GARM.
+
+```yaml
+networks:
+ default:
+ external: false
+
+services:
+ gitea:
+ image: docker.gitea.com/gitea:1.24.0-rc0
+ container_name: gitea
+ environment:
+ - USER_UID=1000
+ - USER_GID=1000
+ restart: always
+ networks:
+ - default
+ volumes:
+ - /etc/gitea/gitea:/data
+ - /etc/timezone:/etc/timezone:ro
+ - /etc/localtime:/etc/localtime:ro
+ ports:
+ - "80:80"
+ - "22:22"
+ garm:
+ image: ghcr.io/cloudbase/garm:${GARM_VERSION:-nightly}
+ container_name: garm
+ environment:
+ - USER_UID=1000
+ - USER_GID=1000
+ restart: always
+ networks:
+ - default
+ volumes:
+ - /etc/garm:/etc/garm
+ - /etc/timezone:/etc/timezone:ro
+ - /etc/localtime:/etc/localtime:ro
+ # Give GARM access to the LXD socket. We need this later in the LXD provider.
+ - /var/snap/lxd/common/lxd/unix.socket:/var/snap/lxd/common/lxd/unix.socket
+ ports:
+ - "9997:9997"
+```
+
+Create the folders for Gitea and GARM:
+
+```bash
+sudo mkdir -p /etc/gitea /etc/garm
+sudo chown 1000:1000 /etc/gitea /etc/garm
+```
+
+Create the GARM configuration file:
+
+```bash
+
+sudo tee /etc/garm/config.toml <<'EOF'
+[jwt_auth]
+# This needs to be changed.
+secret = "_YE+$%d+O;BMDqnaB)`U4_*iF8snEpEszPyg4N*lI&"
+time_to_live = "8760h"
+
+[apiserver]
+ bind = "0.0.0.0"
+ port = 9997
+ use_tls = false
+
+[database]
+ backend = "sqlite3"
+ # This needs to be changed.
+ passphrase = "OsawnUlubmuHontamedOdVurwetEymni"
+ [database.sqlite3]
+ db_file = "/etc/garm/garm.db"
+
+# This enables the LXD provider. There are other providers available in the image
+# in /opt/garm/providers.d. Feel free to use them as well.
+[[provider]]
+ name = "lxd_local"
+ provider_type = "external"
+ description = "Local LXD installation"
+ [provider.external]
+ provider_executable = "/opt/garm/providers.d/garm-provider-lxd"
+ config_file = "/etc/garm/garm-provider-lxd.toml"
+EOF
+```
+
+Create the LXD provider config file:
+
+```bash
+sudo tee /etc/garm/garm-provider-lxd.toml <<'EOF'
diff --git a/doc/github_credentials.md b/doc/github_credentials.md
-Garm uses a [Personal Access Token (PAT)](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/creating-a-personal-access-token) to create runner registration tokens, list current self hosted runners and potentially remove them if they become orphaned (the VM was manually removed on the provider).
+- [Configuring github endpoints and credentials](#configuring-github-endpoints-and-credentials)
+ - [Create GitHub endpoint](#create-github-endpoint)
+ - [Listing GitHub endpoints](#listing-github-endpoints)
+ - [Adding GitHub credentials](#adding-github-credentials)
+ - [Listing GitHub credentials](#listing-github-credentials)
+ - [Deleting GitHub credentials](#deleting-github-credentials)
-From the list of scopes, you will need to select:
+
+
+## Create GitHub endpoint
+
+To create a new GitHub endpoint, you can use the following command:
+
+```bash
+garm-cli github endpoint create \
+ --name example \
+ --description "Just an example ghes endpoint" \
+ --base-url https://ghes.example.com \
+ --upload-url https://upload.ghes.example.com \
+ --api-base-url https://api.ghes.example.com \
+ --ca-cert-path $HOME/ca-cert.pem
+```
+
+## Listing GitHub endpoints
+
+To list the available GitHub endpoints, you can use the following command:
+
+```bash
+ubuntu@garm:~/garm$ garm-cli github endpoint list
++------------+--------------------------+-------------------------------+
+| NAME | BASE URL | DESCRIPTION |
++------------+--------------------------+-------------------------------+
+| github.com | https://github.com | The github.com endpoint |
++------------+--------------------------+-------------------------------+
+| example | https://ghes.example.com | Just an example ghes endpoint |
++------------+--------------------------+-------------------------------+
+```
+
+## Adding GitHub credentials
+
+GARM has the option to use either [Personal Access Tokens (PAT)](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/creating-a-personal-access-token) or a [GitHub App](https://docs.github.com/en/apps/creating-github-apps/registering-a-github-app/registering-a-github-app).
+
+
+If you'll be using a classic PAT, you'll have to grant the following scopes:
* ```public_repo``` - for access to a repository
* ```repo``` - for access to a private repository
* ```admin:org``` - if you plan on using this with an organization to which you have access
* ```manage_runners:enterprise``` - if you plan to use garm at the enterprise level
+* ```admin:repo_hook``` - if you want to allow GARM to install webhooks on repositories (optional)
+* ```admin:org_hook``` - if you want to allow GARM to install webhooks on organizations (optional)
-The resulting token must be configured in the ```[[github]]``` section of the config. Sample as follows:
+Fine-grained PATs are also supported as long as you grant the required privileges:
-```toml
-# This is a list of credentials that you can define as part of the repository
-# or organization definitions. They are not saved inside the database, as there
-# is no Vault integration (yet). This will change in the future.
-# Credentials defined here can be listed using the API. Obviously, only the name
-# and descriptions are returned.
-[[github]]
- name = "gabriel"
- description = "github token or user gabriel"
- # This is a personal token with access to the repositories and organizations
- # you plan on adding to garm. The "workflow" option needs to be selected in order
- # to work with repositories, and the admin:org needs to be set if you plan on
- # adding an organization.
- oauth2_token = "super secret token"
- # base_url (optional) is the URL at which your GitHub Enterprise Server can be accessed.
- # If these credentials are for github.com, leave this setting blank
- base_url = "https://ghe.example.com"
- # api_base_url (optional) is the base URL where the GitHub Enterprise Server API can be accessed.
- # Leave this blank if these credentials are for github.com.
- api_base_url = "https://ghe.example.com"
- # upload_base_url (optional) is the base URL where the GitHub Enterprise Server upload API can be accessed.
- # Leave this blank if these credentials are for github.com, or if you don't have a separate URL
- # for the upload API.
- upload_base_url = "https://api.ghe.example.com"
- # ca_cert_bundle (optional) is the CA certificate bundle in PEM format that will be used by the github
- # client to talk to the API. This bundle will also be sent to all runners as bootstrap params.
- # Use this option if you're using a self signed certificate.
- # Leave this blank if you're using github.com or if your certificate is signed by a valid CA.
- ca_cert_bundle = "/etc/garm/ghe.crt"
+* **Repository permissions**:
+ * `Administration: Read & write` - needed to generate JIT config/registration token, remove runners, etc.
+ * `Metadata: Read-only` - automatically enabled by above
+ * `Webhooks: Read & write` - needed to install webhooks on repositories
+* **Organization permissions**:
+ * `Self-hosted runners: Read & write` - needed to manage runners in an organization
+ * `Webhooks: Read & write` - needed to install webhooks on organizations
+
+If you plan to use github apps, you'll need to select the following permissions:
+
+* **Repository permissions**:
+ * ```Administration: Read & write```
+ * ```Metadata: Read-only```
+ * ```Webhooks: Read & write```
+* **Organization permissions**:
+ * ```Self-hosted runners: Read & write```
+ * ```Webhooks: Read & write```
+
+**Note** :warning:: GitHub Apps are not available at the enterprise level.
+
+To add a new GitHub credential, you can use the following command:
+
+```bash
+garm-cli github credentials add \
+ --name gabriel \
+ --description "GitHub PAT for user gabriel" \
+ --auth-type pat \
+ --pat-oauth-token gh_theRestOfThePAT \
+ --endpoint github.com
```
-The double parenthesis means that this is an array. You can specify the ```[[github]]``` section multiple times, with different tokens from different users, or with different access levels. You will then be able to list the available credentials using the API, and reference these credentials when adding repositories or organizations.
+To add a new GitHub App credential, you can use the following command:
-The API will only ever return the name and description to the API consumer.
+```bash
+garm-cli github credentials add \
+ --name gabriel_app \
+ --description "Github App with access to repos" \
+ --endpoint github.com \
+ --auth-type app \
+ --app-id 1 \
+ --app-installation-id 99 \
+ --private-key-path $HOME/yourAppName.2024-03-01.private-key.pem
+```
+
+All sensitive data is encrypted at rest. The API will not return any sensitive info.
+
+## Listing GitHub credentials
+
+To list the available GitHub credentials, you can use the following command:
+
+```bash
+garm-cli github credentials list
+```
+
+## Deleting GitHub credentials
+
+To delete a GitHub credential, you can use the following command:
+
+```bash
+garm-cli github credentials delete
+```
\ No newline at end of file
diff --git a/doc/images/garm-dark.diagram.svg b/doc/images/garm-dark.diagram.svg
new file mode 100644
index 00000000..d48f2616
--- /dev/null
+++ b/doc/images/garm-dark.diagram.svg
@@ -0,0 +1,4 @@
+<!-- SVG markup stripped in this extraction. The image is the GARM architecture
+diagram (dark theme): GitHub/GHES webhooks signal new jobs to GARM's webhook
+endpoint and job queue; the pool manager consumes jobs in the "queued" state,
+selects the appropriate pool for an entity (repo/org/enterprise), and leverages
+a provider (LXD/Incus, k8s, AWS EC2, Azure, GCP/OpenStack, etc.) to create a
+compute instance in the target infrastructure; instances fetch their metadata
+and report installation progress via the callback URLs, the GitHub runner
+registers itself in the target entity, and logs/events stream over WebSocket. -->
\ No newline at end of file
diff --git a/doc/images/garm-dark.svg b/doc/images/garm-dark.svg
new file mode 100644
index 00000000..f0a0c564
--- /dev/null
+++ b/doc/images/garm-dark.svg
@@ -0,0 +1,37 @@
diff --git a/doc/images/garm-light.diagram.svg b/doc/images/garm-light.diagram.svg
new file mode 100644
index 00000000..754eafd9
--- /dev/null
+++ b/doc/images/garm-light.diagram.svg
@@ -0,0 +1,4 @@
+<!-- SVG markup stripped in this extraction; same GARM architecture diagram as garm-dark.diagram.svg, in the light theme. -->
\ No newline at end of file
diff --git a/doc/images/garm-light.svg b/doc/images/garm-light.svg
new file mode 100644
index 00000000..2495959d
--- /dev/null
+++ b/doc/images/garm-light.svg
@@ -0,0 +1,36 @@
diff --git a/doc/labels.md b/doc/labels.md
new file mode 100644
index 00000000..12daf605
--- /dev/null
+++ b/doc/labels.md
@@ -0,0 +1,15 @@
+# Labels and Tags
+
+GitHub runners can be tagged with labels. These labels can be used to restrict the jobs that can run on a runner. For example, you can have a runner with the label `linux` and another with the label `windows`. You can then restrict a job to run only on a runner with the label `linux`.
+
+Whenever a new runner registers itself on GitHub, it knows its own labels, as the labels are defined in the pool specification as tags.
+
+Before version 2.305.0 of the runner and before JIT runners were introduced, the runner registration process would append some default labels to the runner. These labels are:
+
+```yaml
+[ 'self-hosted', '$OS_TYPE', '$OS_ARCH' ]
+```
+
+This made scheduling and using runners a bit awkward in some situations. For example, in large organizations with many teams, workflows would often simply target the `self-hosted` label, which matched all runners regardless of any other custom labels. This had the side effect that workflows could end up on expensive runners for simple jobs, or on low-resource runners for tasks that require a lot of resources.
+
+Version 2.305.0 of the runner introduced the `--no-default-labels` flag when registering the runner. When JIT is not available (GHES version < 3.10), GARM will now register the runner with the `--no-default-labels` flag. If you still need the default labels, you can still add them when creating the pool as part of the `--tags` command line option.
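+
+If your workflows still depend on the old defaults, you can add them back explicitly via `--tags` when creating a pool. A sketch (the repo ID, provider, image and flavor are illustrative):
+
+```bash
+garm-cli pool add \
+    --repo 70227434-e7c0-4db1-8c17-e9ae3683f61e \
+    --provider-name lxd_local \
+    --image ubuntu:22.04 \
+    --flavor default \
+    --tags self-hosted,x64,Linux,my-custom-tag
+```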
diff --git a/doc/performance_considerations.md b/doc/performance_considerations.md
index 923586b8..42b81db5 100644
--- a/doc/performance_considerations.md
+++ b/doc/performance_considerations.md
@@ -6,17 +6,9 @@ Performance is often important when running GitHub action runners with garm. Thi
### Bundle the GitHub action runner
-When a new instance is created by garm, it usually downloads the latest available GitHub action runner binary, installs the requirements and starts it afterwards. This can be a time consuming task that quickly adds up when alot of instances are created by garm throughout the day. Therefore it is recommended to include the GitHub action runner binary inside of the used image.
+When a new instance is created by garm, it usually downloads the latest available GitHub action runner binary, installs the requirements and starts it afterwards. This can be a time-consuming task that quickly adds up when a lot of instances are created by garm throughout the day. Therefore, it is recommended to include the GitHub action runner binary in the image itself.
-There are two ways to do that:
-
-1. Add the extracted runner to `/opt/cache/actions-runner/latest` in which case, garm won't do any version checking and will blindly trust that whatever you put there is indeed the latest. This is useful if you want to run a pre-release of the runner or have your own patches applied to it. Also GitHub runners have an auto-update mechanism. When it detects that a new version is available, it updates itself to the latest version.
-
-2. Add the extracted runner to `/opt/cache/actions-runner/$VERSION` where `$VERSION` is the version of the runner. In this case, if what garm fetches from GitHub is different than what you bundled in the image, it will download and install the version indicated by GitHub.
-
-Note, when bundling the runner with your image, you will have to download it, extract it to one of the above mentioned locations and also run the `./bin/installdependencies.sh` inside the extracted folder. All dependencies needed to run the runner must be pre-installed when bundling.
-
-Example steps:
+Example steps for setting up a cached runner on a Linux image in LXD:
```bash
# Create a temporary instance from your base image
@@ -26,27 +18,29 @@ lxc launch temp
lxc exec temp -- bash
# Get and install the runner
-mkdir -p /opt/cache/actions-runner/latest
-cd /opt/cache/actions-runner/latest
-curl -o actions-runner-linux-x64-2.305.0.tar.gz -L https://github.com/actions/runner/releases/download/v2.305.0/actions-runner-linux-x64-2.305.0.tar.gz
-tar xzf ./actions-runner-linux-x64-2.305.0.tar.gz
-./bin/installdependencies.sh
+mkdir -p /home/runner/actions-runner
+cd /home/runner/actions-runner
+curl -O -L https://github.com/actions/runner/releases/download/v2.320.0/actions-runner-linux-x64-2.320.0.tar.gz
+# Extract the installer
+tar xzf ./actions-runner-linux-x64-2.320.0.tar.gz
# Exit the container
exit
# Stop the instance and publish it as a new image
lxc stop temp
-lxc publish temp --alias BASE_IMAGE-2.305.0
+lxc publish temp --alias BASE_IMAGE-2.320.0
# Delete the temporary instance
lxc delete temp
# Update garm to use the new image
garm-cli pool update \
- --image=BASE_IMAGE-2.305.0
+ --image=BASE_IMAGE-2.320.0
```
+You can read more about cached runners in the [Using Cached Runners](https://github.com/cloudbase/garm/blob/main/doc/using_cached_runners.md) documentation.
+
### Disable updates
By default garm configures the `cloud-init` process of a new instance to update packages on startup. To prevent this from happening (and therefore reduce the time needed to start an instance) garm can be configured accordingly.
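
As a sketch of how this is commonly wired up (an assumption here; the exact mechanism depends on your provider, so consult its documentation), providers built on `garm-provider-common` honor a `disable_updates` key in the pool extra specs:

```bash
# Sketch: disable cloud-init package updates for an existing pool.
# The disable_updates key is honored by providers built on
# garm-provider-common; verify this against your provider's docs.
garm-cli pool update <POOL_ID> \
  --extra-specs='{"disable_updates": true}'
```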
diff --git a/doc/providers.md b/doc/providers.md
deleted file mode 100644
index 7b7d686b..00000000
--- a/doc/providers.md
+++ /dev/null
@@ -1,169 +0,0 @@
-# Provider configuration
-
-GARM was designed to be extensible. Providers can be written either as built-in plugins or as external executables. The built-in plugins are written in Go, and they are compiled into the ```GARM``` binary. External providers are executables that implement the needed interface to create/delete/list compute systems that are used by ```GARM``` to create runners.
-
-GARM currently ships with one built-in provider for [LXD](https://linuxcontainers.org/lxd/introduction/) and the external provider interface which allows you to write your own provider in any language you want.
-
-- [LXD provider](#lxd-provider)
- - [LXD remotes](#lxd-remotes)
- - [LXD Security considerations](#lxd-security-considerations)
-- [External provider](#external-provider)
- - [Available external providers](#available-external-providers)
-
-## LXD provider
-
-GARM leverages LXD to create the runners. Here is a sample config section for an LXD provider:
-
-```toml
-# Currently, providers are defined statically in the config. This is due to the fact
-# that we have not yet added support for storing secrets in something like Barbican
-# or Vault. This will change in the future. However, for now, it's important to remember
-# that once you create a pool using one of the providers defined here, the name of that
-# provider must not be changed, or the pool will no longer work. Make sure you remove any
-# pools before removing or changing a provider.
-[[provider]]
- # An arbitrary string describing this provider.
- name = "lxd_local"
- # Provider type. GARM is designed to allow creating providers which are used to spin
- # up compute resources, which in turn will run the github runner software.
- # Currently, LXD is the only supprted provider, but more will be written in the future.
- provider_type = "lxd"
- # A short description of this provider. The name, description and provider types will
- # be included in the information returned by the API when listing available providers.
- description = "Local LXD installation"
- [provider.lxd]
- # the path to the unix socket that LXD is listening on. This works if GARM and LXD
- # are on the same system, and this option takes precedence over the "url" option,
- # which connects over the network.
- unix_socket_path = "/var/snap/lxd/common/lxd/unix.socket"
- # When defining a pool for a repository or an organization, you have an option to
- # specify a "flavor". In LXD terms, this translates to "profiles". Profiles allow
- # you to customize your instances (memory, cpu, disks, nics, etc).
- # This option allows you to inject the "default" profile along with the profile selected
- # by the flavor.
- include_default_profile = false
- # instance_type defines the type of instances this provider will create.
- #
- # Options are:
- #
- # * virtual-machine (default)
- # * container
- #
- instance_type = "container"
- # enable/disable secure boot. If the image you select for the pool does not have a
- # signed bootloader, set this to false, otherwise your instances won't boot.
- secure_boot = false
- # Project name to use. You can create a separate project in LXD for runners.
- project_name = "default"
- # URL is the address on which LXD listens for connections (ex: https://example.com:8443)
- url = ""
- # GARM supports certificate authentication for LXD remote connections. The easiest way
- # to get the needed certificates, is to install the lxc client and add a remote. The
- # client_certificate, client_key and tls_server_certificate can be then fetched from
- # $HOME/snap/lxd/common/config.
- client_certificate = ""
- client_key = ""
- tls_server_certificate = ""
- [provider.lxd.image_remotes]
- # Image remotes are important. These are the default remotes used by lxc. The names
- # of these remotes are important. When specifying an "image" for the pool, that image
- # can be a hash of an existing image on your local LXD installation or it can be a
- # remote image from one of these remotes. You can specify the images as follows:
- # Example:
- #
- # * ubuntu:20.04
- # * ubuntu_daily:20.04
- # * images:centos/8/cloud
- #
- # Ubuntu images come pre-installed with cloud-init which we use to set up the runner
- # automatically and customize the runner. For non Ubuntu images, you need to use the
- # variant that has "/cloud" in the name. Those images come with cloud-init.
- [provider.lxd.image_remotes.ubuntu]
- addr = "https://cloud-images.ubuntu.com/releases"
- public = true
- protocol = "simplestreams"
- skip_verify = false
- [provider.lxd.image_remotes.ubuntu_daily]
- addr = "https://cloud-images.ubuntu.com/daily"
- public = true
- protocol = "simplestreams"
- skip_verify = false
- [provider.lxd.image_remotes.images]
- addr = "https://images.linuxcontainers.org"
- public = true
- protocol = "simplestreams"
- skip_verify = false
-```
-
-You can choose to connect to a local LXD server by using the ```unix_socket_path``` option, or you can connect to a remote LXD cluster/server by using the ```url``` option. If both are specified, the unix socket takes precedence. The config file is fairly well commented, but I will add a note about remotes.
-
-### LXD remotes
-
-By default, GARM does not load any image remotes. You get to choose which remotes you add (if any). An image remote is a repository of images that LXD uses to create new instances, either virtual machines or containers. In the absence of any remote, GARM will attempt to find the image you configure for a pool of runners, on the LXD server we're connecting to. If one is present, it will be used, otherwise it will fail and you will need to configure a remote.
-
-The sample config file in this repository has the usual default ```LXD``` remotes:
-
-* (ubuntu) - Official Ubuntu images
-* (ubuntu_daily) - Official Ubuntu images, daily build
-* (images) - Community maintained images for various operating systems
-
-When creating a new pool, you'll be able to specify which image you want to use. The images are referenced by ```remote_name:image_tag```. For example, if you want to launch a runner on an Ubuntu 20.04, the image name would be ```ubuntu:20.04```. For a daily image it would be ```ubuntu_daily:20.04```. And for one of the unofficial images it would be ```images:centos/8-Stream/cloud```. Note, for unofficial images you need to use the tags that have ```/cloud``` in the name. These images come pre-installed with ```cloud-init``` which we need to set up the runners automatically.
-
-You can also create your own image remote, where you can host your own custom images. If you want to build your own images, have a look at [distrobuilder](https://github.com/lxc/distrobuilder).
-
-Image remotes in the ```GARM``` config, is a map of strings to remote settings. The name of the remote is the last bit of string in the section header. For example, the following section ```[provider.lxd.image_remotes.ubuntu_daily]```, defines the image remote named **ubuntu_daily**. Use this name to reference images inside that remote.
-
-You can also use locally uploaded images. Check out the [performance considerations](./performance_considerations.md) page for details on how to customize local images and use them with GARM.
-
-### LXD Security considerations
-
-GARM does not apply any ACLs of any kind to the instances it creates. That task remains in the responsibility of the user. [Here is a guide for creating ACLs in LXD](https://linuxcontainers.org/lxd/docs/master/howto/network_acls/). You can of course use ```iptables``` or ```nftables``` to create any rules you wish. I recommend you create a separate isolated lxd bridge for runners, and secure it using ACLs/iptables/nftables.
-
-You must make sure that the code that runs as part of the workflows is trusted, and if that cannot be done, you must make sure that any malicious code that will be pulled in by the actions and run as part of a workload, is as contained as possible. There is a nice article about [securing your workflow runs here](https://blog.gitguardian.com/github-actions-security-cheat-sheet/).
-
-## External provider
-
-The external provider is a special kind of provider. It delegates the functionality needed to create the runners to external executables. These executables can be either binaries or scripts. As long as they adhere to the needed interface, they can be used to create runners in any target IaaS. This is identical to what ```containerd``` does with ```CNIs```.
-
-There are currently two sample external providers available in the [contrib folder of this repository](../contrib/providers.d/). The providers are written in ```bash``` and are meant as examples of how a provider could be written in ```bash```. Production ready providers would need more error checking and idempotency, but they serve as an example of what can be done. As it stands, they are functional.
-
-The configuration for an external provider is quite simple:
-
-```toml
-# This is an example external provider. External providers are executables that
-# implement the needed interface to create/delete/list compute systems that are used
-# by GARM to create runners.
-[[provider]]
-name = "openstack_external"
-description = "external openstack provider"
-provider_type = "external"
- [provider.external]
- # config file passed to the executable via GARM_PROVIDER_CONFIG_FILE environment variable
- config_file = "/etc/garm/providers.d/openstack/keystonerc"
- # Absolute path to an executable that implements the provider logic. This executable can be
- # anything (bash, a binary, python, etc). See documentation in this repo on how to write an
- # external provider.
- provider_executable = "/etc/garm/providers.d/openstack/garm-external-provider"
-```
-
-The external provider has two options:
-
-* ```provider_executable```
-* ```config_file```
-
-The ```provider_executable``` option is the absolute path to an executable that implements the provider logic. GARM will delegate all provider operations to this executable. This executable can be anything (bash, python, perl, go, etc). See [Writing an external provider](./external_provider.md) for more details.
-
-The ```config_file``` option is a path on disk to an arbitrary file, that is passed to the external executable via the environment variable ```GARM_PROVIDER_CONFIG_FILE```. This file is only relevant to the external provider. GARM itself does not read it. In the case of the sample OpenStack provider, this file contains access information for an OpenStack cloud (what you would typically find in a ```keystonerc``` file) as well as some provider specific options like whether or not to boot from volume and which tenant network to use. You can check out the [sample config file](../contrib/providers.d/openstack/keystonerc) in this repository.
-
-If you want to implement an external provider, you can use this file for anything you need to pass into the binary when ```GARM``` calls it to execute a particular operation.
-
-### Available external providers
-
-For non testing purposes, there are two external providers currently available:
-
-* [OpenStack](https://github.com/cloudbase/garm-provider-openstack)
-* [Azure](https://github.com/cloudbase/garm-provider-azure)
-
-Details on how to install and configure them are available in their respective repositories.
-
-If you wrote a provider and would like to add it to the above list, feel free to open a PR.
diff --git a/doc/quickstart.md b/doc/quickstart.md
index 23fc89b7..889f799b 100644
--- a/doc/quickstart.md
+++ b/doc/quickstart.md
@@ -1,32 +1,21 @@
# Quick start
-Okay, I lied. It's not that quick. But it's not that long either. I promise.
+
-In this guide I'm going to take you through the entire process of setting up garm from scratch. This will include editing the config file (which will probably take the longest amount of time), fetching a proper [PAT](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/creating-a-personal-access-token) (personal access token) from GitHub, setting up the webhooks endpoint, defining your repo/org/enterprise and finally setting up a runner pool.
+ - [Create the config folder](#create-the-config-folder)
+ - [The config file](#the-config-file)
+ - [The provider section](#the-provider-section)
+ - [Starting the service](#starting-the-service)
+ - [Using Docker](#using-docker)
+ - [Setting up GARM as a system service](#setting-up-garm-as-a-system-service)
+ - [Initializing GARM](#initializing-garm)
+ - [Setting up the webhook](#setting-up-the-webhook)
+ - [Creating a GitHub endpoint (Optional)](#creating-a-github-endpoint-optional)
+ - [Adding credentials](#adding-credentials)
+ - [Define a repo](#define-a-repo)
+ - [Create a pool](#create-a-pool)
-For the sake of this guide, we'll assume you have access to the following setup:
-
-* A linux machine (ARM64 or AMD64)
-* Optionally, docker/podman installed on that machine
-* A public IP address or port forwarding set up on your router for port `80` or `443`. You can forward any ports, we just need to remember to use the same ports when we define the webhook in github, and the two URLs in the config file (more on that later). For the sake of this guide, I will assume you have port `80` or `443` forwarded to your machine.
-* An `A` record pointing to your public IP address (optional, but recommended). Alternatively, you can use the IP address directly. I will use `garm.example.com` in this guide. If you'll be using an IP address, just replace `garm.example.com` with your IP address throughout this guide.
-* All config files and data will be stored in `/etc/garm`.
-* A [Personal Access Token (PAT)](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/creating-a-personal-access-token)
-
-Why the need to expose GARM to the internet? Well, GARM uses webhooks sent by GitHub to automatically scale runners. Whenever a new job starts, a webhook is generated letting GARM know that there is a need for a runner. GARM then spins up a new runner instance and registers it with GitHub. When the job is done, the runner instance is automatically removed. This workflow is enabled by webhooks.
-
-## The GitHub PAT (Personal Access Token)
-
-Let's start by fetching a PAT so we get that out of the way. You can use the [GitHub docs](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/creating-a-personal-access-token) to create a PAT.
-
-For a `classic` PAT, GARM needs the following permissions to function properly (depending on the hierarchy level you want to manage):
-
-* ```public_repo``` - for access to a repository
-* ```repo``` - for access to a private repository
-* ```admin:org``` - if you plan on using this with an organization to which you have access
-* ```manage_runners:enterprise``` - if you plan to use garm at the enterprise level
-
-This doc will be updated at a future date with the exact permissions needed in case you want to use a fine grained PAT.
+
## Create the config folder
@@ -36,18 +25,28 @@ All of our config files and data will be stored in `/etc/garm`. Let's create tha
sudo mkdir -p /etc/garm
```
-Coincidentally, this is also where the docker container [looks for the config](../Dockerfile#L29) when it starts up. You can either use `Docker` or you can set up garm directly on your system. I'll show you both ways. In both cases, we need to first create the config folder and a proper config file.
+Coincidentally, this is also where the docker container [looks for the config](/Dockerfile#L29) when it starts up. You can either use `Docker` or you can set up garm directly on your system. We'll walk you through both options. In both cases, we need to first create the config folder and a proper config file.
## The config file
-There is a full config file, with detailed comments for each option, in the [testdata folder](../testdata/config.toml). You can use that as a reference. But for the purposes of this guide, we'll be using a minimal config file and add things on as we proceed.
+There is a full config file, with detailed comments for each option, in the [testdata folder](/testdata/config.toml). You can use that as a reference. But for the purposes of this guide, we'll be using a minimal config file and add things on as we proceed.
Open `/etc/garm/config.toml` in your favorite editor and paste the following:
```toml
[default]
-callback_url = "https://garm.example.com/api/v1/callbacks/status"
-metadata_url = "https://garm.example.com/api/v1/metadata"
+enable_webhook_management = true
+
+[logging]
+# If using nginx, you'll need to configure connection upgrade headers
+# for the /api/v1/ws location. See the sample config in the testdata
+# folder.
+enable_log_streamer = true
+# Set this to "json" if you want to consume these logs in something like
+# Loki or ELK.
+log_format = "text"
+log_level = "info"
+log_source = false
[metrics]
enable = true
@@ -62,6 +61,9 @@ time_to_live = "8760h"
bind = "0.0.0.0"
port = 80
use_tls = false
+ [apiserver.webui]
+ # Set this to false if you want to disable the Web UI.
+ enable = true
[database]
backend = "sqlite3"
@@ -71,74 +73,54 @@ time_to_live = "8760h"
db_file = "/etc/garm/garm.db"
```
-This is a minimal config, with no providers or credentials defined. In this example we have the [default](./config_default.md), [metrics](./config_metrics.md), [jwt_auth](./config_jwt_auth.md), [apiserver](./config_api_server.md) and [database](./config_database.md) sections. Each are documented separately. Feel free to read through the available docs if, for example you need to enable TLS without using an nginx reverse proxy or if you want to enable the debug server, the log streamer or a log file.
+This is a minimal config, with no providers defined. In this example we have the [default](/doc/config.md#the-default-config-section), [logging](/doc/config.md#the-logging-section), [metrics](/doc/config.md#the-metrics-section), [jwt_auth](/doc/config.md#the-jwt-authentication-config-section), [apiserver](/doc/config.md#the-api-server-config-section) and [database](/doc/config.md#database-configuration) sections. Each is documented separately. Feel free to read through the available docs if, for example, you need to enable TLS without using an nginx reverse proxy, or if you want to enable the debug server, the log streamer or a log file.
In this sample config we:
-* define the callback and the metadata URLs
+* set up logging preferences
* enable metrics with authentication
* set a JWT secret which is used to sign JWT tokens
* set a time to live for the JWT tokens
* enable the API server on port `80` and bind it to all interfaces
* set the database backend to `sqlite3` and set a passphrase for sealing secrets (just webhook secrets for now)
-The callback URLs are really important and need to point back to garm. You will notice that the domain name used in these options, is the same one we defined at the beginning of this guide. If you won't use a domain name, replace `garm.example.com` with your IP address and port number.
-
-We need to tell garm by which addresses it can be reached. There are many ways by which GARMs API endpoints can be exposed, and there is no sane way in which GARM itself can determine if it's behind a reverse proxy or not. The metadata URL may be served by a reverse proxy with a completely different domain name than the callback URL. Both domains pointing to the same installation of GARM in the end.
-
-The information in these two options is used by the instances we spin up to phone home their status and to fetch the needed metadata to finish setting themselves up. For now, the metadata URL is only used to fetch the runner registration token.
-
-We won't go too much into detail about each of the options here. Have a look at the different config sections and their respective docs for more information.
-
-At this point, we have a valid config file, but we still need to add `provider` and `credentials` sections.
+At this point, we have a valid config file, but we still need to add the `provider` section.
## The provider section
This is where you have a decision to make. GARM has a number of providers you can leverage. At the time of this writing, we have support for:
-* LXD
-* Azure
-* OpenStack
+* [OpenStack](https://github.com/cloudbase/garm-provider-openstack)
+* [Azure](https://github.com/cloudbase/garm-provider-azure)
+* [Kubernetes](https://github.com/mercedes-benz/garm-provider-k8s) - Thanks to the amazing folks at @mercedes-benz for sharing their awesome provider!
+* [LXD](https://github.com/cloudbase/garm-provider-lxd)
+* [Incus](https://github.com/cloudbase/garm-provider-incus)
+* [Equinix Metal](https://github.com/cloudbase/garm-provider-equinix)
+* [Amazon EC2](https://github.com/cloudbase/garm-provider-aws)
+* [Google Cloud Platform (GCP)](https://github.com/cloudbase/garm-provider-gcp)
+* [Oracle Cloud Infrastructure (OCI)](https://github.com/cloudbase/garm-provider-oci)
-The LXD provider is built into GARM itself and has no external requirements. The [Azure](https://github.com/cloudbase/garm-provider-azure) and [OpenStack](https://github.com/cloudbase/garm-provider-openstack) ones are `external` providers in the form of an executable that GARM calls into.
-
-Both the LXD and the external provider configs are [documented in a separate doc](./providers.md).
-
-The easiest provider to set up is probably the LXD provider. You don't need an account on an external cloud. You can just use your machine.
+The easiest provider to set up is probably the LXD or Incus provider. Incus is a fork of LXD so the functionality is identical (for now). For the purpose of this document, we'll continue with LXD. You don't need an account on an external cloud. You can just use your machine.
You will need to have LXD installed and configured. There is an excellent [getting started guide](https://documentation.ubuntu.com/lxd/en/latest/getting_started/) for LXD. Follow the instructions there to install and configure LXD, then come back here.
-Once you have LXD installed and configured, you can add the provider section to your config file. If you're connecting to the `local` LXD installation, the [config snippet for the LXD provider](./providers.md#lxd-provider) will work out of the box. We'll be connecting using the unix socket so no further configuration will be needed.
+Once you have LXD installed and configured, you can add the provider section to your config file. If you're connecting to the `local` LXD installation, the [config snippet for the LXD provider](https://github.com/cloudbase/garm-provider-lxd/blob/4ee4e6fc579da4a292f40e0f7deca1e396e223d0/testdata/garm-provider-lxd.toml) will work out of the box. We'll be connecting using the unix socket so no further configuration will be needed.
-Go ahead and copy and paste that entire snippet in your GARM config file (`/etc/garm/config.toml`).
+Go ahead and create a new config in a location where GARM can access it and paste that entire snippet. For the purposes of this doc, we'll assume you created a new file called `/etc/garm/garm-provider-lxd.toml`. That config file will be used by the provider itself. Remember, the providers are external executables that are called by GARM. They have their own configs which are relevant only to those executables, not GARM itself.
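+
+As a rough sketch (assuming the same keys as the sample config linked above; double-check against the provider version you install), a minimal `/etc/garm/garm-provider-lxd.toml` for a local unix socket connection might look like this:
+
+```toml
+# Connect to the local LXD daemon over the unix socket.
+unix_socket_path = "/var/snap/lxd/common/lxd/unix.socket"
+# Create containers instead of virtual machines.
+instance_type = "container"
+# Don't inject the "default" LXD profile alongside the pool flavor profile.
+include_default_profile = false
+# LXD project in which runner instances are created.
+project_name = "default"
+
+# Image remote used to resolve images such as "ubuntu:22.04".
+[image_remotes.ubuntu]
+  addr = "https://cloud-images.ubuntu.com/releases"
+  public = true
+  protocol = "simplestreams"
+  skip_verify = false
+```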
-You can also use an external provider instead of LXD. You will need to define the provider section in your config file and point it to the executable and the provider config file. The [config snippet for the external provider](./providers.md#external-provider) gives you an example of how that can be done. Configuring the external provider is outside the scope of this guide. You will need to consult the documentation for the external provider you want to use.
-
-## The credentials section
-
-The credentials section is where we define out GitHub credentials. GARM is capable of using either GitHub proper or [GitHub Enterprise Server](https://docs.github.com/en/enterprise-server@3.6/get-started/onboarding/getting-started-with-github-enterprise-server). The credentials section allows you to override the default GitHub API endpoint and point it to your own deployment of GHES.
-
-The credentials section is [documented in a separate doc](./github_credentials.md), but we will include a small snippet here for clarity.
+We now need to define the provider in the GARM config file and tell GARM how it can find both the provider binary and the provider specific config file. To do that, open the GARM config file `/etc/garm/config.toml` in your favorite editor and paste the following config snippet at the end:
```toml
-# This is a list of credentials that you can define as part of the repository
-# or organization definitions. They are not saved inside the database, as there
-# is no Vault integration (yet). This will change in the future.
-# Credentials defined here can be listed using the API. Obviously, only the name
-# and descriptions are returned.
-[[github]]
- name = "gabriel"
- description = "github token for user gabriel"
- # This is a personal token with access to the repositories and organizations
- # you plan on adding to garm. The "workflow" option needs to be selected in order
- # to work with repositories, and the admin:org needs to be set if you plan on
- # adding an organization.
- oauth2_token = "super secret token"
+[[provider]]
+ name = "lxd_local"
+ provider_type = "external"
+ description = "Local LXD installation"
+ [provider.external]
+ provider_executable = "/opt/garm/providers.d/garm-provider-lxd"
+ config_file = "/etc/garm/garm-provider-lxd.toml"
```
-The `oauth2_token` option will hold the PAT we created earlier. You can add multiple credentials to the config file. Each will be referenced by name when we define the repo/org/enterprise.
-
-Alright, we're almost there. We have a config file with a provider and a credentials section. We now have to start the service and create a webhook in GitHub pointing at our `webhook` endpoint.
+This config snippet assumes that the LXD provider executable is, or will be, available at `/opt/garm/providers.d/garm-provider-lxd`. If you're using the container image, the executable is already there. If you're installing GARM as a systemd service, don't worry; instructions on how to get the LXD provider executable are coming up.
## Starting the service
@@ -154,10 +136,10 @@ docker run -d \
-p 80:80 \
-v /etc/garm:/etc/garm:rw \
-v /var/snap/lxd/common/lxd/unix.socket:/var/snap/lxd/common/lxd/unix.socket:rw \
- ghcr.io/cloudbase/garm:v0.1.2
+ ghcr.io/cloudbase/garm:v0.1.6
```
-You will notice we also mounted the LXD unix socket from the host inside the container where the config you pasted expects to find it. If you plan to use an external provider that does not need to connect to LXD over a unix socket, feel free to remove that mount.
+You will notice that we also mounted the LXD unix socket from the host inside the container where the config you pasted expects to find it. If you plan to use an external provider that does not need to connect to LXD over a unix socket, feel free to remove that mount.
Check the logs to make sure everything is working as expected:
@@ -182,12 +164,12 @@ useradd --shell /usr/bin/false \
--no-create-home garm
```
-Adding the `garm` user to the LXD group will allow it to connect to the LXD unix socket. We'll need that considering the config we crafted above.
+Adding the `garm` user to the LXD group will allow it to connect to the LXD unix socket. We'll need that considering the config we crafted above. For anything beyond demonstration or testing, the recommendation is to connect to a remote LXD installation over TCP; the local unix socket setup is used here for simplicity.
Next, download the latest release from the [releases page](https://github.com/cloudbase/garm/releases).
```bash
-wget -q -O - https://github.com/cloudbase/garm/releases/download/v0.1.2/garm-linux-amd64.tgz | tar xzf - -C /usr/local/bin/
+wget -q -O - https://github.com/cloudbase/garm/releases/download/v0.1.6/garm-linux-amd64.tgz | tar xzf - -C /usr/local/bin/
```
We'll be running under an unprivileged user. If we want to be able to listen on any port under `1024`, we'll have to set some capabilities on the binary:
@@ -196,6 +178,20 @@ We'll be running under an unprivileged user. If we want to be able to listen on
setcap cap_net_bind_service=+ep /usr/local/bin/garm
```
+Create a folder for the external providers:
+
+```bash
+sudo mkdir -p /opt/garm/providers.d
+```
+
+Build the LXD provider binary from source:
+
+```bash
+git clone https://github.com/cloudbase/garm-provider-lxd
+cd garm-provider-lxd
+go build -o /opt/garm/providers.d/garm-provider-lxd
+```
+
Change the permissions on the config dir:
```bash
@@ -206,7 +202,7 @@ Copy the sample `systemd` service file:
```bash
wget -O /etc/systemd/system/garm.service \
- https://raw.githubusercontent.com/cloudbase/garm/v0.1.2/contrib/garm.service
+ https://raw.githubusercontent.com/cloudbase/garm/v0.1.6/contrib/garm.service
```
Reload the `systemd` daemon and start the service:
@@ -225,72 +221,72 @@ ubuntu@garm:~$ sudo journalctl -u garm
Check that you can make a request to the API:
```bash
-ubuntu@garm:~$ curl http://garm.example.com/webhooks
ubuntu@garm:~$ docker logs garm
signal.NotifyContext(context.Background, [interrupt terminated])
2023/07/17 22:21:33 Loading provider lxd_local
2023/07/17 22:21:33 registering prometheus metrics collectors
2023/07/17 22:21:33 setting up metric routes
-2023/07/17 22:21:35 ignoring unknown event
-172.17.0.1 - - [17/Jul/2023:22:21:35 +0000] "GET /webhooks HTTP/1.1" 200 0 "" "curl/7.81.0"
```
-Excellent! We have a working GARM installation. Now we need to set up the webhook in GitHub.
-
-## Setting up the webhook
-
-Before we create a pool, we need to set up the webhook in GitHub. This is a fairly simple process.
-
-Head over to the [webhooks doc](./webhooks.md) and follow the instructions there. Come back here when you're done.
-
-After you've finished setting up the webhook, there are just a few more things to do:
-
-* Initialize GARM
-* Add a repo/org/enterprise
-* Create a pool
+Excellent! We have a working GARM installation. Now we need to initialize the controller and set up the webhook in GitHub.
## Initializing GARM
-Before we can start using GARM, we need initialize it. This will create the `admin` user and generate a unique controller ID that will identify this GARM installation. This process allows us to use multiple GARM installations with the same GitHub account. GARM will use the controller ID to identify the runners it creates. This way we won't run the risk of accidentally removing runners we don't manage.
+Before we can start using GARM, we need to initialize it. This will create the `admin` user and generate a unique controller ID that will identify this GARM installation. This process allows us to use multiple GARM installations with the same GitHub account, if we want or need to. GARM will use the controller ID to identify the runners it creates. This way we won't run the risk of accidentally removing runners we don't manage.
To initialize GARM, we'll use the `garm-cli` tool. You can download the latest release from the [releases page](https://github.com/cloudbase/garm/releases):
```bash
-wget -q -O - https://github.com/cloudbase/garm/releases/download/v0.1.2/garm-cli-linux-amd64.tgz | tar xzf - -C /usr/local/bin/
+wget -q -O - https://github.com/cloudbase/garm/releases/download/v0.1.6/garm-cli-linux-amd64.tgz | tar xzf - -C /usr/local/bin/
```
Now we can initialize GARM:
```bash
-ubuntu@garm:~$ garm-cli init --name="local_garm" --url https://garm.example.com
+ubuntu@garm:~$ garm-cli init --name="local_garm" --url http://garm.example.com
Username: admin
-Email: root@localhost
-✔ Password: *************
+Email: admin@garm.example.com
+✔ Password: ************
+✔ Confirm password: ************
+Congrats! Your controller is now initialized.
+
+Following are the details of the admin user and details about the controller.
+
+Admin user information:
+
+----------+--------------------------------------+
| FIELD | VALUE |
+----------+--------------------------------------+
-| ID | ef4ab6fd-1252-4d5a-ba5a-8e8bd01610ae |
+| ID | 6b0d8f67-4306-4702-80b6-eb0e2e4ee695 |
| Username | admin |
-| Email | root@localhost |
+| Email | admin@garm.example.com |
| Enabled | true |
+----------+--------------------------------------+
-```
-The init command also created a local CLI profile for your new GARM server:
+Controller information:
-```bash
-ubuntu@garm:~$ garm-cli profile list
-+----------------------+--------------------------+
-| NAME | BASE URL |
-+----------------------+--------------------------+
-| local_garm (current) | https://garm.example.com |
-+----------------------+--------------------------+
++------------------------+-----------------------------------------------------------------------+
+| FIELD | VALUE |
++------------------------+-----------------------------------------------------------------------+
+| Controller ID | 0c54fd66-b78b-450a-b41a-65af2fd0f71b |
+| Metadata URL | http://garm.example.com/api/v1/metadata |
+| Callback URL | http://garm.example.com/api/v1/callbacks |
+| Webhook Base URL | http://garm.example.com/webhooks |
+| Controller Webhook URL | http://garm.example.com/webhooks/0c54fd66-b78b-450a-b41a-65af2fd0f71b |
++------------------------+-----------------------------------------------------------------------+
+
+Make sure that the URLs in the table above are reachable by the relevant parties.
+
+The metadata and callback URLs *must* be accessible by the runners that GARM spins up.
+The base webhook and the controller webhook URLs must be accessible by GitHub or GHES.
```
Every time you init a new GARM instance, a new profile will be created in your local `garm-cli` config. You can also log into an already initialized instance using:
```bash
-garm-cli profile add --name="another_garm" --url https://garm2.example.com
+garm-cli profile add \
+ --name="another_garm" \
+ --url https://garm2.example.com
```
Then you can switch between profiles using:
@@ -299,6 +295,77 @@ Then you can switch between profiles using:
garm-cli profile switch another_garm
```
+## Setting up the webhook
+
+There are two options when it comes to setting up the webhook in GitHub. You can manually set up the webhook in the GitHub UI, and then use the resulting secret when creating the entity (repo, org, enterprise), or you can let GARM do it automatically if the app or PAT you're using has the [required privileges](/doc/github_credentials.md).
+
+If you want to manually set up the webhooks, have a look at the [webhooks doc](/doc/webhooks.md) for more information.
+
+In this guide, I'll show you how to do it automatically when adding a new repo, assuming you have the required privileges. Note, you'll still have to manually set up webhooks if you want to use GARM at the enterprise level. Automatic webhook management is only available for repos and orgs.
+
+## Creating a GitHub endpoint (Optional)
+
+This section is only of interest if you're using a GitHub Enterprise Server (GHES) deployment. If you're using [github.com](https://github.com), you can skip this section.
+
+Let's list existing endpoints:
+
+```bash
+gabriel@rossak:~$ garm-cli github endpoint list
++------------+--------------------+-------------------------+
+| NAME | BASE URL | DESCRIPTION |
++------------+--------------------+-------------------------+
+| github.com | https://github.com | The github.com endpoint |
++------------+--------------------+-------------------------+
+```
+
+By default, GARM creates a default `github.com` endpoint. This endpoint cannot be updated or deleted. If you want to add a new endpoint, you can do so using the `github endpoint create` command:
+
+```bash
+garm-cli github endpoint create \
+ --name example \
+ --description "Just an example ghes endpoint" \
+ --base-url https://ghes.example.com \
+ --upload-url https://upload.ghes.example.com \
+ --api-base-url https://api.ghes.example.com \
+ --ca-cert-path $HOME/ca-cert.pem
+```
+
+In this example, we add a new GitHub endpoint called `example`. The `ca-cert-path` option is optional and is used to verify the server's certificate. If you don't provide a path, GARM will use the system's default CA certificates.
+
+## Adding credentials
+
+Before we can add a new entity, we need GitHub credentials to interact with that entity (manipulate runners, create webhooks, etc.). Credentials are tied to a specific GitHub endpoint. In this section we'll be adding credentials that are valid for either [github.com](https://github.com) or your own GHES server (if you added one in the previous section).
+
+When creating a new entity (repo, org, enterprise) using the credentials you define here, GARM will automatically associate that entity with the github endpoint that the credentials use.
+
+If you want to swap the credentials for an entity, the new credentials will need to be associated with the same endpoint as the old credentials.
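+
+For example, swapping the credentials on an existing repo might look like this (a sketch; it assumes the `repo update` command accepts a `--credentials` flag and that both credentials point at the same endpoint):
+
+```bash
+# Hypothetical: switch a repo from PAT-based to app-based credentials.
+garm-cli repo update <REPO_ID> \
+  --credentials gabriel_app
+```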
+
+Let's add some credentials:
+
+```bash
+garm-cli github credentials add \
+ --name gabriel \
+ --description "GitHub PAT for user gabriel" \
+ --auth-type pat \
+ --pat-oauth-token gh_theRestOfThePAT \
+ --endpoint github.com
+```
+
+You can also add a GitHub App as credentials. The process is similar, but you'll need to provide the `app_id`, `private_key_path` and `installation_id`:
+
+```bash
+garm-cli github credentials add \
+ --name gabriel_app \
+ --description "Github App with access to repos" \
+ --endpoint github.com \
+ --auth-type app \
+ --app-id 1 \
+ --app-installation-id 99 \
+ --private-key-path $HOME/yourAppName.2024-03-01.private-key.pem
+```
+
+All sensitive info is encrypted at rest. Also, the API will not return sensitive data.
+
## Define a repo
We now have a working GARM installation, with github credentials and a provider added. It's time to add a repo.
@@ -306,45 +373,51 @@ We now have a working GARM installation, with github credentials and a provider
Before we add a repo, let's list credentials. We'll need their names when we'll add a new repo.
```bash
-gabriel@rossak:~$ garm-cli credentials list
-+---------+-------------------------------+--------------------+-------------------------+-----------------------------+
-| NAME | DESCRIPTION | BASE URL | API URL | UPLOAD URL |
-+---------+-------------------------------+--------------------+-------------------------+-----------------------------+
-| gabriel | github token for user gabriel | https://github.com | https://api.github.com/ | https://uploads.github.com/ |
-+---------+-------------------------------+--------------------+-------------------------+-----------------------------+
+ubuntu@garm:~$ garm-cli github credentials list
++----+-------------+------------------------------------+--------------------+-------------------------+-----------------------------+------+
+| ID | NAME | DESCRIPTION | BASE URL | API URL | UPLOAD URL | TYPE |
++----+-------------+------------------------------------+--------------------+-------------------------+-----------------------------+------+
+| 1 | gabriel | GitHub PAT for user gabriel | https://github.com | https://api.github.com/ | https://uploads.github.com/ | pat |
++----+-------------+------------------------------------+--------------------+-------------------------+-----------------------------+------+
+| 2 | gabriel_app | Github App with access to repos | https://github.com | https://api.github.com/ | https://uploads.github.com/ | app |
++----+-------------+------------------------------------+--------------------+-------------------------+-----------------------------+------+
```
-Even though you didn't explicitly set the URLs, GARM will default to the GitHub ones. You can override them if you want to use a GHES deployment.
-
Now we can add a repo:
```bash
garm-cli repo add \
- --credentials gabriel \
--owner gsamfira \
--name scripts \
- --webhook-secret $SECRET
+ --credentials gabriel \
+ --random-webhook-secret \
+ --install-webhook \
+ --pool-balancer-type roundrobin
```
-In this case, `$SECRET` holds the webhook secret you set previously when you defined the webhook in GitHub. This secret is mandatory as GARM will always validate the webhook payloads it receives.
+This will add a new repo called `scripts` under the `gsamfira` org. We also tell GARM to generate a random secret and install a webhook using that random secret. If you want to use a specific secret, you can use the `--webhook-secret` option, but in that case, you'll have to manually set up the webhook in GitHub.
+
+The `--pool-balancer-type` option sets the pool balancer type, which dictates how GARM chooses the pool in which to create a new runner when consuming recorded queued jobs. With `roundrobin` (the default), GARM cycles through all pools and creates a runner in the first pool that has available resources. With `pack`, GARM tries to fill up a pool before moving on to the next one. The order of the pools is determined by pool priority. We'll see more about pools in the next section.
You should see something like this:
```bash
gabriel@rossak:~$ garm-cli repo add \
-> --credentials gabriel \
-> --owner gsamfira \
-> --name scripts \
-> --webhook-secret $SECRET
+ --name scripts \
+ --credentials gabriel_org \
+ --install-webhook \
+ --random-webhook-secret \
+ --owner gsamfira \
+ --pool-balancer-type roundrobin
+----------------------+--------------------------------------+
| FIELD | VALUE |
+----------------------+--------------------------------------+
-| ID | f4900c7c-2ec0-41bd-9eab-d70fe9bd850d |
+| ID | 0c91d9fd-2417-45d4-883c-05daeeaa8272 |
| Owner | gsamfira |
| Name | scripts |
-| Credentials | gabriel |
-| Pool manager running | false |
-| Failure reason | |
+| Pool balancer type | roundrobin |
+| Credentials | gabriel_app |
+| Pool manager running | true |
+----------------------+--------------------------------------+
```
@@ -352,11 +425,11 @@ We can now list the repos:
```bash
gabriel@rock:~$ garm-cli repo ls
-+--------------------------------------+----------+---------+------------------+------------------+
-| ID | OWNER | NAME | CREDENTIALS NAME | POOL MGR RUNNING |
-+--------------------------------------+----------+---------+------------------+------------------+
-| f4900c7c-2ec0-41bd-9eab-d70fe9bd850d | gsamfira | scripts | gabriel | true |
-+--------------------------------------+----------+---------+------------------+------------------+
++--------------------------------------+----------+--------------+------------------+--------------------+------------------+
+| ID | OWNER | NAME | CREDENTIALS NAME | POOL BALANCER TYPE | POOL MGR RUNNING |
++--------------------------------------+----------+--------------+------------------+--------------------+------------------+
+| 0c91d9fd-2417-45d4-883c-05daeeaa8272 | gsamfira | scripts | gabriel | roundrobin | true |
++--------------------------------------+----------+--------------+------------------+--------------------+------------------+
```
Excellent! Make a note of the ID. We'll need it later when we create a pool.
@@ -369,18 +442,18 @@ To create a pool we'll need the repo ID from the previous step (which we have) a
```bash
gabriel@rossak:~$ garm-cli provider list
-+-----------+------------------------+------+
-| NAME | DESCRIPTION | TYPE |
-+-----------+------------------------+------+
-| lxd_local | Local LXD installation | lxd |
-+-----------+------------------------+------+
++-----------+------------------------+-----------+
+| NAME | DESCRIPTION | TYPE |
++-----------+------------------------+-----------+
+| lxd_local | Local LXD installation | external |
++-----------+------------------------+-----------+
```
Now we can create a pool:
```bash
garm-cli pool add \
- --repo f4900c7c-2ec0-41bd-9eab-d70fe9bd850d \
+ --repo 0c91d9fd-2417-45d4-883c-05daeeaa8272 \
--enabled true \
--provider-name lxd_local \
--flavor default \
@@ -396,7 +469,7 @@ You should see something like this:
```bash
gabriel@rossak:~$ garm-cli pool add \
-> --repo f4900c7c-2ec0-41bd-9eab-d70fe9bd850d \
+> --repo 0c91d9fd-2417-45d4-883c-05daeeaa8272 \
> --enabled true \
> --provider-name lxd_local \
> --flavor default \
@@ -411,6 +484,7 @@ gabriel@rossak:~$ garm-cli pool add \
+--------------------------+--------------------------------------------+
| ID | 344e4a72-2035-4a18-a3d5-87bd3874b56c |
| Provider Name | lxd_local |
+| Priority | 0 |
| Image | ubuntu:22.04 |
| Flavor | default |
| OS Type | linux |
@@ -418,7 +492,7 @@ gabriel@rossak:~$ garm-cli pool add \
| Max Runners | 5 |
| Min Idle Runners | 0 |
| Runner Bootstrap Timeout | 20 |
-| Tags | self-hosted, amd64, Linux, ubuntu, generic |
+| Tags | ubuntu, generic |
| Belongs to | gsamfira/scripts |
| Level | repo |
| Enabled | true |
@@ -431,22 +505,22 @@ gabriel@rossak:~$ garm-cli pool add \
If we list the pool we should see it:
```bash
-gabriel@rock:~$ garm-cli pool ls -a
-+--------------------------------------+--------------+---------+----------------------------------------+------------------+-------+---------+---------------+
-| ID | IMAGE | FLAVOR | TAGS | BELONGS TO | LEVEL | ENABLED | RUNNER PREFIX |
-+--------------------------------------+--------------+---------+----------------------------------------+------------------+-------+---------+---------------+
-| 344e4a72-2035-4a18-a3d5-87bd3874b56c | ubuntu:22.04 | default | self-hosted amd64 Linux ubuntu generic | gsamfira/scripts | repo | true | garm |
-+--------------------------------------+--------------+---------+----------------------------------------+------------------+-------+---------+---------------+
+gabriel@rock:~$ garm-cli pool ls
++--------------------------------------+---------------------------+--------------+-----------------+------------------+-------+---------+---------------+----------+
+| ID | IMAGE | FLAVOR | TAGS | BELONGS TO | LEVEL | ENABLED | RUNNER PREFIX | PRIORITY |
++--------------------------------------+---------------------------+--------------+-----------------+------------------+-------+---------+---------------+----------+
+| 344e4a72-2035-4a18-a3d5-87bd3874b56c | ubuntu:22.04 | default | ubuntu generic | gsamfira/scripts | repo | true | garm | 0 |
++--------------------------------------+---------------------------+--------------+-----------------+------------------+-------+---------+---------------+----------+
```
-This pool is enabled, but the `min-idle-runners` option is set to 0. This means that it will not create any lingering runners. It will only create runners when a job is started. If your provider is slow to boot up new instances, you may want to set this to a value higher than 0.
+This pool is enabled, but the `min-idle-runners` option is set to 0. This means it will not create any idle runners; it will only create runners when a job starts and a webhook is sent to our GARM server. Optionally, you can set `min-idle-runners` to a value greater than 0, but keep in mind that depending on the provider you use, this may incur additional costs.
For the purposes of this guide, we'll increase it to 1 so we have a runner created.
First, list current runners:
```bash
-gabriel@rossak:~$ garm-cli runner ls -a
+gabriel@rossak:~$ garm-cli runner ls
+----+------+--------+---------------+---------+
| NR | NAME | STATUS | RUNNER STATUS | POOL ID |
+----+------+--------+---------------+---------+
@@ -462,6 +536,7 @@ gabriel@rossak:~$ garm-cli pool update 344e4a72-2035-4a18-a3d5-87bd3874b56c --mi
+--------------------------+--------------------------------------------+
| ID | 344e4a72-2035-4a18-a3d5-87bd3874b56c |
| Provider Name | lxd_local |
+| Priority | 0 |
| Image | ubuntu:22.04 |
| Flavor | default |
| OS Type | linux |
@@ -469,7 +544,7 @@ gabriel@rossak:~$ garm-cli pool update 344e4a72-2035-4a18-a3d5-87bd3874b56c --mi
| Max Runners | 5 |
| Min Idle Runners | 1 |
| Runner Bootstrap Timeout | 20 |
-| Tags | self-hosted, amd64, Linux, ubuntu, generic |
+| Tags | ubuntu, generic |
| Belongs to | gsamfira/scripts |
| Level | repo |
| Enabled | true |
@@ -482,7 +557,7 @@ gabriel@rossak:~$ garm-cli pool update 344e4a72-2035-4a18-a3d5-87bd3874b56c --mi
Now if we list the runners:
```bash
-gabriel@rossak:~$ garm-cli runner ls -a
+gabriel@rossak:~$ garm-cli runner ls
+----+-------------------+----------------+---------------+--------------------------------------+
| NR | NAME | STATUS | RUNNER STATUS | POOL ID |
+----+-------------------+----------------+---------------+--------------------------------------+
@@ -504,7 +579,7 @@ gabriel@rossak:~$ lxc list
If we wait for a bit and run:
```bash
-gabriel@rossak:~$ garm-cli runner show garm-tdtD6zpsXhj1
+gabriel@rossak:~$ garm-cli runner show garm-tdtD6zpsXhj1
+-----------------+------------------------------------------------------------------------------------------------------+
| FIELD | VALUE |
+-----------------+------------------------------------------------------------------------------------------------------+
@@ -548,4 +623,6 @@ gabriel@rossak:~$ garm-cli job ls
There are no jobs sent yet to my GARM install, but once you start sending jobs, you'll see them here as well.
-That's it! You now have a working GARM installation. You can add more repos, orgs or enterprises and create more pools. You can also add more providers for different clouds and credentials with access to different GitHub resources.
+That's it! Now you have a working GARM installation. You can add more repos, orgs or enterprises and create more pools. You can also add more providers for different clouds and credentials with access to different GitHub resources.
+
+Check out the [Using GARM](/doc/using_garm.md) guide for more details on how to use GARM.
diff --git a/doc/scalesets.md b/doc/scalesets.md
new file mode 100644
index 00000000..de9d348e
--- /dev/null
+++ b/doc/scalesets.md
@@ -0,0 +1,93 @@
+# Scale Sets
+
+
+
+- [Scale Sets](#scale-sets)
+ - [Create a new scale set](#create-a-new-scale-set)
+ - [Scale Set vs Pool](#scale-set-vs-pool)
+
+
+
+GARM supports [scale sets](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners-with-actions-runner-controller/deploying-runner-scale-sets-with-actions-runner-controller). This new mode of operation was added by GitHub to enable more efficient scheduling of runners using their own ARC (Actions Runner Controller) project. The APIs for enabling scale sets are not yet public, and the scale set functionality itself is not terribly well documented outside the context of ARC, but it can be implemented in third party auto scalers.
+
+In this document we will focus on how scale sets work, how they differ from pools and how to manage them.
+
+We'll start with detailing how to create a scale set.
+
+## Create a new scale set
+
+Creating a scale set is identical to [creating a pool](/doc/using_garm.md#creating-a-runner-pool), except that instead of labels, a scale set takes a name. We'll assume you already have a provider enabled and have added a repo, org or enterprise to GARM.
+
+```bash
+ubuntu@garm:~$ garm-cli repo ls
++--------------------------------------+-----------+--------------+------------+------------------+--------------------+------------------+
+| ID | OWNER | NAME | ENDPOINT | CREDENTIALS NAME | POOL BALANCER TYPE | POOL MGR RUNNING |
++--------------------------------------+-----------+--------------+------------+------------------+--------------------+------------------+
+| 84a5e82f-7ab1-427f-8ee0-4569b922296c | gsamfira | garm-testing | github.com | gabriel-samfira | roundrobin | true |
++--------------------------------------+-----------+--------------+------------+------------------+--------------------+------------------+
+```
+
+List providers:
+
+```bash
+ubuntu@garm:~$ garm-cli provider list
++--------------+---------------------------------+----------+
+| NAME | DESCRIPTION | TYPE |
++--------------+---------------------------------+----------+
+| incus | Incus external provider | external |
++--------------+---------------------------------+----------+
+| azure | azure provider | external |
++--------------+---------------------------------+----------+
+| aws_ec2 | Amazon EC2 provider | external |
++--------------+---------------------------------+----------+
+```
+
+Create a new scale set:
+
+```bash
+garm-cli scaleset add \
+ --repo 84a5e82f-7ab1-427f-8ee0-4569b922296c \
+ --provider-name incus \
+ --image ubuntu:22.04 \
+ --name garm-scale-set \
+ --flavor default \
+ --enabled true \
+ --min-idle-runners=0 \
+ --max-runners=20
++--------------------------+-----------------------+
+| FIELD | VALUE |
++--------------------------+-----------------------+
+| ID | 8 |
+| Scale Set ID | 14 |
+| Scale Name | garm-scale-set |
+| Provider Name | incus |
+| Image | ubuntu:22.04 |
+| Flavor | default |
+| OS Type | linux |
+| OS Architecture | amd64 |
+| Max Runners | 20 |
+| Min Idle Runners | 0 |
+| Runner Bootstrap Timeout | 20 |
+| Belongs to | gsamfira/garm-testing |
+| Level | repo |
+| Enabled | true |
+| Runner Prefix | garm |
+| Extra specs | |
+| GitHub Runner Group | Default |
++--------------------------+-----------------------+
+```
+
+That's it. You now have a scale set created, ready to accept jobs.
+
+## Scale Set vs Pool
+
+Scale sets are a new way of managing runners, introduced by GitHub to enable more efficient scheduling. They are meant to reduce API calls, improve the reliability of message delivery to the auto scaler and improve the efficiency of runner scheduling. While webhooks work great most of the time, under heavy load they may not fire, or they may fire while the auto scaler is offline, leading to lost messages. If webhooks fire while GARM is down, we will never know about those jobs unless we query the current workflow runs.
+
+Listing workflow runs is not feasible for orgs or enterprises, as that would mean listing all repos within an org and then, for each repository, listing all workflow runs. This gets worse for enterprises. Scale sets, on the other hand, allow GARM to subscribe to a message queue and receive messages just for that scale set over HTTP long poll.
+
+Advantages of scale sets over pools:
+
+* No more need to install a webhook, reducing your security footprint.
+* Scheduling is done by GitHub. GARM receives runner requests from GitHub and GARM can choose to acquire those jobs or leave them for some other scaler.
+* Easier use of runner groups. While GARM supports runner groups, GitHub currently [does not send the group name](https://github.com/orgs/community/discussions/158000) as part of webhooks in the `queued` state. This prevents GARM (or any other auto scaler) from efficiently scheduling runners to pools that have runner groups set. But because, in the case of scale sets, GitHub schedules the runners to the scale set itself, we can efficiently create runners in specific runner groups.
+* Scale set names must be unique within a runner group.
diff --git a/doc/using_cached_runners.md b/doc/using_cached_runners.md
new file mode 100644
index 00000000..a5573b20
--- /dev/null
+++ b/doc/using_cached_runners.md
@@ -0,0 +1,52 @@
+# Using Cached Runners
+
+## GitHub Action Runners and GARM
+
+When a new instance is created by GARM, it usually downloads the latest available GitHub action runner binary, installs its requirements and then starts it. This can be a time-consuming task that quickly adds up when a lot of instances are created by GARM throughout the day. It is therefore recommended to include the GitHub action runner binary inside the image you use.
+
+GARM supports cached runners on Linux and Windows images in a simple manner. GARM checks whether the runner path (`C:\actions-runner` or `/home/runner/actions-runner`) exists on the chosen image, and thus knows whether it needs to create the path and download the runner, or use the existing runner. To keep setup and validation of the runner simple, the check relies on the user properly creating, downloading and installing the runner in the predefined path for the target OS.
+
+>**NOTE:** More about these paths will be presented below in the sections for each target OS.
+
+### Cached Runners on Linux Images
+
+On a Linux image, GARM expects the cached runner to be set up in a static, predefined way: it must be installed in the `/home/runner/actions-runner` directory. The user therefore needs to configure their custom image properly in order for GARM to use the cached runner instead of downloading the latest available GitHub action runner binary.
+
+In order to configure a cached GitHub actions runner to work with GARM, the following steps need to be followed:
+
+1. Create the `actions-runner` directory inside the `/home/runner` directory (the home path for the GARM runner)
+2. Download the desired version of the runner package
+3. Extract the installer inside the `actions-runner` directory
+
+> **NOTE:** These are based on the steps described in the [actions/runner](https://github.com/actions/runner/releases) repository for installing the GitHub action runner on Linux x64. The full list of commands looks like this:
+
+```bash
+# Create a folder
+mkdir actions-runner && cd actions-runner
+# Download the latest runner package
+curl -O -L https://github.com/actions/runner/releases/download/v2.320.0/actions-runner-linux-x64-2.320.0.tar.gz
+# Extract the installer
+tar xzf ./actions-runner-linux-x64-2.320.0.tar.gz
+```
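+
+If you are baking the runner into an image, you will likely also want the files owned by the user the runner agent runs as. Below is a minimal sketch of an image-build step; the `runner` user and the pinned version are assumptions, so adjust them to your image:
+
+```bash
+# Run during image build (cloud-init, Packer provisioner, etc.).
+# Assumes a "runner" user whose home directory is /home/runner.
+RUNNER_VERSION="2.320.0"
+sudo -u runner mkdir -p /home/runner/actions-runner
+cd /home/runner/actions-runner
+sudo -u runner curl -O -L "https://github.com/actions/runner/releases/download/v${RUNNER_VERSION}/actions-runner-linux-x64-${RUNNER_VERSION}.tar.gz"
+sudo -u runner tar xzf "./actions-runner-linux-x64-${RUNNER_VERSION}.tar.gz"
+```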
+
+### Cached Runners on Windows Images
+
+On a Windows image, GARM expects the cached runner to be set up in a static, predefined way: it must be installed in the `C:\actions-runner\` folder. The user therefore needs to configure their custom image properly in order for GARM to use the cached runner instead of downloading the latest available GitHub action runner binary.
+
+In order to configure a cached GitHub actions runner to work with GARM, the following steps need to be followed:
+
+1. Create the `actions-runner` folder inside the drive root (`C:\`)
+2. Download the desired version of the runner package
+3. Extract the installer into the folder created at step 1 (`C:\actions-runner\`)
+
+> **NOTE:** These are based on the steps described in the [actions/runner](https://github.com/actions/runner/releases) repository for installing the GitHub action runner on Windows x64. The full list of commands looks like this:
+
+```powershell
+# Create a folder under the drive root
+mkdir \actions-runner ; cd \actions-runner
+# Download the latest runner package
+Invoke-WebRequest -Uri https://github.com/actions/runner/releases/download/v2.320.0/actions-runner-win-x64-2.320.0.zip -OutFile actions-runner-win-x64-2.320.0.zip
+# Extract the installer
+Add-Type -AssemblyName System.IO.Compression.FileSystem ;
+[System.IO.Compression.ZipFile]::ExtractToDirectory("$PWD\actions-runner-win-x64-2.320.0.zip", "$PWD")
+```
\ No newline at end of file
diff --git a/doc/using_garm.md b/doc/using_garm.md
new file mode 100644
index 00000000..e7758410
--- /dev/null
+++ b/doc/using_garm.md
@@ -0,0 +1,816 @@
+# Using GARM
+
+This document will walk you through the various commands and options available in GARM. It is assumed that you have already installed GARM and have it running. If you haven't, please check out the [quickstart](/doc/quickstart.md) document for instructions on how to install GARM.
+
+While using the GARM CLI, you will most likely spend most of your time listing pools and runners, but we will cover most of the available commands and options. Some of them we'll skip (like the `init` or `profile` subcommands), as they've been covered in the [quickstart](/doc/quickstart.md) document.
+
+
+- [Using GARM](#using-garm)
+ - [Controller operations](#controller-operations)
+ - [Listing controller info](#listing-controller-info)
+ - [Updating controller settings](#updating-controller-settings)
+ - [Providers](#providers)
+ - [Listing configured providers](#listing-configured-providers)
+ - [Github Endpoints](#github-endpoints)
+ - [Creating a GitHub Endpoint](#creating-a-github-endpoint)
+ - [Listing GitHub Endpoints](#listing-github-endpoints)
+ - [Getting information about an endpoint](#getting-information-about-an-endpoint)
+ - [Deleting a GitHub Endpoint](#deleting-a-github-endpoint)
+ - [GitHub credentials](#github-credentials)
+ - [Adding GitHub credentials](#adding-github-credentials)
+ - [Listing GitHub credentials](#listing-github-credentials)
+ - [Getting detailed information about credentials](#getting-detailed-information-about-credentials)
+ - [Deleting GitHub credentials](#deleting-github-credentials)
+ - [Repositories](#repositories)
+ - [Adding a new repository](#adding-a-new-repository)
+ - [Listing repositories](#listing-repositories)
+ - [Removing a repository](#removing-a-repository)
+ - [Organizations](#organizations)
+ - [Adding a new organization](#adding-a-new-organization)
+ - [Enterprises](#enterprises)
+ - [Adding an enterprise](#adding-an-enterprise)
+ - [Managing webhooks](#managing-webhooks)
+ - [Pools](#pools)
+ - [Creating a runner pool](#creating-a-runner-pool)
+ - [Listing pools](#listing-pools)
+ - [Showing pool info](#showing-pool-info)
+ - [Deleting a pool](#deleting-a-pool)
+ - [Update a pool](#update-a-pool)
+ - [Runners](#runners)
+ - [Listing runners](#listing-runners)
+ - [Showing runner info](#showing-runner-info)
+ - [Deleting a runner](#deleting-a-runner)
+ - [The debug-log command](#the-debug-log-command)
+ - [The debug-events command](#the-debug-events-command)
+ - [Listing recorded jobs](#listing-recorded-jobs)
+
+
+
+## Controller operations
+
+The `controller` is essentially GARM itself. Every deployment of GARM will have its own controller ID, which will be used to tag runners in GitHub. The controller is responsible for managing runners, webhooks, repositories, organizations and enterprises. There are a few settings at the controller level which you can tweak, which we will cover below.
+
+### Listing controller info
+
+You can list the controller info by running the following command:
+
+```bash
+garm-cli controller show
++-------------------------+----------------------------------------------------------------------------+
+| FIELD | VALUE |
++-------------------------+----------------------------------------------------------------------------+
+| Controller ID | a4dd5f41-8e1e-42a7-af53-c0ba5ff6b0b3 |
+| Hostname | garm |
+| Metadata URL | https://garm.example.com/api/v1/metadata |
+| Callback URL | https://garm.example.com/api/v1/callbacks |
+| Webhook Base URL | https://garm.example.com/webhooks |
+| Controller Webhook URL | https://garm.example.com/webhooks/a4dd5f41-8e1e-42a7-af53-c0ba5ff6b0b3 |
+| Minimum Job Age Backoff | 30 |
+| Version | v0.1.6 |
++-------------------------+----------------------------------------------------------------------------+
+```
+
+There are several things of interest in this output.
+
+* `Controller ID` - This is the unique identifier of the controller. Each GARM installation will, on first run, automatically generate a unique controller ID. This is important for several reasons. For one, it allows us to run several GARM controllers on the same repos/orgs/enterprises without accidentally clashing with each other. Each runner started by a GARM controller will be tagged with this controller ID, in order to easily identify runners that we manage.
+* `Hostname` - This is the hostname of the machine where GARM is running. This is purely informative.
+* `Metadata URL` - This URL is configured by the user, and is the URL that is presented to the runners via userdata when they get set up. Runners will connect to this URL and retrieve information they might need to set themselves up. GARM cannot automatically determine this URL, as it is dependent on the user's network setup. GARM may be hidden behind a load balancer or a reverse proxy, in which case the URL by which the GARM controller can be accessed may be different from the IP addresses that are locally visible to GARM. Runners must be able to connect to this URL.
+* `Callback URL` - This URL is configured by the user, and is the URL that is presented to the runners via userdata when they get set up. Runners will connect to this URL and send status updates and system information (OS version, OS name, github runner agent ID, etc) to the controller. Runners must be able to connect to this URL.
+* `Webhook Base URL` - This is the base URL for webhooks. It is configured by the user in the GARM config file. This URL can be called into by GitHub itself when hooks get triggered by a workflow. GARM needs to know when a new job is started in order to schedule the creation of a new runner. Job webhooks sent to this URL will be recorded by GARM and acted upon. While you can configure this URL directly in your GitHub repo settings, it is advised to use the `Controller Webhook URL` instead, as it is unique to each controller and allows you to potentially install multiple GARM controllers inside the same repo. GitHub must be able to connect to this URL.
+* `Controller Webhook URL` - This is the URL that GitHub will call into when a webhook is triggered. This URL is unique to each GARM controller and is the preferred URL to use in order to receive webhooks from GitHub. It serves the same purpose as the `Webhook Base URL`, but is unique to each controller, allowing you to potentially install multiple GARM controllers inside the same repo. GitHub must be able to connect to this URL.
+* `Minimum Job Age Backoff` - This is the job age in seconds after which GARM will consider spinning up a new runner to handle it. By default GARM waits for 30 seconds after receiving a new job before it spins up a runner. This delay is there to allow any existing idle runners (managed by GARM or not) to pick up the job before GARM reacts to it. This way we avoid being too eager and spinning up a runner for a job that would have been picked up by an existing runner anyway. You can set this to 0 if you want GARM to react immediately.
+* `Version` - This is the version of GARM that is running.
+
+We will see the `Controller Webhook URL` later when we set up the GitHub repo to send webhooks to GARM.
+
+### Updating controller settings
+
+As we've mentioned before, there are 3 URLs that are very important for normal operations:
+
+* `metadata_url` - Must be reachable by runners
+* `callback_url` - Must be reachable by runners
+* `webhook_url` - Must be reachable by GitHub
+
+These URLs depend heavily on how GARM was set up and on the user's network topology. GARM may be behind a NAT or a reverse proxy. There may be different hostnames/URL paths set up for each of the above, etc. The short of it is that we cannot reliably determine these URLs and must ask the user to tell GARM what they are.
+
+We can assume that the URL at which the user logs in to manage GARM is the same base URL at which the rest of the URLs are served, but that is just an assumption. By default, when you initialize GARM for the first time, we make this assumption to make things easy. It's also safe to assume that most users will do this anyway, but in case you don't, you will need to update the URLs in the controller and tell GARM what they are.
+
+In the previous section we saw that most URLs were set to `https://garm.example.com`. The URL path was the same as the routes that GARM sets up. For example, the `metadata_url` has `/api/v1/metadata`. The `callback_url` has `/api/v1/callbacks` and the `webhook_url` has `/webhooks`. This is the default setup and is what most users will use.
+
+If you need to update these URLs, you can use the following command:
+
+```bash
+garm-cli controller update \
+ --metadata-url https://garm.example.com/api/v1/metadata \
+ --callback-url https://garm.example.com/api/v1/callbacks \
+ --webhook-url https://garm.example.com/webhooks
+```
+
+The `Controller Webhook URL` you saw in the previous section is automatically calculated by GARM and is essentially the `webhook_url` with the controller ID appended to it. This URL is unique to each controller and is the preferred URL to use in order to receive webhooks from GitHub.
+
+After updating the URLs, make sure that they are properly routed to the appropriate API endpoint in GARM **and** that they are accessible by the interested parties (runners or GitHub).
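+
+Other controller settings can be changed the same way. For example, to make GARM react to new jobs immediately, you could lower the minimum job age backoff described earlier. The flag name below is an assumption derived from the setting's name; double check it against `garm-cli controller update --help` for your version:
+
+```bash
+# Assumed flag name; verify with --help before using.
+garm-cli controller update --minimum-job-age-backoff 0
+```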
+
+## Providers
+
+GARM uses providers to create runners. These providers are external executables that GARM calls into to create runners in a particular IaaS.
+
+### Listing configured providers
+
+Once configured (see [provider configuration](/doc/config.md#providers)), you can list the configured providers by running the following command:
+
+```bash
+ubuntu@garm:~$ garm-cli provider list
++--------------+---------------------------------+----------+
+| NAME | DESCRIPTION | TYPE |
++--------------+---------------------------------+----------+
+| incus | Incus external provider | external |
++--------------+---------------------------------+----------+
+| lxd | LXD external provider | external |
++--------------+---------------------------------+----------+
+| openstack | OpenStack external provider | external |
++--------------+---------------------------------+----------+
+| azure | Azure provider | external |
++--------------+---------------------------------+----------+
+| k8s_external | k8s external provider | external |
++--------------+---------------------------------+----------+
+| Amazon EC2 | Amazon EC2 provider | external |
++--------------+---------------------------------+----------+
+| equinix | Equinix Metal | external |
++--------------+---------------------------------+----------+
+```
+
+Each of these providers can be used to set up a runner pool for a repository, organization or enterprise.
+
+## Github Endpoints
+
+GARM can be used to manage runners for repos, orgs and enterprises hosted on `github.com` or on a GitHub Enterprise Server.
+
+Endpoints are the way GARM identifies where the credentials and entities you create are located and where the API endpoints for the GitHub API can be reached, along with an optional CA certificate used to validate the connection. There is a default endpoint for `github.com`, so you don't need to add one unless you're using GHES.
+
+### Creating a GitHub Endpoint
+
+To create a GitHub endpoint, you can run the following command:
+
+```bash
+garm-cli github endpoint create \
+ --base-url https://ghes.example.com \
+ --api-base-url https://api.ghes.example.com \
+ --upload-url https://upload.ghes.example.com \
+ --ca-cert-path $HOME/ca-cert.pem \
+ --name example \
+ --description "Just an example ghes endpoint"
++----------------+------------------------------------------------------------------+
+| FIELD | VALUE |
++----------------+------------------------------------------------------------------+
+| Name | example |
+| Base URL | https://ghes.example.com |
+| Upload URL | https://upload.ghes.example.com |
+| API Base URL | https://api.ghes.example.com |
+| CA Cert Bundle | -----BEGIN CERTIFICATE----- |
+| | MIICBzCCAY6gAwIBAgIQX7fEm3dxkTeSc+E1uTFuczAKBggqhkjOPQQDAzA2MRkw |
+| | FwYDVQQKExBHQVJNIGludGVybmFsIENBMRkwFwYDVQQDExBHQVJNIGludGVybmFs |
+| | IENBMB4XDTIzMDIyNTE4MzE0NloXDTMzMDIyMjE4MzE0NlowNjEZMBcGA1UEChMQ |
+| | R0FSTSBpbnRlcm5hbCBDQTEZMBcGA1UEAxMQR0FSTSBpbnRlcm5hbCBDQTB2MBAG |
+| | ByqGSM49AgEGBSuBBAAiA2IABKat241Jzvkl+ksDuPq5jFf9wb5/l54NbGYYfcrs |
+| | 4d9/sNXtPP1y8pM61hs+hCltN9UEwtxqr48q5G7Oc3IjH/dddzJTDC2bLcpwysrC |
+| | NYLGtSfNj+o/8AQMwwclAY7t4KNhMF8wDgYDVR0PAQH/BAQDAgIEMB0GA1UdJQQW |
+| | MBQGCCsGAQUFBwMCBggrBgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW |
+| | BBSY+cSG07sIU2UC+fOniODKUGqiUTAKBggqhkjOPQQDAwNnADBkAjBcFz3cZ7vO |
+| | IFVzqn9eqXMmZDGp58HGneHhFhJsJtQE4BkxGQmgZJ2OgTGXDqjXG3wCMGMQRALt |
+| | JxwlI1PJJj7M0g48viS4NjT4kq2t/UFIbTy78aarFynUfykpL9FD9NOmiQ== |
+| | -----END CERTIFICATE----- |
+| | |
++----------------+------------------------------------------------------------------+
+```
+
+The name of the endpoint needs to be unique within GARM.
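+
+Endpoints can also be modified after creation. The sketch below assumes an `update` verb symmetric with `create`; check `garm-cli github endpoint update --help` for the flags your version actually supports:
+
+```bash
+# Update the description of the endpoint we just created (flags assumed).
+garm-cli github endpoint update example --description "GHES endpoint for example.com"
+```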
+
+### Listing GitHub Endpoints
+
+To list existing GitHub endpoints, run the following command:
+
+```bash
+garm-cli github endpoint list
++------------+--------------------------+-------------------------------+
+| NAME | BASE URL | DESCRIPTION |
++------------+--------------------------+-------------------------------+
+| github.com | https://github.com | The github.com endpoint |
++------------+--------------------------+-------------------------------+
+| example | https://ghes.example.com | Just an example ghes endpoint |
++------------+--------------------------+-------------------------------+
+```
+
+### Getting information about an endpoint
+
+To get information about a specific endpoint, you can run the following command:
+
+```bash
+garm-cli github endpoint show github.com
++--------------+-----------------------------+
+| FIELD | VALUE |
++--------------+-----------------------------+
+| Name | github.com |
+| Base URL | https://github.com |
+| Upload URL | https://uploads.github.com/ |
+| API Base URL | https://api.github.com/ |
++--------------+-----------------------------+
+```
+
+### Deleting a GitHub Endpoint
+
+You can delete an endpoint unless any of the following conditions are met:
+
+* The endpoint is the default endpoint for `github.com`
+* The endpoint is in use by a repository, organization or enterprise
+* There are credentials defined against the endpoint you are trying to remove
+
+To delete an endpoint, you can run the following command:
+
+```bash
+garm-cli github endpoint delete example
+```
+
+## GitHub credentials
+
+GARM needs access to your GitHub repositories, organizations or enterprise in order to manage runners. This is done via a [GitHub personal access token or via a GitHub App](/doc/github_credentials.md). You can configure multiple tokens or apps with access to various repositories, organizations or enterprises, either on GitHub or on GitHub Enterprise Server.
+
+### Adding GitHub credentials
+
+There are two types of credentials:
+
+* PAT - Personal Access Token
+* App - GitHub App
+
+To add each of these types of credentials, slightly different command line arguments are required. I'm going to give you an example of both.
+
+To add a PAT, you can run the following command:
+
+```bash
+garm-cli github credentials add \
+ --name deleteme \
+ --description "just a test" \
+ --auth-type pat \
+ --pat-oauth-token gh_yourTokenGoesHere \
+ --endpoint github.com
+```
+
+To add a GitHub App (only available for repos and orgs), you can run the following command:
+
+```bash
+garm-cli github credentials add \
+ --name deleteme-app \
+ --description "just a test" \
+ --endpoint github.com \
+ --auth-type app \
+ --app-id 1 \
+ --app-installation-id 99 \
+ --private-key-path /etc/garm/yourGarmAppKey.2024-12-12.private-key.pem
+```
+
+Notice that in both cases we specified the GitHub endpoint for which these credentials are valid.
+
+### Listing GitHub credentials
+
+To list existing credentials, run the following command:
+
+```bash
+ubuntu@garm:~$ garm-cli github credentials ls
++----+-------------+------------------------------------+--------------------+-------------------------+-----------------------------+------+
+| ID | NAME | DESCRIPTION | BASE URL | API URL | UPLOAD URL | TYPE |
++----+-------------+------------------------------------+--------------------+-------------------------+-----------------------------+------+
+| 1 | gabriel | github token or user gabriel | https://github.com | https://api.github.com/ | https://uploads.github.com/ | pat |
++----+-------------+------------------------------------+--------------------+-------------------------+-----------------------------+------+
+| 2 | gabriel_org | github token with org level access | https://github.com | https://api.github.com/ | https://uploads.github.com/ | app |
++----+-------------+------------------------------------+--------------------+-------------------------+-----------------------------+------+
+```
+
+See the [github credentials](/doc/github_credentials.md) section for more details about credentials.
+
+### Getting detailed information about credentials
+
+To get detailed information about one specific credential, you can run the following command:
+
+```bash
+garm-cli github credentials show 2
++---------------+------------------------------------+
+| FIELD | VALUE |
++---------------+------------------------------------+
+| ID | 2 |
+| Name | gabriel_org |
+| Description | github token with org level access |
+| Base URL | https://github.com |
+| API URL | https://api.github.com/ |
+| Upload URL | https://uploads.github.com/ |
+| Type | app |
+| Endpoint | github.com |
+| | |
+| Repositories | gsamfira/garm-testing |
+| | |
+| Organizations | gsamfira |
++---------------+------------------------------------+
+```
+
+### Deleting GitHub credentials
+
+To delete a credential, you can run the following command:
+
+```bash
+garm-cli github credentials delete 2
+```
+
+> **NOTE**: You may not delete credentials that are currently associated with a repository, organization or enterprise. You will need to first replace the credentials on the entity, and then you can delete the credentials.
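+
+To swap the credentials on an entity before deleting the old ones, you would update the entity first. A sketch, assuming a `--credentials` flag on the entity's `update` subcommand (see `garm-cli repository update --help`); the angle-bracket values are placeholders:
+
+```bash
+# Point the repository at a different credential set; the old credentials
+# can then be deleted.
+garm-cli repository update <REPO_ID> --credentials <NEW_CREDENTIALS_NAME>
+```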
+
+## Repositories
+
+### Adding a new repository
+
+To add a new repository we need to use credentials that have access to the repository. We've listed credentials above, so let's add our first repository:
+
+```bash
+ubuntu@garm:~$ garm-cli repository add \
+ --name garm \
+ --owner gabriel-samfira \
+ --credentials gabriel \
+ --install-webhook \
+ --pool-balancer-type roundrobin \
+ --random-webhook-secret
++----------------------+--------------------------------------+
+| FIELD | VALUE |
++----------------------+--------------------------------------+
+| ID | 0c91d9fd-2417-45d4-883c-05daeeaa8272 |
+| Owner | gabriel-samfira |
+| Name | garm |
+| Pool balancer type | roundrobin |
+| Credentials | gabriel |
+| Pool manager running | true |
++----------------------+--------------------------------------+
+```
+
+Let's break down the command a bit and explain what happened above. We added a new repository to GARM that belongs to the user `gabriel-samfira` and is called `garm`. When using GitHub, this translates to `https://github.com/gabriel-samfira/garm`.
+
+As part of the above command, we used the credentials called `gabriel` to authenticate to GitHub. If those credentials didn't have access to the repository, we would have received an error when adding the repo.
+
+The other interesting bit about the above command is that we automatically added the webhook to the repository and generated a secure random secret to authenticate the webhooks that come in from GitHub for this new repo. Any webhook claiming to be for the `gabriel-samfira/garm` repo will be validated against the secret that was generated.
+
+Another important aspect to remember is that once the entity (in this case a repository) is created, the credentials associated with it at creation time dictate the GitHub endpoint on which this repository exists.
+
+When updating credentials for this entity, the new credentials **must** be associated with the same endpoint as the old ones. An error is returned if the repo is associated with `github.com` but the new credentials you're trying to set are associated with a GHES endpoint.
+
+### Listing repositories
+
+To list existing repositories, run the following command:
+
+```bash
+ubuntu@garm:~$ garm-cli repository list
++--------------------------------------+-----------------+--------------+------------------+--------------------+------------------+
+| ID | OWNER | NAME | CREDENTIALS NAME | POOL BALANCER TYPE | POOL MGR RUNNING |
++--------------------------------------+-----------------+--------------+------------------+--------------------+------------------+
+| be3a0673-56af-4395-9ebf-4521fea67567 | gabriel-samfira | garm | gabriel | roundrobin | true |
++--------------------------------------+-----------------+--------------+------------------+--------------------+------------------+
+```
+
+This will list all the repositories that GARM is currently managing.
+
+### Removing a repository
+
+To remove a repository, you can use the following command:
+
+```bash
+garm-cli repository delete be3a0673-56af-4395-9ebf-4521fea67567
+```
+
+This will remove the repository from GARM, and if a webhook was installed, will also clean up the webhook from the repository.
+
+> **NOTE**: GARM will not remove a webhook that points to the `Base Webhook URL`. It will only remove webhooks that are namespaced to the running controller.
+
+## Organizations
+
+### Adding a new organization
+
+Adding a new organization is similar to adding a new repository. You need to use credentials that have access to the organization, and you can add the organization to GARM using the following command:
+
+```bash
+ubuntu@garm:~$ garm-cli organization add \
+ --credentials gabriel_org \
+ --name gsamfira \
+ --install-webhook \
+ --random-webhook-secret
++----------------------+--------------------------------------+
+| FIELD | VALUE |
++----------------------+--------------------------------------+
+| ID | b50f648d-708f-48ed-8a14-cf58887af9cf |
+| Name | gsamfira |
+| Credentials | gabriel_org |
+| Pool manager running | true |
++----------------------+--------------------------------------+
+```
+
+This will add the organization `gsamfira` to GARM, and install a webhook for it. The webhook will be validated against the secret that was generated. The only difference between adding an organization and adding a repository is that you use the `organization` subcommand instead of the `repository` subcommand, and the `--name` option represents the `name` of the organization.
+
+Managing webhooks for organizations is similar to managing webhooks for repositories. You can *list*, *show*, *install* and *uninstall* webhooks for organizations using the `garm-cli organization webhook` subcommand. We won't go into details here, as it's similar to managing webhooks for repositories.
+
+All the other operations that exist on repositories, like listing, removing, etc, also exist for organizations and enterprises. Check out the help for the `garm-cli organization` subcommand for more details.
+
+## Enterprises
+
+### Adding an enterprise
+
+Enterprises are a bit special. Currently we don't support managing webhooks for enterprises, mainly because the level of access that would be required to do so seems a bit too much to enable in GARM itself. And considering that you'll probably only ever have one enterprise with multiple organizations and repositories, the effort/risk to benefit ratio makes this feature not worth implementing at the moment.
+
+To add an enterprise to GARM, you can use the following command:
+
+```bash
+garm-cli enterprise add \
+ --credentials gabriel_enterprise \
+ --name samfira \
+ --webhook-secret SuperSecretWebhookTokenPleaseReplaceMe
+```
+
+The `name` of the enterprise is the ["slug" of the enterprise](https://docs.github.com/en/enterprise-cloud@latest/admin/managing-your-enterprise-account/creating-an-enterprise-account).
+
+You will then have to manually add the `Controller Webhook URL` to the enterprise in the GitHub UI.
+
+All the other operations that exist on repositories, like listing, removing, etc, also exist for organizations and enterprises. Have a look at the help for the `garm-cli enterprise` subcommand for more details.
+
+At that point the enterprise will be added to GARM and you can start managing runners for it.
+
+## Managing webhooks
+
+Webhook management is available for repositories and organizations. I'm going to show you how to manage webhooks for a repository, but the same commands apply for organizations. See `--help` for more details.
+
+When we added the repository in the previous section, we specified the `--install-webhook` and the `--random-webhook-secret` options. These two options automatically added a webhook to the repository and generated a random secret for it. The webhook URL that was used corresponds to the `Controller Webhook URL` that we saw earlier when we listed the controller info. Let's show it and see what it looks like:
+
+```bash
+ubuntu@garm:~$ garm-cli repository webhook show be3a0673-56af-4395-9ebf-4521fea67567
++--------------+----------------------------------------------------------------------------+
+| FIELD | VALUE |
++--------------+----------------------------------------------------------------------------+
+| ID | 460257636 |
+| URL | https://garm.example.com/webhooks/a4dd5f41-8e1e-42a7-af53-c0ba5ff6b0b3 |
+| Events | [workflow_job] |
+| Active | true |
+| Insecure SSL | false |
++--------------+----------------------------------------------------------------------------+
+```
+
+We can see that it's active, as well as the events to which it is subscribed.
+
+The `--install-webhook` and `--random-webhook-secret` options are convenience options that allow you to quickly add a new repository to GARM and have it ready to receive webhooks from GitHub. As long as you configured the URLs correctly (see previous sections for details), you should see a green checkmark in the GitHub settings page, under `Webhooks`.
+
+If you don't want to install the webhook, you can add the repository without it, and then install it later using the `garm-cli repository webhook install` command (which we'll show in a second) or manually add it in the GitHub UI.
+
+To uninstall a webhook from a repository, you can use the following command:
+
+```bash
+garm-cli repository webhook uninstall be3a0673-56af-4395-9ebf-4521fea67567
+```
+
+After which, attempting to show the webhook will return an error, as the hook no longer exists:
+
+```bash
+ubuntu@garm:~$ garm-cli repository webhook show be3a0673-56af-4395-9ebf-4521fea67567
+Error: [GET /repositories/{repoID}/webhook][404] GetRepoWebhookInfo default {Error:Not Found Details:hook not found}
+```
+
+You can always add it back using:
+
+```bash
+ubuntu@garm:~$ garm-cli repository webhook install be3a0673-56af-4395-9ebf-4521fea67567
++--------------+----------------------------------------------------------------------------+
+| FIELD | VALUE |
++--------------+----------------------------------------------------------------------------+
+| ID | 460258767 |
+| URL | https://garm.example.com/webhooks/a4dd5f41-8e1e-42a7-af53-c0ba5ff6b0b3 |
+| Events | [workflow_job] |
+| Active | true |
+| Insecure SSL | false |
++--------------+----------------------------------------------------------------------------+
+```
+
+To allow GARM to manage webhooks, the PAT or app you're using must have the `admin:repo_hook` and `admin:org_hook` scopes (or equivalent). Webhook management is not available for enterprises. For enterprises you will have to add the webhook manually.
+
+To manually add a webhook, see the [webhooks](/doc/webhooks.md) section.
+
+## Pools
+
+### Creating a runner pool
+
+Now that we have a repository, organization or enterprise added to GARM, we can create a runner pool for it. A runner pool is a collection of runners of the same type that are managed by GARM and used to run workflows for the repository, organization or enterprise.
+
+You can create multiple pools of runners for the same entity (repository, organization or enterprise), with each pool defining a different runner type. For example, you can have a pool of runners that are created on AWS, and another pool of runners that are created on Azure, k8s, LXD, etc. For repositories or organizations with complex needs, you can set up a number of pools that cover a wide range of needs, based on cost, capability (GPUs, FPGAs, etc) or sheer raw computing power. You don't have to pick just one, especially since managing all of them is done using the exact same commands, as we'll show below.
+
+Before we create a pool, we have to decide which provider we want to use. We've listed the providers above, so let's pick one and create a pool of runners for our repository. For the purpose of this example, we'll use the `incus` provider. We'll show you how to create a pool using this provider, but keep in mind that adding another pool using a different provider is done using the exact same commands. The only difference will be in the `--image`, `--flavor` and `--extra-specs` options that you'll use when creating the pool.
+
+Out of those three options, only `--image` and `--flavor` are mandatory. The `--extra-specs` flag is optional and is used to pass additional information to the provider when creating the pool. This option is provider specific, and you'll have to consult the provider documentation to see what options are available.
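+
+Just to illustrate the shape of the flag, extra specs are passed as a JSON blob when creating or updating a pool. The key below is made up purely for illustration; your provider's README lists the options it actually supports:
+
+```bash
+# "some_provider_option" is a hypothetical key; <POOL_ID> is a placeholder.
+garm-cli pool update <POOL_ID> --extra-specs '{"some_provider_option": true}'
+```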
+
+But I digress. Let's create a pool of runners using the `incus` provider, for the `gabriel-samfira/garm` repository we created above:
+
+```bash
+garm-cli pool add \
+ --enabled=false \
+ --repo be3a0673-56af-4395-9ebf-4521fea67567 \
+ --image "images:ubuntu/22.04/cloud" \
+ --flavor default \
+ --provider-name incus \
+ --min-idle-runners 1 \
+ --tags ubuntu,incus
++--------------------------+----------------------------------------+
+| FIELD | VALUE |
++--------------------------+----------------------------------------+
+| ID | 9daa34aa-a08a-4f29-a782-f54950d8521a |
+| Provider Name | incus |
+| Image | images:ubuntu/22.04/cloud |
+| Flavor | default |
+| OS Type | linux |
+| OS Architecture | amd64 |
+| Max Runners | 5 |
+| Min Idle Runners | 1 |
+| Runner Bootstrap Timeout | 20 |
+| Tags | ubuntu, incus |
+| Belongs to | gabriel-samfira/garm |
+| Level | repo |
+| Enabled | false |
+| Runner Prefix | garm |
+| Extra specs | |
+| GitHub Runner Group | |
++--------------------------+----------------------------------------+
+```
+
+Let's unpack the command and explain what happened above. We added a new pool of runners to GARM, that belongs to the `gabriel-samfira/garm` repository. We used the `incus` provider to create the pool, and we specified the `--image` and `--flavor` options to tell the provider what kind of runners we want to create. On Incus and LXD, the flavor maps to a `profile`. The profile can specify the resources allocated to a container or VM (RAM, CPUs, disk space, etc). The image maps to an incus or LXD image, as you would normally use when spinning up a new container or VM using the `incus launch` command.
+
+We also specified the `--min-idle-runners` option to tell GARM to always keep at least 1 runner idle in the pool. This is useful for repositories that have a lot of workflows that run often, and we want to make sure that we always have a runner ready to pick up a job.
+
+If we review the output of the command, we can see that the pool was created with a maximum number of 5 runners. This is just a default we can tweak when creating the pool, or later using the `garm-cli pool update` command. We can also see that the pool was created with a runner bootstrap timeout of 20 minutes. This timeout is important on providers where the instance may take a long time to spin up. For example, on Equinix Metal, some operating systems can take a few minutes to install and reboot. This timeout can be tweaked to a higher value to account for this.
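+
+For example, raising the bootstrap timeout on an existing pool would look something like this; the flag name is an assumption derived from the field name, so confirm it with `garm-cli pool update --help`:
+
+```bash
+# Give slow-provisioning providers 40 minutes before giving up on a runner.
+garm-cli pool update 9daa34aa-a08a-4f29-a782-f54950d8521a --runner-bootstrap-timeout 40
+```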
+
+The pool was created with the `--enabled` flag set to `false`, so the pool won't create any runners yet:
+
+```bash
+ubuntu@garm:~$ garm-cli runner list 9daa34aa-a08a-4f29-a782-f54950d8521a
++----+------+--------+---------------+---------+
+| NR | NAME | STATUS | RUNNER STATUS | POOL ID |
++----+------+--------+---------------+---------+
++----+------+--------+---------------+---------+
+```
+
+### Listing pools
+
+To list pools created for a repository you can run:
+
+```bash
+ubuntu@garm:~$ garm-cli pool list --repo=be3a0673-56af-4395-9ebf-4521fea67567
++--------------------------------------+---------------------------+---------+--------------+------------+-------+---------+---------------+
+| ID | IMAGE | FLAVOR | TAGS | BELONGS TO | LEVEL | ENABLED | RUNNER PREFIX |
++--------------------------------------+---------------------------+---------+--------------+------------+-------+---------+---------------+
+| 9daa34aa-a08a-4f29-a782-f54950d8521a | images:ubuntu/22.04/cloud | default | ubuntu incus | | | false | garm |
++--------------------------------------+---------------------------+---------+--------------+------------+-------+---------+---------------+
+```
+
+If you want to list pools for an organization or enterprise, you can use the `--org` or `--enterprise` options respectively.
+
+In the absence of the `--repo`, `--org` or `--enterprise` options, the command will list all pools in GARM, regardless of the entity they belong to.
+
+```bash
+ubuntu@garm:~/garm$ garm-cli pool list
++--------------------------------------+---------------------------+--------------+-----------------------------------------+------------------+-------+---------+---------------+----------+
+| ID | IMAGE | FLAVOR | TAGS | BELONGS TO | LEVEL | ENABLED | RUNNER PREFIX | PRIORITY |
++--------------------------------------+---------------------------+--------------+-----------------------------------------+------------------+-------+---------+---------------+----------+
+| 8935f6a6-f20f-4220-8fa9-9075e7bd7741 | windows_2022 | c3.small.x86 | self-hosted x64 Windows windows equinix | gsamfira/scripts | repo | false | garm | 0 |
++--------------------------------------+---------------------------+--------------+-----------------------------------------+------------------+-------+---------+---------------+----------+
+| 9233b3f5-2ccf-4689-8f86-a8a0d656dbeb | runner-upstream:latest | small | self-hosted x64 Linux k8s org | gsamfira | org | false | garm | 0 |
++--------------------------------------+---------------------------+--------------+-----------------------------------------+------------------+-------+---------+---------------+----------+
+```
+
+### Showing pool info
+
+You can get detailed information about a pool by running the following command:
+
+```bash
+ubuntu@garm:~$ garm-cli pool show 9daa34aa-a08a-4f29-a782-f54950d8521a
++--------------------------+----------------------------------------+
+| FIELD | VALUE |
++--------------------------+----------------------------------------+
+| ID | 9daa34aa-a08a-4f29-a782-f54950d8521a |
+| Provider Name | incus |
+| Image | images:ubuntu/22.04/cloud |
+| Flavor | default |
+| OS Type | linux |
+| OS Architecture | amd64 |
+| Max Runners | 5 |
+| Min Idle Runners | 1 |
+| Runner Bootstrap Timeout | 20 |
+| Tags | ubuntu, incus |
+| Belongs to | gabriel-samfira/garm |
+| Level | repo |
+| Enabled | false |
+| Runner Prefix | garm |
+| Extra specs | |
+| GitHub Runner Group | |
++--------------------------+----------------------------------------+
+```
+
+### Deleting a pool
+
+In order to delete a pool, you must first make sure there are no runners in the pool. To ensure this, we can first disable the pool so that no new runners are created, then remove the remaining runners or allow them to be used up by jobs. After that we can delete the pool.
+
+To disable a pool, you can use the following command:
+
+```bash
+ubuntu@garm:~$ garm-cli pool update 9daa34aa-a08a-4f29-a782-f54950d8521a --enabled=false
++--------------------------+----------------------------------------+
+| FIELD | VALUE |
++--------------------------+----------------------------------------+
+| ID | 9daa34aa-a08a-4f29-a782-f54950d8521a |
+| Provider Name | incus |
+| Image | images:ubuntu/22.04/cloud |
+| Flavor | default |
+| OS Type | linux |
+| OS Architecture | amd64 |
+| Max Runners | 5 |
+| Min Idle Runners | 1 |
+| Runner Bootstrap Timeout | 20 |
+| Tags | ubuntu, incus |
+| Belongs to | gabriel-samfira/garm |
+| Level | repo |
+| Enabled | false |
+| Runner Prefix | garm |
+| Extra specs | |
+| GitHub Runner Group | |
++--------------------------+----------------------------------------+
+```
+
+If there are no runners in the pool, you can then remove it:
+
+```bash
+ubuntu@garm:~$ garm-cli pool delete 9daa34aa-a08a-4f29-a782-f54950d8521a
+```
+
+### Update a pool
+
+You can update a pool by using the `garm-cli pool update` command. Nearly every aspect of a pool can be updated after it has been created. To demonstrate the command, we can enable the pool we created earlier:
+
+```bash
+ubuntu@garm:~$ garm-cli pool update 9daa34aa-a08a-4f29-a782-f54950d8521a --enabled=true
++--------------------------+----------------------------------------+
+| FIELD | VALUE |
++--------------------------+----------------------------------------+
+| ID | 9daa34aa-a08a-4f29-a782-f54950d8521a |
+| Provider Name | incus |
+| Image | images:ubuntu/22.04/cloud |
+| Flavor | default |
+| OS Type | linux |
+| OS Architecture | amd64 |
+| Max Runners | 5 |
+| Min Idle Runners | 1 |
+| Runner Bootstrap Timeout | 20 |
+| Tags | ubuntu, incus |
+| Belongs to | gabriel-samfira/garm |
+| Level | repo |
+| Enabled | true |
+| Runner Prefix | garm |
+| Extra specs | |
+| GitHub Runner Group | |
++--------------------------+----------------------------------------+
+```
+
+See `garm-cli pool update --help` for a list of settings that can be changed.
+
+Now that the pool is enabled, GARM will start creating runners for it. We can list the runners in the pool to see if any have been created:
+
+```bash
+ubuntu@garm:~$ garm-cli runner list 9daa34aa-a08a-4f29-a782-f54950d8521a
++----+-------------------+---------+---------------+--------------------------------------+
+| NR | NAME | STATUS | RUNNER STATUS | POOL ID |
++----+-------------------+---------+---------------+--------------------------------------+
+| 1 | garm-BFrp51VoVBCO | running | installing | 9daa34aa-a08a-4f29-a782-f54950d8521a |
++----+-------------------+---------+---------------+--------------------------------------+
+```
+
+We can see that a runner has been created and is currently being installed. If we check incus, we should see it there as well:
+
+```bash
+root@incus:~# incus list
++-------------------+---------+----------------------+-----------------------------------------------+-----------+-----------+
+| NAME | STATE | IPV4 | IPV6 | TYPE | SNAPSHOTS |
++-------------------+---------+----------------------+-----------------------------------------------+-----------+-----------+
+| garm-BFrp51VoVBCO | RUNNING | 10.23.120.217 (eth0) | fd42:e6ea:8b6c:6cb9:216:3eff:feaa:fabf (eth0) | CONTAINER | 0 |
++-------------------+---------+----------------------+-----------------------------------------------+-----------+-----------+
+```
+
+Awesome! This runner will be able to pick up jobs that match the labels we've set on the pool.
+
+## Runners
+
+### Listing runners
+
+You can list runners for a pool, for a repository, organization or enterprise, or for all of them. To list all runners, you can run:
+
+```bash
+ubuntu@garm:~$ garm-cli runner list
++----+---------------------+---------+---------------+--------------------------------------+
+| NR | NAME | STATUS | RUNNER STATUS | POOL ID |
++----+---------------------+---------+---------------+--------------------------------------+
+| 1 | garm-jZWtnxYHR6sG | running | idle | 8ec34c1f-b053-4a5d-80d6-40afdfb389f9 |
++----+---------------------+---------+---------------+--------------------------------------+
+| 2 | garm-2vtBBaT2dgIvFg | running | idle | c03c8101-3ae0-49d7-98b7-298a3689d24c |
++----+---------------------+---------+---------------+--------------------------------------+
+| 3 | garm-Ew7SzN6LVlEC | running | idle | 577627f4-1add-4a45-9c62-3a7cbdec8403 |
++----+---------------------+---------+---------------+--------------------------------------+
+| 4 | garm-BFrp51VoVBCO | running | idle | 9daa34aa-a08a-4f29-a782-f54950d8521a |
++----+---------------------+---------+---------------+--------------------------------------+
+```
+
+Have a look at the help command for the flags available to the `list` subcommand.
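+
+For example, scoping the listing to a single entity is assumed to use the same `--repo`, `--org` and `--enterprise` flags seen elsewhere in the CLI; verify with `garm-cli runner list --help`:
+
+```bash
+# List only the runners belonging to our repository.
+garm-cli runner list --repo be3a0673-56af-4395-9ebf-4521fea67567
+```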
+
+### Showing runner info
+
+You can get detailed information about a runner by running the following command:
+
+```bash
+ubuntu@garm:~$ garm-cli runner show garm-BFrp51VoVBCO
++-----------------+------------------------------------------------------------------------------------------------------+
+| FIELD | VALUE |
++-----------------+------------------------------------------------------------------------------------------------------+
+| ID | b332a811-0ebf-474c-9997-780124e22382 |
+| Provider ID | garm-BFrp51VoVBCO |
+| Name | garm-BFrp51VoVBCO |
+| OS Type | linux |
+| OS Architecture | amd64 |
+| OS Name | Ubuntu |
+| OS Version | 22.04 |
+| Status | running |
+| Runner Status | idle |
+| Pool ID | 9daa34aa-a08a-4f29-a782-f54950d8521a |
+| Addresses | 10.23.120.217 |
+| | fd42:e6ea:8b6c:6cb9:216:3eff:feaa:fabf |
+| Status Updates | 2024-02-11T23:39:54: downloading tools from https://github.com/actions/runner/releases/download/v2.3 |
+| | 12.0/actions-runner-linux-x64-2.312.0.tar.gz |
+| | 2024-02-11T23:40:04: extracting runner |
+| | 2024-02-11T23:40:07: installing dependencies |
+| | 2024-02-11T23:40:13: configuring runner |
+| | 2024-02-11T23:40:13: runner registration token was retrieved |
+| | 2024-02-11T23:40:19: runner successfully configured after 1 attempt(s) |
+| | 2024-02-11T23:40:20: installing runner service |
+| | 2024-02-11T23:40:20: starting service |
+| | 2024-02-11T23:40:21: runner successfully installed |
++-----------------+------------------------------------------------------------------------------------------------------+
+```
+
+### Deleting a runner
+
+You can delete a runner by running the following command:
+
+```bash
+garm-cli runner rm garm-BFrp51VoVBCO
+```
+
+Only idle runners can be removed. If a runner is executing a job, it cannot be removed. However, a runner that is currently running a job will be removed anyway when that job finishes. You can wait for the job to finish or you can cancel the job from the GitHub workflow page.
+
+In some cases, providers may error out when creating or deleting a runner. This can happen if the provider is misconfigured. To avoid situations in which GARM gets deadlocked trying to remove a runner from a provider that is in an error state, we can forcefully remove a runner. The `--force` flag will make GARM ignore any error returned by the provider when attempting to delete an instance:
+
+```bash
+garm-cli runner remove --force garm-BFrp51VoVBCO
+```
+
+Awesome! We've covered all the major parts of using GARM. This is all you need to have your workflows run on your self-hosted runners. Of course, each provider may have its own particularities, config options, extra specs and caveats (all of which should be documented in the provider README), but once added to the GARM config, creating a pool should be the same.
+
+## The debug-log command
+
+GARM outputs logs to standard out, log files and optionally to a websocket for easy debugging. This is just a convenience feature that allows you to stream logs to your terminal without having to log into the server. It's disabled by default, but if you enable it, you'll be able to run:
+
+```bash
+ubuntu@garm:~$ garm-cli debug-log
+time=2024-02-12T08:36:18.584Z level=INFO msg=access_log method=GET uri=/api/v1/ws user_agent=Go-http-client/1.1 ip=127.0.0.1:47260 code=200 bytes=0 request_time=447.445µs
+time=2024-02-12T08:36:31.251Z level=INFO msg=access_log method=GET uri=/api/v1/instances user_agent=Go-http-client/1.1 ip=127.0.0.1:58460 code=200 bytes=1410 request_time=656.184µs
+```
+
+This will stream logs to your terminal in real time. While this feature should be fairly secure, I encourage you to only expose it within networks you know are secure. This can be done by configuring a reverse proxy in front of GARM that only allows connections to the websocket endpoint from certain locations.
+
+## The debug-events command
+
+Starting with GARM v0.1.5, a new command has been added to the CLI that consumes database events recorded by GARM. Whenever something is updated in the database, a new event is generated. These events are generated by the database watcher and are also exported via a websocket endpoint. This websocket endpoint is meant to be consumed by applications that wish to integrate with GARM and want to avoid having to poll the API.
+
+This command is not meant to be used to integrate with GARM events; it is merely a debug tool that allows you to see which events are being generated by GARM. To use it, you can run:
+
+```bash
+garm-cli debug-events --filters='{"send-everything": true}'
+```
+
+This command will send all events to your CLI as they happen. You can also filter by entity or operation like so:
+
+```bash
+garm-cli debug-events --filters='{"filters": [{"entity-type": "instance", "operations": ["create", "delete"]}, {"entity-type": "pool"}, {"entity-type": "controller"}]}'
+```
+
+The payloads that get sent to your terminal are described in the [events](/doc/events.md) section, but the short version is that you get the operation type (create, update, delete), the entity type (instance, pool, repo, etc) and the JSON payload as you would normally get when fetching the entities through the API. Sensitive info like tokens or passwords is never returned.
+
+## Listing recorded jobs
+
+GARM will record any job that comes in and for which we have a pool configured. If we don't have a pool for a particular job, then that job is ignored. There is no point in recording jobs that we can't do anything about. It would just bloat the database for no reason.
+
+To view existing jobs, run the following command:
+
+```bash
+garm-cli job list
+```
+
+If you've just set up GARM and have not yet created a pool or triggered a job, this will be empty. If you've configured everything and still don't receive jobs, you'll need to make sure that your URLs (discussed at the beginning of this article) are correct. GitHub needs to be able to reach the webhook URL that our GARM instance listens on.
\ No newline at end of file
diff --git a/doc/webhooks.md b/doc/webhooks.md
index d2690bfb..ab29937b 100644
--- a/doc/webhooks.md
+++ b/doc/webhooks.md
@@ -34,7 +34,7 @@ Make a note of that secret, as you'll need it later when you define the repo/org

-While you can use `http` for your webhook, I highly recommend you set up a proper x509 certificate for your GARM server and use `https` instead. If you choose `https`, GitHub will present you with an aditional option to configure the SSL certificate verification.
+While you can use `http` for your webhook, I highly recommend you set up a proper x509 certificate for your GARM server and use `https` instead. If you choose `https`, GitHub will present you with an additional option to configure the SSL certificate verification.

diff --git a/go.mod b/go.mod
index 150c2b3b..1ef71c9d 100644
--- a/go.mod
+++ b/go.mod
@@ -1,109 +1,88 @@
module github.com/cloudbase/garm
-go 1.20
+go 1.24.6
require (
- github.com/BurntSushi/toml v1.2.1
- github.com/cloudbase/garm-provider-common v0.0.0-20230724114054-7aa0a3dfbce0
- github.com/go-openapi/errors v0.20.4
- github.com/go-openapi/runtime v0.26.0
- github.com/go-openapi/strfmt v0.21.7
- github.com/golang-jwt/jwt v3.2.2+incompatible
- github.com/google/go-github/v53 v53.2.0
- github.com/google/uuid v1.3.0
- github.com/gorilla/handlers v1.5.1
- github.com/gorilla/mux v1.8.0
- github.com/gorilla/websocket v1.5.0
- github.com/jedib0t/go-pretty/v6 v6.4.6
- github.com/juju/clock v1.0.3
- github.com/juju/retry v1.0.0
- github.com/lxc/lxd v0.0.0-20230325180147-8d608287b0ce
+ github.com/BurntSushi/toml v1.5.0
+ github.com/bradleyfalzon/ghinstallation/v2 v2.16.0
+ github.com/cloudbase/garm-provider-common v0.1.7
+ github.com/felixge/httpsnoop v1.0.4
+ github.com/go-openapi/errors v0.22.2
+ github.com/go-openapi/runtime v0.28.0
+ github.com/go-openapi/strfmt v0.23.0
+ github.com/go-openapi/swag v0.23.1
+ github.com/golang-jwt/jwt/v5 v5.3.0
+ github.com/google/go-github/v72 v72.0.0
+ github.com/google/uuid v1.6.0
+ github.com/gorilla/handlers v1.5.2
+ github.com/gorilla/mux v1.8.1
+ github.com/gorilla/websocket v1.5.4-0.20240702125206-a62d9d2a8413
+ github.com/jedib0t/go-pretty/v6 v6.6.8
github.com/manifoldco/promptui v0.9.0
github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354
- github.com/pkg/errors v0.9.1
- github.com/prometheus/client_golang v1.14.0
- github.com/spf13/cobra v1.6.1
- github.com/stretchr/testify v1.8.2
- golang.org/x/crypto v0.7.0
- golang.org/x/oauth2 v0.8.0
- golang.org/x/sync v0.1.0
+ github.com/prometheus/client_golang v1.23.0
+ github.com/spf13/cobra v1.9.1
+ github.com/stretchr/testify v1.11.0
+ golang.org/x/crypto v0.41.0
+ golang.org/x/mod v0.27.0
+ golang.org/x/oauth2 v0.30.0
+ golang.org/x/sync v0.16.0
gopkg.in/DATA-DOG/go-sqlmock.v1 v1.3.0
gopkg.in/natefinch/lumberjack.v2 v2.2.1
- gorm.io/datatypes v1.1.1
- gorm.io/driver/mysql v1.4.7
- gorm.io/driver/sqlite v1.4.4
- gorm.io/gorm v1.24.6
+ gorm.io/datatypes v1.2.6
+ gorm.io/driver/mysql v1.6.0
+ gorm.io/driver/sqlite v1.6.0
+ gorm.io/gorm v1.30.1
)
require (
- github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8 // indirect
+ filippo.io/edwards25519 v1.1.0 // indirect
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
github.com/beorn7/perks v1.0.1 // indirect
- github.com/cespare/xxhash/v2 v2.2.0 // indirect
+ github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/chzyer/readline v1.5.1 // indirect
- github.com/cloudflare/circl v1.3.3 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
- github.com/felixge/httpsnoop v1.0.3 // indirect
- github.com/flosch/pongo2 v0.0.0-20200913210552-0d938eb266f3 // indirect
- github.com/frankban/quicktest v1.14.3 // indirect
- github.com/go-logr/logr v1.2.3 // indirect
+ github.com/go-logr/logr v1.4.3 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
- github.com/go-macaroon-bakery/macaroon-bakery/v3 v3.0.1 // indirect
- github.com/go-macaroon-bakery/macaroonpb v1.0.0 // indirect
- github.com/go-openapi/analysis v0.21.4 // indirect
- github.com/go-openapi/jsonpointer v0.19.5 // indirect
- github.com/go-openapi/jsonreference v0.20.0 // indirect
- github.com/go-openapi/loads v0.21.2 // indirect
- github.com/go-openapi/spec v0.20.8 // indirect
- github.com/go-openapi/swag v0.22.4 // indirect
- github.com/go-openapi/validate v0.22.1 // indirect
- github.com/go-sql-driver/mysql v1.7.0 // indirect
- github.com/golang/protobuf v1.5.3 // indirect
+ github.com/go-openapi/analysis v0.23.0 // indirect
+ github.com/go-openapi/jsonpointer v0.21.2 // indirect
+ github.com/go-openapi/jsonreference v0.21.0 // indirect
+ github.com/go-openapi/loads v0.22.0 // indirect
+ github.com/go-openapi/spec v0.21.0 // indirect
+ github.com/go-openapi/validate v0.24.0 // indirect
+ github.com/go-sql-driver/mysql v1.9.3 // indirect
+ github.com/golang-jwt/jwt/v4 v4.5.2 // indirect
github.com/google/go-querystring v1.1.0 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/jinzhu/inflection v1.0.0 // indirect
github.com/jinzhu/now v1.1.5 // indirect
github.com/josharian/intern v1.0.0 // indirect
- github.com/juju/errors v1.0.0 // indirect
- github.com/juju/testing v1.0.2 // indirect
- github.com/juju/webbrowser v1.0.0 // indirect
- github.com/julienschmidt/httprouter v1.3.0 // indirect
- github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
- github.com/kr/fs v0.1.0 // indirect
- github.com/kr/pretty v0.3.1 // indirect
- github.com/mailru/easyjson v0.7.7 // indirect
- github.com/mattn/go-isatty v0.0.19 // indirect
- github.com/mattn/go-runewidth v0.0.14 // indirect
- github.com/mattn/go-sqlite3 v1.14.16 // indirect
- github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
+ github.com/mailru/easyjson v0.9.0 // indirect
+ github.com/mattn/go-isatty v0.0.20 // indirect
+ github.com/mattn/go-runewidth v0.0.16 // indirect
+ github.com/mattn/go-sqlite3 v1.14.31 // indirect
+ github.com/minio/sio v0.4.1 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
+ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/oklog/ulid v1.3.1 // indirect
github.com/opentracing/opentracing-go v1.2.0 // indirect
- github.com/pborman/uuid v1.2.1 // indirect
- github.com/pkg/sftp v1.13.5 // indirect
- github.com/pkg/xattr v0.4.9 // indirect
+ github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
- github.com/prometheus/client_model v0.3.0 // indirect
- github.com/prometheus/common v0.42.0 // indirect
- github.com/prometheus/procfs v0.9.0 // indirect
- github.com/rivo/uniseg v0.4.4 // indirect
- github.com/robfig/cron/v3 v3.0.1 // indirect
- github.com/rogpeppe/fastuuid v1.2.0 // indirect
- github.com/sirupsen/logrus v1.9.0 // indirect
- github.com/spf13/pflag v1.0.5 // indirect
- github.com/stretchr/objx v0.5.0 // indirect
+ github.com/prometheus/client_model v0.6.2 // indirect
+ github.com/prometheus/common v0.65.0 // indirect
+ github.com/prometheus/procfs v0.16.1 // indirect
+ github.com/rivo/uniseg v0.4.7 // indirect
+ github.com/spf13/pflag v1.0.7 // indirect
+ github.com/stretchr/objx v0.5.2 // indirect
github.com/teris-io/shortid v0.0.0-20220617161101-71ec9f2aa569 // indirect
- go.mongodb.org/mongo-driver v1.11.3 // indirect
- go.opentelemetry.io/otel v1.14.0 // indirect
- go.opentelemetry.io/otel/trace v1.14.0 // indirect
- golang.org/x/net v0.10.0 // indirect
- golang.org/x/sys v0.8.0 // indirect
- golang.org/x/term v0.8.0 // indirect
- google.golang.org/appengine v1.6.7 // indirect
- google.golang.org/protobuf v1.30.0 // indirect
- gopkg.in/errgo.v1 v1.0.1 // indirect
- gopkg.in/httprequest.v1 v1.2.1 // indirect
- gopkg.in/macaroon.v2 v2.1.0 // indirect
- gopkg.in/yaml.v2 v2.4.0 // indirect
+ go.mongodb.org/mongo-driver v1.17.4 // indirect
+ go.opentelemetry.io/auto/sdk v1.1.0 // indirect
+ go.opentelemetry.io/otel v1.36.0 // indirect
+ go.opentelemetry.io/otel/metric v1.36.0 // indirect
+ go.opentelemetry.io/otel/trace v1.36.0 // indirect
+ golang.org/x/net v0.42.0 // indirect
+ golang.org/x/sys v0.35.0 // indirect
+ golang.org/x/text v0.28.0 // indirect
+ google.golang.org/protobuf v1.36.6 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)
diff --git a/go.sum b/go.sum
index eadba3dd..ef3ada85 100644
--- a/go.sum
+++ b/go.sum
@@ -1,20 +1,15 @@
-cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak=
-github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
-github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8 h1:wPbRQzjjwFc0ih8puEVAOFGELsn1zoIIYdxvML7mDxA=
-github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8/go.mod h1:I0gYDMZ6Z5GRU7l58bNFSkPTFN6Yl12dsUlAZ8xy98g=
-github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
-github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
-github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
+filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
+filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
+github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg=
+github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
-github.com/bwesterb/go-ristretto v1.2.0/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
-github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
-github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/bradleyfalzon/ghinstallation/v2 v2.16.0 h1:B91r9bHtXp/+XRgS5aZm6ZzTdz3ahgJYmkt4xZkgDz8=
+github.com/bradleyfalzon/ghinstallation/v2 v2.16.0/go.mod h1:OeVe5ggFzoBnmgitZe/A+BqGOnv1DvU/0uiLQi1wutM=
+github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
+github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/logex v1.2.1 h1:XHDu3E6q+gdHgsdTPH6ImJMIp436vR6MPtH8gP05QzM=
github.com/chzyer/logex v1.2.1/go.mod h1:JLbx6lG2kDbNRFnfkgvh4eRJRPX1QCoOIWomwysCBrQ=
@@ -24,476 +19,199 @@ github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObk
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/chzyer/test v1.0.0 h1:p3BQDXSxOhOG0P9z6/hGnII4LGiEPOYBhs8asl/fC04=
github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8=
-github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
-github.com/cloudbase/garm-provider-common v0.0.0-20230724114054-7aa0a3dfbce0 h1:5ScMXea/ZIcUbw1aXAgN8xTqSG84AOf5Maf5hBC82wQ=
-github.com/cloudbase/garm-provider-common v0.0.0-20230724114054-7aa0a3dfbce0/go.mod h1:RKzgL0MXkNeGfloQpE2swz/y4LWJr5+2Wd45bSXPB0k=
-github.com/cloudflare/circl v1.1.0/go.mod h1:prBCrKB9DV4poKZY1l9zBXg2QJY7mvgRvtMxxK7fi4I=
-github.com/cloudflare/circl v1.3.3 h1:fE/Qz0QdIGqeWfnwq0RE0R7MI51s0M2E4Ga9kq5AEMs=
-github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA=
-github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
-github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/cloudbase/garm-provider-common v0.1.7 h1:V0upTejFRDiyFBO4hhkMWmPtmRTguyOt/4i1u9/rfbg=
+github.com/cloudbase/garm-provider-common v0.1.7/go.mod h1:2O51WbcfqRx5fDHyyJgIFq7KdTZZnefsM+aoOchyleU=
+github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
-github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk=
-github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
-github.com/flosch/pongo2 v0.0.0-20200913210552-0d938eb266f3 h1:fmFk0Wt3bBxxwZnu48jqMdaOR/IZ4vdtJFuaFV8MpIE=
-github.com/flosch/pongo2 v0.0.0-20200913210552-0d938eb266f3/go.mod h1:bJWSKrZyQvfTnb2OudyUjurSG4/edverV7n82+K3JiM=
-github.com/frankban/quicktest v1.0.0/go.mod h1:R98jIehRai+d1/3Hv2//jOVCTJhW1VBavT6B6CuGq2k=
-github.com/frankban/quicktest v1.2.2/go.mod h1:Qh/WofXFeiAFII1aEBu529AtJo6Zg2VHscnEsbBnJ20=
-github.com/frankban/quicktest v1.7.2/go.mod h1:jaStnuzAqU1AJdCO0l53JDCJrVDKcS03DbaAcR7Ks/o=
-github.com/frankban/quicktest v1.10.0/go.mod h1:ui7WezCLWMWxVWr1GETZY3smRy0G4KWq9vcPtJmFl7Y=
-github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE=
-github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps=
+github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
+github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
-github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
+github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
-github.com/go-macaroon-bakery/macaroon-bakery/v3 v3.0.1 h1:uvQJoKTHrFFu8zxoaopNKedRzwdy3+8H72we4T/5cGs=
-github.com/go-macaroon-bakery/macaroon-bakery/v3 v3.0.1/go.mod h1:H59IYeChwvD1po3dhGUPvq5na+4NVD7SJlbhGKvslr0=
-github.com/go-macaroon-bakery/macaroonpb v1.0.0 h1:It9exBaRMZ9iix1iJ6gwzfwsDE6ExNuwtAJ9e09v6XE=
-github.com/go-macaroon-bakery/macaroonpb v1.0.0/go.mod h1:UzrGOcbiwTXISFP2XDLDPjfhMINZa+fX/7A2lMd31zc=
-github.com/go-openapi/analysis v0.21.2/go.mod h1:HZwRk4RRisyG8vx2Oe6aqeSQcoxRp47Xkp3+K6q+LdY=
-github.com/go-openapi/analysis v0.21.4 h1:ZDFLvSNxpDaomuCueM0BlSXxpANBlFYiBvr+GXrvIHc=
-github.com/go-openapi/analysis v0.21.4/go.mod h1:4zQ35W4neeZTqh3ol0rv/O8JBbka9QyAgQRPp9y3pfo=
-github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
-github.com/go-openapi/errors v0.19.9/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
-github.com/go-openapi/errors v0.20.2/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
-github.com/go-openapi/errors v0.20.4 h1:unTcVm6PispJsMECE3zWgvG4xTiKda1LIR5rCRWLG6M=
-github.com/go-openapi/errors v0.20.4/go.mod h1:Z3FlZ4I8jEGxjUK+bugx3on2mIAk4txuAOhlsB1FSgk=
-github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
-github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY=
-github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
-github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns=
-github.com/go-openapi/jsonreference v0.20.0 h1:MYlu0sBgChmCfJxxUKZ8g1cPWFOB37YSZqewK7OKeyA=
-github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo=
-github.com/go-openapi/loads v0.21.1/go.mod h1:/DtAMXXneXFjbQMGEtbamCZb+4x7eGwkvZCvBmwUG+g=
-github.com/go-openapi/loads v0.21.2 h1:r2a/xFIYeZ4Qd2TnGpWDIQNcP80dIaZgf704za8enro=
-github.com/go-openapi/loads v0.21.2/go.mod h1:Jq58Os6SSGz0rzh62ptiu8Z31I+OTHqmULx5e/gJbNw=
-github.com/go-openapi/runtime v0.26.0 h1:HYOFtG00FM1UvqrcxbEJg/SwvDRvYLQKGhw2zaQjTcc=
-github.com/go-openapi/runtime v0.26.0/go.mod h1:QgRGeZwrUcSHdeh4Ka9Glvo0ug1LC5WyE+EV88plZrQ=
-github.com/go-openapi/spec v0.20.4/go.mod h1:faYFR1CvsJZ0mNsmsphTMSoRrNV3TEDoAM7FOEWeq8I=
-github.com/go-openapi/spec v0.20.6/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA=
-github.com/go-openapi/spec v0.20.8 h1:ubHmXNY3FCIOinT8RNrrPfGc9t7I1qhPtdOGoG2AxRU=
-github.com/go-openapi/spec v0.20.8/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA=
-github.com/go-openapi/strfmt v0.21.0/go.mod h1:ZRQ409bWMj+SOgXofQAGTIo2Ebu72Gs+WaRADcS5iNg=
-github.com/go-openapi/strfmt v0.21.1/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k=
-github.com/go-openapi/strfmt v0.21.3/go.mod h1:k+RzNO0Da+k3FrrynSNN8F7n/peCmQQqbbXjtDfvmGg=
-github.com/go-openapi/strfmt v0.21.7 h1:rspiXgNWgeUzhjo1YU01do6qsahtJNByjLVbPLNHb8k=
-github.com/go-openapi/strfmt v0.21.7/go.mod h1:adeGTkxE44sPyLk0JV235VQAO/ZXUr8KAzYjclFs3ew=
-github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
-github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
-github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
-github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU=
-github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
-github.com/go-openapi/validate v0.22.1 h1:G+c2ub6q47kfX1sOBLwIQwzBVt8qmOAARyo/9Fqs9NU=
-github.com/go-openapi/validate v0.22.1/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg=
-github.com/go-sql-driver/mysql v1.7.0 h1:ueSltNNllEqE3qcWBTD0iQd3IpL/6U+mJxLkazJ7YPc=
-github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
-github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
-github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0=
-github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY=
-github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg=
-github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI=
-github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI=
-github.com/gobuffalo/flect v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs=
-github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI=
-github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI=
-github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk=
-github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28=
-github.com/gobuffalo/genny v0.1.0/go.mod h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo=
-github.com/gobuffalo/genny v0.1.1/go.mod h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk=
-github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw=
-github.com/gobuffalo/gogen v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360=
-github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg=
-github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE=
-github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8=
-github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc=
-github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc=
-github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4=
-github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4=
-github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ=
-github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0=
-github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw=
-github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY=
-github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I=
+github.com/go-openapi/analysis v0.23.0 h1:aGday7OWupfMs+LbmLZG4k0MYXIANxcuBTYUC03zFCU=
+github.com/go-openapi/analysis v0.23.0/go.mod h1:9mz9ZWaSlV8TvjQHLl2mUW2PbZtemkE8yA5v22ohupo=
+github.com/go-openapi/errors v0.22.2 h1:rdxhzcBUazEcGccKqbY1Y7NS8FDcMyIRr0934jrYnZg=
+github.com/go-openapi/errors v0.22.2/go.mod h1:+n/5UdIqdVnLIJ6Q9Se8HNGUXYaY6CN8ImWzfi/Gzp0=
+github.com/go-openapi/jsonpointer v0.21.2 h1:AqQaNADVwq/VnkCmQg6ogE+M3FOsKTytwges0JdwVuA=
+github.com/go-openapi/jsonpointer v0.21.2/go.mod h1:50I1STOfbY1ycR8jGz8DaMeLCdXiI6aDteEdRNNzpdk=
+github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ=
+github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4=
+github.com/go-openapi/loads v0.22.0 h1:ECPGd4jX1U6NApCGG1We+uEozOAvXvJSF4nnwHZ8Aco=
+github.com/go-openapi/loads v0.22.0/go.mod h1:yLsaTCS92mnSAZX5WWoxszLj0u+Ojl+Zs5Stn1oF+rs=
+github.com/go-openapi/runtime v0.28.0 h1:gpPPmWSNGo214l6n8hzdXYhPuJcGtziTOgUpvsFWGIQ=
+github.com/go-openapi/runtime v0.28.0/go.mod h1:QN7OzcS+XuYmkQLw05akXk0jRH/eZ3kb18+1KwW9gyc=
+github.com/go-openapi/spec v0.21.0 h1:LTVzPc3p/RzRnkQqLRndbAzjY0d0BCL72A6j3CdL9ZY=
+github.com/go-openapi/spec v0.21.0/go.mod h1:78u6VdPw81XU44qEWGhtr982gJ5BWg2c0I5XwVMotYk=
+github.com/go-openapi/strfmt v0.23.0 h1:nlUS6BCqcnAk0pyhi9Y+kdDVZdZMHfEKQiS4HaMgO/c=
+github.com/go-openapi/strfmt v0.23.0/go.mod h1:NrtIpfKtWIygRkKVsxh7XQMDQW5HKQl6S5ik2elW+K4=
+github.com/go-openapi/swag v0.23.1 h1:lpsStH0n2ittzTnbaSloVZLuB5+fvSY/+hnagBjSNZU=
+github.com/go-openapi/swag v0.23.1/go.mod h1:STZs8TbRvEQQKUA+JZNAm3EWlgaOBGpyFDqQnDHMef0=
+github.com/go-openapi/validate v0.24.0 h1:LdfDKwNbpB6Vn40xhTdNZAnfLECL81w+VX3BumrGD58=
+github.com/go-openapi/validate v0.24.0/go.mod h1:iyeX1sEufmv3nPbBdX3ieNviWnOZaJ1+zquzJEf2BAQ=
+github.com/go-sql-driver/mysql v1.9.3 h1:U/N249h2WzJ3Ukj8SowVFjdtZKfu9vlLZxjPXV1aweo=
+github.com/go-sql-driver/mysql v1.9.3/go.mod h1:qn46aNg1333BRMNU69Lq93t8du/dwxI64Gl8i5p1WMU=
+github.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXeUI=
+github.com/golang-jwt/jwt/v4 v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
+github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo=
+github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE=
github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9 h1:au07oEsX2xN0ktxqI+Sida1w446QrXBRJ0nee3SNZlA=
+github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
github.com/golang-sql/sqlexp v0.1.0 h1:ZCD6MBpcuOVfGVqsEmY5/4FtYiKz6tSyUv9LPEDei6A=
-github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
-github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
-github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
-github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
-github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
-github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
-github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
-github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
-github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
-github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
-github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
-github.com/google/go-cmp v0.2.1-0.20190312032427-6f77996f0c42/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/golang-sql/sqlexp v0.1.0/go.mod h1:J4ad9Vo8ZCWQ2GMrC4UCQy1JpCbwU9m3EOqtpKwwwHI=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
-github.com/google/go-github/v53 v53.2.0 h1:wvz3FyF53v4BK+AsnvCmeNhf8AkTaeh2SoYu/XUvTtI=
-github.com/google/go-github/v53 v53.2.0/go.mod h1:XhFRObz+m/l+UCm9b7KSIC3lT3NWSXGt7mOsAWEloao=
+github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
+github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
+github.com/google/go-github/v72 v72.0.0 h1:FcIO37BLoVPBO9igQQ6tStsv2asG4IPcYFi655PPvBM=
+github.com/google/go-github/v72 v72.0.0/go.mod h1:WWtw8GMRiL62mvIquf1kO3onRHeWWKmK01qdCY8c5fg=
github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
-github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH4=
-github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q=
-github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
-github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
-github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
-github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
-github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
-github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
+github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
+github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/gorilla/handlers v1.5.2 h1:cLTUSsNkgcwhgRqvCNmdbRWG0A3N4F+M2nWKdScwyEE=
+github.com/gorilla/handlers v1.5.2/go.mod h1:dX+xVpaxdSw+q0Qek8SSsl3dfMk3jNddUkMzo0GtH0w=
+github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
+github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
+github.com/gorilla/websocket v1.5.4-0.20240702125206-a62d9d2a8413 h1:0Zn/h+BUQg6QHkybGvjFD7BnIbjjz3oWUObacn//1Go=
+github.com/gorilla/websocket v1.5.4-0.20240702125206-a62d9d2a8413/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
-github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8=
-github.com/jackc/pgconn v1.13.0 h1:3L1XMNV2Zvca/8BYhzcRFS70Lr0WlDg16Di6SFGAbys=
-github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE=
github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
-github.com/jackc/pgproto3/v2 v2.3.1 h1:nwj7qwf0S+Q7ISFfBndqeLwSwxs+4DPsbRFjECT1Y4Y=
-github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b h1:C8S2+VttkHFdOOCXJe+YGfa4vHYwlt4Zx+IVXQ97jYg=
-github.com/jackc/pgtype v1.12.0 h1:Dlq8Qvcch7kiehm8wPGIW0W3KsCCHJnRacKW0UM8n5w=
-github.com/jackc/pgx/v4 v4.17.2 h1:0Ut0rpeKwvIVbMQ1KbMBU4h6wxehBI535LK6Flheh8E=
-github.com/jedib0t/go-pretty/v6 v6.4.6 h1:v6aG9h6Uby3IusSSEjHaZNXpHFhzqMmjXcPq1Rjl9Jw=
-github.com/jedib0t/go-pretty/v6 v6.4.6/go.mod h1:Ndk3ase2CkQbXLLNf5QDHoYb6J9WtVfmHZu9n8rk2xs=
+github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
+github.com/jackc/pgservicefile v0.0.0-20231201235250-de7065d80cb9 h1:L0QtFUgDarD7Fpv9jeVMgy/+Ec0mtnmYuImjTz6dtDA=
+github.com/jackc/pgservicefile v0.0.0-20231201235250-de7065d80cb9/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
+github.com/jackc/pgx/v5 v5.5.5 h1:amBjrZVmksIdNjxGW/IiIMzxMKZFelXbUoPNb+8sjQw=
+github.com/jackc/pgx/v5 v5.5.5/go.mod h1:ez9gk+OAat140fv9ErkZDYFWmXLfV+++K0uAOiwgm1A=
+github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk=
+github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
+github.com/jedib0t/go-pretty/v6 v6.6.8 h1:JnnzQeRz2bACBobIaa/r+nqjvws4yEhcmaZ4n1QzsEc=
+github.com/jedib0t/go-pretty/v6 v6.6.8/go.mod h1:YwC5CE4fJ1HFUDeivSV1r//AmANFHyqczZk+U6BDALU=
github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
-github.com/jinzhu/now v1.1.4/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
-github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
-github.com/juju/clock v1.0.3 h1:yJHIsWXeU8j3QcBdiess09SzfiXRRrsjKPn2whnMeds=
-github.com/juju/clock v1.0.3/go.mod h1:HIBvJ8kiV/n7UHwKuCkdYL4l/MDECztHR2sAvWDxxf0=
-github.com/juju/errors v1.0.0 h1:yiq7kjCLll1BiaRuNY53MGI0+EQ3rF6GB+wvboZDefM=
-github.com/juju/errors v1.0.0/go.mod h1:B5x9thDqx0wIMH3+aLIMP9HjItInYWObRovoCFM5Qe8=
-github.com/juju/loggo v1.0.0 h1:Y6ZMQOGR9Aj3BGkiWx7HBbIx6zNwNkxhVNOHU2i1bl0=
-github.com/juju/qthttptest v0.1.1/go.mod h1:aTlAv8TYaflIiTDIQYzxnl1QdPjAg8Q8qJMErpKy6A4=
-github.com/juju/qthttptest v0.1.3 h1:M0HdpwsK/UTHRGRcIw5zvh5z+QOgdqyK+ecDMN+swwM=
-github.com/juju/retry v1.0.0 h1:Tb1hFdDSPGLH/BGdYQOF7utQ9lA0ouVJX2imqgJK6tk=
-github.com/juju/retry v1.0.0/go.mod h1:SssN1eYeK3A2qjnFGTiVMbdzGJ2BfluaJblJXvuvgqA=
-github.com/juju/testing v1.0.2 h1:OR90RqCd9CJONxXamZAjLknpZdtqDyxqW8IwCbgw3i4=
-github.com/juju/testing v1.0.2/go.mod h1:h3Vd2rzB57KrdsBEy6R7bmSKPzP76BnNavt7i8PerwQ=
-github.com/juju/utils/v3 v3.0.0 h1:Gg3n63mGPbBuoXCo+EPJuMi44hGZfloI8nlCIebHu2Q=
-github.com/juju/webbrowser v1.0.0 h1:JLdmbFtCGY6Qf2jmS6bVaenJFGIFkdF1/BjUm76af78=
-github.com/juju/webbrowser v1.0.0/go.mod h1:RwVlbBcF91Q4vS+iwlkJ6bZTE3EwlrjbYlM3WMVD6Bc=
-github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U=
-github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
-github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4=
-github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA=
-github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs=
-github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
-github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
-github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8=
-github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
-github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
-github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
+github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
+github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
-github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
-github.com/lxc/lxd v0.0.0-20230325180147-8d608287b0ce h1:3zb1HRvOAHOMZ8VGTDEBkKpCUVlF28zalZcb7RFjMnE=
-github.com/lxc/lxd v0.0.0-20230325180147-8d608287b0ce/go.mod h1:JJ1ShHzaOzMzU0B5TNcdI9+vq8Y45ijVeNYxE1wJ8zM=
-github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
-github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
-github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
+github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
+github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
+github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4=
+github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU=
github.com/manifoldco/promptui v0.9.0 h1:3V4HzJk1TtXW1MTZMP7mdlwbBpIinw3HztaIlYthEiA=
github.com/manifoldco/promptui v0.9.0/go.mod h1:ka04sppxSGFAtxX0qhlYQjISsg9mR4GWtQEhdbn6Pgg=
-github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE=
-github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0=
-github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
-github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
-github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
-github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU=
-github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
-github.com/mattn/go-sqlite3 v1.14.15/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
-github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y=
-github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
-github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
-github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
-github.com/microsoft/go-mssqldb v0.17.0 h1:Fto83dMZPnYv1Zwx5vHHxpNraeEaUlQ/hhHLgZiaenE=
-github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
-github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
+github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
+github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
+github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
+github.com/mattn/go-sqlite3 v1.14.31 h1:ldt6ghyPJsokUIlksH63gWZkG6qVGeEAu4zLeS4aVZM=
+github.com/mattn/go-sqlite3 v1.14.31/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
+github.com/microsoft/go-mssqldb v1.7.2 h1:CHkFJiObW7ItKTJfHo1QX7QBBD1iV+mn1eOyRP3b/PA=
+github.com/microsoft/go-mssqldb v1.7.2/go.mod h1:kOvZKUdrhhFQmxLZqbwUV0rHkNkZpthMITIb2Ko1IoA=
+github.com/minio/sio v0.4.1 h1:EMe3YBC1nf+sRQia65Rutxi+Z554XPV0dt8BIBA+a/0=
+github.com/minio/sio v0.4.1/go.mod h1:oBSjJeGbBdRMZZwna07sX9EFzZy+ywu5aofRiV1g79I=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
-github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 h1:4kuARK6Y6FxaNu/BnU2OAaLF86eTVhP2hjTB6iMvItA=
github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354/go.mod h1:KSVJerMDfblTH7p5MZaTt+8zaT2iEk3AkVb9PQdZuE8=
-github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
-github.com/pborman/uuid v1.2.1 h1:+ZZIw58t/ozdjRaXh/3awHfmWRbzYxJoAdNJxe/3pvw=
-github.com/pborman/uuid v1.2.1/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
-github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE=
-github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
-github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/profile v1.6.0/go.mod h1:qBsxPvzyUincmltOk6iyRVxHYg4adc0OFOv72ZdLa18=
-github.com/pkg/sftp v1.13.5 h1:a3RLUqkyjYRtBTZJZ1VRrKbN3zhuPLlUc3sphVz81go=
-github.com/pkg/sftp v1.13.5/go.mod h1:wHDZ0IZX6JcBYRK1TH9bcVq8G7TLpVHYIGJRFnmPfxg=
-github.com/pkg/xattr v0.4.9 h1:5883YPCtkSd8LFbs13nXplj9g9tlrwoJRjgpgMu1/fE=
-github.com/pkg/xattr v0.4.9/go.mod h1:di8WF84zAKk8jzR1UBTEWh9AUlIZZ7M/JNt8e9B6ktU=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
-github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
-github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
-github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
-github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM=
-github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc=
-github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI=
-github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY=
+github.com/prometheus/client_golang v1.23.0 h1:ust4zpdl9r4trLY/gSjlm07PuiBq2ynaXXlptpfy8Uc=
+github.com/prometheus/client_golang v1.23.0/go.mod h1:i/o0R9ByOnHX0McrTMTyhYvKE4haaf2mW08I+jGAjEE=
+github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
+github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
+github.com/prometheus/common v0.65.0 h1:QDwzd+G1twt//Kwj/Ww6E9FQq1iVMmODnILtW1t2VzE=
+github.com/prometheus/common v0.65.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8=
+github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
+github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
-github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
-github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
-github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
-github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
-github.com/rogpeppe/fastuuid v1.2.0 h1:Ppwyp6VYCF1nvBTXL3trRso7mXMlRrw9ooo375wvi2s=
-github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
-github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
-github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
-github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
+github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
+github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
+github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
+github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
-github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
-github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
-github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
-github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
-github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
-github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA=
-github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY=
-github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
-github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
-github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo=
+github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0=
+github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/pflag v1.0.7 h1:vN6T9TfwStFPFM5XzjsvmzZkLuaLX+HS+0SeFLRgU6M=
+github.com/spf13/pflag v1.0.7/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
-github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
-github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
+github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.1.4/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
-github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
-github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.7.4/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
-github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
-github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
-github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/stretchr/testify v1.11.0 h1:ib4sjIrwZKxE5u/Japgo/7SJV3PvgjGiRNAvTVGqQl8=
+github.com/stretchr/testify v1.11.0/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/teris-io/shortid v0.0.0-20220617161101-71ec9f2aa569 h1:xzABM9let0HLLqFypcxvLmlvEciCHL7+Lv+4vwZqecI=
github.com/teris-io/shortid v0.0.0-20220617161101-71ec9f2aa569/go.mod h1:2Ly+NIftZN4de9zRmENdYbvPQeaVIYKWpLFStLFEBgI=
-github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4=
-github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
-github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
-github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs=
-github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g=
-github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM=
-github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8=
-github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA=
-github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg=
-go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng=
-go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8=
-go.mongodb.org/mongo-driver v1.11.3 h1:Ql6K6qYHEzB6xvu4+AU0BoRoqf9vFPcc4o7MUIdPW8Y=
-go.mongodb.org/mongo-driver v1.11.3/go.mod h1:PTSz5yu21bkT/wXpkS7WR5f0ddqw5quethTUn9WM+2g=
-go.opentelemetry.io/otel v1.14.0 h1:/79Huy8wbf5DnIPhemGB+zEPVwnN6fuQybr/SRXa6hM=
-go.opentelemetry.io/otel v1.14.0/go.mod h1:o4buv+dJzx8rohcUeRmWUZhqupFvzWis188WlggnNeU=
-go.opentelemetry.io/otel/sdk v1.14.0 h1:PDCppFRDq8A1jL9v6KMI6dYesaq+DFcDZvjsoGvxGzY=
-go.opentelemetry.io/otel/trace v1.14.0 h1:wp2Mmvj41tDsyAJXiWDWpfNsOiIyd38fy85pyKcFq/M=
-go.opentelemetry.io/otel/trace v1.14.0/go.mod h1:8avnQLK+CG77yNLUae4ea2JDQ6iT+gozhnZjy/rw9G8=
-golang.org/x/crypto v0.0.0-20180723164146-c126467f60eb/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
-golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/crypto v0.7.0 h1:AvwMYaRytfdeVt3u6mLaxYtErKYjxA2OXjJ1HHq6t3A=
-golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
-golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
-golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
-golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
-golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200505041828-1ed23360d12c/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM=
-golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M=
-golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
-golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.8.0 h1:6dkIjl3j3LtZ/O3sTgZTMsLKSftL/B8Zgq4huOIIUu8=
-golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE=
-golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
-golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+go.mongodb.org/mongo-driver v1.17.4 h1:jUorfmVzljjr0FLzYQsGP8cgN/qzzxlY9Vh0C9KFXVw=
+go.mongodb.org/mongo-driver v1.17.4/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ=
+go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
+go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
+go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg=
+go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E=
+go.opentelemetry.io/otel/metric v1.36.0 h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE=
+go.opentelemetry.io/otel/metric v1.36.0/go.mod h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs=
+go.opentelemetry.io/otel/sdk v1.24.0 h1:YMPPDNymmQN3ZgczicBY3B6sf9n62Dlj9pWD3ucgoDw=
+go.opentelemetry.io/otel/sdk v1.24.0/go.mod h1:KVrIYw6tEubO9E96HQpcmpTKDVn9gdv35HoYiQWGDFg=
+go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w=
+go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA=
+go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
+go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
+golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4=
+golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc=
+golang.org/x/mod v0.27.0 h1:kb+q2PyFnEADO2IEF935ehFUXlWiNjJWtRNgBLSfbxQ=
+golang.org/x/mod v0.27.0/go.mod h1:rWI627Fq0DEoudcK+MBkNkCe0EetEaDSwJJkCcjpazc=
+golang.org/x/net v0.42.0 h1:jzkYrhi3YQWD6MLBJcsklgQsoAcw89EcZbJw8Z614hs=
+golang.org/x/net v0.42.0/go.mod h1:FF1RA5d3u7nAYA4z2TkclSCKh68eSXtiFwcWQpPXdt8=
+golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
+golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
+golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
+golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU=
-golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.8.0 h1:n5xxQn2i3PC0yLAbjTpNT85q/Kgzcr2gIoX9OrJUols=
-golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
-golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
-golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
-golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
-golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI=
+golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
+golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng=
+golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
-google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
-google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
-google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
-google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
-google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
-google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
-google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
-google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
-google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
-google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
-google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
-google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng=
-google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
+google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
gopkg.in/DATA-DOG/go-sqlmock.v1 v1.3.0 h1:FVCohIoYO7IJoDDVpV2pdq7SgrMH6wHnuTyrdrxJNoY=
gopkg.in/DATA-DOG/go-sqlmock.v1 v1.3.0/go.mod h1:OdE7CF6DbADk7lN8LIKRzRJTTZXIjtWgA5THM5lhBAw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
-gopkg.in/errgo.v1 v1.0.0/go.mod h1:CxwszS/Xz1C49Ucd2i6Zil5UToP1EmyrFhKaMVbg1mk=
-gopkg.in/errgo.v1 v1.0.1 h1:oQFRXzZ7CkBGdm1XZm/EbQYaYNNEElNBOd09M6cqNso=
-gopkg.in/errgo.v1 v1.0.1/go.mod h1:3NjfXwocQRYAPTq4/fzX+CwUhPRcR/azYRhj8G+LqMo=
-gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
-gopkg.in/httprequest.v1 v1.2.1 h1:pEPLMdF/gjWHnKxLpuCYaHFjc8vAB2wrYjXrqDVC16E=
-gopkg.in/httprequest.v1 v1.2.1/go.mod h1:x2Otw96yda5+8+6ZeWwHIJTFkEHWP/qP8pJOzqEtWPM=
-gopkg.in/macaroon.v2 v2.1.0 h1:HZcsjBCzq9t0eBPMKqTN/uSN6JOm78ZJ2INbqcBQOUI=
-gopkg.in/macaroon.v2 v2.1.0/go.mod h1:OUb+TQP/OP0WOerC2Jp/3CwhIKyIa9kQjuc7H24e6/o=
-gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22 h1:VpOs+IwYnYBaFnrNAeB8UUWtL3vEUnzSCL1nVjPhqrw=
-gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
-gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
-gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
-gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gorm.io/datatypes v1.1.1 h1:XAjO7NNfUKVUvnS3+BkqMrPXxCAcxDlpOYbjnizxNCw=
-gorm.io/datatypes v1.1.1/go.mod h1:u8GEgFjJ+GpsGfgHmBUcQqHm/937t3sj/SO9dvbndTg=
-gorm.io/driver/mysql v1.4.7 h1:rY46lkCspzGHn7+IYsNpSfEv9tA+SU4SkkB+GFX125Y=
-gorm.io/driver/mysql v1.4.7/go.mod h1:SxzItlnT1cb6e1e4ZRpgJN2VYtcqJgqnHxWr4wsP8oc=
-gorm.io/driver/postgres v1.4.5 h1:mTeXTTtHAgnS9PgmhN2YeUbazYpLhUI1doLnw42XUZc=
-gorm.io/driver/sqlite v1.4.4 h1:gIufGoR0dQzjkyqDyYSCvsYR6fba1Gw5YKDqKeChxFc=
-gorm.io/driver/sqlite v1.4.4/go.mod h1:0Aq3iPO+v9ZKbcdiz8gLWRw5VOPcBOPUQJFLq5e2ecI=
-gorm.io/driver/sqlserver v1.4.1 h1:t4r4r6Jam5E6ejqP7N82qAJIJAht27EGT41HyPfXRw0=
-gorm.io/gorm v1.23.8/go.mod h1:l2lP/RyAtc1ynaTjFksBde/O8v9oOGIApu2/xRitmZk=
-gorm.io/gorm v1.24.0/go.mod h1:DVrVomtaYTbqs7gB/x2uVvqnXzv0nqjB396B8cG4dBA=
-gorm.io/gorm v1.24.6 h1:wy98aq9oFEetsc4CAbKD2SoBCdMzsbSIvSUUFJuHi5s=
-gorm.io/gorm v1.24.6/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k=
-honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+gorm.io/datatypes v1.2.6 h1:KafLdXvFUhzNeL2ncm03Gl3eTLONQfNKZ+wJ+9Y4Nck=
+gorm.io/datatypes v1.2.6/go.mod h1:M2iO+6S3hhi4nAyYe444Pcb0dcIiOMJ7QHaUXxyiNZY=
+gorm.io/driver/mysql v1.6.0 h1:eNbLmNTpPpTOVZi8MMxCi2aaIm0ZpInbORNXDwyLGvg=
+gorm.io/driver/mysql v1.6.0/go.mod h1:D/oCC2GWK3M/dqoLxnOlaNKmXz8WNTfcS9y5ovaSqKo=
+gorm.io/driver/postgres v1.5.0 h1:u2FXTy14l45qc3UeCJ7QaAXZmZfDDv0YrthvmRq1l0U=
+gorm.io/driver/postgres v1.5.0/go.mod h1:FUZXzO+5Uqg5zzwzv4KK49R8lvGIyscBOqYrtI1Ce9A=
+gorm.io/driver/sqlite v1.6.0 h1:WHRRrIiulaPiPFmDcod6prc4l2VGVWHz80KspNsxSfQ=
+gorm.io/driver/sqlite v1.6.0/go.mod h1:AO9V1qIQddBESngQUKWL9yoH93HIeA1X6V633rBwyT8=
+gorm.io/driver/sqlserver v1.6.0 h1:VZOBQVsVhkHU/NzNhRJKoANt5pZGQAS1Bwc6m6dgfnc=
+gorm.io/driver/sqlserver v1.6.0/go.mod h1:WQzt4IJo/WHKnckU9jXBLMJIVNMVeTu25dnOzehntWw=
+gorm.io/gorm v1.30.1 h1:lSHg33jJTBxs2mgJRfRZeLDG+WZaHYCk3Wtfl6Ngzo4=
+gorm.io/gorm v1.30.1/go.mod h1:8Z33v652h4//uMA76KjeDH8mJXPm1QNCYrMeatR0DOE=
diff --git a/internal/testing/mock_watcher.go b/internal/testing/mock_watcher.go
new file mode 100644
index 00000000..112f0de5
--- /dev/null
+++ b/internal/testing/mock_watcher.go
@@ -0,0 +1,66 @@
+//go:build testing
+// +build testing
+
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package testing
+
+import (
+ "context"
+
+ "github.com/cloudbase/garm/database/common"
+)
+
+type MockWatcher struct{}
+
+func (w *MockWatcher) RegisterProducer(_ context.Context, _ string) (common.Producer, error) {
+ return &MockProducer{}, nil
+}
+
+func (w *MockWatcher) RegisterConsumer(_ context.Context, _ string, _ ...common.PayloadFilterFunc) (common.Consumer, error) {
+ return &MockConsumer{}, nil
+}
+
+func (w *MockWatcher) Close() {
+}
+
+type MockProducer struct{}
+
+func (p *MockProducer) Notify(_ common.ChangePayload) error {
+ return nil
+}
+
+func (p *MockProducer) IsClosed() bool {
+ return false
+}
+
+func (p *MockProducer) Close() {
+}
+
+type MockConsumer struct{}
+
+func (c *MockConsumer) Watch() <-chan common.ChangePayload {
+ return nil
+}
+
+func (c *MockConsumer) SetFilters(_ ...common.PayloadFilterFunc) {
+}
+
+func (c *MockConsumer) Close() {
+}
+
+func (c *MockConsumer) IsClosed() bool {
+ return false
+}
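
For reference, a minimal sketch of how these no-op mocks could back a test that consumes the watcher interfaces. The package and test names are illustrative, the build tag mirrors the one guarding mock_watcher.go, the import only resolves from within the garm module, and the sketch assumes common.ChangePayload is a struct whose zero value is acceptable here:

```go
//go:build testing

package watcher_test

import (
	"context"
	"testing"

	dbcommon "github.com/cloudbase/garm/database/common"
	garmTesting "github.com/cloudbase/garm/internal/testing"
)

func TestMockWatcherIsNoOp(t *testing.T) {
	w := &garmTesting.MockWatcher{}

	producer, err := w.RegisterProducer(context.Background(), "test-producer")
	if err != nil {
		t.Fatalf("failed to register producer: %v", err)
	}
	// The mock producer accepts any payload and never fails.
	if err := producer.Notify(dbcommon.ChangePayload{}); err != nil {
		t.Fatalf("notify should not error: %v", err)
	}

	consumer, err := w.RegisterConsumer(context.Background(), "test-consumer")
	if err != nil {
		t.Fatalf("failed to register consumer: %v", err)
	}
	// Watch returns a nil channel on the mock; reads would block forever,
	// so tests should only assert that registration succeeds.
	_ = consumer.Watch()
}
```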
diff --git a/internal/testing/testing.go b/internal/testing/testing.go
index 754a799a..38725882 100644
--- a/internal/testing/testing.go
+++ b/internal/testing/testing.go
@@ -18,19 +18,180 @@
package testing
import (
+ "context"
+ "errors"
+ "fmt"
"os"
"path/filepath"
"sort"
"testing"
- "github.com/cloudbase/garm/config"
-
"github.com/stretchr/testify/require"
+
+ runnerErrors "github.com/cloudbase/garm-provider-common/errors"
+ "github.com/cloudbase/garm/auth"
+ "github.com/cloudbase/garm/config"
+ "github.com/cloudbase/garm/database/common"
+ "github.com/cloudbase/garm/params"
+ "github.com/cloudbase/garm/util/appdefaults"
)
-var (
- encryptionPassphrase = "bocyasicgatEtenOubwonIbsudNutDom"
-)
+//nolint:golangci-lint,gosec
+var encryptionPassphrase = "bocyasicgatEtenOubwonIbsudNutDom"
+
+func ImpersonateAdminContext(ctx context.Context, db common.Store, s *testing.T) context.Context {
+ adminUser, err := db.GetAdminUser(ctx)
+ if err != nil {
+ if !errors.Is(err, runnerErrors.ErrNotFound) {
+ s.Fatalf("failed to get admin user: %v", err)
+ }
+ newUserParams := params.NewUserParams{
+ Email: "admin@localhost",
+ Username: "admin",
+ Password: "superSecretAdminPassword@123",
+ IsAdmin: true,
+ Enabled: true,
+ }
+ adminUser, err = db.CreateUser(ctx, newUserParams)
+ if err != nil {
+ s.Fatalf("failed to create admin user: %v", err)
+ }
+ }
+ ctx = auth.PopulateContext(ctx, adminUser, nil)
+ return ctx
+}
+
+func CreateGARMTestUser(ctx context.Context, username string, db common.Store, s *testing.T) params.User {
+ newUserParams := params.NewUserParams{
+ Email: fmt.Sprintf("%s@localhost", username),
+ Username: username,
+ Password: "superSecretPassword@123",
+ IsAdmin: false,
+ Enabled: true,
+ }
+
+ user, err := db.CreateUser(ctx, newUserParams)
+ if err != nil {
+ if errors.Is(err, runnerErrors.ErrDuplicateEntity) {
+ user, err = db.GetUser(ctx, newUserParams.Username)
+ if err != nil {
+ s.Fatalf("failed to get user by email: %v", err)
+ }
+ return user
+ }
+ s.Fatalf("failed to create user: %v", err)
+ }
+
+ return user
+}
+
+func CreateGHESEndpoint(ctx context.Context, db common.Store, s *testing.T) params.ForgeEndpoint {
+ endpointParams := params.CreateGithubEndpointParams{
+ Name: "ghes.example.com",
+ Description: "GHES endpoint",
+ APIBaseURL: "https://ghes.example.com",
+ UploadBaseURL: "https://upload.ghes.example.com/",
+ BaseURL: "https://ghes.example.com",
+ }
+
+ ep, err := db.GetGithubEndpoint(ctx, endpointParams.Name)
+ if err != nil {
+ if !errors.Is(err, runnerErrors.ErrNotFound) {
+ s.Fatalf("failed to get database object (%s): %v", endpointParams.Name, err)
+ }
+ ep, err = db.CreateGithubEndpoint(ctx, endpointParams)
+ if err != nil {
+ if !errors.Is(err, runnerErrors.ErrDuplicateEntity) {
+ s.Fatalf("failed to create database object (%s): %v", endpointParams.Name, err)
+ }
+ }
+ }
+
+ return ep
+}
+
+func CreateDefaultGithubEndpoint(ctx context.Context, db common.Store, s *testing.T) params.ForgeEndpoint {
+ endpointParams := params.CreateGithubEndpointParams{
+ Name: "github.com",
+ Description: "github endpoint",
+ APIBaseURL: appdefaults.GithubDefaultBaseURL,
+ UploadBaseURL: appdefaults.GithubDefaultUploadBaseURL,
+ BaseURL: appdefaults.DefaultGithubURL,
+ }
+
+ ep, err := db.GetGithubEndpoint(ctx, endpointParams.Name)
+ if err != nil {
+ if !errors.Is(err, runnerErrors.ErrNotFound) {
+ s.Fatalf("failed to get database object (github.com): %v", err)
+ }
+ ep, err = db.CreateGithubEndpoint(ctx, endpointParams)
+ if err != nil {
+ if !errors.Is(err, runnerErrors.ErrDuplicateEntity) {
+ s.Fatalf("failed to create database object (github.com): %v", err)
+ }
+ }
+ }
+
+ return ep
+}
+
+func CreateDefaultGiteaEndpoint(ctx context.Context, db common.Store, s *testing.T) params.ForgeEndpoint {
+ endpointParams := params.CreateGiteaEndpointParams{
+ Name: "gitea.example.com",
+ Description: "gitea endpoint",
+ APIBaseURL: "https://gitea.example.com/",
+ BaseURL: "https://gitea.example.com/",
+ }
+
+ ep, err := db.GetGithubEndpoint(ctx, endpointParams.Name)
+ if err != nil {
+ if !errors.Is(err, runnerErrors.ErrNotFound) {
+ s.Fatalf("failed to get database object (github.com): %v", err)
+ }
+ ep, err = db.CreateGiteaEndpoint(ctx, endpointParams)
+ if err != nil {
+ if !errors.Is(err, runnerErrors.ErrDuplicateEntity) {
+ s.Fatalf("failed to create database object (github.com): %v", err)
+ }
+ }
+ }
+
+ return ep
+}
+
+func CreateTestGithubCredentials(ctx context.Context, credsName string, db common.Store, s *testing.T, endpoint params.ForgeEndpoint) params.ForgeCredentials {
+ newCredsParams := params.CreateGithubCredentialsParams{
+ Name: credsName,
+ Description: "Test creds",
+ AuthType: params.ForgeAuthTypePAT,
+ Endpoint: endpoint.Name,
+ PAT: params.GithubPAT{
+ OAuth2Token: "test-token",
+ },
+ }
+ newCreds, err := db.CreateGithubCredentials(ctx, newCredsParams)
+ if err != nil {
+ s.Fatalf("failed to create database object (%s): %v", credsName, err)
+ }
+ return newCreds
+}
+
+func CreateTestGiteaCredentials(ctx context.Context, credsName string, db common.Store, s *testing.T, endpoint params.ForgeEndpoint) params.ForgeCredentials {
+ newCredsParams := params.CreateGiteaCredentialsParams{
+ Name: credsName,
+ Description: "Test creds",
+ AuthType: params.ForgeAuthTypePAT,
+ Endpoint: endpoint.Name,
+ PAT: params.GithubPAT{
+ OAuth2Token: "test-token",
+ },
+ }
+ newCreds, err := db.CreateGiteaCredentials(ctx, newCredsParams)
+ if err != nil {
+ s.Fatalf("failed to create database object (%s): %v", credsName, err)
+ }
+ return newCreds
+}
func GetTestSqliteDBConfig(t *testing.T) config.Database {
dir, err := os.MkdirTemp("", "garm-config-test")
@@ -58,6 +219,10 @@ type NameAndIDDBEntity interface {
GetName() string
}
+func Ptr[T any](v T) *T {
+ return &v
+}
+
func EqualDBEntityByName[T NameAndIDDBEntity](t *testing.T, expected, actual []T) {
require.Equal(t, len(expected), len(actual))
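
Taken together, these helpers bootstrap a complete fixture (admin context, forge endpoint, credentials) against any common.Store. A hypothetical sketch of the intended flow; the package name and helper function are illustrative:

```go
//go:build testing

package mytests

import (
	"context"
	"testing"

	"github.com/cloudbase/garm/database/common"
	garmTesting "github.com/cloudbase/garm/internal/testing"
)

// setupFixture wires an admin context, the default github.com endpoint and
// a set of test credentials against any common.Store implementation.
func setupFixture(t *testing.T, db common.Store) context.Context {
	ctx := garmTesting.ImpersonateAdminContext(context.Background(), db, t)
	endpoint := garmTesting.CreateDefaultGithubEndpoint(ctx, db, t)
	garmTesting.CreateTestGithubCredentials(ctx, "test-creds", db, t, endpoint)
	return ctx
}
```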
diff --git a/locking/interface.go b/locking/interface.go
new file mode 100644
index 00000000..43ed1737
--- /dev/null
+++ b/locking/interface.go
@@ -0,0 +1,31 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package locking
+
+import "time"
+
+type Locker interface {
+ TryLock(key, identifier string) bool
+ Lock(key, identifier string)
+ LockedBy(key string) (string, bool)
+ Unlock(key string, remove bool)
+ Delete(key string)
+}
+
+type InstanceDeleteBackoff interface {
+ ShouldProcess(key string) (bool, time.Time)
+ Delete(key string)
+ RecordFailure(key string)
+}
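
The identifier argument is what separates this contract from a plain sync.Mutex: a held lock can be attributed to a specific worker. A sketch of a consumer against the interface, using the in-memory implementation added further down in this patch; the instance and worker names are illustrative:

```go
package main

import (
	"context"
	"fmt"

	"github.com/cloudbase/garm/locking"
)

// tryDelete takes the per-instance lock before acting; if another worker
// holds it, LockedBy reports who.
func tryDelete(l locking.Locker, instance, worker string) {
	if !l.TryLock(instance, worker) {
		if holder, ok := l.LockedBy(instance); ok {
			fmt.Printf("%s is busy, held by %s\n", instance, holder)
		}
		return
	}
	// remove=true drops the key from the locker once we are done with it.
	defer l.Unlock(instance, true)
	fmt.Printf("%s deleting %s\n", worker, instance)
}

func main() {
	l, _ := locking.NewLocalLocker(context.Background(), nil)
	tryDelete(l, "instance-1", "worker-a")
}
```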
diff --git a/locking/local_backoff_locker.go b/locking/local_backoff_locker.go
new file mode 100644
index 00000000..93344566
--- /dev/null
+++ b/locking/local_backoff_locker.go
@@ -0,0 +1,77 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package locking
+
+import (
+ "context"
+ "sync"
+ "time"
+
+ "github.com/cloudbase/garm/runner/common"
+)
+
+func NewInstanceDeleteBackoff(_ context.Context) (InstanceDeleteBackoff, error) {
+ return &instanceDeleteBackoff{}, nil
+}
+
+type instanceBackOff struct {
+ backoffSeconds float64
+ lastRecordedFailureTime time.Time
+ mux sync.Mutex
+}
+
+type instanceDeleteBackoff struct {
+ muxes sync.Map
+}
+
+func (i *instanceDeleteBackoff) ShouldProcess(key string) (bool, time.Time) {
+ backoff, loaded := i.muxes.LoadOrStore(key, &instanceBackOff{})
+ if !loaded {
+ return true, time.Time{}
+ }
+
+ ib := backoff.(*instanceBackOff)
+ ib.mux.Lock()
+ defer ib.mux.Unlock()
+
+ if ib.lastRecordedFailureTime.IsZero() || ib.backoffSeconds == 0 {
+ return true, time.Time{}
+ }
+
+ now := time.Now().UTC()
+ deadline := ib.lastRecordedFailureTime.Add(time.Duration(ib.backoffSeconds) * time.Second)
+ return now.After(deadline), deadline
+}
+
+func (i *instanceDeleteBackoff) Delete(key string) {
+ i.muxes.Delete(key)
+}
+
+func (i *instanceDeleteBackoff) RecordFailure(key string) {
+ backoff, _ := i.muxes.LoadOrStore(key, &instanceBackOff{})
+ ib := backoff.(*instanceBackOff)
+ ib.mux.Lock()
+ defer ib.mux.Unlock()
+
+ ib.lastRecordedFailureTime = time.Now().UTC()
+ if ib.backoffSeconds == 0 {
+ ib.backoffSeconds = common.PoolConsilitationInterval.Seconds()
+ } else {
+ // Geometric progression with a ratio of 1.5
+ newBackoff := ib.backoffSeconds * 1.5
+ // Cap the backoff to 20 minutes
+ ib.backoffSeconds = min(newBackoff, maxBackoffSeconds)
+ }
+}
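
Assuming the initial interval is 5 seconds (the value the tests below expect from common.PoolConsilitationInterval), repeated failures back off as 5 → 7.5 → 11.25 → ... seconds until they hit the 20 minute cap. A standalone sketch of that progression:

```go
package main

import "fmt"

func main() {
	const (
		initial = 5.0    // assumed PoolConsilitationInterval, in seconds
		maxWait = 1200.0 // mirrors maxBackoffSeconds: 20 minutes
	)
	backoff := 0.0
	for i := 1; i <= 16; i++ {
		if backoff == 0 {
			backoff = initial
		} else {
			// Geometric progression with a ratio of 1.5, as in RecordFailure.
			backoff = min(backoff*1.5, maxWait)
		}
		fmt.Printf("failure %2d -> wait %.2fs\n", i, backoff)
	}
}
```

The builtin min used here (and in RecordFailure above) requires Go 1.21 or newer.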
diff --git a/locking/local_backoff_locker_test.go b/locking/local_backoff_locker_test.go
new file mode 100644
index 00000000..00fe09c8
--- /dev/null
+++ b/locking/local_backoff_locker_test.go
@@ -0,0 +1,89 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package locking
+
+import (
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/suite"
+)
+
+type LockerBackoffTestSuite struct {
+ suite.Suite
+
+ locker *instanceDeleteBackoff
+}
+
+func (l *LockerBackoffTestSuite) SetupTest() {
+ l.locker = &instanceDeleteBackoff{}
+}
+
+func (l *LockerBackoffTestSuite) TearDownTest() {
+ l.locker = nil
+}
+
+func (l *LockerBackoffTestSuite) TestShouldProcess() {
+ shouldProcess, deadline := l.locker.ShouldProcess("test")
+ l.Require().True(shouldProcess)
+ l.Require().Equal(time.Time{}, deadline)
+
+ l.locker.muxes.Store("test", &instanceBackOff{
+ backoffSeconds: 0,
+ lastRecordedFailureTime: time.Time{},
+ })
+
+ shouldProcess, deadline = l.locker.ShouldProcess("test")
+ l.Require().True(shouldProcess)
+ l.Require().Equal(time.Time{}, deadline)
+
+ l.locker.muxes.Store("test", &instanceBackOff{
+ backoffSeconds: 100,
+ lastRecordedFailureTime: time.Now().UTC(),
+ })
+
+ shouldProcess, deadline = l.locker.ShouldProcess("test")
+ l.Require().False(shouldProcess)
+ l.Require().NotEqual(time.Time{}, deadline)
+}
+
+func (l *LockerBackoffTestSuite) TestRecordFailure() {
+ l.locker.RecordFailure("test")
+
+ mux, ok := l.locker.muxes.Load("test")
+ l.Require().True(ok)
+ ib := mux.(*instanceBackOff)
+ l.Require().NotNil(ib)
+ l.Require().NotEqual(time.Time{}, ib.lastRecordedFailureTime)
+ l.Require().Equal(float64(5), ib.backoffSeconds)
+
+ l.locker.RecordFailure("test")
+ mux, ok = l.locker.muxes.Load("test")
+ l.Require().True(ok)
+ ib = mux.(*instanceBackOff)
+ l.Require().NotNil(ib)
+ l.Require().NotEqual(time.Time{}, ib.lastRecordedFailureTime)
+ l.Require().Equal(7.5, ib.backoffSeconds)
+
+ l.locker.Delete("test")
+ mux, ok = l.locker.muxes.Load("test")
+ l.Require().False(ok)
+ l.Require().Nil(mux)
+}
+
+func TestBackoffTestSuite(t *testing.T) {
+ t.Parallel()
+ suite.Run(t, new(LockerBackoffTestSuite))
+}
diff --git a/locking/local_locker.go b/locking/local_locker.go
new file mode 100644
index 00000000..312d85ec
--- /dev/null
+++ b/locking/local_locker.go
@@ -0,0 +1,94 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package locking
+
+import (
+ "context"
+ "sync"
+
+ dbCommon "github.com/cloudbase/garm/database/common"
+)
+
+const (
+ maxBackoffSeconds float64 = 1200 // 20 minutes
+)
+
+func NewLocalLocker(_ context.Context, _ dbCommon.Store) (Locker, error) {
+ return &keyMutex{}, nil
+}
+
+type keyMutex struct {
+ muxes sync.Map
+}
+
+type lockWithIdent struct {
+ mux sync.Mutex
+ ident string
+}
+
+var _ Locker = &keyMutex{}
+
+func (k *keyMutex) TryLock(key, identifier string) bool {
+ mux, _ := k.muxes.LoadOrStore(key, &lockWithIdent{
+ mux: sync.Mutex{},
+ })
+ keyMux := mux.(*lockWithIdent)
+ locked := keyMux.mux.TryLock()
+ if locked {
+ keyMux.ident = identifier
+ }
+ return locked
+}
+
+func (k *keyMutex) Lock(key, identifier string) {
+ mux, _ := k.muxes.LoadOrStore(key, &lockWithIdent{
+ mux: sync.Mutex{},
+ })
+ keyMux := mux.(*lockWithIdent)
+ // Acquire the mutex before recording the identifier; otherwise a blocked
+ // caller would overwrite the identifier of the current lock holder.
+ keyMux.mux.Lock()
+ keyMux.ident = identifier
+}
+
+func (k *keyMutex) Unlock(key string, remove bool) {
+ mux, ok := k.muxes.Load(key)
+ if !ok {
+ return
+ }
+ keyMux := mux.(*lockWithIdent)
+ if remove {
+ k.Delete(key)
+ }
+ keyMux.ident = ""
+ keyMux.mux.Unlock()
+}
+
+func (k *keyMutex) Delete(key string) {
+ k.muxes.Delete(key)
+}
+
+func (k *keyMutex) LockedBy(key string) (string, bool) {
+ mux, ok := k.muxes.Load(key)
+ if !ok {
+ return "", false
+ }
+ keyMux := mux.(*lockWithIdent)
+ if keyMux.ident == "" {
+ return "", false
+ }
+
+ return keyMux.ident, true
+}
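
Because every key maps to its own lockWithIdent entry in the sync.Map, locks on distinct keys never contend. A quick sketch; the key names are illustrative:

```go
package main

import (
	"context"
	"fmt"
	"sync"

	"github.com/cloudbase/garm/locking"
)

func main() {
	lock, _ := locking.NewLocalLocker(context.Background(), nil)

	var wg sync.WaitGroup
	for _, key := range []string{"instance-1", "instance-2"} {
		wg.Add(1)
		go func(key string) {
			defer wg.Done()
			// Distinct keys use independent mutexes, so these goroutines
			// proceed in parallel.
			lock.Lock(key, "worker-"+key)
			defer lock.Unlock(key, true)
			fmt.Println("holding", key)
		}(key)
	}
	wg.Wait()
}
```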
diff --git a/locking/local_locker_test.go b/locking/local_locker_test.go
new file mode 100644
index 00000000..75b4dac0
--- /dev/null
+++ b/locking/local_locker_test.go
@@ -0,0 +1,241 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+package locking
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/suite"
+)
+
+type LockerTestSuite struct {
+ suite.Suite
+
+ mux *keyMutex
+}
+
+func (l *LockerTestSuite) SetupTest() {
+ l.mux = &keyMutex{}
+ err := RegisterLocker(l.mux)
+ l.Require().NoError(err, "should register the locker")
+}
+
+func (l *LockerTestSuite) TearDownTest() {
+ l.mux = nil
+ locker = nil
+}
+
+func (l *LockerTestSuite) TestLocalLockerLockUnlock() {
+ l.mux.Lock("test", "test-identifier")
+ mux, ok := l.mux.muxes.Load("test")
+ l.Require().True(ok)
+ keyMux := mux.(*lockWithIdent)
+ l.Require().Equal("test-identifier", keyMux.ident)
+ l.mux.Unlock("test", true)
+ mux, ok = l.mux.muxes.Load("test")
+ l.Require().False(ok)
+ l.Require().Nil(mux)
+ l.mux.Unlock("test", false)
+}
+
+func (l *LockerTestSuite) TestLocalLockerTryLock() {
+ locked := l.mux.TryLock("test", "test-identifier")
+ l.Require().True(locked)
+ mux, ok := l.mux.muxes.Load("test")
+ l.Require().True(ok)
+ keyMux := mux.(*lockWithIdent)
+ l.Require().Equal("test-identifier", keyMux.ident)
+
+ locked = l.mux.TryLock("test", "another-identifier2")
+ l.Require().False(locked)
+ mux, ok = l.mux.muxes.Load("test")
+ l.Require().True(ok)
+ keyMux = mux.(*lockWithIdent)
+ l.Require().Equal("test-identifier", keyMux.ident)
+
+ l.mux.Unlock("test", true)
+ locked = l.mux.TryLock("test", "another-identifier2")
+ l.Require().True(locked)
+ mux, ok = l.mux.muxes.Load("test")
+ l.Require().True(ok)
+ keyMux = mux.(*lockWithIdent)
+ l.Require().Equal("another-identifier2", keyMux.ident)
+ l.mux.Unlock("test", true)
+}
+
+func (l *LockerTestSuite) TestLocalLockerLockedBy() {
+ l.mux.Lock("test", "test-identifier")
+ identifier, ok := l.mux.LockedBy("test")
+ l.Require().True(ok)
+ l.Require().Equal("test-identifier", identifier)
+ l.mux.Unlock("test", true)
+ identifier, ok = l.mux.LockedBy("test")
+ l.Require().False(ok)
+ l.Require().Equal("", identifier)
+
+ l.mux.Lock("test", "test-identifier")
+ identifier, ok = l.mux.LockedBy("test")
+ l.Require().True(ok)
+ l.Require().Equal("test-identifier", identifier)
+ l.mux.Unlock("test", false)
+ identifier, ok = l.mux.LockedBy("test")
+ l.Require().False(ok)
+ l.Require().Equal("", identifier)
+}
+
+func (l *LockerTestSuite) TestLockerPanicsIfNotInitialized() {
+ locker = nil
+ l.Require().Panics(
+ func() {
+ Lock("test", "test-identifier")
+ },
+ "Lock should panic if locker is not initialized",
+ )
+
+ l.Require().Panics(
+ func() {
+ TryLock("test", "test-identifier")
+ },
+ "TryLock should panic if locker is not initialized",
+ )
+
+ l.Require().Panics(
+ func() {
+ Unlock("test", false)
+ },
+ "Unlock should panic if locker is not initialized",
+ )
+
+ l.Require().Panics(
+ func() {
+ Delete("test")
+ },
+ "Delete should panic if locker is not initialized",
+ )
+
+ l.Require().Panics(
+ func() {
+ LockedBy("test")
+ },
+ "LockedBy should panic if locker is not initialized",
+ )
+}
+
+func (l *LockerTestSuite) TestLockerAlreadyRegistered() {
+ err := RegisterLocker(l.mux)
+ l.Require().Error(err, "should not be able to register the same locker again")
+ l.Require().Equal("locker already registered", err.Error())
+}
+
+func (l *LockerTestSuite) TestLockerDelete() {
+ Lock("test", "test-identifier")
+ mux, ok := l.mux.muxes.Load("test")
+ l.Require().True(ok)
+ keyMux := mux.(*lockWithIdent)
+ l.Require().Equal("test-identifier", keyMux.ident)
+
+ Delete("test")
+ mux, ok = l.mux.muxes.Load("test")
+ l.Require().False(ok)
+ l.Require().Nil(mux)
+
+ identifier, ok := l.mux.LockedBy("test")
+ l.Require().False(ok)
+ l.Require().Equal("", identifier)
+}
+
+func (l *LockerTestSuite) TestLockUnlock() {
+ Lock("test", "test-identifier")
+ mux, ok := l.mux.muxes.Load("test")
+ l.Require().True(ok)
+ keyMux := mux.(*lockWithIdent)
+ l.Require().Equal("test-identifier", keyMux.ident)
+
+ Unlock("test", true)
+ mux, ok = l.mux.muxes.Load("test")
+ l.Require().False(ok)
+ l.Require().Nil(mux)
+
+ identifier, ok := l.mux.LockedBy("test")
+ l.Require().False(ok)
+ l.Require().Equal("", identifier)
+}
+
+func (l *LockerTestSuite) TestLockUnlockWithoutRemove() {
+ Lock("test", "test-identifier")
+ mux, ok := l.mux.muxes.Load("test")
+ l.Require().True(ok)
+ keyMux := mux.(*lockWithIdent)
+ l.Require().Equal("test-identifier", keyMux.ident)
+
+ Unlock("test", false)
+ mux, ok = l.mux.muxes.Load("test")
+ l.Require().True(ok)
+ keyMux = mux.(*lockWithIdent)
+ l.Require().Equal("", keyMux.ident)
+
+ identifier, ok := l.mux.LockedBy("test")
+ l.Require().False(ok)
+ l.Require().Equal("", identifier)
+}
+
+func (l *LockerTestSuite) TestTryLock() {
+ locked := TryLock("test", "test-identifier")
+ l.Require().True(locked)
+ mux, ok := l.mux.muxes.Load("test")
+ l.Require().True(ok)
+ keyMux := mux.(*lockWithIdent)
+ l.Require().Equal("test-identifier", keyMux.ident)
+
+ locked = TryLock("test", "another-identifier2")
+ l.Require().False(locked)
+ mux, ok = l.mux.muxes.Load("test")
+ l.Require().True(ok)
+ keyMux = mux.(*lockWithIdent)
+ l.Require().Equal("test-identifier", keyMux.ident)
+
+ Unlock("test", true)
+ locked = TryLock("test", "another-identifier2")
+ l.Require().True(locked)
+ mux, ok = l.mux.muxes.Load("test")
+ l.Require().True(ok)
+ keyMux = mux.(*lockWithIdent)
+ l.Require().Equal("another-identifier2", keyMux.ident)
+ Unlock("test", true)
+}
+
+func (l *LockerTestSuite) TestLockedBy() {
+ Lock("test", "test-identifier")
+ identifier, ok := LockedBy("test")
+ l.Require().True(ok)
+ l.Require().Equal("test-identifier", identifier)
+ Unlock("test", true)
+ identifier, ok = LockedBy("test")
+ l.Require().False(ok)
+ l.Require().Equal("", identifier)
+
+ Lock("test", "test-identifier2")
+ identifier, ok = LockedBy("test")
+ l.Require().True(ok)
+ l.Require().Equal("test-identifier2", identifier)
+ Unlock("test", false)
+ identifier, ok = LockedBy("test")
+ l.Require().False(ok)
+ l.Require().Equal("", identifier)
+}
+
+func TestLockerTestSuite(t *testing.T) {
+ t.Parallel()
+ suite.Run(t, new(LockerTestSuite))
+}
diff --git a/locking/locking.go b/locking/locking.go
new file mode 100644
index 00000000..312d2e6a
--- /dev/null
+++ b/locking/locking.go
@@ -0,0 +1,90 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package locking
+
+import (
+ "fmt"
+ "log/slog"
+ "runtime"
+ "sync"
+)
+
+var locker Locker
+
+var lockerMux = sync.Mutex{}
+
+func TryLock(key, identifier string) (ok bool) {
+ if locker == nil {
+ panic("no locker is registered")
+ }
+
+ _, filename, line, _ := runtime.Caller(1)
+ slog.Debug("attempting to try lock", "key", key, "identifier", identifier, "caller", fmt.Sprintf("%s:%d", filename, line))
+ defer slog.Debug("try lock returned", "key", key, "identifier", identifier, "locked", ok, "caller", fmt.Sprintf("%s:%d", filename, line))
+
+ ok = locker.TryLock(key, identifier)
+ return ok
+}
+
+func Lock(key, identifier string) {
+ if locker == nil {
+ panic("no locker is registered")
+ }
+
+ _, filename, line, _ := runtime.Caller(1)
+ slog.Debug("attempting to lock", "key", key, "identifier", identifier, "caller", fmt.Sprintf("%s:%d", filename, line))
+ defer slog.Debug("lock acquired", "key", key, "identifier", identifier, "caller", fmt.Sprintf("%s:%d", filename, line))
+
+ locker.Lock(key, identifier)
+}
+
+func Unlock(key string, remove bool) {
+ if locker == nil {
+ panic("no locker is registered")
+ }
+
+ _, filename, line, _ := runtime.Caller(1)
+ slog.Debug("attempting to unlock", "key", key, "remove", remove, "caller", fmt.Sprintf("%s:%d", filename, line))
+ defer slog.Debug("unlock completed", "key", key, "remove", remove, "caller", fmt.Sprintf("%s:%d", filename, line))
+ locker.Unlock(key, remove)
+}
+
+func LockedBy(key string) (string, bool) {
+ if locker == nil {
+ panic("no locker is registered")
+ }
+
+ return locker.LockedBy(key)
+}
+
+func Delete(key string) {
+ if locker == nil {
+ panic("no locker is registered")
+ }
+
+ locker.Delete(key)
+}
+
+func RegisterLocker(lock Locker) error {
+ lockerMux.Lock()
+ defer lockerMux.Unlock()
+
+ if locker != nil {
+ return fmt.Errorf("locker already registered")
+ }
+
+ locker = lock
+ return nil
+}
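
The package-level helpers above panic unless a Locker was registered, so process startup is expected to wire one in exactly once. A sketch using the local in-memory implementation:

```go
package main

import (
	"context"
	"log"

	"github.com/cloudbase/garm/locking"
)

func main() {
	// The local locker ignores the store argument; a database-backed
	// implementation could use it for cross-process locking.
	lock, err := locking.NewLocalLocker(context.Background(), nil)
	if err != nil {
		log.Fatalf("failed to create locker: %v", err)
	}
	if err := locking.RegisterLocker(lock); err != nil {
		log.Fatalf("failed to register locker: %v", err)
	}

	locking.Lock("instance-1", "worker-a")
	defer locking.Unlock("instance-1", true)
	// ... critical section ...
}
```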
diff --git a/metrics/enterprise.go b/metrics/enterprise.go
new file mode 100644
index 00000000..882b64df
--- /dev/null
+++ b/metrics/enterprise.go
@@ -0,0 +1,35 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package metrics
+
+import (
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+var (
+ EnterpriseInfo = prometheus.NewGaugeVec(prometheus.GaugeOpts{
+ Namespace: metricsNamespace,
+ Subsystem: metricsEnterpriseSubsystem,
+ Name: "info",
+ Help: "Info of the enterprise",
+ }, []string{"name", "id"})
+
+ EnterprisePoolManagerStatus = prometheus.NewGaugeVec(prometheus.GaugeOpts{
+ Namespace: metricsNamespace,
+ Subsystem: metricsEnterpriseSubsystem,
+ Name: "pool_manager_status",
+ Help: "Status of the enterprise pool manager",
+ }, []string{"name", "id", "running"})
+)
diff --git a/metrics/github.go b/metrics/github.go
new file mode 100644
index 00000000..0d6f5fa7
--- /dev/null
+++ b/metrics/github.go
@@ -0,0 +1,33 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package metrics
+
+import "github.com/prometheus/client_golang/prometheus"
+
+var (
+ GithubOperationCount = prometheus.NewCounterVec(prometheus.CounterOpts{
+ Namespace: metricsNamespace,
+ Subsystem: metricsGithubSubsystem,
+ Name: "operations_total",
+ Help: "Total number of github operation attempts",
+ }, []string{"operation", "scope"})
+
+ GithubOperationFailedCount = prometheus.NewCounterVec(prometheus.CounterOpts{
+ Namespace: metricsNamespace,
+ Subsystem: metricsGithubSubsystem,
+ Name: "errors_total",
+ Help: "Total number of failed github operation attempts",
+ }, []string{"operation", "scope"})
+)
diff --git a/metrics/health.go b/metrics/health.go
new file mode 100644
index 00000000..13194231
--- /dev/null
+++ b/metrics/health.go
@@ -0,0 +1,25 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package metrics
+
+import (
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+var GarmHealth = prometheus.NewGaugeVec(prometheus.GaugeOpts{
+ Namespace: metricsNamespace,
+ Name: "health",
+ Help: "Health of the garm",
+}, []string{"metadata_url", "callback_url", "webhook_url", "controller_webhook_url", "controller_id"})
diff --git a/metrics/instance.go b/metrics/instance.go
new file mode 100644
index 00000000..b9d7e1cf
--- /dev/null
+++ b/metrics/instance.go
@@ -0,0 +1,42 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package metrics
+
+import (
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+var (
+ InstanceStatus = prometheus.NewGaugeVec(prometheus.GaugeOpts{
+ Namespace: metricsNamespace,
+ Subsystem: metricsRunnerSubsystem,
+ Name: "status",
+ Help: "Status of the instance",
+ }, []string{"name", "status", "runner_status", "pool_owner", "pool_type", "pool_id", "provider"})
+
+ InstanceOperationCount = prometheus.NewCounterVec(prometheus.CounterOpts{
+ Namespace: metricsNamespace,
+ Subsystem: metricsRunnerSubsystem,
+ Name: "operations_total",
+ Help: "Total number of instance operation attempts",
+ }, []string{"operation", "provider"})
+
+ InstanceOperationFailedCount = prometheus.NewCounterVec(prometheus.CounterOpts{
+ Namespace: metricsNamespace,
+ Subsystem: metricsRunnerSubsystem,
+ Name: "errors_total",
+ Help: "Total number of failed instance operation attempts",
+ }, []string{"operation", "provider"})
+)
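
The operations_total/errors_total pair follows the usual attempt-plus-failure pattern, so an error ratio can be derived at query time. A hypothetical wrapper; the operation and provider names are illustrative:

```go
package main

import "github.com/cloudbase/garm/metrics"

// withInstanceMetrics records one attempt for the operation/provider pair
// and, if fn fails, one error as well.
func withInstanceMetrics(operation, provider string, fn func() error) error {
	metrics.InstanceOperationCount.WithLabelValues(operation, provider).Inc()
	if err := fn(); err != nil {
		metrics.InstanceOperationFailedCount.WithLabelValues(operation, provider).Inc()
		return err
	}
	return nil
}

func main() {
	_ = withInstanceMetrics("create_instance", "lxd", func() error { return nil })
}
```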
diff --git a/metrics/metrics.go b/metrics/metrics.go
index 04e218a6..1a566116 100644
--- a/metrics/metrics.go
+++ b/metrics/metrics.go
@@ -1,184 +1,82 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
package metrics
import (
- "log"
-
- "github.com/cloudbase/garm/auth"
- "github.com/cloudbase/garm/params"
- "github.com/cloudbase/garm/runner"
-
- "github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
)
-var webhooksReceived *prometheus.CounterVec = nil
+const (
+ metricsNamespace = "garm"
+ metricsRunnerSubsystem = "runner"
+ metricsPoolSubsystem = "pool"
+ metricsProviderSubsystem = "provider"
+ metricsOrganizationSubsystem = "organization"
+ metricsRepositorySubsystem = "repository"
+ metricsEnterpriseSubsystem = "enterprise"
+ metricsWebhookSubsystem = "webhook"
+ metricsGithubSubsystem = "github"
+)
-// RecordWebhookWithLabels will increment a webhook metric identified by specific
-// values. If metrics are disabled, this function is a noop.
-func RecordWebhookWithLabels(lvs ...string) error {
- if webhooksReceived == nil {
- // not registered. Noop
- return nil
- }
+// RegisterMetrics registers all garm metrics with the default prometheus registry.
+func RegisterMetrics() error {
+ var collectors []prometheus.Collector
+ collectors = append(collectors,
- counter, err := webhooksReceived.GetMetricWithLabelValues(lvs...)
- if err != nil {
- return errors.Wrap(err, "recording metric")
- }
- counter.Inc()
- return nil
-}
+ // metrics created during the periodically update of the metrics
+ //
+ // runner metrics
+ InstanceStatus,
+ // organization metrics
+ OrganizationInfo,
+ OrganizationPoolManagerStatus,
+ // enterprise metrics
+ EnterpriseInfo,
+ EnterprisePoolManagerStatus,
+ // repository metrics
+ RepositoryInfo,
+ RepositoryPoolManagerStatus,
+ // provider metrics
+ ProviderInfo,
+ // pool metrics
+ PoolInfo,
+ PoolStatus,
+ PoolMaxRunners,
+ PoolMinIdleRunners,
+ PoolBootstrapTimeout,
+ // health metrics
+ GarmHealth,
-func RegisterCollectors(runner *runner.Runner) error {
- if webhooksReceived != nil {
- // Already registered.
- return nil
- }
-
- garmCollector, err := NewGarmCollector(runner)
- if err != nil {
- return errors.Wrap(err, "getting collector")
- }
-
- if err := prometheus.Register(garmCollector); err != nil {
- return errors.Wrap(err, "registering collector")
- }
-
- // metric to count total webhooks received
- // at this point the webhook is not yet authenticated and
- // we don't know if it's meant for us or not
- webhooksReceived = prometheus.NewCounterVec(prometheus.CounterOpts{
- Name: "garm_webhooks_received",
- Help: "The total number of webhooks received",
- }, []string{"valid", "reason", "hostname", "controller_id"})
-
- err = prometheus.Register(webhooksReceived)
- if err != nil {
- return errors.Wrap(err, "registering webhooks recv counter")
- }
- return nil
-}
-
-func NewGarmCollector(r *runner.Runner) (*GarmCollector, error) {
- controllerInfo, err := r.GetControllerInfo(auth.GetAdminContext())
- if err != nil {
- return nil, errors.Wrap(err, "fetching controller info")
- }
- return &GarmCollector{
- runner: r,
- instanceMetric: prometheus.NewDesc(
- "garm_runner_status",
- "Status of the runner",
- []string{"name", "status", "runner_status", "pool_owner", "pool_type", "pool_id", "hostname", "controller_id"}, nil,
- ),
- healthMetric: prometheus.NewDesc(
- "garm_health",
- "Health of the runner",
- []string{"hostname", "controller_id"}, nil,
- ),
- cachedControllerInfo: controllerInfo,
- }, nil
-}
-
-type GarmCollector struct {
- healthMetric *prometheus.Desc
- instanceMetric *prometheus.Desc
- runner *runner.Runner
- cachedControllerInfo params.ControllerInfo
-}
-
-func (c *GarmCollector) Describe(ch chan<- *prometheus.Desc) {
- ch <- c.instanceMetric
- ch <- c.healthMetric
-}
-
-func (c *GarmCollector) Collect(ch chan<- prometheus.Metric) {
- controllerInfo, err := c.runner.GetControllerInfo(auth.GetAdminContext())
- if err != nil {
- log.Printf("failed to get controller info: %s", err)
- return
- }
- c.CollectInstanceMetric(ch, controllerInfo.Hostname, controllerInfo.ControllerID.String())
- c.CollectHealthMetric(ch, controllerInfo.Hostname, controllerInfo.ControllerID.String())
-}
-
-func (c *GarmCollector) CollectHealthMetric(ch chan<- prometheus.Metric, hostname string, controllerID string) {
- m, err := prometheus.NewConstMetric(
- c.healthMetric,
- prometheus.GaugeValue,
- 1,
- hostname,
- controllerID,
+ // metrics used within normal garm operations
+ // e.g. count instance creations, count github api calls, ...
+ //
+ // runner instances
+ InstanceOperationCount,
+ InstanceOperationFailedCount,
+ // github
+ GithubOperationCount,
+ GithubOperationFailedCount,
+ // webhook metrics
+ WebhooksReceived,
)
- if err != nil {
- log.Printf("error on creating health metric: %s", err)
- return
- }
- ch <- m
-}
-// CollectInstanceMetric collects the metrics for the runner instances
-// reflecting the statuses and the pool they belong to.
-func (c *GarmCollector) CollectInstanceMetric(ch chan<- prometheus.Metric, hostname string, controllerID string) {
- ctx := auth.GetAdminContext()
-
- instances, err := c.runner.ListAllInstances(ctx)
- if err != nil {
- log.Printf("cannot collect metrics, listing instances: %s", err)
- return
- }
-
- pools, err := c.runner.ListAllPools(ctx)
- if err != nil {
- log.Printf("listing pools: %s", err)
- // continue anyway
- }
-
- type poolInfo struct {
- Name string
- Type string
- }
-
- poolNames := make(map[string]poolInfo)
- for _, pool := range pools {
- if pool.EnterpriseName != "" {
- poolNames[pool.ID] = poolInfo{
- Name: pool.EnterpriseName,
- Type: string(pool.PoolType()),
- }
- } else if pool.OrgName != "" {
- poolNames[pool.ID] = poolInfo{
- Name: pool.OrgName,
- Type: string(pool.PoolType()),
- }
- } else {
- poolNames[pool.ID] = poolInfo{
- Name: pool.RepoName,
- Type: string(pool.PoolType()),
- }
+ for _, c := range collectors {
+ if err := prometheus.Register(c); err != nil {
+ return err
}
}
- for _, instance := range instances {
-
- m, err := prometheus.NewConstMetric(
- c.instanceMetric,
- prometheus.GaugeValue,
- 1,
- instance.Name,
- string(instance.Status),
- string(instance.RunnerStatus),
- poolNames[instance.PoolID].Name,
- poolNames[instance.PoolID].Type,
- instance.PoolID,
- hostname,
- controllerID,
- )
-
- if err != nil {
- log.Printf("cannot collect metrics, creating metric: %s", err)
- continue
- }
- ch <- m
- }
+ return nil
}
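
A sketch of how a server could expose these collectors. RegisterMetrics uses the default prometheus registry, which promhttp.Handler() serves; the listen address is illustrative:

```go
package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus/promhttp"

	"github.com/cloudbase/garm/metrics"
)

func main() {
	// Register every garm collector with the default prometheus registry.
	if err := metrics.RegisterMetrics(); err != nil {
		log.Fatalf("failed to register metrics: %v", err)
	}
	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(":9997", nil))
}
```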
diff --git a/metrics/organization.go b/metrics/organization.go
new file mode 100644
index 00000000..d04e7a4e
--- /dev/null
+++ b/metrics/organization.go
@@ -0,0 +1,35 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package metrics
+
+import (
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+var (
+ OrganizationInfo = prometheus.NewGaugeVec(prometheus.GaugeOpts{
+ Namespace: metricsNamespace,
+ Subsystem: metricsOrganizationSubsystem,
+ Name: "info",
+ Help: "Info of the organization",
+ }, []string{"name", "id"})
+
+ OrganizationPoolManagerStatus = prometheus.NewGaugeVec(prometheus.GaugeOpts{
+ Namespace: metricsNamespace,
+ Subsystem: metricsOrganizationSubsystem,
+ Name: "pool_manager_status",
+ Help: "Status of the organization pool manager",
+ }, []string{"name", "id", "running"})
+)
diff --git a/metrics/pool.go b/metrics/pool.go
new file mode 100644
index 00000000..fc6f2520
--- /dev/null
+++ b/metrics/pool.go
@@ -0,0 +1,56 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package metrics
+
+import (
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+var (
+ PoolInfo = prometheus.NewGaugeVec(prometheus.GaugeOpts{
+ Namespace: metricsNamespace,
+ Subsystem: metricsPoolSubsystem,
+ Name: "info",
+ Help: "Info of the pool",
+ }, []string{"id", "image", "flavor", "prefix", "os_type", "os_arch", "tags", "provider", "pool_owner", "pool_type"})
+
+ PoolStatus = prometheus.NewGaugeVec(prometheus.GaugeOpts{
+ Namespace: metricsNamespace,
+ Subsystem: metricsPoolSubsystem,
+ Name: "status",
+ Help: "Status of the pool",
+ }, []string{"id", "enabled"})
+
+ PoolMaxRunners = prometheus.NewGaugeVec(prometheus.GaugeOpts{
+ Namespace: metricsNamespace,
+ Subsystem: metricsPoolSubsystem,
+ Name: "max_runners",
+ Help: "Maximum number of runners in the pool",
+ }, []string{"id"})
+
+ PoolMinIdleRunners = prometheus.NewGaugeVec(prometheus.GaugeOpts{
+ Namespace: metricsNamespace,
+ Subsystem: metricsPoolSubsystem,
+ Name: "min_idle_runners",
+ Help: "Minimum number of idle runners in the pool",
+ }, []string{"id"})
+
+ PoolBootstrapTimeout = prometheus.NewGaugeVec(prometheus.GaugeOpts{
+ Namespace: metricsNamespace,
+ Subsystem: metricsPoolSubsystem,
+ Name: "bootstrap_timeout",
+ Help: "Runner bootstrap timeout in the pool",
+ }, []string{"id"})
+)
diff --git a/metrics/provider.go b/metrics/provider.go
new file mode 100644
index 00000000..3262ab3b
--- /dev/null
+++ b/metrics/provider.go
@@ -0,0 +1,26 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package metrics
+
+import (
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+var ProviderInfo = prometheus.NewGaugeVec(prometheus.GaugeOpts{
+ Namespace: metricsNamespace,
+ Subsystem: metricsProviderSubsystem,
+ Name: "info",
+ Help: "Info of the organization",
+}, []string{"name", "type", "description"})
diff --git a/metrics/repository.go b/metrics/repository.go
new file mode 100644
index 00000000..21714233
--- /dev/null
+++ b/metrics/repository.go
@@ -0,0 +1,35 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package metrics
+
+import (
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+var (
+ RepositoryInfo = prometheus.NewGaugeVec(prometheus.GaugeOpts{
+ Namespace: metricsNamespace,
+ Subsystem: metricsRepositorySubsystem,
+ Name: "info",
+ Help: "Info of the enterprise",
+ }, []string{"name", "id"})
+
+ RepositoryPoolManagerStatus = prometheus.NewGaugeVec(prometheus.GaugeOpts{
+ Namespace: metricsNamespace,
+ Subsystem: metricsRepositorySubsystem,
+ Name: "pool_manager_status",
+ Help: "Status of the enterprise pool manager",
+ }, []string{"name", "id", "running"})
+)
diff --git a/metrics/util.go b/metrics/util.go
new file mode 100644
index 00000000..d83b4973
--- /dev/null
+++ b/metrics/util.go
@@ -0,0 +1,22 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package metrics
+
+func Bool2float64(b bool) float64 {
+ if b {
+ return 1
+ }
+ return 0
+}
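
Prometheus gauges only accept float64 values, so boolean state goes through this helper. An illustrative use with the pool status gauge defined earlier:

```go
package main

import (
	"strconv"

	"github.com/cloudbase/garm/metrics"
)

func main() {
	enabled := true
	// Bool2float64 maps true/false to 1/0, so boolean state can be exported
	// and queried numerically; the "enabled" label keeps the readable form.
	metrics.PoolStatus.WithLabelValues("pool-1", strconv.FormatBool(enabled)).
		Set(metrics.Bool2float64(enabled))
}
```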
diff --git a/metrics/webhooks.go b/metrics/webhooks.go
new file mode 100644
index 00000000..48a08f9c
--- /dev/null
+++ b/metrics/webhooks.go
@@ -0,0 +1,24 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package metrics
+
+import "github.com/prometheus/client_golang/prometheus"
+
+var WebhooksReceived = prometheus.NewCounterVec(prometheus.CounterOpts{
+ Namespace: metricsNamespace,
+ Subsystem: metricsWebhookSubsystem,
+ Name: "received",
+ Help: "The total number of webhooks received",
+}, []string{"valid", "reason"})
diff --git a/params/github.go b/params/github.go
index fc4b1c59..08f7b409 100644
--- a/params/github.go
+++ b/params/github.go
@@ -14,7 +14,16 @@
package params
-import "time"
+import (
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "net/url"
+ "time"
+
+ jwt "github.com/golang-jwt/jwt/v5"
+ "github.com/google/uuid"
+)
type Event string
@@ -22,6 +31,7 @@ const (
// WorkflowJobEvent is the event set in the webhook payload from github
// when a workflow_job hook is sent.
WorkflowJobEvent Event = "workflow_job"
+ PingEvent Event = "ping"
)
// WorkflowJob holds the payload sent by github when a workload_job is sent.
@@ -161,7 +171,9 @@ type WorkflowJob struct {
DefaultBranch string `json:"default_branch"`
} `json:"repository"`
Organization struct {
- Login string `json:"login"`
+ Login string `json:"login"`
+ // Name is a Gitea-specific field
+ Name string `json:"name"`
ID int64 `json:"id"`
NodeID string `json:"node_id"`
URL string `json:"url"`
@@ -207,3 +219,351 @@ type WorkflowJob struct {
SiteAdmin bool `json:"site_admin"`
} `json:"sender"`
}
+
+func (w WorkflowJob) GetOrgName(forgeType EndpointType) string {
+ if forgeType == GiteaEndpointType {
+ return w.Organization.Name
+ }
+ return w.Organization.Login
+}
+
+type RunnerSetting struct {
+ Ephemeral bool `json:"ephemeral,omitempty"`
+ IsElastic bool `json:"isElastic,omitempty"`
+ DisableUpdate bool `json:"disableUpdate,omitempty"`
+}
+
+type Label struct {
+ Type string `json:"type"`
+ Name string `json:"name"`
+}
+
+type RunnerScaleSetStatistic struct {
+ TotalAvailableJobs int `json:"totalAvailableJobs"`
+ TotalAcquiredJobs int `json:"totalAcquiredJobs"`
+ TotalAssignedJobs int `json:"totalAssignedJobs"`
+ TotalRunningJobs int `json:"totalRunningJobs"`
+ TotalRegisteredRunners int `json:"totalRegisteredRunners"`
+ TotalBusyRunners int `json:"totalBusyRunners"`
+ TotalIdleRunners int `json:"totalIdleRunners"`
+}
+
+type RunnerScaleSet struct {
+ ID int `json:"id,omitempty"`
+ Name string `json:"name,omitempty"`
+ RunnerGroupID int64 `json:"runnerGroupId,omitempty"`
+ RunnerGroupName string `json:"runnerGroupName,omitempty"`
+ Labels []Label `json:"labels,omitempty"`
+ RunnerSetting RunnerSetting `json:"RunnerSetting,omitempty"`
+ CreatedOn time.Time `json:"createdOn,omitempty"`
+ RunnerJitConfigURL string `json:"runnerJitConfigUrl,omitempty"`
+ GetAcquirableJobsURL string `json:"getAcquirableJobsUrl,omitempty"`
+ AcquireJobsURL string `json:"acquireJobsUrl,omitempty"`
+ Statistics *RunnerScaleSetStatistic `json:"statistics,omitempty"`
+ Status interface{} `json:"status,omitempty"`
+ Enabled *bool `json:"enabled,omitempty"`
+}
+
+type RunnerScaleSetsResponse struct {
+ Count int `json:"count"`
+ RunnerScaleSets []RunnerScaleSet `json:"value"`
+}
+
+type ActionsServiceAdminInfoResponse struct {
+ URL string `json:"url,omitempty"`
+ Token string `json:"token,omitempty"`
+}
+
+func (a ActionsServiceAdminInfoResponse) GetURL() (*url.URL, error) {
+ if a.URL == "" {
+ return nil, fmt.Errorf("no url specified")
+ }
+ u, err := url.ParseRequestURI(a.URL)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse URL: %w", err)
+ }
+ return u, nil
+}
+
+func (a ActionsServiceAdminInfoResponse) getJWT() (*jwt.Token, error) {
+ // We're parsing a token we got from the GitHub API. We can't verify its
+ // signature, but we only need the claims, such as the expiration date.
+ token, _, err := jwt.NewParser().ParseUnverified(a.Token, &jwt.RegisteredClaims{})
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse jwt token: %w", err)
+ }
+ return token, nil
+}
+
+func (a ActionsServiceAdminInfoResponse) ExpiresAt() (time.Time, error) {
+ token, err := a.getJWT()
+ if err != nil {
+ return time.Time{}, fmt.Errorf("failed to decode jwt token: %w", err)
+ }
+ expiration, err := token.Claims.GetExpirationTime()
+ if err != nil {
+ return time.Time{}, fmt.Errorf("failed to get expiration time: %w", err)
+ }
+
+ return expiration.Time, nil
+}
+
+func (a ActionsServiceAdminInfoResponse) IsExpired() bool {
+ if exp, err := a.ExpiresAt(); err == nil {
+ return time.Now().UTC().After(exp)
+ }
+ return true
+}
+
+func (a ActionsServiceAdminInfoResponse) TimeRemaining() (time.Duration, error) {
+ exp, err := a.ExpiresAt()
+ if err != nil {
+ return 0, fmt.Errorf("failed to get expiration: %w", err)
+ }
+ now := time.Now().UTC()
+ return exp.Sub(now), nil
+}
+
+func (a ActionsServiceAdminInfoResponse) ExpiresIn(t time.Duration) bool {
+ remaining, err := a.TimeRemaining()
+ if err != nil {
+ return true
+ }
+ return remaining <= t
+}
+
+type ActionsServiceAdminInfoRequest struct {
+ URL string `json:"url,omitempty"`
+ RunnerEvent string `json:"runner_event,omitempty"`
+}
+
+type RunnerScaleSetSession struct {
+ SessionID *uuid.UUID `json:"sessionId,omitempty"`
+ OwnerName string `json:"ownerName,omitempty"`
+ RunnerScaleSet *RunnerScaleSet `json:"runnerScaleSet,omitempty"`
+ MessageQueueURL string `json:"messageQueueUrl,omitempty"`
+ MessageQueueAccessToken string `json:"messageQueueAccessToken,omitempty"`
+ Statistics *RunnerScaleSetStatistic `json:"statistics,omitempty"`
+}
+
+func (a RunnerScaleSetSession) GetURL() (*url.URL, error) {
+ if a.MessageQueueURL == "" {
+ return nil, fmt.Errorf("no url specified")
+ }
+ u, err := url.ParseRequestURI(a.MessageQueueURL)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse URL: %w", err)
+ }
+ return u, nil
+}
+
+func (a RunnerScaleSetSession) getJWT() (*jwt.Token, error) {
+ // We're parsing a token we got from the GitHub API; we can't verify its signature.
+ // We do, however, need the expiration date and other claims.
+ token, _, err := jwt.NewParser().ParseUnverified(a.MessageQueueAccessToken, &jwt.RegisteredClaims{})
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse jwt token: %w", err)
+ }
+ return token, nil
+}
+
+func (a RunnerScaleSetSession) ExpiresAt() (time.Time, error) {
+ jwt, err := a.getJWT()
+ if err != nil {
+ return time.Time{}, fmt.Errorf("failed to decode jwt token: %w", err)
+ }
+ expiration, err := jwt.Claims.GetExpirationTime()
+ if err != nil {
+ return time.Time{}, fmt.Errorf("failed to get expiration time: %w", err)
+ }
+
+ return expiration.Time, nil
+}
+
+func (a RunnerScaleSetSession) IsExpired() bool {
+ if exp, err := a.ExpiresAt(); err == nil {
+ return time.Now().UTC().After(exp)
+ }
+ return true
+}
+
+func (a RunnerScaleSetSession) TimeRemaining() (time.Duration, error) {
+ exp, err := a.ExpiresAt()
+ if err != nil {
+ return 0, fmt.Errorf("failed to get expiration: %w", err)
+ }
+ now := time.Now().UTC()
+ return exp.Sub(now), nil
+}
+
+func (a RunnerScaleSetSession) ExpiresIn(t time.Duration) bool {
+ remaining, err := a.TimeRemaining()
+ if err != nil {
+ return true
+ }
+ return remaining <= t
+}
+
+type RunnerScaleSetMessage struct {
+ MessageID int64 `json:"messageId"`
+ MessageType string `json:"messageType"`
+ Body string `json:"body"`
+ Statistics *RunnerScaleSetStatistic `json:"statistics"`
+}
+
+func (r RunnerScaleSetMessage) IsNil() bool {
+ return r.MessageID == 0 && r.MessageType == "" && r.Body == "" && r.Statistics == nil
+}
+
+func (r RunnerScaleSetMessage) GetJobsFromBody() ([]ScaleSetJobMessage, error) {
+ var body []ScaleSetJobMessage
+ if r.Body == "" {
+ return nil, fmt.Errorf("no body specified")
+ }
+ if err := json.Unmarshal([]byte(r.Body), &body); err != nil {
+ return nil, fmt.Errorf("failed to unmarshal body: %w", err)
+ }
+ return body, nil
+}
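+
+// Illustrative sketch (not part of this change; msg is a hypothetical
+// RunnerScaleSetMessage received from the message queue):
+//
+//	if msg.MessageType == string(MessageTypeRunnerScaleSetJobMessages) {
+//	    jobs, err := msg.GetJobsFromBody()
+//	    if err != nil {
+//	        return err
+//	    }
+//	    for _, j := range jobs {
+//	        _ = j.ToJob() // convert to a garm Job for persistence
+//	    }
+//	}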
+
+type RunnerReference struct {
+ ID int64 `json:"id"`
+ Name string `json:"name"`
+ OS string `json:"os"`
+ RunnerScaleSetID int `json:"runnerScaleSetId"`
+ CreatedOn any `json:"createdOn"`
+ RunnerGroupID uint64 `json:"runnerGroupId"`
+ RunnerGroupName string `json:"runnerGroupName"`
+ Version string `json:"version"`
+ Enabled bool `json:"enabled"`
+ Ephemeral bool `json:"ephemeral"`
+ Status any `json:"status"`
+ DisableUpdate bool `json:"disableUpdate"`
+ ProvisioningState string `json:"provisioningState"`
+ Busy bool `json:"busy"`
+ Labels []Label `json:"labels,omitempty"`
+}
+
+func (r RunnerReference) GetStatus() RunnerStatus {
+ status, ok := r.Status.(string)
+ if !ok {
+ return RunnerUnknown
+ }
+ runnerStatus := RunnerStatus(status)
+ if !runnerStatus.IsValid() {
+ return RunnerUnknown
+ }
+
+ if runnerStatus == RunnerOnline {
+ if r.Busy {
+ return RunnerActive
+ }
+ return RunnerIdle
+ }
+ return runnerStatus
+}
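+
+// Illustrative (not part of this change; runner is a hypothetical
+// RunnerReference): GetStatus folds the raw online/offline status and the
+// busy flag into garm runner states:
+//
+//	switch runner.GetStatus() {
+//	case RunnerActive:
+//	    // online and currently servicing a job
+//	case RunnerIdle:
+//	    // online and waiting for work
+//	case RunnerUnknown:
+//	    // status was missing or not a recognized value
+//	}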
+
+type RunnerScaleSetJitRunnerConfig struct {
+ Runner *RunnerReference `json:"runner"`
+ EncodedJITConfig string `json:"encodedJITConfig"`
+}
+
+func (r RunnerScaleSetJitRunnerConfig) DecodedJITConfig() (map[string]string, error) {
+ if r.EncodedJITConfig == "" {
+ return nil, fmt.Errorf("no encoded JIT config specified")
+ }
+ decoded, err := base64.StdEncoding.DecodeString(r.EncodedJITConfig)
+ if err != nil {
+ return nil, fmt.Errorf("failed to decode JIT config: %w", err)
+ }
+ jitConfig := make(map[string]string)
+ if err := json.Unmarshal(decoded, &jitConfig); err != nil {
+ return nil, fmt.Errorf("failed to unmarshal JIT config: %w", err)
+ }
+ return jitConfig, nil
+}
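+
+// Illustrative sketch (not part of this change; cfg and instanceParams are
+// hypothetical RunnerScaleSetJitRunnerConfig and CreateInstanceParams
+// values): the decoded map holds the runner's JIT settings and can be
+// stored on the instance that will use them:
+//
+//	jitConfig, err := cfg.DecodedJITConfig()
+//	if err != nil {
+//	    return err
+//	}
+//	instanceParams.JitConfiguration = jitConfig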
+
+type RunnerReferenceList struct {
+ Count int `json:"count"`
+ RunnerReferences []RunnerReference `json:"value"`
+}
+
+type AcquirableJobList struct {
+ Count int `json:"count"`
+ Jobs []AcquirableJob `json:"value"`
+}
+
+type AcquirableJob struct {
+ AcquireJobURL string `json:"acquireJobUrl"`
+ MessageType string `json:"messageType"`
+ RunnerRequestID int64 `json:"runnerRequestId"`
+ RepositoryName string `json:"repositoryName"`
+ OwnerName string `json:"ownerName"`
+ JobWorkflowRef string `json:"jobWorkflowRef"`
+ EventName string `json:"eventName"`
+ RequestLabels []string `json:"requestLabels"`
+}
+
+type RunnerGroup struct {
+ ID int64 `json:"id"`
+ Name string `json:"name"`
+ Size int64 `json:"size"`
+ IsDefault bool `json:"isDefaultGroup"`
+}
+
+type RunnerGroupList struct {
+ Count int `json:"count"`
+ RunnerGroups []RunnerGroup `json:"value"`
+}
+
+type ScaleSetJobMessage struct {
+ MessageType string `json:"messageType,omitempty"`
+ JobID string `json:"jobId,omitempty"`
+ RunnerRequestID int64 `json:"runnerRequestId,omitempty"`
+ RepositoryName string `json:"repositoryName,omitempty"`
+ OwnerName string `json:"ownerName,omitempty"`
+ JobWorkflowRef string `json:"jobWorkflowRef,omitempty"`
+ JobDisplayName string `json:"jobDisplayName,omitempty"`
+ WorkflowRunID int64 `json:"workflowRunId,omitempty"`
+ EventName string `json:"eventName,omitempty"`
+ RequestLabels []string `json:"requestLabels,omitempty"`
+ QueueTime time.Time `json:"queueTime,omitempty"`
+ ScaleSetAssignTime time.Time `json:"scaleSetAssignTime,omitempty"`
+ RunnerAssignTime time.Time `json:"runnerAssignTime,omitempty"`
+ FinishTime time.Time `json:"finishTime,omitempty"`
+ Result string `json:"result,omitempty"`
+ RunnerID int64 `json:"runnerId,omitempty"`
+ RunnerName string `json:"runnerName,omitempty"`
+ AcquireJobURL string `json:"acquireJobUrl,omitempty"`
+}
+
+func (s ScaleSetJobMessage) MessageTypeToStatus() JobStatus {
+ switch s.MessageType {
+ case MessageTypeJobAssigned:
+ return JobStatusQueued
+ case MessageTypeJobStarted:
+ return JobStatusInProgress
+ case MessageTypeJobCompleted:
+ return JobStatusCompleted
+ default:
+ return JobStatusQueued
+ }
+}
+
+func (s ScaleSetJobMessage) ToJob() Job {
+ return Job{
+ ScaleSetJobID: s.JobID,
+ Action: s.EventName,
+ RunID: s.WorkflowRunID,
+ Status: string(s.MessageTypeToStatus()),
+ Conclusion: s.Result,
+ CompletedAt: s.FinishTime,
+ StartedAt: s.RunnerAssignTime,
+ Name: s.JobDisplayName,
+ GithubRunnerID: s.RunnerID,
+ RunnerName: s.RunnerName,
+ RepositoryName: s.RepositoryName,
+ RepositoryOwner: s.OwnerName,
+ Labels: s.RequestLabels,
+ }
+}
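+
+// Illustrative sketch (not part of this change; raw is a hypothetical JSON
+// payload): decoding a single job message and converting it for storage:
+//
+//	var jobMsg ScaleSetJobMessage
+//	if err := json.Unmarshal(raw, &jobMsg); err != nil {
+//	    return err
+//	}
+//	job := jobMsg.ToJob() // Status is derived from jobMsg.MessageType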
diff --git a/params/interfaces.go b/params/interfaces.go
new file mode 100644
index 00000000..31ef635f
--- /dev/null
+++ b/params/interfaces.go
@@ -0,0 +1,35 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package params
+
+import "time"
+
+// EntityGetter is implemented by all forge entities (repositories, organizations and enterprises).
+// It defines the GetEntity() function, which returns a ForgeEntity.
+type EntityGetter interface {
+ GetEntity() (ForgeEntity, error)
+}
+
+type IDGetter interface {
+ GetID() uint
+}
+
+type CreationDateGetter interface {
+ GetCreatedAt() time.Time
+}
+
+type ForgeCredentialsGetter interface {
+ GetForgeCredentials() ForgeCredentials
+}
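+
+// Illustrative (not part of this change; entityForgeURL is a hypothetical
+// helper): these small interfaces allow code that works generically across
+// repositories, organizations and enterprises, for example:
+//
+//	func entityForgeURL(g EntityGetter) (string, error) {
+//	    entity, err := g.GetEntity()
+//	    if err != nil {
+//	        return "", err
+//	    }
+//	    return entity.ForgeURL(), nil
+//	}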
diff --git a/params/params.go b/params/params.go
index 83aa2abc..1acd95e1 100644
--- a/params/params.go
+++ b/params/params.go
@@ -15,24 +15,73 @@
package params
import (
+ "bytes"
+ "context"
+ "crypto/tls"
+ "crypto/x509"
"encoding/json"
+ "encoding/pem"
+ "fmt"
+ "math"
+ "net"
+ "net/http"
"time"
- commonParams "github.com/cloudbase/garm-provider-common/params"
-
- "github.com/cloudbase/garm/util/appdefaults"
-
- "github.com/google/go-github/v53/github"
+ "github.com/bradleyfalzon/ghinstallation/v2"
+ "github.com/google/go-github/v72/github"
"github.com/google/uuid"
+ "golang.org/x/oauth2"
+
+ commonParams "github.com/cloudbase/garm-provider-common/params"
+ "github.com/cloudbase/garm/util/appdefaults"
)
type (
- PoolType string
- EventType string
- EventLevel string
- ProviderType string
- JobStatus string
- RunnerStatus string
+ ForgeEntityType string
+ EventType string
+ EventLevel string
+ ProviderType string
+ JobStatus string
+ RunnerStatus string
+ WebhookEndpointType string
+ ForgeAuthType string
+ EndpointType string
+ PoolBalancerType string
+ ScaleSetState string
+ ScaleSetMessageType string
+)
+
+func (s RunnerStatus) IsValid() bool {
+ switch s {
+ case RunnerIdle, RunnerPending, RunnerTerminated,
+ RunnerInstalling, RunnerFailed,
+ RunnerActive, RunnerOffline,
+ RunnerUnknown, RunnerOnline:
+
+ return true
+ }
+ return false
+}
+
+const (
+ // PoolBalancerTypeRoundRobin will try to cycle through the pools of an entity
+ // in a round robin fashion. For example, if a repository has multiple pools that
+ // match a certain set of labels, and the entity is configured to use round robin
+ // balancer, the pool manager will attempt to create instances in each pool in turn
+ // for each job that needs to be serviced. So job1 in pool1, job2 in pool2 and so on.
+ PoolBalancerTypeRoundRobin PoolBalancerType = "roundrobin"
+ // PoolBalancerTypePack will try to create instances in the first pool that matches
+ // the required labels. If the pool is full, it will move on to the next pool and so on.
+ PoolBalancerTypePack PoolBalancerType = "pack"
+ // PoolBalancerTypeNone denotes the default behavior of the pool manager, which is
+ // to use the round robin balancer.
+ PoolBalancerTypeNone PoolBalancerType = ""
+)
+
+const (
+ AutoEndpointType EndpointType = ""
+ GithubEndpointType EndpointType = "github"
+ GiteaEndpointType EndpointType = "gitea"
)
const (
@@ -42,6 +91,16 @@ const (
ExternalProvider ProviderType = "external"
)
+const (
+ // WebhookEndpointDirect instructs garm that it should attempt to create a webhook
+ // in the target entity, using the callback URL defined in the config as a target.
+ WebhookEndpointDirect WebhookEndpointType = "direct"
+ // WebhookEndpointTunnel instructs garm that it should attempt to create a webhook
+ // in the target entity, using the tunnel URL as a base for the webhook URL.
+ // This is defined for future use.
+ WebhookEndpointTunnel WebhookEndpointType = "tunnel"
+)
+
const (
JobStatusQueued JobStatus = "queued"
JobStatusInProgress JobStatus = "in_progress"
@@ -49,9 +108,15 @@ const (
)
const (
- RepositoryPool PoolType = "repository"
- OrganizationPool PoolType = "organization"
- EnterprisePool PoolType = "enterprise"
+ ForgeEntityTypeRepository ForgeEntityType = "repository"
+ ForgeEntityTypeOrganization ForgeEntityType = "organization"
+ ForgeEntityTypeEnterprise ForgeEntityType = "enterprise"
+)
+
+const (
+ MetricsLabelEnterpriseScope = "Enterprise"
+ MetricsLabelRepositoryScope = "Repository"
+ MetricsLabelOrganizationScope = "Organization"
)
const (
@@ -72,15 +137,60 @@ const (
RunnerInstalling RunnerStatus = "installing"
RunnerFailed RunnerStatus = "failed"
RunnerActive RunnerStatus = "active"
+ RunnerOffline RunnerStatus = "offline"
+ RunnerOnline RunnerStatus = "online"
+ RunnerUnknown RunnerStatus = "unknown"
)
-type StatusMessage struct {
- CreatedAt time.Time `json:"created_at"`
- Message string `json:"message"`
- EventType EventType `json:"event_type"`
- EventLevel EventLevel `json:"event_level"`
+const (
+ // ForgeAuthTypePAT is the OAuth token based authentication
+ ForgeAuthTypePAT ForgeAuthType = "pat"
+ // ForgeAuthTypeApp is the GitHub App based authentication
+ ForgeAuthTypeApp ForgeAuthType = "app"
+)
+
+func (e ForgeEntityType) String() string {
+ return string(e)
}
+const (
+ ScaleSetPendingCreate ScaleSetState = "pending_create"
+ ScaleSetCreated ScaleSetState = "created"
+ ScaleSetError ScaleSetState = "error"
+ ScaleSetPendingDelete ScaleSetState = "pending_delete"
+ ScaleSetPendingForceDelete ScaleSetState = "pending_force_delete"
+)
+
+const (
+ MessageTypeRunnerScaleSetJobMessages ScaleSetMessageType = "RunnerScaleSetJobMessages"
+)
+
+const (
+ MessageTypeJobAssigned = "JobAssigned"
+ MessageTypeJobCompleted = "JobCompleted"
+ MessageTypeJobStarted = "JobStarted"
+ MessageTypeJobAvailable = "JobAvailable"
+)
+
+// swagger:model StatusMessage
+type StatusMessage struct {
+ CreatedAt time.Time `json:"created_at,omitempty"`
+ Message string `json:"message,omitempty"`
+ EventType EventType `json:"event_type,omitempty"`
+ EventLevel EventLevel `json:"event_level,omitempty"`
+}
+
+// swagger:model EntityEvent
+type EntityEvent struct {
+ ID uint `json:"id,omitempty"`
+ CreatedAt time.Time `json:"created_at,omitempty"`
+
+ EventType EventType `json:"event_type,omitempty"`
+ EventLevel EventLevel `json:"event_level,omitempty"`
+ Message string `json:"message,omitempty"`
+}
+
+// swagger:model Instance
type Instance struct {
// ID is the database ID of this instance.
ID string `json:"id,omitempty"`
@@ -90,8 +200,12 @@ type Instance struct {
// instance in the provider.
ProviderID string `json:"provider_id,omitempty"`
+ // ProviderName is the name of the IaaS where the instance was
+ // created.
+ ProviderName string `json:"provider_name"`
+
// AgentID is the github runner agent ID.
- AgentID int64 `json:"agent_id"`
+ AgentID int64 `json:"agent_id,omitempty"`
// Name is the name associated with an instance. Depending on
// the provider, this may or may not be useful in the context of
@@ -125,6 +239,9 @@ type Instance struct {
// PoolID is the ID of the garm pool to which a runner belongs.
PoolID string `json:"pool_id,omitempty"`
+ // ScaleSetID is the ID of the scale set to which a runner belongs.
+ ScaleSetID uint `json:"scale_set_id,omitempty"`
+
// ProviderFault holds any error messages captured from the IaaS provider that is
// responsible for managing the lifecycle of the runner.
ProviderFault []byte `json:"provider_fault,omitempty"`
@@ -133,19 +250,30 @@ type Instance struct {
// up.
StatusMessages []StatusMessage `json:"status_messages,omitempty"`
+ // CreatedAt is the timestamp of the creation of this runner.
+ CreatedAt time.Time `json:"created_at,omitempty"`
+
// UpdatedAt is the timestamp of the last update to this runner.
- UpdatedAt time.Time `json:"updated_at"`
+ UpdatedAt time.Time `json:"updated_at,omitempty"`
// GithubRunnerGroup is the github runner group to which the runner belongs.
// The runner group must be created by someone with access to the enterprise.
- GitHubRunnerGroup string `json:"github-runner-group"`
+ GitHubRunnerGroup string `json:"github-runner-group,omitempty"`
+
+ // Job is the current job that is being serviced by this runner.
+ Job *Job `json:"job,omitempty"`
// Do not serialize sensitive info.
- CallbackURL string `json:"-"`
- MetadataURL string `json:"-"`
- CreateAttempt int `json:"-"`
- TokenFetched bool `json:"-"`
- AditionalLabels []string `json:"-"`
+ CallbackURL string `json:"-"`
+ MetadataURL string `json:"-"`
+ CreateAttempt int `json:"-"`
+ TokenFetched bool `json:"-"`
+ AditionalLabels []string `json:"-"`
+ JitConfiguration map[string]string `json:"-"`
+}
+
+func (i Instance) GetCreatedAt() time.Time {
+ return i.CreatedAt
}
func (i Instance) GetName() string {
@@ -157,24 +285,25 @@ func (i Instance) GetID() string {
}
// used by swagger client generated code
+// swagger:model Instances
type Instances []Instance
type BootstrapInstance struct {
- Name string `json:"name"`
- Tools []*github.RunnerApplicationDownload `json:"tools"`
+ Name string `json:"name,omitempty"`
+ Tools []*github.RunnerApplicationDownload `json:"tools,omitempty"`
// RepoURL is the URL the github runner agent needs to configure itself.
- RepoURL string `json:"repo_url"`
+ RepoURL string `json:"repo_url,omitempty"`
// CallbackUrl is the URL where the instance can send a post, signaling
// progress or status.
- CallbackURL string `json:"callback-url"`
+ CallbackURL string `json:"callback-url,omitempty"`
// MetadataURL is the URL where instances can fetch information needed to set themselves up.
- MetadataURL string `json:"metadata-url"`
+ MetadataURL string `json:"metadata-url,omitempty"`
// InstanceToken is the token that needs to be set by the instance in the headers
// in order to send updates back to garm via CallbackURL.
- InstanceToken string `json:"instance-token"`
+ InstanceToken string `json:"instance-token,omitempty"`
// SSHKeys are the ssh public keys we may want to inject inside the runners, if the
// provider supports it.
- SSHKeys []string `json:"ssh-keys"`
+ SSHKeys []string `json:"ssh-keys,omitempty"`
// ExtraSpecs is an opaque raw json that gets sent to the provider
// as part of the bootstrap params for instances. It can contain
// any kind of data needed by providers. The contents of this field means
@@ -185,69 +314,78 @@ type BootstrapInstance struct {
// GitHubRunnerGroup is the github runner group in which the newly installed runner
// should be added to. The runner group must be created by someone with access to the
// enterprise.
- GitHubRunnerGroup string `json:"github-runner-group"`
+ GitHubRunnerGroup string `json:"github-runner-group,omitempty"`
// CACertBundle is a CA certificate bundle which will be sent to instances and which
// will typically be installed as a system-wide trusted root CA by either cloud-init
// or whatever mechanism the provider will use to set up the runner.
- CACertBundle []byte `json:"ca-cert-bundle"`
+ CACertBundle []byte `json:"ca-cert-bundle,omitempty"`
// OSArch is the target OS CPU architecture of the runner.
- OSArch commonParams.OSArch `json:"arch"`
+ OSArch commonParams.OSArch `json:"arch,omitempty"`
// OSType is the target OS platform of the runner (windows, linux).
- OSType commonParams.OSType `json:"os_type"`
+ OSType commonParams.OSType `json:"os_type,omitempty"`
// Flavor is the platform specific abstraction that defines what resources will be allocated
// to the runner (CPU, RAM, disk space, etc). This field is meaningful to the provider which
// handles the actual creation.
- Flavor string `json:"flavor"`
+ Flavor string `json:"flavor,omitempty"`
// Image is the platform specific identifier of the operating system template that will be used
// to spin up a new machine.
- Image string `json:"image"`
+ Image string `json:"image,omitempty"`
// Labels are a list of github runner labels that will be added to the runner.
- Labels []string `json:"labels"`
+ Labels []string `json:"labels,omitempty"`
// PoolID is the ID of the garm pool to which this runner belongs.
- PoolID string `json:"pool_id"`
+ PoolID string `json:"pool_id,omitempty"`
// UserDataOptions are the options for the user data generation.
- UserDataOptions UserDataOptions `json:"user_data_options"`
+ UserDataOptions UserDataOptions `json:"user_data_options,omitempty"`
}
type UserDataOptions struct {
- DisableUpdatesOnBoot bool `json:"disable_updates_on_boot"`
- ExtraPackages []string `json:"extra_packages"`
+ DisableUpdatesOnBoot bool `json:"disable_updates_on_boot,omitempty"`
+ ExtraPackages []string `json:"extra_packages,omitempty"`
}
type Tag struct {
- ID string `json:"id"`
- Name string `json:"name"`
+ ID string `json:"id,omitempty"`
+ Name string `json:"name,omitempty"`
}
+// swagger:model Pool
type Pool struct {
RunnerPrefix
- ID string `json:"id"`
- ProviderName string `json:"provider_name"`
- MaxRunners uint `json:"max_runners"`
- MinIdleRunners uint `json:"min_idle_runners"`
- Image string `json:"image"`
- Flavor string `json:"flavor"`
- OSType commonParams.OSType `json:"os_type"`
- OSArch commonParams.OSArch `json:"os_arch"`
- Tags []Tag `json:"tags"`
- Enabled bool `json:"enabled"`
- Instances []Instance `json:"instances"`
- RepoID string `json:"repo_id,omitempty"`
- RepoName string `json:"repo_name,omitempty"`
- OrgID string `json:"org_id,omitempty"`
- OrgName string `json:"org_name,omitempty"`
- EnterpriseID string `json:"enterprise_id,omitempty"`
- EnterpriseName string `json:"enterprise_name,omitempty"`
- RunnerBootstrapTimeout uint `json:"runner_bootstrap_timeout"`
+ ID string `json:"id,omitempty"`
+ ProviderName string `json:"provider_name,omitempty"`
+ MaxRunners uint `json:"max_runners,omitempty"`
+ MinIdleRunners uint `json:"min_idle_runners,omitempty"`
+ Image string `json:"image,omitempty"`
+ Flavor string `json:"flavor,omitempty"`
+ OSType commonParams.OSType `json:"os_type,omitempty"`
+ OSArch commonParams.OSArch `json:"os_arch,omitempty"`
+ Tags []Tag `json:"tags,omitempty"`
+ Enabled bool `json:"enabled,omitempty"`
+ Instances []Instance `json:"instances,omitempty"`
+
+ RepoID string `json:"repo_id,omitempty"`
+ RepoName string `json:"repo_name,omitempty"`
+
+ OrgID string `json:"org_id,omitempty"`
+ OrgName string `json:"org_name,omitempty"`
+
+ EnterpriseID string `json:"enterprise_id,omitempty"`
+ EnterpriseName string `json:"enterprise_name,omitempty"`
+
+ Endpoint ForgeEndpoint `json:"endpoint,omitempty"`
+
+ RunnerBootstrapTimeout uint `json:"runner_bootstrap_timeout,omitempty"`
+ CreatedAt time.Time `json:"created_at,omitempty"`
+ UpdatedAt time.Time `json:"updated_at,omitempty"`
// ExtraSpecs is an opaque raw json that gets sent to the provider
// as part of the bootstrap params for instances. It can contain
// any kind of data needed by providers. The contents of this field means
@@ -256,7 +394,64 @@ type Pool struct {
ExtraSpecs json.RawMessage `json:"extra_specs,omitempty"`
// GithubRunnerGroup is the github runner group in which the runners will be added.
// The runner group must be created by someone with access to the enterprise.
- GitHubRunnerGroup string `json:"github-runner-group"`
+ GitHubRunnerGroup string `json:"github-runner-group,omitempty"`
+
+ // Priority is the priority of the pool. The higher the number, the higher the priority.
+ // When fetching matching pools for a set of tags, the result will be sorted in descending
+ // order of priority.
+ Priority uint `json:"priority,omitempty"`
+}
+
+func (p Pool) BelongsTo(entity ForgeEntity) bool {
+ switch p.PoolType() {
+ case ForgeEntityTypeRepository:
+ return p.RepoID == entity.ID
+ case ForgeEntityTypeOrganization:
+ return p.OrgID == entity.ID
+ case ForgeEntityTypeEnterprise:
+ return p.EnterpriseID == entity.ID
+ }
+ return false
+}
+
+func (p Pool) GetCreatedAt() time.Time {
+ return p.CreatedAt
+}
+
+func (p Pool) MinIdleRunnersAsInt() int {
+ if p.MinIdleRunners > math.MaxInt {
+ return math.MaxInt
+ }
+
+ return int(p.MinIdleRunners)
+}
+
+func (p Pool) MaxRunnersAsInt() int {
+ if p.MaxRunners > math.MaxInt {
+ return math.MaxInt
+ }
+ return int(p.MaxRunners)
+}
+
+func (p Pool) GetEntity() (ForgeEntity, error) {
+ switch p.PoolType() {
+ case ForgeEntityTypeRepository:
+ return ForgeEntity{
+ ID: p.RepoID,
+ EntityType: ForgeEntityTypeRepository,
+ }, nil
+ case ForgeEntityTypeOrganization:
+ return ForgeEntity{
+ ID: p.OrgID,
+ EntityType: ForgeEntityTypeOrganization,
+ }, nil
+ case ForgeEntityTypeEnterprise:
+ return ForgeEntity{
+ ID: p.EnterpriseID,
+ EntityType: ForgeEntityTypeEnterprise,
+ }, nil
+ }
+ return ForgeEntity{}, fmt.Errorf("pool has no associated entity")
}
func (p Pool) GetID() string {
@@ -270,42 +465,201 @@ func (p *Pool) RunnerTimeout() uint {
return p.RunnerBootstrapTimeout
}
-func (p *Pool) PoolType() PoolType {
- if p.RepoID != "" {
- return RepositoryPool
- } else if p.OrgID != "" {
- return OrganizationPool
- } else if p.EnterpriseID != "" {
- return EnterprisePool
+func (p *Pool) PoolType() ForgeEntityType {
+ switch {
+ case p.RepoID != "":
+ return ForgeEntityTypeRepository
+ case p.OrgID != "":
+ return ForgeEntityTypeOrganization
+ case p.EnterpriseID != "":
+ return ForgeEntityTypeEnterprise
}
return ""
}
-// used by swagger client generated code
-type Pools []Pool
+func (p *Pool) HasRequiredLabels(set []string) bool {
+ asMap := make(map[string]struct{}, len(p.Tags))
+ for _, t := range p.Tags {
+ asMap[t.Name] = struct{}{}
+ }
-type Internal struct {
- OAuth2Token string `json:"oauth2"`
- ControllerID string `json:"controller_id"`
- InstanceCallbackURL string `json:"instance_callback_url"`
- InstanceMetadataURL string `json:"instance_metadata_url"`
- JWTSecret string `json:"jwt_secret"`
- // GithubCredentialsDetails contains all info about the credentials, except the
- // token, which is added above.
- GithubCredentialsDetails GithubCredentials `json:"gh_creds_details"`
+ for _, l := range set {
+ if _, ok := asMap[l]; !ok {
+ return false
+ }
+ }
+ return true
}
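+
+// Illustrative (not part of this change; pool is a hypothetical Pool):
+// HasRequiredLabels is how a queued job's requested labels are matched
+// against a pool:
+//
+//	requested := []string{"self-hosted", "linux", "x64"}
+//	if pool.HasRequiredLabels(requested) {
+//	    // this pool can service the job
+//	}
+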
+// used by swagger client generated code
+// swagger:model Pools
+type Pools []Pool
+
+// swagger:model ScaleSet
+type ScaleSet struct {
+ RunnerPrefix
+
+ CreatedAt time.Time `json:"created_at,omitempty"`
+ UpdatedAt time.Time `json:"updated_at,omitempty"`
+
+ ID uint `json:"id,omitempty"`
+ ScaleSetID int `json:"scale_set_id,omitempty"`
+ Name string `json:"name,omitempty"`
+ DisableUpdate bool `json:"disable_update"`
+
+ State ScaleSetState `json:"state"`
+ ExtendedState string `json:"extended_state,omitempty"`
+
+ ProviderName string `json:"provider_name,omitempty"`
+ MaxRunners uint `json:"max_runners,omitempty"`
+ MinIdleRunners uint `json:"min_idle_runners,omitempty"`
+ Image string `json:"image,omitempty"`
+ Flavor string `json:"flavor,omitempty"`
+ OSType commonParams.OSType `json:"os_type,omitempty"`
+ OSArch commonParams.OSArch `json:"os_arch,omitempty"`
+ Enabled bool `json:"enabled,omitempty"`
+ Instances []Instance `json:"instances,omitempty"`
+ DesiredRunnerCount int `json:"desired_runner_count,omitempty"`
+
+ Endpoint ForgeEndpoint `json:"endpoint,omitempty"`
+
+ RunnerBootstrapTimeout uint `json:"runner_bootstrap_timeout,omitempty"`
+ // ExtraSpecs is an opaque raw json that gets sent to the provider
+ // as part of the bootstrap params for instances. It can contain
+ // any kind of data needed by providers. The contents of this field means
+ // nothing to garm itself. We don't act on the information in this field at
+ // all. We only validate that it's a proper json.
+ ExtraSpecs json.RawMessage `json:"extra_specs,omitempty"`
+ // GithubRunnerGroup is the github runner group in which the runners will be added.
+ // The runner group must be created by someone with access to the enterprise.
+ GitHubRunnerGroup string `json:"github-runner-group,omitempty"`
+
+ StatusMessages []StatusMessage `json:"status_messages"`
+
+ RepoID string `json:"repo_id,omitempty"`
+ RepoName string `json:"repo_name,omitempty"`
+
+ OrgID string `json:"org_id,omitempty"`
+ OrgName string `json:"org_name,omitempty"`
+
+ EnterpriseID string `json:"enterprise_id,omitempty"`
+ EnterpriseName string `json:"enterprise_name,omitempty"`
+
+ LastMessageID int64 `json:"-"`
+}
+
+func (p ScaleSet) BelongsTo(entity ForgeEntity) bool {
+ switch p.ScaleSetType() {
+ case ForgeEntityTypeRepository:
+ return p.RepoID == entity.ID
+ case ForgeEntityTypeOrganization:
+ return p.OrgID == entity.ID
+ case ForgeEntityTypeEnterprise:
+ return p.EnterpriseID == entity.ID
+ }
+ return false
+}
+
+func (p ScaleSet) GetID() uint {
+ return p.ID
+}
+
+func (p ScaleSet) GetEntity() (ForgeEntity, error) {
+ switch p.ScaleSetType() {
+ case ForgeEntityTypeRepository:
+ return ForgeEntity{
+ ID: p.RepoID,
+ EntityType: ForgeEntityTypeRepository,
+ }, nil
+ case ForgeEntityTypeOrganization:
+ return ForgeEntity{
+ ID: p.OrgID,
+ EntityType: ForgeEntityTypeOrganization,
+ }, nil
+ case ForgeEntityTypeEnterprise:
+ return ForgeEntity{
+ ID: p.EnterpriseID,
+ EntityType: ForgeEntityTypeEnterprise,
+ }, nil
+ }
+ return ForgeEntity{}, fmt.Errorf("scale set has no associated entity")
+}
+
+func (p *ScaleSet) ScaleSetType() ForgeEntityType {
+ switch {
+ case p.RepoID != "":
+ return ForgeEntityTypeRepository
+ case p.OrgID != "":
+ return ForgeEntityTypeOrganization
+ case p.EnterpriseID != "":
+ return ForgeEntityTypeEnterprise
+ }
+ return ""
+}
+
+func (p *ScaleSet) RunnerTimeout() uint {
+ if p.RunnerBootstrapTimeout == 0 {
+ return appdefaults.DefaultRunnerBootstrapTimeout
+ }
+ return p.RunnerBootstrapTimeout
+}
+
+// used by swagger client generated code
+// swagger:model ScaleSets
+type ScaleSets []ScaleSet
+
+// swagger:model Repository
type Repository struct {
- ID string `json:"id"`
- Owner string `json:"owner"`
- Name string `json:"name"`
- Pools []Pool `json:"pool,omitempty"`
- CredentialsName string `json:"credentials_name"`
+ ID string `json:"id,omitempty"`
+ Owner string `json:"owner,omitempty"`
+ Name string `json:"name,omitempty"`
+ Pools []Pool `json:"pool,omitempty"`
+ // CredentialsName is the name of the credentials associated with the repository.
+ // This field is now deprecated. Use CredentialsID instead. This field will be
+ // removed in v0.2.0.
+ CredentialsName string `json:"credentials_name,omitempty"`
+
+ CredentialsID uint `json:"credentials_id,omitempty"`
+ Credentials ForgeCredentials `json:"credentials,omitempty"`
+
PoolManagerStatus PoolManagerStatus `json:"pool_manager_status,omitempty"`
+ PoolBalancerType PoolBalancerType `json:"pool_balancing_type,omitempty"`
+ Endpoint ForgeEndpoint `json:"endpoint,omitempty"`
+ CreatedAt time.Time `json:"created_at,omitempty"`
+ UpdatedAt time.Time `json:"updated_at,omitempty"`
+ Events []EntityEvent `json:"events,omitempty"`
// Do not serialize sensitive info.
WebhookSecret string `json:"-"`
}
+func (r Repository) GetCredentialsName() string {
+ if r.CredentialsName != "" {
+ return r.CredentialsName
+ }
+ return r.Credentials.Name
+}
+
+func (r Repository) GetCreatedAt() time.Time {
+ return r.CreatedAt
+}
+
+func (r Repository) GetEntity() (ForgeEntity, error) {
+ if r.ID == "" {
+ return ForgeEntity{}, fmt.Errorf("repository has no ID")
+ }
+ return ForgeEntity{
+ ID: r.ID,
+ EntityType: ForgeEntityTypeRepository,
+ Owner: r.Owner,
+ Name: r.Name,
+ PoolBalancerType: r.PoolBalancerType,
+ Credentials: r.Credentials,
+ WebhookSecret: r.WebhookSecret,
+ CreatedAt: r.CreatedAt,
+ UpdatedAt: r.UpdatedAt,
+ }, nil
+}
+
func (r Repository) GetName() string {
return r.Name
}
@@ -314,19 +668,62 @@ func (r Repository) GetID() string {
return r.ID
}
+func (r Repository) GetBalancerType() PoolBalancerType {
+ if r.PoolBalancerType == "" {
+ return PoolBalancerTypeRoundRobin
+ }
+ return r.PoolBalancerType
+}
+
+func (r Repository) String() string {
+ return fmt.Sprintf("%s/%s", r.Owner, r.Name)
+}
+
// used by swagger client generated code
+// swagger:model Repositories
type Repositories []Repository
+// swagger:model Organization
type Organization struct {
- ID string `json:"id"`
- Name string `json:"name"`
- Pools []Pool `json:"pool,omitempty"`
- CredentialsName string `json:"credentials_name"`
+ ID string `json:"id,omitempty"`
+ Name string `json:"name,omitempty"`
+ Pools []Pool `json:"pool,omitempty"`
+ // CredentialsName is the name of the credentials associated with the organization.
+ // This field is now deprecated. Use CredentialsID instead. This field will be
+ // removed in v0.2.0.
+ CredentialsName string `json:"credentials_name,omitempty"`
+ Credentials ForgeCredentials `json:"credentials,omitempty"`
+ CredentialsID uint `json:"credentials_id,omitempty"`
PoolManagerStatus PoolManagerStatus `json:"pool_manager_status,omitempty"`
+ PoolBalancerType PoolBalancerType `json:"pool_balancing_type,omitempty"`
+ Endpoint ForgeEndpoint `json:"endpoint,omitempty"`
+ CreatedAt time.Time `json:"created_at,omitempty"`
+ UpdatedAt time.Time `json:"updated_at,omitempty"`
+ Events []EntityEvent `json:"events,omitempty"`
// Do not serialize sensitive info.
WebhookSecret string `json:"-"`
}
+func (o Organization) GetCreatedAt() time.Time {
+ return o.CreatedAt
+}
+
+func (o Organization) GetEntity() (ForgeEntity, error) {
+ if o.ID == "" {
+ return ForgeEntity{}, fmt.Errorf("organization has no ID")
+ }
+ return ForgeEntity{
+ ID: o.ID,
+ EntityType: ForgeEntityTypeOrganization,
+ Owner: o.Name,
+ WebhookSecret: o.WebhookSecret,
+ PoolBalancerType: o.PoolBalancerType,
+ Credentials: o.Credentials,
+ CreatedAt: o.CreatedAt,
+ UpdatedAt: o.UpdatedAt,
+ }, nil
+}
+
func (o Organization) GetName() string {
return o.Name
}
@@ -335,19 +732,58 @@ func (o Organization) GetID() string {
return o.ID
}
+func (o Organization) GetBalancerType() PoolBalancerType {
+ if o.PoolBalancerType == "" {
+ return PoolBalancerTypeRoundRobin
+ }
+ return o.PoolBalancerType
+}
+
// used by swagger client generated code
+// swagger:model Organizations
type Organizations []Organization
+// swagger:model Enterprise
type Enterprise struct {
- ID string `json:"id"`
- Name string `json:"name"`
- Pools []Pool `json:"pool,omitempty"`
- CredentialsName string `json:"credentials_name"`
+ ID string `json:"id,omitempty"`
+ Name string `json:"name,omitempty"`
+ Pools []Pool `json:"pool,omitempty"`
+ // CredentialsName is the name of the credentials associated with the enterprise.
+ // This field is now deprecated. Use CredentialsID instead. This field will be
+ // removed in v0.2.0.
+ CredentialsName string `json:"credentials_name,omitempty"`
+ Credentials ForgeCredentials `json:"credentials,omitempty"`
+ CredentialsID uint `json:"credentials_id,omitempty"`
PoolManagerStatus PoolManagerStatus `json:"pool_manager_status,omitempty"`
+ PoolBalancerType PoolBalancerType `json:"pool_balancing_type,omitempty"`
+ Endpoint ForgeEndpoint `json:"endpoint,omitempty"`
+ CreatedAt time.Time `json:"created_at,omitempty"`
+ UpdatedAt time.Time `json:"updated_at,omitempty"`
+ Events []EntityEvent `json:"events,omitempty"`
// Do not serialize sensitive info.
WebhookSecret string `json:"-"`
}
+func (e Enterprise) GetCreatedAt() time.Time {
+ return e.CreatedAt
+}
+
+func (e Enterprise) GetEntity() (ForgeEntity, error) {
+ if e.ID == "" {
+ return ForgeEntity{}, fmt.Errorf("enterprise has no ID")
+ }
+ return ForgeEntity{
+ ID: e.ID,
+ EntityType: ForgeEntityTypeEnterprise,
+ Owner: e.Name,
+ WebhookSecret: e.WebhookSecret,
+ PoolBalancerType: e.PoolBalancerType,
+ Credentials: e.Credentials,
+ CreatedAt: e.CreatedAt,
+ UpdatedAt: e.UpdatedAt,
+ }, nil
+}
+
func (e Enterprise) GetName() string {
return e.Name
}
@@ -356,71 +792,265 @@ func (e Enterprise) GetID() string {
return e.ID
}
+func (e Enterprise) GetBalancerType() PoolBalancerType {
+ if e.PoolBalancerType == "" {
+ return PoolBalancerTypeRoundRobin
+ }
+ return e.PoolBalancerType
+}
+
// used by swagger client generated code
+// swagger:model Enterprises
type Enterprises []Enterprise
// Users holds information about a particular user
+// swagger:model User
type User struct {
- ID string `json:"id"`
- CreatedAt time.Time `json:"created_at"`
- UpdatedAt time.Time `json:"updated_at"`
- Email string `json:"email"`
- Username string `json:"username"`
- FullName string `json:"full_name"`
- Password string `json:"-"`
- Enabled bool `json:"enabled"`
- IsAdmin bool `json:"is_admin"`
+ ID string `json:"id,omitempty"`
+ CreatedAt time.Time `json:"created_at,omitempty"`
+ UpdatedAt time.Time `json:"updated_at,omitempty"`
+ Email string `json:"email,omitempty"`
+ Username string `json:"username,omitempty"`
+ FullName string `json:"full_name,omitempty"`
+ Enabled bool `json:"enabled,omitempty"`
+ IsAdmin bool `json:"is_admin,omitempty"`
+ // Do not serialize sensitive info.
+ Password string `json:"-"`
+ Generation uint `json:"-"`
}
// JWTResponse holds the JWT token returned as a result of a
// successful auth
+// swagger:model JWTResponse
type JWTResponse struct {
- Token string `json:"token"`
+ Token string `json:"token,omitempty"`
}
+// swagger:model ControllerInfo
type ControllerInfo struct {
- ControllerID uuid.UUID `json:"controller_id"`
- Hostname string `json:"hostname"`
+ // ControllerID is the unique ID of this controller. This ID gets generated
+ // automatically on controller init.
+ ControllerID uuid.UUID `json:"controller_id,omitempty"`
+ // Hostname is the hostname of the machine that runs this controller. In the
+ // future, this field will be migrated to a separate table that will keep track
+ // of each the controller nodes that are part of a cluster. This will happen when
+ // we implement controller scale-out capability.
+ Hostname string `json:"hostname,omitempty"`
+ // MetadataURL is the public metadata URL of the GARM instance. This URL is used
+ // by instances to fetch information they need to set themselves up. The URL itself
+ // may be made available to runners via a reverse proxy or a load balancer. That
+ // means that the user is responsible for telling GARM what the public URL is, by
+ // setting this field.
+ MetadataURL string `json:"metadata_url,omitempty"`
+ // CallbackURL is the URL where instances can send updates back to the controller.
+ // This URL is used by instances to send status updates back to the controller. The
+ // URL itself may be made available to instances via a reverse proxy or a load balancer.
+ // That means that the user is responsible for telling GARM what the public URL is, by
+ // setting this field.
+ CallbackURL string `json:"callback_url,omitempty"`
+ // WebhookURL is the base URL where the controller will receive webhooks from github.
+ // When webhook management is used, this URL is used as a base to which the controller
+ // UUID is appended and which will receive the webhooks.
+ // The URL itself may be made available to instances via a reverse proxy or a load balancer.
+ // That means that the user is responsible for telling GARM what the public URL is, by
+ // setting this field.
+ WebhookURL string `json:"webhook_url,omitempty"`
+ // ControllerWebhookURL is the controller specific URL where webhooks will be received.
+ // This field holds the WebhookURL defined above to which we append the ControllerID.
+ // Functionally it is the same as WebhookURL, but it allows us to safely manage webhooks
+ // from GARM without accidentally removing webhooks from other services or GARM controllers.
+ ControllerWebhookURL string `json:"controller_webhook_url,omitempty"`
+ // MinimumJobAgeBackoff is the minimum time in seconds that a job must be in queued state
+ // before GARM will attempt to allocate a runner for it. When set to a non zero value,
+ // GARM will ignore the job until the job's age is greater than this value. When using
+ // the min_idle_runners feature of a pool, this gives enough time for potential idle
+ // runners to pick up the job before GARM attempts to allocate a new runner, thus avoiding
+ // the need to potentially scale down runners later.
+ MinimumJobAgeBackoff uint `json:"minimum_job_age_backoff,omitempty"`
+ // Version is the version of the GARM controller.
+ Version string `json:"version,omitempty"`
}
-type GithubCredentials struct {
- Name string `json:"name,omitempty"`
- Description string `json:"description,omitempty"`
- BaseURL string `json:"base_url"`
- APIBaseURL string `json:"api_base_url"`
- UploadBaseURL string `json:"upload_base_url"`
- CABundle []byte `json:"ca_bundle,omitempty"`
+func (c *ControllerInfo) JobBackoff() time.Duration {
+ // MinimumJobAgeBackoff is expressed in seconds. Clamp the value so the
+ // conversion to time.Duration (nanoseconds) cannot overflow.
+ if uint64(c.MinimumJobAgeBackoff) > uint64(math.MaxInt64/int64(time.Second)) {
+ return time.Duration(math.MaxInt64)
+ }
+
+ return time.Duration(c.MinimumJobAgeBackoff) * time.Second
+}
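+
+// Illustrative sketch (not part of this change; job and controllerInfo are
+// hypothetical values, and this assumes the seconds-scaled conversion
+// above): consumers compare a job's age against the backoff before
+// reacting to it:
+//
+//	if time.Since(job.CreatedAt) < controllerInfo.JobBackoff() {
+//	    // too young; give existing idle runners a chance to pick it up
+//	}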
+
+// swagger:model GithubRateLimit
+type GithubRateLimit struct {
+ Limit int `json:"limit,omitempty"`
+ Used int `json:"used,omitempty"`
+ Remaining int `json:"remaining,omitempty"`
+ Reset int64 `json:"reset,omitempty"`
+}
+
+func (g GithubRateLimit) ResetIn() time.Duration {
+ return time.Until(g.ResetAt())
+}
+
+func (g GithubRateLimit) ResetAt() time.Time {
+ if g.Reset == 0 {
+ return time.Time{}
+ }
+ return time.Unix(g.Reset, 0)
+}
+
+// swagger:model ForgeCredentials
+type ForgeCredentials struct {
+ ID uint `json:"id,omitempty"`
+ Name string `json:"name,omitempty"`
+ Description string `json:"description,omitempty"`
+ APIBaseURL string `json:"api_base_url,omitempty"`
+ UploadBaseURL string `json:"upload_base_url,omitempty"`
+ BaseURL string `json:"base_url,omitempty"`
+ CABundle []byte `json:"ca_bundle,omitempty"`
+ AuthType ForgeAuthType `json:"auth-type,omitempty"`
+
+ ForgeType EndpointType `json:"forge_type,omitempty"`
+
+ Repositories []Repository `json:"repositories,omitempty"`
+ Organizations []Organization `json:"organizations,omitempty"`
+ Enterprises []Enterprise `json:"enterprises,omitempty"`
+ Endpoint ForgeEndpoint `json:"endpoint,omitempty"`
+ CreatedAt time.Time `json:"created_at,omitempty"`
+ UpdatedAt time.Time `json:"updated_at,omitempty"`
+ RateLimit *GithubRateLimit `json:"rate_limit,omitempty"`
+
+ // Do not serialize sensitive info.
+ CredentialsPayload []byte `json:"-"`
+}
+
+func (g ForgeCredentials) GetID() uint {
+ return g.ID
+}
+
+func (g ForgeCredentials) GetHTTPClient(ctx context.Context) (*http.Client, error) {
+ var roots *x509.CertPool
+ if g.CABundle != nil {
+ roots = x509.NewCertPool()
+ ok := roots.AppendCertsFromPEM(g.CABundle)
+ if !ok {
+ return nil, fmt.Errorf("failed to parse CA cert")
+ }
+ }
+
+ dialer := &net.Dialer{
+ Timeout: 30 * time.Second,
+ KeepAlive: 30 * time.Second,
+ }
+
+ httpTransport := &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ DialContext: dialer.DialContext,
+ TLSClientConfig: &tls.Config{
+ RootCAs: roots,
+ MinVersion: tls.VersionTLS12,
+ },
+ ForceAttemptHTTP2: true,
+ IdleConnTimeout: 90 * time.Second,
+ TLSHandshakeTimeout: 10 * time.Second,
+ ExpectContinueTimeout: 1 * time.Second,
+ }
+
+ var tc *http.Client
+ switch g.AuthType {
+ case ForgeAuthTypeApp:
+ var app GithubApp
+ if err := json.Unmarshal(g.CredentialsPayload, &app); err != nil {
+ return nil, fmt.Errorf("failed to unmarshal github app credentials: %w", err)
+ }
+ if app.AppID == 0 || app.InstallationID == 0 || len(app.PrivateKeyBytes) == 0 {
+ return nil, fmt.Errorf("github app credentials are missing required fields")
+ }
+ itr, err := ghinstallation.New(httpTransport, app.AppID, app.InstallationID, app.PrivateKeyBytes)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create github app installation transport: %w", err)
+ }
+ itr.BaseURL = g.APIBaseURL
+
+ tc = &http.Client{Transport: itr}
+ default:
+ var pat GithubPAT
+ if err := json.Unmarshal(g.CredentialsPayload, &pat); err != nil {
+ return nil, fmt.Errorf("failed to unmarshal github app credentials: %w", err)
+ }
+ httpClient := &http.Client{Transport: httpTransport}
+ ctx = context.WithValue(ctx, oauth2.HTTPClient, httpClient)
+
+ if pat.OAuth2Token == "" {
+ return nil, fmt.Errorf("github credentials are missing the OAuth2 token")
+ }
+
+ ts := oauth2.StaticTokenSource(
+ &oauth2.Token{AccessToken: pat.OAuth2Token},
+ )
+ tc = oauth2.NewClient(ctx, ts)
+ }
+
+ return tc, nil
+}
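+
+// Illustrative sketch (not part of this change; creds and ctx are
+// hypothetical values): the returned client already carries authentication
+// (PAT or app installation token), so it can be handed to the go-github
+// SDK directly:
+//
+//	httpClient, err := creds.GetHTTPClient(ctx)
+//	if err != nil {
+//	    return err
+//	}
+//	ghCli, err := github.NewClient(httpClient).WithEnterpriseURLs(
+//	    creds.APIBaseURL, creds.UploadBaseURL)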
+
+func (g ForgeCredentials) RootCertificateBundle() (CertificateBundle, error) {
+ if len(g.CABundle) == 0 {
+ return CertificateBundle{}, nil
+ }
+
+ ret := map[string][]byte{}
+
+ var block *pem.Block
+ rest := g.CABundle
+ for {
+ block, rest = pem.Decode(rest)
+ if block == nil {
+ break
+ }
+ pub, err := x509.ParseCertificate(block.Bytes)
+ if err != nil {
+ return CertificateBundle{}, err
+ }
+ out := &bytes.Buffer{}
+ if err := pem.Encode(out, &pem.Block{Type: "CERTIFICATE", Bytes: block.Bytes}); err != nil {
+ return CertificateBundle{}, err
+ }
+ ret[fmt.Sprintf("%d", pub.SerialNumber)] = out.Bytes()
+ }
+
+ return CertificateBundle{
+ RootCertificates: ret,
+ }, nil
}
// used by swagger client generated code
-type Credentials []GithubCredentials
+// swagger:model Credentials
+type Credentials []ForgeCredentials
+// swagger:model Provider
type Provider struct {
- Name string `json:"name"`
- ProviderType ProviderType `json:"type"`
- Description string `json:"description"`
+ Name string `json:"name,omitempty"`
+ ProviderType ProviderType `json:"type,omitempty"`
+ Description string `json:"description,omitempty"`
}
// used by swagger client generated code
+// swagger:model Providers
type Providers []Provider
-type UpdatePoolStateParams struct {
- WebhookSecret string
- InternalConfig *Internal
-}
-
+// swagger:model PoolManagerStatus
type PoolManagerStatus struct {
- IsRunning bool `json:"running"`
+ IsRunning bool `json:"running,omitempty"`
FailureReason string `json:"failure_reason,omitempty"`
}
type RunnerInfo struct {
- Name string
- Labels []string
+ Name string `json:"name,omitempty"`
+ Labels []string `json:"labels,omitempty"`
}
type RunnerPrefix struct {
- Prefix string `json:"runner_prefix"`
+ Prefix string `json:"runner_prefix,omitempty"`
}
func (p RunnerPrefix) GetRunnerPrefix() string {
@@ -430,36 +1060,41 @@ func (p RunnerPrefix) GetRunnerPrefix() string {
return p.Prefix
}
+// swagger:model Job
type Job struct {
// ID is the ID of the job.
- ID int64 `json:"id"`
+ ID int64 `json:"id,omitempty"`
+
+ WorkflowJobID int64 `json:"workflow_job_id,omitempty"`
+ // ScaleSetJobID is the job ID when generated for a scale set.
+ ScaleSetJobID string `json:"scaleset_job_id,omitempty"`
// RunID is the ID of the workflow run. A run may have multiple jobs.
- RunID int64 `json:"run_id"`
+ RunID int64 `json:"run_id,omitempty"`
// Action is the specific activity that triggered the event.
- Action string `json:"action"`
+ Action string `json:"action,omitempty"`
// Conclusion is the outcome of the job.
// Possible values: "success", "failure", "neutral", "cancelled", "skipped",
// "timed_out", "action_required"
- Conclusion string `json:"conclusion"`
+ Conclusion string `json:"conclusion,omitempty"`
// Status is the phase of the lifecycle that the job is currently in.
// "queued", "in_progress" and "completed".
- Status string `json:"status"`
+ Status string `json:"status,omitempty"`
// Name is the name of the job that was triggered.
- Name string `json:"name"`
+ Name string `json:"name,omitempty"`
- StartedAt time.Time
- CompletedAt time.Time
+ StartedAt time.Time `json:"started_at,omitempty"`
+ CompletedAt time.Time `json:"completed_at,omitempty"`
- GithubRunnerID int64 `json:"runner_id"`
- RunnerName string `json:"runner_name"`
- RunnerGroupID int64 `json:"runner_group_id"`
- RunnerGroupName string `json:"runner_group_name"`
+ GithubRunnerID int64 `json:"runner_id,omitempty"`
+ RunnerName string `json:"runner_name,omitempty"`
+ RunnerGroupID int64 `json:"runner_group_id,omitempty"`
+ RunnerGroupName string `json:"runner_group_name,omitempty"`
// repository in which the job was triggered.
- RepositoryName string
- RepositoryOwner string
+ RepositoryName string `json:"repository_name,omitempty"`
+ RepositoryOwner string `json:"repository_owner,omitempty"`
- Labels []string
+ Labels []string `json:"labels,omitempty"`
// The entity that received the hook.
//
@@ -474,11 +1109,148 @@ type Job struct {
OrgID *uuid.UUID `json:"org_id,omitempty"`
EnterpriseID *uuid.UUID `json:"enterprise_id,omitempty"`
- LockedBy uuid.UUID
+ LockedBy uuid.UUID `json:"locked_by,omitempty"`
- CreatedAt time.Time `json:"created_at"`
- UpdatedAt time.Time `json:"updated_at"`
+ CreatedAt time.Time `json:"created_at,omitempty"`
+ UpdatedAt time.Time `json:"updated_at,omitempty"`
+}
+
+// used by swagger client generated code
+// swagger:model Jobs
+type Jobs []Job
+
+// swagger:model InstallWebhookParams
+type InstallWebhookParams struct {
+ WebhookEndpointType WebhookEndpointType `json:"webhook_endpoint_type,omitempty"`
+ InsecureSSL bool `json:"insecure_ssl,omitempty"`
+}
+
+// swagger:model HookInfo
+type HookInfo struct {
+ ID int64 `json:"id,omitempty"`
+ URL string `json:"url,omitempty"`
+ Events []string `json:"events,omitempty"`
+ Active bool `json:"active,omitempty"`
+ InsecureSSL bool `json:"insecure_ssl,omitempty"`
+}
+
+type CertificateBundle struct {
+ RootCertificates map[string][]byte `json:"root_certificates,omitempty"`
+}
+
+type UpdateSystemInfoParams struct {
+ OSName string `json:"os_name,omitempty"`
+ OSVersion string `json:"os_version,omitempty"`
+ AgentID *int64 `json:"agent_id,omitempty"`
+}
+
+// swagger:model ForgeEntity
+type ForgeEntity struct {
+ Owner string `json:"owner,omitempty"`
+ Name string `json:"name,omitempty"`
+ ID string `json:"id,omitempty"`
+ EntityType ForgeEntityType `json:"entity_type,omitempty"`
+ Credentials ForgeCredentials `json:"credentials,omitempty"`
+ PoolBalancerType PoolBalancerType `json:"pool_balancing_type,omitempty"`
+ CreatedAt time.Time `json:"created_at,omitempty"`
+ UpdatedAt time.Time `json:"updated_at,omitempty"`
+
+ WebhookSecret string `json:"-"`
+}
+
+func (g ForgeEntity) GetCreatedAt() time.Time {
+ return g.CreatedAt
+}
+
+func (g ForgeEntity) GetForgeType() (EndpointType, error) {
+ if g.Credentials.ForgeType == "" {
+ return "", fmt.Errorf("credentials forge type is empty")
+ }
+ return g.Credentials.ForgeType, nil
+}
+
+func (g ForgeEntity) ForgeURL() string {
+ switch g.EntityType {
+ case ForgeEntityTypeRepository:
+ return fmt.Sprintf("%s/%s/%s", g.Credentials.BaseURL, g.Owner, g.Name)
+ case ForgeEntityTypeOrganization:
+ return fmt.Sprintf("%s/%s", g.Credentials.BaseURL, g.Owner)
+ case ForgeEntityTypeEnterprise:
+ return fmt.Sprintf("%s/enterprises/%s", g.Credentials.BaseURL, g.Owner)
+ }
+ return ""
+}
+
+func (g ForgeEntity) GetPoolBalancerType() PoolBalancerType {
+ if g.PoolBalancerType == "" {
+ return PoolBalancerTypeRoundRobin
+ }
+ return g.PoolBalancerType
+}
+
+func (g ForgeEntity) LabelScope() string {
+ switch g.EntityType {
+ case ForgeEntityTypeRepository:
+ return MetricsLabelRepositoryScope
+ case ForgeEntityTypeOrganization:
+ return MetricsLabelOrganizationScope
+ case ForgeEntityTypeEnterprise:
+ return MetricsLabelEnterpriseScope
+ }
+ return ""
+}
+
+func (g ForgeEntity) String() string {
+ switch g.EntityType {
+ case ForgeEntityTypeRepository:
+ return fmt.Sprintf("%s/%s", g.Owner, g.Name)
+ case ForgeEntityTypeOrganization, ForgeEntityTypeEnterprise:
+ return g.Owner
+ }
+ return ""
+}
+
+func (g ForgeEntity) GetIDAsUUID() (uuid.UUID, error) {
+ if g.ID == "" {
+ return uuid.Nil, nil
+ }
+ id, err := uuid.Parse(g.ID)
+ if err != nil {
+ return uuid.Nil, fmt.Errorf("failed to parse entity ID: %w", err)
+ }
+ return id, nil
}
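+
+// Illustrative (not part of this change; repo is a hypothetical
+// Repository): ForgeEntity is the common handle garm passes around for
+// repositories, organizations and enterprises:
+//
+//	entity, err := repo.GetEntity()
+//	if err != nil {
+//	    return err
+//	}
+//	fmt.Printf("%s (%s) at %s\n", entity, entity.LabelScope(), entity.ForgeURL())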
// used by swagger client generated code
-type Jobs []Job
+// swagger:model ForgeEndpoints
+type ForgeEndpoints []ForgeEndpoint
+
+// swagger:model ForgeEndpoint
+type ForgeEndpoint struct {
+ Name string `json:"name,omitempty"`
+ Description string `json:"description,omitempty"`
+ APIBaseURL string `json:"api_base_url,omitempty"`
+ UploadBaseURL string `json:"upload_base_url,omitempty"`
+ BaseURL string `json:"base_url,omitempty"`
+ CACertBundle []byte `json:"ca_cert_bundle,omitempty"`
+ CreatedAt time.Time `json:"created_at,omitempty"`
+ UpdatedAt time.Time `json:"updated_at,omitempty"`
+
+ EndpointType EndpointType `json:"endpoint_type,omitempty"`
+}
+
+type RepositoryFilter struct {
+ Owner string
+ Name string
+ Endpoint string
+}
+
+type OrganizationFilter struct {
+ Name string
+ Endpoint string
+}
+
+type EnterpriseFilter struct {
+ Name string
+ Endpoint string
+}
diff --git a/params/requests.go b/params/requests.go
index 8b333662..c9021434 100644
--- a/params/requests.go
+++ b/params/requests.go
@@ -15,15 +15,21 @@
package params
import (
+ "crypto/x509"
"encoding/json"
+ "encoding/pem"
"fmt"
+ "net/url"
+ runnerErrors "github.com/cloudbase/garm-provider-common/errors"
commonParams "github.com/cloudbase/garm-provider-common/params"
-
- "github.com/cloudbase/garm-provider-common/errors"
)
-const DefaultRunnerPrefix = "garm"
+const (
+ DefaultRunnerPrefix string = "garm"
+ httpsScheme string = "https"
+ httpScheme string = "http"
+)
type InstanceRequest struct {
Name string `json:"name"`
@@ -31,81 +37,124 @@ type InstanceRequest struct {
OSVersion string `json:"os_version"`
}
+// swagger:model CreateRepoParams
type CreateRepoParams struct {
- Owner string `json:"owner"`
- Name string `json:"name"`
- CredentialsName string `json:"credentials_name"`
- WebhookSecret string `json:"webhook_secret"`
+ Owner string `json:"owner,omitempty"`
+ Name string `json:"name,omitempty"`
+ CredentialsName string `json:"credentials_name,omitempty"`
+ WebhookSecret string `json:"webhook_secret,omitempty"`
+ PoolBalancerType PoolBalancerType `json:"pool_balancer_type,omitempty"`
+ ForgeType EndpointType `json:"forge_type,omitempty"`
}
func (c *CreateRepoParams) Validate() error {
if c.Owner == "" {
- return errors.NewBadRequestError("missing owner")
+ return runnerErrors.NewBadRequestError("missing owner")
}
if c.Name == "" {
- return errors.NewBadRequestError("missing repo name")
+ return runnerErrors.NewBadRequestError("missing repo name")
}
if c.CredentialsName == "" {
- return errors.NewBadRequestError("missing credentials name")
+ return runnerErrors.NewBadRequestError("missing credentials name")
}
if c.WebhookSecret == "" {
- return errors.NewMissingSecretError("missing secret")
+ return runnerErrors.NewMissingSecretError("missing secret")
}
+
+ switch c.ForgeType {
+ case GithubEndpointType, GiteaEndpointType, AutoEndpointType:
+ break
+ default:
+ return runnerErrors.NewBadRequestError("invalid forge type")
+ }
+
+ switch c.PoolBalancerType {
+ case PoolBalancerTypeRoundRobin, PoolBalancerTypePack, PoolBalancerTypeNone:
+ default:
+ return runnerErrors.NewBadRequestError("invalid pool balancer type")
+ }
+
return nil
}
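+
+// Illustrative (not part of this change; the values are placeholders): a
+// minimal valid request. The zero values of ForgeType and PoolBalancerType
+// (AutoEndpointType and PoolBalancerTypeNone) pass validation:
+//
+//	params := CreateRepoParams{
+//	    Owner:           "cloudbase",
+//	    Name:            "garm",
+//	    CredentialsName: "my-creds",
+//	    WebhookSecret:   "super-secret",
+//	}
+//	if err := params.Validate(); err != nil {
+//	    return err
+//	}
+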
+// swagger:model CreateOrgParams
type CreateOrgParams struct {
- Name string `json:"name"`
- CredentialsName string `json:"credentials_name"`
- WebhookSecret string `json:"webhook_secret"`
+ Name string `json:"name,omitempty"`
+ CredentialsName string `json:"credentials_name,omitempty"`
+ WebhookSecret string `json:"webhook_secret,omitempty"`
+ PoolBalancerType PoolBalancerType `json:"pool_balancer_type,omitempty"`
+ ForgeType EndpointType `json:"forge_type,omitempty"`
}
func (c *CreateOrgParams) Validate() error {
if c.Name == "" {
- return errors.NewBadRequestError("missing org name")
+ return runnerErrors.NewBadRequestError("missing org name")
}
if c.CredentialsName == "" {
- return errors.NewBadRequestError("missing credentials name")
+ return runnerErrors.NewBadRequestError("missing credentials name")
}
if c.WebhookSecret == "" {
- return errors.NewMissingSecretError("missing secret")
+ return runnerErrors.NewMissingSecretError("missing secret")
+ }
+
+ switch c.ForgeType {
+ case GithubEndpointType, GiteaEndpointType, AutoEndpointType:
+ break
+ default:
+ return runnerErrors.NewBadRequestError("invalid forge type")
+ }
+
+ switch c.PoolBalancerType {
+ case PoolBalancerTypeRoundRobin, PoolBalancerTypePack, PoolBalancerTypeNone:
+ default:
+ return runnerErrors.NewBadRequestError("invalid pool balancer type")
}
return nil
}
+// swagger:model CreateEnterpriseParams
type CreateEnterpriseParams struct {
- Name string `json:"name"`
- CredentialsName string `json:"credentials_name"`
- WebhookSecret string `json:"webhook_secret"`
+ Name string `json:"name,omitempty"`
+ CredentialsName string `json:"credentials_name,omitempty"`
+ WebhookSecret string `json:"webhook_secret,omitempty"`
+ PoolBalancerType PoolBalancerType `json:"pool_balancer_type,omitempty"`
}
func (c *CreateEnterpriseParams) Validate() error {
if c.Name == "" {
- return errors.NewBadRequestError("missing enterprise name")
+ return runnerErrors.NewBadRequestError("missing enterprise name")
}
if c.CredentialsName == "" {
- return errors.NewBadRequestError("missing credentials name")
+ return runnerErrors.NewBadRequestError("missing credentials name")
}
if c.WebhookSecret == "" {
- return errors.NewMissingSecretError("missing secret")
+ return runnerErrors.NewMissingSecretError("missing secret")
+ }
+
+ switch c.PoolBalancerType {
+ case PoolBalancerTypeRoundRobin, PoolBalancerTypePack, PoolBalancerTypeNone:
+ default:
+ return runnerErrors.NewBadRequestError("invalid pool balancer type")
}
return nil
}
// NewUserParams holds the needed information to create
// a new user
+// swagger:model NewUserParams
type NewUserParams struct {
- Email string `json:"email"`
- Username string `json:"username"`
- FullName string `json:"full_name"`
- Password string `json:"password"`
+ Email string `json:"email,omitempty"`
+ Username string `json:"username,omitempty"`
+ FullName string `json:"full_name,omitempty"`
+ Password string `json:"password,omitempty"`
IsAdmin bool `json:"-"`
Enabled bool `json:"-"`
}
+// swagger:model UpdatePoolParams
type UpdatePoolParams struct {
RunnerPrefix
@@ -114,50 +163,55 @@ type UpdatePoolParams struct {
MaxRunners *uint `json:"max_runners,omitempty"`
MinIdleRunners *uint `json:"min_idle_runners,omitempty"`
RunnerBootstrapTimeout *uint `json:"runner_bootstrap_timeout,omitempty"`
- Image string `json:"image"`
- Flavor string `json:"flavor"`
- OSType commonParams.OSType `json:"os_type"`
- OSArch commonParams.OSArch `json:"os_arch"`
+ Image string `json:"image,omitempty"`
+ Flavor string `json:"flavor,omitempty"`
+ OSType commonParams.OSType `json:"os_type,omitempty"`
+ OSArch commonParams.OSArch `json:"os_arch,omitempty"`
ExtraSpecs json.RawMessage `json:"extra_specs,omitempty"`
// GithubRunnerGroup is the GitHub runner group to which the runners of this
// pool will be added.
// The runner group must be created by someone with access to the enterprise.
GitHubRunnerGroup *string `json:"github-runner-group,omitempty"`
+ Priority *uint `json:"priority,omitempty"`
}
type CreateInstanceParams struct {
- Name string
- OSType commonParams.OSType
- OSArch commonParams.OSArch
- Status commonParams.InstanceStatus
- RunnerStatus RunnerStatus
- CallbackURL string
- MetadataURL string
+ Name string `json:"name,omitempty"`
+ OSType commonParams.OSType `json:"os_type,omitempty"`
+ OSArch commonParams.OSArch `json:"os_arch,omitempty"`
+ Status commonParams.InstanceStatus `json:"status,omitempty"`
+ RunnerStatus RunnerStatus `json:"runner_status,omitempty"`
+ CallbackURL string `json:"callback_url,omitempty"`
+ MetadataURL string `json:"metadata_url,omitempty"`
// GithubRunnerGroup is the github runner group to which the runner belongs.
// The runner group must be created by someone with access to the enterprise.
- GitHubRunnerGroup string
- CreateAttempt int `json:"-"`
- AditionalLabels []string
+ GitHubRunnerGroup string `json:"github-runner-group,omitempty"`
+ CreateAttempt int `json:"-"`
+ AgentID int64 `json:"-"`
+ AditionalLabels []string `json:"aditional_labels,omitempty"`
+ JitConfiguration map[string]string `json:"jit_configuration,omitempty"`
}
+// swagger:model CreatePoolParams
type CreatePoolParams struct {
RunnerPrefix
- ProviderName string `json:"provider_name"`
- MaxRunners uint `json:"max_runners"`
- MinIdleRunners uint `json:"min_idle_runners"`
- Image string `json:"image"`
- Flavor string `json:"flavor"`
- OSType commonParams.OSType `json:"os_type"`
- OSArch commonParams.OSArch `json:"os_arch"`
- Tags []string `json:"tags"`
- Enabled bool `json:"enabled"`
- RunnerBootstrapTimeout uint `json:"runner_bootstrap_timeout"`
+ ProviderName string `json:"provider_name,omitempty"`
+ MaxRunners uint `json:"max_runners,omitempty"`
+ MinIdleRunners uint `json:"min_idle_runners,omitempty"`
+ Image string `json:"image,omitempty"`
+ Flavor string `json:"flavor,omitempty"`
+ OSType commonParams.OSType `json:"os_type,omitempty"`
+ OSArch commonParams.OSArch `json:"os_arch,omitempty"`
+ Tags []string `json:"tags,omitempty"`
+ Enabled bool `json:"enabled,omitempty"`
+ RunnerBootstrapTimeout uint `json:"runner_bootstrap_timeout,omitempty"`
ExtraSpecs json.RawMessage `json:"extra_specs,omitempty"`
// GithubRunnerGroup is the GitHub runner group to which the runners of this
// pool will be added.
// The runner group must be created by someone with access to the enterprise.
- GitHubRunnerGroup string `json:"github-runner-group"`
+ GitHubRunnerGroup string `json:"github-runner-group,omitempty"`
+ Priority uint `json:"priority,omitempty"`
}
func (p *CreatePoolParams) Validate() error {
@@ -198,43 +252,538 @@ type UpdateInstanceParams struct {
// for this instance.
Addresses []commonParams.Address `json:"addresses,omitempty"`
// Status is the status of the instance inside the provider (eg: running, stopped, etc)
- Status commonParams.InstanceStatus `json:"status,omitempty"`
- RunnerStatus RunnerStatus `json:"runner_status,omitempty"`
- ProviderFault []byte `json:"provider_fault,omitempty"`
- AgentID int64 `json:"-"`
- CreateAttempt int `json:"-"`
- TokenFetched *bool `json:"-"`
+ Status commonParams.InstanceStatus `json:"status,omitempty"`
+ RunnerStatus RunnerStatus `json:"runner_status,omitempty"`
+ ProviderFault []byte `json:"provider_fault,omitempty"`
+ AgentID int64 `json:"-"`
+ CreateAttempt int `json:"-"`
+ TokenFetched *bool `json:"-"`
+ JitConfiguration map[string]string `json:"-"`
}
type UpdateUserParams struct {
- FullName string `json:"full_name"`
- Password string `json:"password"`
- Enabled *bool `json:"enabled"`
+ FullName string `json:"full_name,omitempty"`
+ Password string `json:"password,omitempty"`
+ Enabled *bool `json:"enabled,omitempty"`
}
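
A note on the tag changes above: create-style params keep plain value fields, while update-style params use pointers. With `omitempty`, a plain `bool` or `uint` can never transmit an explicit `false`/`0`, whereas a pointer distinguishes "field not sent" from "field set to its zero value". A standalone sketch of the difference, with an illustrative struct mirroring the pattern:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// updateUserParams is illustrative only; it mirrors the pattern above, where
// update params use pointers so that "absent" and "explicitly zero" can be
// told apart after unmarshaling.
type updateUserParams struct {
	FullName string `json:"full_name,omitempty"`
	Enabled  *bool  `json:"enabled,omitempty"`
}

func main() {
	var a, b updateUserParams
	_ = json.Unmarshal([]byte(`{}`), &a)                 // enabled not sent
	_ = json.Unmarshal([]byte(`{"enabled": false}`), &b) // explicit false
	fmt.Println(a.Enabled == nil) // true: caller did not touch the field
	fmt.Println(*b.Enabled)       // false: caller asked to disable
}
```
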
+// swagger:model PasswordLoginParams
// PasswordLoginParams holds information used during
// password authentication that will be passed to a
// password login function.
type PasswordLoginParams struct {
- Username string `json:"username"`
- Password string `json:"password"`
+ Username string `json:"username,omitempty"`
+ Password string `json:"password,omitempty"`
}
// Validate checks if the username and password are set
func (p PasswordLoginParams) Validate() error {
if p.Username == "" || p.Password == "" {
- return errors.ErrUnauthorized
+ return runnerErrors.ErrUnauthorized
}
return nil
}
+// swagger:model UpdateEntityParams
type UpdateEntityParams struct {
- CredentialsName string `json:"credentials_name"`
- WebhookSecret string `json:"webhook_secret"`
+ CredentialsName string `json:"credentials_name,omitempty"`
+ WebhookSecret string `json:"webhook_secret,omitempty"`
+ PoolBalancerType PoolBalancerType `json:"pool_balancer_type,omitempty"`
}
type InstanceUpdateMessage struct {
- Status RunnerStatus `json:"status"`
- Message string `json:"message"`
- AgentID *int64 `json:"agent_id"`
+ Status RunnerStatus `json:"status,omitempty"`
+ Message string `json:"message,omitempty"`
+ AgentID *int64 `json:"agent_id,omitempty"`
+}
+
+// swagger:model CreateGithubEndpointParams
+type CreateGithubEndpointParams struct {
+ Name string `json:"name,omitempty"`
+ Description string `json:"description,omitempty"`
+ APIBaseURL string `json:"api_base_url,omitempty"`
+ UploadBaseURL string `json:"upload_base_url,omitempty"`
+ BaseURL string `json:"base_url,omitempty"`
+ CACertBundle []byte `json:"ca_cert_bundle,omitempty"`
+}
+
+func (c CreateGithubEndpointParams) Validate() error {
+ if c.APIBaseURL == "" {
+ return runnerErrors.NewBadRequestError("missing api_base_url")
+ }
+
+ url, err := url.Parse(c.APIBaseURL)
+ if err != nil || url.Scheme == "" || url.Host == "" {
+ return runnerErrors.NewBadRequestError("invalid api_base_url")
+ }
+ switch url.Scheme {
+ case httpsScheme, httpScheme:
+ default:
+ return runnerErrors.NewBadRequestError("invalid api_base_url")
+ }
+
+ if c.UploadBaseURL == "" {
+ return runnerErrors.NewBadRequestError("missing upload_base_url")
+ }
+
+ url, err = url.Parse(c.UploadBaseURL)
+ if err != nil || url.Scheme == "" || url.Host == "" {
+ return runnerErrors.NewBadRequestError("invalid upload_base_url")
+ }
+
+ switch url.Scheme {
+ case httpsScheme, httpScheme:
+ default:
+ return runnerErrors.NewBadRequestError("invalid api_base_url")
+ }
+
+ if c.BaseURL == "" {
+ return runnerErrors.NewBadRequestError("missing base_url")
+ }
+
+ url, err = url.Parse(c.BaseURL)
+ if err != nil || url.Scheme == "" || url.Host == "" {
+ return runnerErrors.NewBadRequestError("invalid base_url")
+ }
+
+ switch url.Scheme {
+ case httpsScheme, httpScheme:
+ default:
+ return runnerErrors.NewBadRequestError("invalid api_base_url")
+ }
+
+ if c.CACertBundle != nil {
+ block, _ := pem.Decode(c.CACertBundle)
+ if block == nil {
+ return runnerErrors.NewBadRequestError("invalid ca_cert_bundle")
+ }
+ if _, err := x509.ParseCertificates(block.Bytes); err != nil {
+ return runnerErrors.NewBadRequestError("invalid ca_cert_bundle")
+ }
+ }
+
+ return nil
+}
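
The parse-then-check-scheme pattern above is repeated for every URL field, and, as the corrected error strings show, is easy to get wrong when copy-pasted. A hypothetical helper, not part of this patch, that would centralize it:

```go
package main

import (
	"fmt"
	"net/url"
)

// validateURL captures the pattern repeated in the Validate methods above:
// the value must be set, must parse with a scheme and a host, and the scheme
// must be http or https.
func validateURL(field, value string) error {
	if value == "" {
		return fmt.Errorf("missing %s", field)
	}
	u, err := url.Parse(value)
	if err != nil || u.Scheme == "" || u.Host == "" {
		return fmt.Errorf("invalid %s", field)
	}
	switch u.Scheme {
	case "http", "https":
		return nil
	default:
		return fmt.Errorf("invalid %s: unsupported scheme %q", field, u.Scheme)
	}
}

func main() {
	fmt.Println(validateURL("api_base_url", "https://api.github.com")) // <nil>
	fmt.Println(validateURL("base_url", "ftp://example.com"))          // error
}
```
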
+
+// swagger:model UpdateGithubEndpointParams
+type UpdateGithubEndpointParams struct {
+ Description *string `json:"description,omitempty"`
+ APIBaseURL *string `json:"api_base_url,omitempty"`
+ UploadBaseURL *string `json:"upload_base_url,omitempty"`
+ BaseURL *string `json:"base_url,omitempty"`
+ CACertBundle []byte `json:"ca_cert_bundle,omitempty"`
+}
+
+func (u UpdateGithubEndpointParams) Validate() error {
+ if u.APIBaseURL != nil {
+ url, err := url.Parse(*u.APIBaseURL)
+ if err != nil || url.Scheme == "" || url.Host == "" {
+ return runnerErrors.NewBadRequestError("invalid api_base_url")
+ }
+ switch url.Scheme {
+ case httpsScheme, httpScheme:
+ default:
+ return runnerErrors.NewBadRequestError("invalid api_base_url")
+ }
+ }
+
+ if u.UploadBaseURL != nil {
+ url, err := url.Parse(*u.UploadBaseURL)
+ if err != nil || url.Scheme == "" || url.Host == "" {
+ return runnerErrors.NewBadRequestError("invalid upload_base_url")
+ }
+ switch url.Scheme {
+ case httpsScheme, httpScheme:
+ default:
+ return runnerErrors.NewBadRequestError("invalid api_base_url")
+ }
+ }
+
+ if u.BaseURL != nil {
+ url, err := url.Parse(*u.BaseURL)
+ if err != nil || url.Scheme == "" || url.Host == "" {
+ return runnerErrors.NewBadRequestError("invalid base_url")
+ }
+ switch url.Scheme {
+ case httpsScheme, httpScheme:
+ default:
+ return runnerErrors.NewBadRequestError("invalid api_base_url")
+ }
+ }
+
+ if u.CACertBundle != nil {
+ block, _ := pem.Decode(u.CACertBundle)
+ if block == nil {
+ return runnerErrors.NewBadRequestError("invalid ca_cert_bundle")
+ }
+ if _, err := x509.ParseCertificates(block.Bytes); err != nil {
+ return runnerErrors.NewBadRequestError("invalid ca_cert_bundle")
+ }
+ }
+
+ return nil
+}
+
+// swagger:model GithubPAT
+type GithubPAT struct {
+ OAuth2Token string `json:"oauth2_token,omitempty"`
+}
+
+// swagger:model GithubApp
+type GithubApp struct {
+ AppID int64 `json:"app_id,omitempty"`
+ InstallationID int64 `json:"installation_id,omitempty"`
+ PrivateKeyBytes []byte `json:"private_key_bytes,omitempty"`
+}
+
+func (g GithubApp) Validate() error {
+ if g.AppID == 0 {
+ return runnerErrors.NewBadRequestError("missing app_id")
+ }
+
+ if g.InstallationID == 0 {
+ return runnerErrors.NewBadRequestError("missing installation_id")
+ }
+
+ if len(g.PrivateKeyBytes) == 0 {
+ return runnerErrors.NewBadRequestError("missing private_key_bytes")
+ }
+
+ block, _ := pem.Decode(g.PrivateKeyBytes)
+ if block == nil {
+ return runnerErrors.NewBadRequestError("invalid private_key_bytes")
+ }
+ // Parse the private key as PKCS#1
+ _, err := x509.ParsePKCS1PrivateKey(block.Bytes)
+ if err != nil {
+ return fmt.Errorf("parsing private_key_path: %w", err)
+ }
+
+ return nil
+}
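
For context, GitHub App private keys are distributed as PKCS#1 PEM blocks, which is what the decode-and-parse steps above expect. A self-contained sketch (standard library only) that generates a key in that format and runs it through the same checks:

```go
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"encoding/pem"
	"fmt"
)

func main() {
	// Produce key material in the shape GithubApp.Validate accepts: an RSA
	// key, PKCS#1-marshaled, wrapped in an "RSA PRIVATE KEY" PEM block.
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	pemBytes := pem.EncodeToMemory(&pem.Block{
		Type:  "RSA PRIVATE KEY",
		Bytes: x509.MarshalPKCS1PrivateKey(key),
	})

	// Mirror the validation above: decode the PEM and parse as PKCS#1.
	block, _ := pem.Decode(pemBytes)
	if block == nil {
		panic("invalid PEM")
	}
	if _, err := x509.ParsePKCS1PrivateKey(block.Bytes); err != nil {
		panic(err)
	}
	fmt.Println("key round-trips through the same checks as Validate")
}
```
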
+
+// swagger:model CreateGithubCredentialsParams
+type CreateGithubCredentialsParams struct {
+ Name string `json:"name,omitempty"`
+ Description string `json:"description,omitempty"`
+ Endpoint string `json:"endpoint,omitempty"`
+ AuthType ForgeAuthType `json:"auth_type,omitempty"`
+ PAT GithubPAT `json:"pat,omitempty"`
+ App GithubApp `json:"app,omitempty"`
+}
+
+func (c CreateGithubCredentialsParams) Validate() error {
+ if c.Name == "" {
+ return runnerErrors.NewBadRequestError("missing name")
+ }
+
+ if c.Endpoint == "" {
+ return runnerErrors.NewBadRequestError("missing endpoint")
+ }
+
+ switch c.AuthType {
+ case ForgeAuthTypePAT, ForgeAuthTypeApp:
+ default:
+ return runnerErrors.NewBadRequestError("invalid auth_type")
+ }
+
+ if c.AuthType == ForgeAuthTypePAT {
+ if c.PAT.OAuth2Token == "" {
+ return runnerErrors.NewBadRequestError("missing oauth2_token")
+ }
+ }
+
+ if c.AuthType == ForgeAuthTypeApp {
+ if err := c.App.Validate(); err != nil {
+ return fmt.Errorf("invalid app: %w", err)
+ }
+ }
+
+ return nil
+}
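
A short sketch (illustrative only, with hypothetical field values) of the two credential shapes this Validate accepts; auth_type selects which payload is checked:

```go
package main

import (
	"fmt"

	"github.com/cloudbase/garm/params"
)

func main() {
	pat := params.CreateGithubCredentialsParams{
		Name:     "gh-pat",
		Endpoint: "github.com",
		AuthType: params.ForgeAuthTypePAT,
		PAT:      params.GithubPAT{OAuth2Token: "ghp_example"},
	}
	fmt.Println(pat.Validate()) // <nil>

	app := params.CreateGithubCredentialsParams{
		Name:     "gh-app",
		Endpoint: "github.com",
		AuthType: params.ForgeAuthTypeApp,
		// App left empty: Validate reports "invalid app: missing app_id"
	}
	fmt.Println(app.Validate())
}
```
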
+
+// swagger:model UpdateGithubCredentialsParams
+type UpdateGithubCredentialsParams struct {
+ Name *string `json:"name,omitempty"`
+ Description *string `json:"description,omitempty"`
+ PAT *GithubPAT `json:"pat,omitempty"`
+ App *GithubApp `json:"app,omitempty"`
+}
+
+func (u UpdateGithubCredentialsParams) Validate() error {
+ if u.PAT != nil && u.App != nil {
+ return runnerErrors.NewBadRequestError("cannot update both PAT and App")
+ }
+
+ if u.PAT != nil {
+ if u.PAT.OAuth2Token == "" {
+ return runnerErrors.NewBadRequestError("missing oauth2_token")
+ }
+ }
+
+ if u.App != nil {
+ if err := u.App.Validate(); err != nil {
+ return fmt.Errorf("invalid app: %w", err)
+ }
+ }
+
+ return nil
+}
+
+// swagger:model UpdateControllerParams
+type UpdateControllerParams struct {
+ MetadataURL *string `json:"metadata_url,omitempty"`
+ CallbackURL *string `json:"callback_url,omitempty"`
+ WebhookURL *string `json:"webhook_url,omitempty"`
+ MinimumJobAgeBackoff *uint `json:"minimum_job_age_backoff,omitempty"`
+}
+
+func (u UpdateControllerParams) Validate() error {
+ if u.MetadataURL != nil {
+ u, err := url.Parse(*u.MetadataURL)
+ if err != nil || u.Scheme == "" || u.Host == "" {
+ return runnerErrors.NewBadRequestError("invalid metadata_url")
+ }
+ }
+
+ if u.CallbackURL != nil {
+ u, err := url.Parse(*u.CallbackURL)
+ if err != nil || u.Scheme == "" || u.Host == "" {
+ return runnerErrors.NewBadRequestError("invalid callback_url")
+ }
+ }
+
+ if u.WebhookURL != nil {
+ u, err := url.Parse(*u.WebhookURL)
+ if err != nil || u.Scheme == "" || u.Host == "" {
+ return runnerErrors.NewBadRequestError("invalid webhook_url")
+ }
+ }
+
+ return nil
+}
+
+// swagger:model CreateScaleSetParams
+type CreateScaleSetParams struct {
+ RunnerPrefix
+
+ Name string `json:"name"`
+ DisableUpdate bool `json:"disable_update"`
+ ScaleSetID int `json:"scale_set_id"`
+
+ ProviderName string `json:"provider_name,omitempty"`
+ MaxRunners uint `json:"max_runners,omitempty"`
+ MinIdleRunners uint `json:"min_idle_runners,omitempty"`
+ Image string `json:"image,omitempty"`
+ Flavor string `json:"flavor,omitempty"`
+ OSType commonParams.OSType `json:"os_type,omitempty"`
+ OSArch commonParams.OSArch `json:"os_arch,omitempty"`
+ Tags []string `json:"tags,omitempty"`
+ Enabled bool `json:"enabled,omitempty"`
+ RunnerBootstrapTimeout uint `json:"runner_bootstrap_timeout,omitempty"`
+ ExtraSpecs json.RawMessage `json:"extra_specs,omitempty"`
+ // GithubRunnerGroup is the GitHub runner group to which the runners of this
+ // scale set will be added.
+ // The runner group must be created by someone with access to the enterprise.
+ GitHubRunnerGroup string `json:"github-runner-group,omitempty"`
+}
+
+func (s *CreateScaleSetParams) Validate() error {
+ if s.ProviderName == "" {
+ return fmt.Errorf("missing provider")
+ }
+
+ if s.MinIdleRunners > s.MaxRunners {
+ return fmt.Errorf("min_idle_runners cannot be larger than max_runners")
+ }
+
+ if s.MaxRunners == 0 {
+ return fmt.Errorf("max_runners cannot be 0")
+ }
+
+ if s.Flavor == "" {
+ return fmt.Errorf("missing flavor")
+ }
+
+ if s.Image == "" {
+ return fmt.Errorf("missing image")
+ }
+
+ if s.Name == "" {
+ return fmt.Errorf("missing scale set name")
+ }
+
+ return nil
+}
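
A sketch of the scale set constraints above, using hypothetical values. Note the check order: when both runner-count constraints are violated, the min/max comparison reports before the zero-max check:

```go
package main

import (
	"fmt"

	"github.com/cloudbase/garm/params"
)

func main() {
	s := params.CreateScaleSetParams{
		Name:           "build-fleet",
		ProviderName:   "lxd",
		Image:          "ubuntu:22.04",
		Flavor:         "default",
		MaxRunners:     0,
		MinIdleRunners: 1,
	}
	// Prints: min_idle_runners cannot be larger than max_runners
	fmt.Println(s.Validate())
}
```
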
+
+// swagger:model UpdateScaleSetParams
+type UpdateScaleSetParams struct {
+ RunnerPrefix
+
+ Name string `json:"name,omitempty"`
+ Enabled *bool `json:"enabled,omitempty"`
+ MaxRunners *uint `json:"max_runners,omitempty"`
+ MinIdleRunners *uint `json:"min_idle_runners,omitempty"`
+ RunnerBootstrapTimeout *uint `json:"runner_bootstrap_timeout,omitempty"`
+ Image string `json:"image,omitempty"`
+ Flavor string `json:"flavor,omitempty"`
+ OSType commonParams.OSType `json:"os_type,omitempty"`
+ OSArch commonParams.OSArch `json:"os_arch,omitempty"`
+ ExtraSpecs json.RawMessage `json:"extra_specs,omitempty"`
+ // GithubRunnerGroup is the GitHub runner group to which the runners of this
+ // scale set will be added.
+ // The runner group must be created by someone with access to the enterprise.
+ GitHubRunnerGroup *string `json:"runner_group,omitempty"`
+ State *ScaleSetState `json:"state"`
+ ExtendedState *string `json:"extended_state"`
+ ScaleSetID int `json:"-"`
+}
+
+// swagger:model CreateGiteaEndpointParams
+type CreateGiteaEndpointParams struct {
+ Name string `json:"name,omitempty"`
+ Description string `json:"description,omitempty"`
+ APIBaseURL string `json:"api_base_url,omitempty"`
+ BaseURL string `json:"base_url,omitempty"`
+ CACertBundle []byte `json:"ca_cert_bundle,omitempty"`
+}
+
+func (c CreateGiteaEndpointParams) Validate() error {
+ if c.APIBaseURL == "" {
+ return runnerErrors.NewBadRequestError("missing api_base_url")
+ }
+
+ url, err := url.Parse(c.APIBaseURL)
+ if err != nil || url.Scheme == "" || url.Host == "" {
+ return runnerErrors.NewBadRequestError("invalid api_base_url")
+ }
+ switch url.Scheme {
+ case httpsScheme, httpScheme:
+ default:
+ return runnerErrors.NewBadRequestError("invalid api_base_url")
+ }
+
+ if c.BaseURL == "" {
+ return runnerErrors.NewBadRequestError("missing base_url")
+ }
+
+ url, err = url.Parse(c.BaseURL)
+ if err != nil || url.Scheme == "" || url.Host == "" {
+ return runnerErrors.NewBadRequestError("invalid base_url")
+ }
+
+ switch url.Scheme {
+ case httpsScheme, httpScheme:
+ default:
+ return runnerErrors.NewBadRequestError("invalid api_base_url")
+ }
+
+ if c.CACertBundle != nil {
+ block, _ := pem.Decode(c.CACertBundle)
+ if block == nil {
+ return runnerErrors.NewBadRequestError("invalid ca_cert_bundle")
+ }
+ if _, err := x509.ParseCertificates(block.Bytes); err != nil {
+ return runnerErrors.NewBadRequestError("invalid ca_cert_bundle")
+ }
+ }
+
+ return nil
+}
+
+// swagger:model UpdateGiteaEndpointParams
+type UpdateGiteaEndpointParams struct {
+ Description *string `json:"description,omitempty"`
+ APIBaseURL *string `json:"api_base_url,omitempty"`
+ BaseURL *string `json:"base_url,omitempty"`
+ CACertBundle []byte `json:"ca_cert_bundle,omitempty"`
+}
+
+func (u UpdateGiteaEndpointParams) Validate() error {
+ if u.APIBaseURL != nil {
+ url, err := url.Parse(*u.APIBaseURL)
+ if err != nil || url.Scheme == "" || url.Host == "" {
+ return runnerErrors.NewBadRequestError("invalid api_base_url")
+ }
+ switch url.Scheme {
+ case httpsScheme, httpScheme:
+ default:
+ return runnerErrors.NewBadRequestError("invalid api_base_url")
+ }
+ }
+
+ if u.BaseURL != nil {
+ url, err := url.Parse(*u.BaseURL)
+ if err != nil || url.Scheme == "" || url.Host == "" {
+ return runnerErrors.NewBadRequestError("invalid base_url")
+ }
+ switch url.Scheme {
+ case httpsScheme, httpScheme:
+ default:
+ return runnerErrors.NewBadRequestError("invalid api_base_url")
+ }
+ }
+
+ if u.CACertBundle != nil {
+ block, _ := pem.Decode(u.CACertBundle)
+ if block == nil {
+ return runnerErrors.NewBadRequestError("invalid ca_cert_bundle")
+ }
+ if _, err := x509.ParseCertificates(block.Bytes); err != nil {
+ return runnerErrors.NewBadRequestError("invalid ca_cert_bundle")
+ }
+ }
+
+ return nil
+}
+
+// swagger:model CreateGiteaCredentialsParams
+type CreateGiteaCredentialsParams struct {
+ Name string `json:"name,omitempty"`
+ Description string `json:"description,omitempty"`
+ Endpoint string `json:"endpoint,omitempty"`
+ AuthType ForgeAuthType `json:"auth_type,omitempty"`
+ PAT GithubPAT `json:"pat,omitempty"`
+ App GithubApp `json:"app,omitempty"`
+}
+
+func (c CreateGiteaCredentialsParams) Validate() error {
+ if c.Name == "" {
+ return runnerErrors.NewBadRequestError("missing name")
+ }
+
+ if c.Endpoint == "" {
+ return runnerErrors.NewBadRequestError("missing endpoint")
+ }
+
+ switch c.AuthType {
+ case ForgeAuthTypePAT:
+ default:
+ return runnerErrors.NewBadRequestError("invalid auth_type: %s", c.AuthType)
+ }
+
+ if c.AuthType == ForgeAuthTypePAT {
+ if c.PAT.OAuth2Token == "" {
+ return runnerErrors.NewBadRequestError("missing oauth2_token")
+ }
+ }
+
+ return nil
+}
+
+// swagger:model UpdateGiteaCredentialsParams
+type UpdateGiteaCredentialsParams struct {
+ Name *string `json:"name,omitempty"`
+ Description *string `json:"description,omitempty"`
+ PAT *GithubPAT `json:"pat,omitempty"`
+}
+
+func (u UpdateGiteaCredentialsParams) Validate() error {
+ if u.PAT != nil {
+ if u.PAT.OAuth2Token == "" {
+ return runnerErrors.NewBadRequestError("missing oauth2_token")
+ }
+ }
+
+ return nil
}
diff --git a/runner/common.go b/runner/common.go
new file mode 100644
index 00000000..b1682c0c
--- /dev/null
+++ b/runner/common.go
@@ -0,0 +1,31 @@
+package runner
+
+import (
+ "context"
+ "errors"
+ "fmt"
+
+ runnerErrors "github.com/cloudbase/garm-provider-common/errors"
+ "github.com/cloudbase/garm/params"
+)
+
+func (r *Runner) ResolveForgeCredentialByName(ctx context.Context, credentialsName string) (params.ForgeCredentials, error) {
+ githubCred, err := r.store.GetGithubCredentialsByName(ctx, credentialsName, false)
+ if err != nil && !errors.Is(err, runnerErrors.ErrNotFound) {
+ return params.ForgeCredentials{}, fmt.Errorf("error fetching github credentials: %w", err)
+ }
+ giteaCred, err := r.store.GetGiteaCredentialsByName(ctx, credentialsName, false)
+ if err != nil && !errors.Is(err, runnerErrors.ErrNotFound) {
+ return params.ForgeCredentials{}, fmt.Errorf("error fetching gitea credentials: %w", err)
+ }
+ if githubCred.ID != 0 && giteaCred.ID != 0 {
+ return params.ForgeCredentials{}, runnerErrors.NewBadRequestError("credentials %s are defined for both GitHub and Gitea, please specify the forge type", credentialsName)
+ }
+ if githubCred.ID != 0 {
+ return githubCred, nil
+ }
+ if giteaCred.ID != 0 {
+ return giteaCred, nil
+ }
+ return params.ForgeCredentials{}, runnerErrors.NewBadRequestError("credentials %s not found", credentialsName)
+}
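
The resolver treats ErrNotFound as a soft miss so the lookup can fall through to the other forge; it only errors out on a real store failure, an ambiguous name, or no match at all. A standalone sketch of that pattern, with hypothetical lookup functions standing in for the store calls:

```go
package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("not found")

// lookupGithub and lookupGitea are hypothetical stand-ins for the store
// calls in ResolveForgeCredentialByName.
func lookupGithub(name string) (string, error) { return "", errNotFound }
func lookupGitea(name string) (string, error)  { return "gitea-cred", nil }

func resolve(name string) (string, error) {
	gh, err := lookupGithub(name)
	if err != nil && !errors.Is(err, errNotFound) {
		return "", err // a real failure, not a soft miss
	}
	gt, err := lookupGitea(name)
	if err != nil && !errors.Is(err, errNotFound) {
		return "", err
	}
	switch {
	case gh != "" && gt != "":
		return "", fmt.Errorf("credentials %s are ambiguous across forges", name)
	case gh != "":
		return gh, nil
	case gt != "":
		return gt, nil
	}
	return "", fmt.Errorf("credentials %s not found", name)
}

func main() {
	fmt.Println(resolve("ci-creds")) // gitea-cred <nil>
}
```
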
diff --git a/runner/common/mocks/GithubClient.go b/runner/common/mocks/GithubClient.go
index fa65dcef..c1dbeae9 100644
--- a/runner/common/mocks/GithubClient.go
+++ b/runner/common/mocks/GithubClient.go
@@ -1,12 +1,16 @@
-// Code generated by mockery v0.0.0-dev. DO NOT EDIT.
+// Code generated by mockery. DO NOT EDIT.
package mocks
import (
context "context"
- github "github.com/google/go-github/v53/github"
+ github "github.com/google/go-github/v72/github"
mock "github.com/stretchr/testify/mock"
+
+ params "github.com/cloudbase/garm/params"
+
+ url "net/url"
)
// GithubClient is an autogenerated mock type for the GithubClient type
@@ -14,34 +18,105 @@ type GithubClient struct {
mock.Mock
}
-// CreateOrganizationRegistrationToken provides a mock function with given fields: ctx, owner
-func (_m *GithubClient) CreateOrganizationRegistrationToken(ctx context.Context, owner string) (*github.RegistrationToken, *github.Response, error) {
- ret := _m.Called(ctx, owner)
+type GithubClient_Expecter struct {
+ mock *mock.Mock
+}
+
+func (_m *GithubClient) EXPECT() *GithubClient_Expecter {
+ return &GithubClient_Expecter{mock: &_m.Mock}
+}
+
+// CreateEntityHook provides a mock function with given fields: ctx, hook
+func (_m *GithubClient) CreateEntityHook(ctx context.Context, hook *github.Hook) (*github.Hook, error) {
+ ret := _m.Called(ctx, hook)
+
+ if len(ret) == 0 {
+ panic("no return value specified for CreateEntityHook")
+ }
+
+ var r0 *github.Hook
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, *github.Hook) (*github.Hook, error)); ok {
+ return rf(ctx, hook)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, *github.Hook) *github.Hook); ok {
+ r0 = rf(ctx, hook)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*github.Hook)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, *github.Hook) error); ok {
+ r1 = rf(ctx, hook)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// GithubClient_CreateEntityHook_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateEntityHook'
+type GithubClient_CreateEntityHook_Call struct {
+ *mock.Call
+}
+
+// CreateEntityHook is a helper method to define mock.On call
+// - ctx context.Context
+// - hook *github.Hook
+func (_e *GithubClient_Expecter) CreateEntityHook(ctx interface{}, hook interface{}) *GithubClient_CreateEntityHook_Call {
+ return &GithubClient_CreateEntityHook_Call{Call: _e.mock.On("CreateEntityHook", ctx, hook)}
+}
+
+func (_c *GithubClient_CreateEntityHook_Call) Run(run func(ctx context.Context, hook *github.Hook)) *GithubClient_CreateEntityHook_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(*github.Hook))
+ })
+ return _c
+}
+
+func (_c *GithubClient_CreateEntityHook_Call) Return(ret *github.Hook, err error) *GithubClient_CreateEntityHook_Call {
+ _c.Call.Return(ret, err)
+ return _c
+}
+
+func (_c *GithubClient_CreateEntityHook_Call) RunAndReturn(run func(context.Context, *github.Hook) (*github.Hook, error)) *GithubClient_CreateEntityHook_Call {
+ _c.Call.Return(run)
+ return _c
+}
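
The regenerated mocks add the expecter-style API. A sketch of how a test might consume it; package name and test wiring are illustrative, while NewGithubClient (at the bottom of this file) registers the cleanup assertions on t:

```go
package runner_test

import (
	"context"
	"testing"

	"github.com/google/go-github/v72/github"

	"github.com/cloudbase/garm/runner/common/mocks"
)

func TestCreateEntityHook(t *testing.T) {
	// NewGithubClient asserts all expectations on test cleanup.
	m := mocks.NewGithubClient(t)

	id := int64(42)
	hook := &github.Hook{ID: &id}
	m.EXPECT().
		CreateEntityHook(context.Background(), hook).
		Return(hook, nil).
		Once()

	got, err := m.CreateEntityHook(context.Background(), hook)
	if err != nil || got.GetID() != 42 {
		t.Fatalf("unexpected result: %v, %v", got, err)
	}
}
```
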
+
+// CreateEntityRegistrationToken provides a mock function with given fields: ctx
+func (_m *GithubClient) CreateEntityRegistrationToken(ctx context.Context) (*github.RegistrationToken, *github.Response, error) {
+ ret := _m.Called(ctx)
+
+ if len(ret) == 0 {
+ panic("no return value specified for CreateEntityRegistrationToken")
+ }
var r0 *github.RegistrationToken
var r1 *github.Response
var r2 error
- if rf, ok := ret.Get(0).(func(context.Context, string) (*github.RegistrationToken, *github.Response, error)); ok {
- return rf(ctx, owner)
+ if rf, ok := ret.Get(0).(func(context.Context) (*github.RegistrationToken, *github.Response, error)); ok {
+ return rf(ctx)
}
- if rf, ok := ret.Get(0).(func(context.Context, string) *github.RegistrationToken); ok {
- r0 = rf(ctx, owner)
+ if rf, ok := ret.Get(0).(func(context.Context) *github.RegistrationToken); ok {
+ r0 = rf(ctx)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*github.RegistrationToken)
}
}
- if rf, ok := ret.Get(1).(func(context.Context, string) *github.Response); ok {
- r1 = rf(ctx, owner)
+ if rf, ok := ret.Get(1).(func(context.Context) *github.Response); ok {
+ r1 = rf(ctx)
} else {
if ret.Get(1) != nil {
r1 = ret.Get(1).(*github.Response)
}
}
- if rf, ok := ret.Get(2).(func(context.Context, string) error); ok {
- r2 = rf(ctx, owner)
+ if rf, ok := ret.Get(2).(func(context.Context) error); ok {
+ r2 = rf(ctx)
} else {
r2 = ret.Error(2)
}
@@ -49,34 +124,229 @@ func (_m *GithubClient) CreateOrganizationRegistrationToken(ctx context.Context,
return r0, r1, r2
}
-// CreateRegistrationToken provides a mock function with given fields: ctx, owner, repo
-func (_m *GithubClient) CreateRegistrationToken(ctx context.Context, owner string, repo string) (*github.RegistrationToken, *github.Response, error) {
- ret := _m.Called(ctx, owner, repo)
+// GithubClient_CreateEntityRegistrationToken_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateEntityRegistrationToken'
+type GithubClient_CreateEntityRegistrationToken_Call struct {
+ *mock.Call
+}
- var r0 *github.RegistrationToken
- var r1 *github.Response
- var r2 error
- if rf, ok := ret.Get(0).(func(context.Context, string, string) (*github.RegistrationToken, *github.Response, error)); ok {
- return rf(ctx, owner, repo)
+// CreateEntityRegistrationToken is a helper method to define mock.On call
+// - ctx context.Context
+func (_e *GithubClient_Expecter) CreateEntityRegistrationToken(ctx interface{}) *GithubClient_CreateEntityRegistrationToken_Call {
+ return &GithubClient_CreateEntityRegistrationToken_Call{Call: _e.mock.On("CreateEntityRegistrationToken", ctx)}
+}
+
+func (_c *GithubClient_CreateEntityRegistrationToken_Call) Run(run func(ctx context.Context)) *GithubClient_CreateEntityRegistrationToken_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context))
+ })
+ return _c
+}
+
+func (_c *GithubClient_CreateEntityRegistrationToken_Call) Return(_a0 *github.RegistrationToken, _a1 *github.Response, _a2 error) *GithubClient_CreateEntityRegistrationToken_Call {
+ _c.Call.Return(_a0, _a1, _a2)
+ return _c
+}
+
+func (_c *GithubClient_CreateEntityRegistrationToken_Call) RunAndReturn(run func(context.Context) (*github.RegistrationToken, *github.Response, error)) *GithubClient_CreateEntityRegistrationToken_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// DeleteEntityHook provides a mock function with given fields: ctx, id
+func (_m *GithubClient) DeleteEntityHook(ctx context.Context, id int64) (*github.Response, error) {
+ ret := _m.Called(ctx, id)
+
+ if len(ret) == 0 {
+ panic("no return value specified for DeleteEntityHook")
}
- if rf, ok := ret.Get(0).(func(context.Context, string, string) *github.RegistrationToken); ok {
- r0 = rf(ctx, owner, repo)
+
+ var r0 *github.Response
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, int64) (*github.Response, error)); ok {
+ return rf(ctx, id)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, int64) *github.Response); ok {
+ r0 = rf(ctx, id)
} else {
if ret.Get(0) != nil {
- r0 = ret.Get(0).(*github.RegistrationToken)
+ r0 = ret.Get(0).(*github.Response)
}
}
- if rf, ok := ret.Get(1).(func(context.Context, string, string) *github.Response); ok {
- r1 = rf(ctx, owner, repo)
+ if rf, ok := ret.Get(1).(func(context.Context, int64) error); ok {
+ r1 = rf(ctx, id)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// GithubClient_DeleteEntityHook_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteEntityHook'
+type GithubClient_DeleteEntityHook_Call struct {
+ *mock.Call
+}
+
+// DeleteEntityHook is a helper method to define mock.On call
+// - ctx context.Context
+// - id int64
+func (_e *GithubClient_Expecter) DeleteEntityHook(ctx interface{}, id interface{}) *GithubClient_DeleteEntityHook_Call {
+ return &GithubClient_DeleteEntityHook_Call{Call: _e.mock.On("DeleteEntityHook", ctx, id)}
+}
+
+func (_c *GithubClient_DeleteEntityHook_Call) Run(run func(ctx context.Context, id int64)) *GithubClient_DeleteEntityHook_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(int64))
+ })
+ return _c
+}
+
+func (_c *GithubClient_DeleteEntityHook_Call) Return(ret *github.Response, err error) *GithubClient_DeleteEntityHook_Call {
+ _c.Call.Return(ret, err)
+ return _c
+}
+
+func (_c *GithubClient_DeleteEntityHook_Call) RunAndReturn(run func(context.Context, int64) (*github.Response, error)) *GithubClient_DeleteEntityHook_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GetEntity provides a mock function with no fields
+func (_m *GithubClient) GetEntity() params.ForgeEntity {
+ ret := _m.Called()
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetEntity")
+ }
+
+ var r0 params.ForgeEntity
+ if rf, ok := ret.Get(0).(func() params.ForgeEntity); ok {
+ r0 = rf()
+ } else {
+ r0 = ret.Get(0).(params.ForgeEntity)
+ }
+
+ return r0
+}
+
+// GithubClient_GetEntity_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetEntity'
+type GithubClient_GetEntity_Call struct {
+ *mock.Call
+}
+
+// GetEntity is a helper method to define mock.On call
+func (_e *GithubClient_Expecter) GetEntity() *GithubClient_GetEntity_Call {
+ return &GithubClient_GetEntity_Call{Call: _e.mock.On("GetEntity")}
+}
+
+func (_c *GithubClient_GetEntity_Call) Run(run func()) *GithubClient_GetEntity_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run()
+ })
+ return _c
+}
+
+func (_c *GithubClient_GetEntity_Call) Return(_a0 params.ForgeEntity) *GithubClient_GetEntity_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *GithubClient_GetEntity_Call) RunAndReturn(run func() params.ForgeEntity) *GithubClient_GetEntity_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GetEntityHook provides a mock function with given fields: ctx, id
+func (_m *GithubClient) GetEntityHook(ctx context.Context, id int64) (*github.Hook, error) {
+ ret := _m.Called(ctx, id)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetEntityHook")
+ }
+
+ var r0 *github.Hook
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, int64) (*github.Hook, error)); ok {
+ return rf(ctx, id)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, int64) *github.Hook); ok {
+ r0 = rf(ctx, id)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*github.Hook)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, int64) error); ok {
+ r1 = rf(ctx, id)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// GithubClient_GetEntityHook_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetEntityHook'
+type GithubClient_GetEntityHook_Call struct {
+ *mock.Call
+}
+
+// GetEntityHook is a helper method to define mock.On call
+// - ctx context.Context
+// - id int64
+func (_e *GithubClient_Expecter) GetEntityHook(ctx interface{}, id interface{}) *GithubClient_GetEntityHook_Call {
+ return &GithubClient_GetEntityHook_Call{Call: _e.mock.On("GetEntityHook", ctx, id)}
+}
+
+func (_c *GithubClient_GetEntityHook_Call) Run(run func(ctx context.Context, id int64)) *GithubClient_GetEntityHook_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(int64))
+ })
+ return _c
+}
+
+func (_c *GithubClient_GetEntityHook_Call) Return(ret *github.Hook, err error) *GithubClient_GetEntityHook_Call {
+ _c.Call.Return(ret, err)
+ return _c
+}
+
+func (_c *GithubClient_GetEntityHook_Call) RunAndReturn(run func(context.Context, int64) (*github.Hook, error)) *GithubClient_GetEntityHook_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GetEntityJITConfig provides a mock function with given fields: ctx, instance, pool, labels
+func (_m *GithubClient) GetEntityJITConfig(ctx context.Context, instance string, pool params.Pool, labels []string) (map[string]string, *github.Runner, error) {
+ ret := _m.Called(ctx, instance, pool, labels)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetEntityJITConfig")
+ }
+
+ var r0 map[string]string
+ var r1 *github.Runner
+ var r2 error
+ if rf, ok := ret.Get(0).(func(context.Context, string, params.Pool, []string) (map[string]string, *github.Runner, error)); ok {
+ return rf(ctx, instance, pool, labels)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, string, params.Pool, []string) map[string]string); ok {
+ r0 = rf(ctx, instance, pool, labels)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(map[string]string)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, string, params.Pool, []string) *github.Runner); ok {
+ r1 = rf(ctx, instance, pool, labels)
} else {
if ret.Get(1) != nil {
- r1 = ret.Get(1).(*github.Response)
+ r1 = ret.Get(1).(*github.Runner)
}
}
- if rf, ok := ret.Get(2).(func(context.Context, string, string) error); ok {
- r2 = rf(ctx, owner, repo)
+ if rf, ok := ret.Get(2).(func(context.Context, string, params.Pool, []string) error); ok {
+ r2 = rf(ctx, instance, pool, labels)
} else {
r2 = ret.Error(2)
}
@@ -84,10 +354,102 @@ func (_m *GithubClient) CreateRegistrationToken(ctx context.Context, owner strin
return r0, r1, r2
}
+// GithubClient_GetEntityJITConfig_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetEntityJITConfig'
+type GithubClient_GetEntityJITConfig_Call struct {
+ *mock.Call
+}
+
+// GetEntityJITConfig is a helper method to define mock.On call
+// - ctx context.Context
+// - instance string
+// - pool params.Pool
+// - labels []string
+func (_e *GithubClient_Expecter) GetEntityJITConfig(ctx interface{}, instance interface{}, pool interface{}, labels interface{}) *GithubClient_GetEntityJITConfig_Call {
+ return &GithubClient_GetEntityJITConfig_Call{Call: _e.mock.On("GetEntityJITConfig", ctx, instance, pool, labels)}
+}
+
+func (_c *GithubClient_GetEntityJITConfig_Call) Run(run func(ctx context.Context, instance string, pool params.Pool, labels []string)) *GithubClient_GetEntityJITConfig_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string), args[2].(params.Pool), args[3].([]string))
+ })
+ return _c
+}
+
+func (_c *GithubClient_GetEntityJITConfig_Call) Return(jitConfigMap map[string]string, runner *github.Runner, err error) *GithubClient_GetEntityJITConfig_Call {
+ _c.Call.Return(jitConfigMap, runner, err)
+ return _c
+}
+
+func (_c *GithubClient_GetEntityJITConfig_Call) RunAndReturn(run func(context.Context, string, params.Pool, []string) (map[string]string, *github.Runner, error)) *GithubClient_GetEntityJITConfig_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GetEntityRunnerGroupIDByName provides a mock function with given fields: ctx, runnerGroupName
+func (_m *GithubClient) GetEntityRunnerGroupIDByName(ctx context.Context, runnerGroupName string) (int64, error) {
+ ret := _m.Called(ctx, runnerGroupName)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetEntityRunnerGroupIDByName")
+ }
+
+ var r0 int64
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, string) (int64, error)); ok {
+ return rf(ctx, runnerGroupName)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, string) int64); ok {
+ r0 = rf(ctx, runnerGroupName)
+ } else {
+ r0 = ret.Get(0).(int64)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
+ r1 = rf(ctx, runnerGroupName)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// GithubClient_GetEntityRunnerGroupIDByName_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetEntityRunnerGroupIDByName'
+type GithubClient_GetEntityRunnerGroupIDByName_Call struct {
+ *mock.Call
+}
+
+// GetEntityRunnerGroupIDByName is a helper method to define mock.On call
+// - ctx context.Context
+// - runnerGroupName string
+func (_e *GithubClient_Expecter) GetEntityRunnerGroupIDByName(ctx interface{}, runnerGroupName interface{}) *GithubClient_GetEntityRunnerGroupIDByName_Call {
+ return &GithubClient_GetEntityRunnerGroupIDByName_Call{Call: _e.mock.On("GetEntityRunnerGroupIDByName", ctx, runnerGroupName)}
+}
+
+func (_c *GithubClient_GetEntityRunnerGroupIDByName_Call) Run(run func(ctx context.Context, runnerGroupName string)) *GithubClient_GetEntityRunnerGroupIDByName_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string))
+ })
+ return _c
+}
+
+func (_c *GithubClient_GetEntityRunnerGroupIDByName_Call) Return(_a0 int64, _a1 error) *GithubClient_GetEntityRunnerGroupIDByName_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *GithubClient_GetEntityRunnerGroupIDByName_Call) RunAndReturn(run func(context.Context, string) (int64, error)) *GithubClient_GetEntityRunnerGroupIDByName_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// GetWorkflowJobByID provides a mock function with given fields: ctx, owner, repo, jobID
func (_m *GithubClient) GetWorkflowJobByID(ctx context.Context, owner string, repo string, jobID int64) (*github.WorkflowJob, *github.Response, error) {
ret := _m.Called(ctx, owner, repo, jobID)
+ if len(ret) == 0 {
+ panic("no return value specified for GetWorkflowJobByID")
+ }
+
var r0 *github.WorkflowJob
var r1 *github.Response
var r2 error
@@ -119,34 +481,184 @@ func (_m *GithubClient) GetWorkflowJobByID(ctx context.Context, owner string, re
return r0, r1, r2
}
-// ListOrganizationRunnerApplicationDownloads provides a mock function with given fields: ctx, owner
-func (_m *GithubClient) ListOrganizationRunnerApplicationDownloads(ctx context.Context, owner string) ([]*github.RunnerApplicationDownload, *github.Response, error) {
- ret := _m.Called(ctx, owner)
+// GithubClient_GetWorkflowJobByID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetWorkflowJobByID'
+type GithubClient_GetWorkflowJobByID_Call struct {
+ *mock.Call
+}
+
+// GetWorkflowJobByID is a helper method to define mock.On call
+// - ctx context.Context
+// - owner string
+// - repo string
+// - jobID int64
+func (_e *GithubClient_Expecter) GetWorkflowJobByID(ctx interface{}, owner interface{}, repo interface{}, jobID interface{}) *GithubClient_GetWorkflowJobByID_Call {
+ return &GithubClient_GetWorkflowJobByID_Call{Call: _e.mock.On("GetWorkflowJobByID", ctx, owner, repo, jobID)}
+}
+
+func (_c *GithubClient_GetWorkflowJobByID_Call) Run(run func(ctx context.Context, owner string, repo string, jobID int64)) *GithubClient_GetWorkflowJobByID_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string), args[2].(string), args[3].(int64))
+ })
+ return _c
+}
+
+func (_c *GithubClient_GetWorkflowJobByID_Call) Return(_a0 *github.WorkflowJob, _a1 *github.Response, _a2 error) *GithubClient_GetWorkflowJobByID_Call {
+ _c.Call.Return(_a0, _a1, _a2)
+ return _c
+}
+
+func (_c *GithubClient_GetWorkflowJobByID_Call) RunAndReturn(run func(context.Context, string, string, int64) (*github.WorkflowJob, *github.Response, error)) *GithubClient_GetWorkflowJobByID_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GithubBaseURL provides a mock function with no fields
+func (_m *GithubClient) GithubBaseURL() *url.URL {
+ ret := _m.Called()
+
+ if len(ret) == 0 {
+ panic("no return value specified for GithubBaseURL")
+ }
+
+ var r0 *url.URL
+ if rf, ok := ret.Get(0).(func() *url.URL); ok {
+ r0 = rf()
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*url.URL)
+ }
+ }
+
+ return r0
+}
+
+// GithubClient_GithubBaseURL_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GithubBaseURL'
+type GithubClient_GithubBaseURL_Call struct {
+ *mock.Call
+}
+
+// GithubBaseURL is a helper method to define mock.On call
+func (_e *GithubClient_Expecter) GithubBaseURL() *GithubClient_GithubBaseURL_Call {
+ return &GithubClient_GithubBaseURL_Call{Call: _e.mock.On("GithubBaseURL")}
+}
+
+func (_c *GithubClient_GithubBaseURL_Call) Run(run func()) *GithubClient_GithubBaseURL_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run()
+ })
+ return _c
+}
+
+func (_c *GithubClient_GithubBaseURL_Call) Return(_a0 *url.URL) *GithubClient_GithubBaseURL_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *GithubClient_GithubBaseURL_Call) RunAndReturn(run func() *url.URL) *GithubClient_GithubBaseURL_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// ListEntityHooks provides a mock function with given fields: ctx, opts
+func (_m *GithubClient) ListEntityHooks(ctx context.Context, opts *github.ListOptions) ([]*github.Hook, *github.Response, error) {
+ ret := _m.Called(ctx, opts)
+
+ if len(ret) == 0 {
+ panic("no return value specified for ListEntityHooks")
+ }
+
+ var r0 []*github.Hook
+ var r1 *github.Response
+ var r2 error
+ if rf, ok := ret.Get(0).(func(context.Context, *github.ListOptions) ([]*github.Hook, *github.Response, error)); ok {
+ return rf(ctx, opts)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, *github.ListOptions) []*github.Hook); ok {
+ r0 = rf(ctx, opts)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]*github.Hook)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, *github.ListOptions) *github.Response); ok {
+ r1 = rf(ctx, opts)
+ } else {
+ if ret.Get(1) != nil {
+ r1 = ret.Get(1).(*github.Response)
+ }
+ }
+
+ if rf, ok := ret.Get(2).(func(context.Context, *github.ListOptions) error); ok {
+ r2 = rf(ctx, opts)
+ } else {
+ r2 = ret.Error(2)
+ }
+
+ return r0, r1, r2
+}
+
+// GithubClient_ListEntityHooks_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListEntityHooks'
+type GithubClient_ListEntityHooks_Call struct {
+ *mock.Call
+}
+
+// ListEntityHooks is a helper method to define mock.On call
+// - ctx context.Context
+// - opts *github.ListOptions
+func (_e *GithubClient_Expecter) ListEntityHooks(ctx interface{}, opts interface{}) *GithubClient_ListEntityHooks_Call {
+ return &GithubClient_ListEntityHooks_Call{Call: _e.mock.On("ListEntityHooks", ctx, opts)}
+}
+
+func (_c *GithubClient_ListEntityHooks_Call) Run(run func(ctx context.Context, opts *github.ListOptions)) *GithubClient_ListEntityHooks_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(*github.ListOptions))
+ })
+ return _c
+}
+
+func (_c *GithubClient_ListEntityHooks_Call) Return(ret []*github.Hook, response *github.Response, err error) *GithubClient_ListEntityHooks_Call {
+ _c.Call.Return(ret, response, err)
+ return _c
+}
+
+func (_c *GithubClient_ListEntityHooks_Call) RunAndReturn(run func(context.Context, *github.ListOptions) ([]*github.Hook, *github.Response, error)) *GithubClient_ListEntityHooks_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// ListEntityRunnerApplicationDownloads provides a mock function with given fields: ctx
+func (_m *GithubClient) ListEntityRunnerApplicationDownloads(ctx context.Context) ([]*github.RunnerApplicationDownload, *github.Response, error) {
+ ret := _m.Called(ctx)
+
+ if len(ret) == 0 {
+ panic("no return value specified for ListEntityRunnerApplicationDownloads")
+ }
var r0 []*github.RunnerApplicationDownload
var r1 *github.Response
var r2 error
- if rf, ok := ret.Get(0).(func(context.Context, string) ([]*github.RunnerApplicationDownload, *github.Response, error)); ok {
- return rf(ctx, owner)
+ if rf, ok := ret.Get(0).(func(context.Context) ([]*github.RunnerApplicationDownload, *github.Response, error)); ok {
+ return rf(ctx)
}
- if rf, ok := ret.Get(0).(func(context.Context, string) []*github.RunnerApplicationDownload); ok {
- r0 = rf(ctx, owner)
+ if rf, ok := ret.Get(0).(func(context.Context) []*github.RunnerApplicationDownload); ok {
+ r0 = rf(ctx)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]*github.RunnerApplicationDownload)
}
}
- if rf, ok := ret.Get(1).(func(context.Context, string) *github.Response); ok {
- r1 = rf(ctx, owner)
+ if rf, ok := ret.Get(1).(func(context.Context) *github.Response); ok {
+ r1 = rf(ctx)
} else {
if ret.Get(1) != nil {
r1 = ret.Get(1).(*github.Response)
}
}
- if rf, ok := ret.Get(2).(func(context.Context, string) error); ok {
- r2 = rf(ctx, owner)
+ if rf, ok := ret.Get(2).(func(context.Context) error); ok {
+ r2 = rf(ctx)
} else {
r2 = ret.Error(2)
}
@@ -154,34 +666,66 @@ func (_m *GithubClient) ListOrganizationRunnerApplicationDownloads(ctx context.C
return r0, r1, r2
}
-// ListOrganizationRunners provides a mock function with given fields: ctx, owner, opts
-func (_m *GithubClient) ListOrganizationRunners(ctx context.Context, owner string, opts *github.ListOptions) (*github.Runners, *github.Response, error) {
- ret := _m.Called(ctx, owner, opts)
+// GithubClient_ListEntityRunnerApplicationDownloads_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListEntityRunnerApplicationDownloads'
+type GithubClient_ListEntityRunnerApplicationDownloads_Call struct {
+ *mock.Call
+}
+
+// ListEntityRunnerApplicationDownloads is a helper method to define mock.On call
+// - ctx context.Context
+func (_e *GithubClient_Expecter) ListEntityRunnerApplicationDownloads(ctx interface{}) *GithubClient_ListEntityRunnerApplicationDownloads_Call {
+ return &GithubClient_ListEntityRunnerApplicationDownloads_Call{Call: _e.mock.On("ListEntityRunnerApplicationDownloads", ctx)}
+}
+
+func (_c *GithubClient_ListEntityRunnerApplicationDownloads_Call) Run(run func(ctx context.Context)) *GithubClient_ListEntityRunnerApplicationDownloads_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context))
+ })
+ return _c
+}
+
+func (_c *GithubClient_ListEntityRunnerApplicationDownloads_Call) Return(_a0 []*github.RunnerApplicationDownload, _a1 *github.Response, _a2 error) *GithubClient_ListEntityRunnerApplicationDownloads_Call {
+ _c.Call.Return(_a0, _a1, _a2)
+ return _c
+}
+
+func (_c *GithubClient_ListEntityRunnerApplicationDownloads_Call) RunAndReturn(run func(context.Context) ([]*github.RunnerApplicationDownload, *github.Response, error)) *GithubClient_ListEntityRunnerApplicationDownloads_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// ListEntityRunners provides a mock function with given fields: ctx, opts
+func (_m *GithubClient) ListEntityRunners(ctx context.Context, opts *github.ListRunnersOptions) (*github.Runners, *github.Response, error) {
+ ret := _m.Called(ctx, opts)
+
+ if len(ret) == 0 {
+ panic("no return value specified for ListEntityRunners")
+ }
var r0 *github.Runners
var r1 *github.Response
var r2 error
- if rf, ok := ret.Get(0).(func(context.Context, string, *github.ListOptions) (*github.Runners, *github.Response, error)); ok {
- return rf(ctx, owner, opts)
+ if rf, ok := ret.Get(0).(func(context.Context, *github.ListRunnersOptions) (*github.Runners, *github.Response, error)); ok {
+ return rf(ctx, opts)
}
- if rf, ok := ret.Get(0).(func(context.Context, string, *github.ListOptions) *github.Runners); ok {
- r0 = rf(ctx, owner, opts)
+ if rf, ok := ret.Get(0).(func(context.Context, *github.ListRunnersOptions) *github.Runners); ok {
+ r0 = rf(ctx, opts)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*github.Runners)
}
}
- if rf, ok := ret.Get(1).(func(context.Context, string, *github.ListOptions) *github.Response); ok {
- r1 = rf(ctx, owner, opts)
+ if rf, ok := ret.Get(1).(func(context.Context, *github.ListRunnersOptions) *github.Response); ok {
+ r1 = rf(ctx, opts)
} else {
if ret.Get(1) != nil {
r1 = ret.Get(1).(*github.Response)
}
}
- if rf, ok := ret.Get(2).(func(context.Context, string, *github.ListOptions) error); ok {
- r2 = rf(ctx, owner, opts)
+ if rf, ok := ret.Get(2).(func(context.Context, *github.ListRunnersOptions) error); ok {
+ r2 = rf(ctx, opts)
} else {
r2 = ret.Error(2)
}
@@ -189,95 +733,58 @@ func (_m *GithubClient) ListOrganizationRunners(ctx context.Context, owner strin
return r0, r1, r2
}
-// ListRunnerApplicationDownloads provides a mock function with given fields: ctx, owner, repo
-func (_m *GithubClient) ListRunnerApplicationDownloads(ctx context.Context, owner string, repo string) ([]*github.RunnerApplicationDownload, *github.Response, error) {
- ret := _m.Called(ctx, owner, repo)
-
- var r0 []*github.RunnerApplicationDownload
- var r1 *github.Response
- var r2 error
- if rf, ok := ret.Get(0).(func(context.Context, string, string) ([]*github.RunnerApplicationDownload, *github.Response, error)); ok {
- return rf(ctx, owner, repo)
- }
- if rf, ok := ret.Get(0).(func(context.Context, string, string) []*github.RunnerApplicationDownload); ok {
- r0 = rf(ctx, owner, repo)
- } else {
- if ret.Get(0) != nil {
- r0 = ret.Get(0).([]*github.RunnerApplicationDownload)
- }
- }
-
- if rf, ok := ret.Get(1).(func(context.Context, string, string) *github.Response); ok {
- r1 = rf(ctx, owner, repo)
- } else {
- if ret.Get(1) != nil {
- r1 = ret.Get(1).(*github.Response)
- }
- }
-
- if rf, ok := ret.Get(2).(func(context.Context, string, string) error); ok {
- r2 = rf(ctx, owner, repo)
- } else {
- r2 = ret.Error(2)
- }
-
- return r0, r1, r2
+// GithubClient_ListEntityRunners_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListEntityRunners'
+type GithubClient_ListEntityRunners_Call struct {
+ *mock.Call
}
-// ListRunners provides a mock function with given fields: ctx, owner, repo, opts
-func (_m *GithubClient) ListRunners(ctx context.Context, owner string, repo string, opts *github.ListOptions) (*github.Runners, *github.Response, error) {
- ret := _m.Called(ctx, owner, repo, opts)
-
- var r0 *github.Runners
- var r1 *github.Response
- var r2 error
- if rf, ok := ret.Get(0).(func(context.Context, string, string, *github.ListOptions) (*github.Runners, *github.Response, error)); ok {
- return rf(ctx, owner, repo, opts)
- }
- if rf, ok := ret.Get(0).(func(context.Context, string, string, *github.ListOptions) *github.Runners); ok {
- r0 = rf(ctx, owner, repo, opts)
- } else {
- if ret.Get(0) != nil {
- r0 = ret.Get(0).(*github.Runners)
- }
- }
-
- if rf, ok := ret.Get(1).(func(context.Context, string, string, *github.ListOptions) *github.Response); ok {
- r1 = rf(ctx, owner, repo, opts)
- } else {
- if ret.Get(1) != nil {
- r1 = ret.Get(1).(*github.Response)
- }
- }
-
- if rf, ok := ret.Get(2).(func(context.Context, string, string, *github.ListOptions) error); ok {
- r2 = rf(ctx, owner, repo, opts)
- } else {
- r2 = ret.Error(2)
- }
-
- return r0, r1, r2
+// ListEntityRunners is a helper method to define mock.On call
+// - ctx context.Context
+// - opts *github.ListRunnersOptions
+func (_e *GithubClient_Expecter) ListEntityRunners(ctx interface{}, opts interface{}) *GithubClient_ListEntityRunners_Call {
+ return &GithubClient_ListEntityRunners_Call{Call: _e.mock.On("ListEntityRunners", ctx, opts)}
}
-// RemoveOrganizationRunner provides a mock function with given fields: ctx, owner, runnerID
-func (_m *GithubClient) RemoveOrganizationRunner(ctx context.Context, owner string, runnerID int64) (*github.Response, error) {
- ret := _m.Called(ctx, owner, runnerID)
+func (_c *GithubClient_ListEntityRunners_Call) Run(run func(ctx context.Context, opts *github.ListRunnersOptions)) *GithubClient_ListEntityRunners_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(*github.ListRunnersOptions))
+ })
+ return _c
+}
+
+func (_c *GithubClient_ListEntityRunners_Call) Return(_a0 *github.Runners, _a1 *github.Response, _a2 error) *GithubClient_ListEntityRunners_Call {
+ _c.Call.Return(_a0, _a1, _a2)
+ return _c
+}
+
+func (_c *GithubClient_ListEntityRunners_Call) RunAndReturn(run func(context.Context, *github.ListRunnersOptions) (*github.Runners, *github.Response, error)) *GithubClient_ListEntityRunners_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// PingEntityHook provides a mock function with given fields: ctx, id
+func (_m *GithubClient) PingEntityHook(ctx context.Context, id int64) (*github.Response, error) {
+ ret := _m.Called(ctx, id)
+
+ if len(ret) == 0 {
+ panic("no return value specified for PingEntityHook")
+ }
var r0 *github.Response
var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, string, int64) (*github.Response, error)); ok {
- return rf(ctx, owner, runnerID)
+ if rf, ok := ret.Get(0).(func(context.Context, int64) (*github.Response, error)); ok {
+ return rf(ctx, id)
}
- if rf, ok := ret.Get(0).(func(context.Context, string, int64) *github.Response); ok {
- r0 = rf(ctx, owner, runnerID)
+ if rf, ok := ret.Get(0).(func(context.Context, int64) *github.Response); ok {
+ r0 = rf(ctx, id)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*github.Response)
}
}
- if rf, ok := ret.Get(1).(func(context.Context, string, int64) error); ok {
- r1 = rf(ctx, owner, runnerID)
+ if rf, ok := ret.Get(1).(func(context.Context, int64) error); ok {
+ r1 = rf(ctx, id)
} else {
r1 = ret.Error(1)
}
@@ -285,25 +792,58 @@ func (_m *GithubClient) RemoveOrganizationRunner(ctx context.Context, owner stri
return r0, r1
}
-// RemoveRunner provides a mock function with given fields: ctx, owner, repo, runnerID
-func (_m *GithubClient) RemoveRunner(ctx context.Context, owner string, repo string, runnerID int64) (*github.Response, error) {
- ret := _m.Called(ctx, owner, repo, runnerID)
+// GithubClient_PingEntityHook_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'PingEntityHook'
+type GithubClient_PingEntityHook_Call struct {
+ *mock.Call
+}
- var r0 *github.Response
- var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, string, string, int64) (*github.Response, error)); ok {
- return rf(ctx, owner, repo, runnerID)
+// PingEntityHook is a helper method to define mock.On call
+// - ctx context.Context
+// - id int64
+func (_e *GithubClient_Expecter) PingEntityHook(ctx interface{}, id interface{}) *GithubClient_PingEntityHook_Call {
+ return &GithubClient_PingEntityHook_Call{Call: _e.mock.On("PingEntityHook", ctx, id)}
+}
+
+func (_c *GithubClient_PingEntityHook_Call) Run(run func(ctx context.Context, id int64)) *GithubClient_PingEntityHook_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(int64))
+ })
+ return _c
+}
+
+func (_c *GithubClient_PingEntityHook_Call) Return(ret *github.Response, err error) *GithubClient_PingEntityHook_Call {
+ _c.Call.Return(ret, err)
+ return _c
+}
+
+func (_c *GithubClient_PingEntityHook_Call) RunAndReturn(run func(context.Context, int64) (*github.Response, error)) *GithubClient_PingEntityHook_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// RateLimit provides a mock function with given fields: ctx
+func (_m *GithubClient) RateLimit(ctx context.Context) (*github.RateLimits, error) {
+ ret := _m.Called(ctx)
+
+ if len(ret) == 0 {
+ panic("no return value specified for RateLimit")
}
- if rf, ok := ret.Get(0).(func(context.Context, string, string, int64) *github.Response); ok {
- r0 = rf(ctx, owner, repo, runnerID)
+
+ var r0 *github.RateLimits
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context) (*github.RateLimits, error)); ok {
+ return rf(ctx)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context) *github.RateLimits); ok {
+ r0 = rf(ctx)
} else {
if ret.Get(0) != nil {
- r0 = ret.Get(0).(*github.Response)
+ r0 = ret.Get(0).(*github.RateLimits)
}
}
- if rf, ok := ret.Get(1).(func(context.Context, string, string, int64) error); ok {
- r1 = rf(ctx, owner, repo, runnerID)
+ if rf, ok := ret.Get(1).(func(context.Context) error); ok {
+ r1 = rf(ctx)
} else {
r1 = ret.Error(1)
}
@@ -311,6 +851,81 @@ func (_m *GithubClient) RemoveRunner(ctx context.Context, owner string, repo str
return r0, r1
}
+// GithubClient_RateLimit_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RateLimit'
+type GithubClient_RateLimit_Call struct {
+ *mock.Call
+}
+
+// RateLimit is a helper method to define mock.On call
+// - ctx context.Context
+func (_e *GithubClient_Expecter) RateLimit(ctx interface{}) *GithubClient_RateLimit_Call {
+ return &GithubClient_RateLimit_Call{Call: _e.mock.On("RateLimit", ctx)}
+}
+
+func (_c *GithubClient_RateLimit_Call) Run(run func(ctx context.Context)) *GithubClient_RateLimit_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context))
+ })
+ return _c
+}
+
+func (_c *GithubClient_RateLimit_Call) Return(_a0 *github.RateLimits, _a1 error) *GithubClient_RateLimit_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *GithubClient_RateLimit_Call) RunAndReturn(run func(context.Context) (*github.RateLimits, error)) *GithubClient_RateLimit_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// RemoveEntityRunner provides a mock function with given fields: ctx, runnerID
+func (_m *GithubClient) RemoveEntityRunner(ctx context.Context, runnerID int64) error {
+ ret := _m.Called(ctx, runnerID)
+
+ if len(ret) == 0 {
+ panic("no return value specified for RemoveEntityRunner")
+ }
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func(context.Context, int64) error); ok {
+ r0 = rf(ctx, runnerID)
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+// GithubClient_RemoveEntityRunner_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RemoveEntityRunner'
+type GithubClient_RemoveEntityRunner_Call struct {
+ *mock.Call
+}
+
+// RemoveEntityRunner is a helper method to define mock.On call
+// - ctx context.Context
+// - runnerID int64
+func (_e *GithubClient_Expecter) RemoveEntityRunner(ctx interface{}, runnerID interface{}) *GithubClient_RemoveEntityRunner_Call {
+ return &GithubClient_RemoveEntityRunner_Call{Call: _e.mock.On("RemoveEntityRunner", ctx, runnerID)}
+}
+
+func (_c *GithubClient_RemoveEntityRunner_Call) Run(run func(ctx context.Context, runnerID int64)) *GithubClient_RemoveEntityRunner_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(int64))
+ })
+ return _c
+}
+
+func (_c *GithubClient_RemoveEntityRunner_Call) Return(_a0 error) *GithubClient_RemoveEntityRunner_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *GithubClient_RemoveEntityRunner_Call) RunAndReturn(run func(context.Context, int64) error) *GithubClient_RemoveEntityRunner_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// NewGithubClient creates a new instance of GithubClient. It also registers a testing interface on the mock and a cleanup function to assert the mock's expectations.
// The first argument is typically a *testing.T value.
func NewGithubClient(t interface {
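A minimal usage sketch of the typed expecter API the regenerated GithubClient mock exposes (a hypothetical test, not part of this diff; it assumes the generated EXPECT() accessor that accompanies the *_Expecter helpers above, and the runner ID is made up):

package mocks_test

import (
	"context"
	"testing"

	"github.com/stretchr/testify/mock"

	"github.com/cloudbase/garm/runner/common/mocks"
)

func TestRemoveEntityRunner(t *testing.T) {
	// NewGithubClient wires the mock to t and asserts all expectations on cleanup.
	client := mocks.NewGithubClient(t)

	// Typed expecter call instead of the stringly-typed client.On("RemoveEntityRunner", ...).
	client.EXPECT().
		RemoveEntityRunner(mock.Anything, int64(42)).
		Return(nil).
		Once()

	if err := client.RemoveEntityRunner(context.Background(), 42); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
}
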
diff --git a/runner/common/mocks/GithubEnterpriseClient.go b/runner/common/mocks/GithubEnterpriseClient.go
index 8233d062..5606e340 100644
--- a/runner/common/mocks/GithubEnterpriseClient.go
+++ b/runner/common/mocks/GithubEnterpriseClient.go
@@ -1,11 +1,11 @@
-// Code generated by mockery v0.0.0-dev. DO NOT EDIT.
+// Code generated by mockery v2.42.0. DO NOT EDIT.
package mocks
import (
context "context"
- github "github.com/google/go-github/v53/github"
+ github "github.com/google/go-github/v72/github"
mock "github.com/stretchr/testify/mock"
)
@@ -18,6 +18,10 @@ type GithubEnterpriseClient struct {
func (_m *GithubEnterpriseClient) CreateRegistrationToken(ctx context.Context, enterprise string) (*github.RegistrationToken, *github.Response, error) {
ret := _m.Called(ctx, enterprise)
+ if len(ret) == 0 {
+ panic("no return value specified for CreateRegistrationToken")
+ }
+
var r0 *github.RegistrationToken
var r1 *github.Response
var r2 error
@@ -49,10 +53,53 @@ func (_m *GithubEnterpriseClient) CreateRegistrationToken(ctx context.Context, e
return r0, r1, r2
}
+// GenerateEnterpriseJITConfig provides a mock function with given fields: ctx, enterprise, request
+func (_m *GithubEnterpriseClient) GenerateEnterpriseJITConfig(ctx context.Context, enterprise string, request *github.GenerateJITConfigRequest) (*github.JITRunnerConfig, *github.Response, error) {
+ ret := _m.Called(ctx, enterprise, request)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GenerateEnterpriseJITConfig")
+ }
+
+ var r0 *github.JITRunnerConfig
+ var r1 *github.Response
+ var r2 error
+ if rf, ok := ret.Get(0).(func(context.Context, string, *github.GenerateJITConfigRequest) (*github.JITRunnerConfig, *github.Response, error)); ok {
+ return rf(ctx, enterprise, request)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, string, *github.GenerateJITConfigRequest) *github.JITRunnerConfig); ok {
+ r0 = rf(ctx, enterprise, request)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*github.JITRunnerConfig)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, string, *github.GenerateJITConfigRequest) *github.Response); ok {
+ r1 = rf(ctx, enterprise, request)
+ } else {
+ if ret.Get(1) != nil {
+ r1 = ret.Get(1).(*github.Response)
+ }
+ }
+
+ if rf, ok := ret.Get(2).(func(context.Context, string, *github.GenerateJITConfigRequest) error); ok {
+ r2 = rf(ctx, enterprise, request)
+ } else {
+ r2 = ret.Error(2)
+ }
+
+ return r0, r1, r2
+}
+
// ListRunnerApplicationDownloads provides a mock function with given fields: ctx, enterprise
func (_m *GithubEnterpriseClient) ListRunnerApplicationDownloads(ctx context.Context, enterprise string) ([]*github.RunnerApplicationDownload, *github.Response, error) {
ret := _m.Called(ctx, enterprise)
+ if len(ret) == 0 {
+ panic("no return value specified for ListRunnerApplicationDownloads")
+ }
+
var r0 []*github.RunnerApplicationDownload
var r1 *github.Response
var r2 error
@@ -84,10 +131,53 @@ func (_m *GithubEnterpriseClient) ListRunnerApplicationDownloads(ctx context.Con
return r0, r1, r2
}
+// ListRunnerGroups provides a mock function with given fields: ctx, enterprise, opts
+func (_m *GithubEnterpriseClient) ListRunnerGroups(ctx context.Context, enterprise string, opts *github.ListEnterpriseRunnerGroupOptions) (*github.EnterpriseRunnerGroups, *github.Response, error) {
+ ret := _m.Called(ctx, enterprise, opts)
+
+ if len(ret) == 0 {
+ panic("no return value specified for ListRunnerGroups")
+ }
+
+ var r0 *github.EnterpriseRunnerGroups
+ var r1 *github.Response
+ var r2 error
+ if rf, ok := ret.Get(0).(func(context.Context, string, *github.ListEnterpriseRunnerGroupOptions) (*github.EnterpriseRunnerGroups, *github.Response, error)); ok {
+ return rf(ctx, enterprise, opts)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, string, *github.ListEnterpriseRunnerGroupOptions) *github.EnterpriseRunnerGroups); ok {
+ r0 = rf(ctx, enterprise, opts)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*github.EnterpriseRunnerGroups)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, string, *github.ListEnterpriseRunnerGroupOptions) *github.Response); ok {
+ r1 = rf(ctx, enterprise, opts)
+ } else {
+ if ret.Get(1) != nil {
+ r1 = ret.Get(1).(*github.Response)
+ }
+ }
+
+ if rf, ok := ret.Get(2).(func(context.Context, string, *github.ListEnterpriseRunnerGroupOptions) error); ok {
+ r2 = rf(ctx, enterprise, opts)
+ } else {
+ r2 = ret.Error(2)
+ }
+
+ return r0, r1, r2
+}
+
// ListRunners provides a mock function with given fields: ctx, enterprise, opts
func (_m *GithubEnterpriseClient) ListRunners(ctx context.Context, enterprise string, opts *github.ListOptions) (*github.Runners, *github.Response, error) {
ret := _m.Called(ctx, enterprise, opts)
+ if len(ret) == 0 {
+ panic("no return value specified for ListRunners")
+ }
+
var r0 *github.Runners
var r1 *github.Response
var r2 error
@@ -123,6 +213,10 @@ func (_m *GithubEnterpriseClient) ListRunners(ctx context.Context, enterprise st
func (_m *GithubEnterpriseClient) RemoveRunner(ctx context.Context, enterprise string, runnerID int64) (*github.Response, error) {
ret := _m.Called(ctx, enterprise, runnerID)
+ if len(ret) == 0 {
+ panic("no return value specified for RemoveRunner")
+ }
+
var r0 *github.Response
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, string, int64) (*github.Response, error)); ok {
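For contrast, the GithubEnterpriseClient mock above keeps the classic stringly-typed API. A hypothetical sketch (enterprise name and runner ID made up); note that after this regeneration, forgetting the Return trips the new "no return value specified for RemoveRunner" panic instead of silently yielding zero values:

package mocks_test

import (
	"context"
	"testing"

	github "github.com/google/go-github/v72/github"
	"github.com/stretchr/testify/mock"

	"github.com/cloudbase/garm/runner/common/mocks"
)

func TestEnterpriseRemoveRunner(t *testing.T) {
	client := &mocks.GithubEnterpriseClient{}
	client.Test(t) // report unexpected calls through t

	// The Return below is now mandatory; without it the mock panics.
	client.On("RemoveRunner", mock.Anything, "my-enterprise", int64(7)).
		Return(&github.Response{}, nil)

	if _, err := client.RemoveRunner(context.Background(), "my-enterprise", 7); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	client.AssertExpectations(t)
}
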
diff --git a/runner/common/mocks/GithubEntityOperations.go b/runner/common/mocks/GithubEntityOperations.go
new file mode 100644
index 00000000..0b3c3f83
--- /dev/null
+++ b/runner/common/mocks/GithubEntityOperations.go
@@ -0,0 +1,871 @@
+// Code generated by mockery. DO NOT EDIT.
+
+package mocks
+
+import (
+ context "context"
+
+ github "github.com/google/go-github/v72/github"
+ mock "github.com/stretchr/testify/mock"
+
+ params "github.com/cloudbase/garm/params"
+
+ url "net/url"
+)
+
+// GithubEntityOperations is an autogenerated mock type for the GithubEntityOperations type
+type GithubEntityOperations struct {
+ mock.Mock
+}
+
+type GithubEntityOperations_Expecter struct {
+ mock *mock.Mock
+}
+
+func (_m *GithubEntityOperations) EXPECT() *GithubEntityOperations_Expecter {
+ return &GithubEntityOperations_Expecter{mock: &_m.Mock}
+}
+
+// CreateEntityHook provides a mock function with given fields: ctx, hook
+func (_m *GithubEntityOperations) CreateEntityHook(ctx context.Context, hook *github.Hook) (*github.Hook, error) {
+ ret := _m.Called(ctx, hook)
+
+ if len(ret) == 0 {
+ panic("no return value specified for CreateEntityHook")
+ }
+
+ var r0 *github.Hook
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, *github.Hook) (*github.Hook, error)); ok {
+ return rf(ctx, hook)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, *github.Hook) *github.Hook); ok {
+ r0 = rf(ctx, hook)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*github.Hook)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, *github.Hook) error); ok {
+ r1 = rf(ctx, hook)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// GithubEntityOperations_CreateEntityHook_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateEntityHook'
+type GithubEntityOperations_CreateEntityHook_Call struct {
+ *mock.Call
+}
+
+// CreateEntityHook is a helper method to define mock.On call
+// - ctx context.Context
+// - hook *github.Hook
+func (_e *GithubEntityOperations_Expecter) CreateEntityHook(ctx interface{}, hook interface{}) *GithubEntityOperations_CreateEntityHook_Call {
+ return &GithubEntityOperations_CreateEntityHook_Call{Call: _e.mock.On("CreateEntityHook", ctx, hook)}
+}
+
+func (_c *GithubEntityOperations_CreateEntityHook_Call) Run(run func(ctx context.Context, hook *github.Hook)) *GithubEntityOperations_CreateEntityHook_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(*github.Hook))
+ })
+ return _c
+}
+
+func (_c *GithubEntityOperations_CreateEntityHook_Call) Return(ret *github.Hook, err error) *GithubEntityOperations_CreateEntityHook_Call {
+ _c.Call.Return(ret, err)
+ return _c
+}
+
+func (_c *GithubEntityOperations_CreateEntityHook_Call) RunAndReturn(run func(context.Context, *github.Hook) (*github.Hook, error)) *GithubEntityOperations_CreateEntityHook_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// CreateEntityRegistrationToken provides a mock function with given fields: ctx
+func (_m *GithubEntityOperations) CreateEntityRegistrationToken(ctx context.Context) (*github.RegistrationToken, *github.Response, error) {
+ ret := _m.Called(ctx)
+
+ if len(ret) == 0 {
+ panic("no return value specified for CreateEntityRegistrationToken")
+ }
+
+ var r0 *github.RegistrationToken
+ var r1 *github.Response
+ var r2 error
+ if rf, ok := ret.Get(0).(func(context.Context) (*github.RegistrationToken, *github.Response, error)); ok {
+ return rf(ctx)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context) *github.RegistrationToken); ok {
+ r0 = rf(ctx)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*github.RegistrationToken)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context) *github.Response); ok {
+ r1 = rf(ctx)
+ } else {
+ if ret.Get(1) != nil {
+ r1 = ret.Get(1).(*github.Response)
+ }
+ }
+
+ if rf, ok := ret.Get(2).(func(context.Context) error); ok {
+ r2 = rf(ctx)
+ } else {
+ r2 = ret.Error(2)
+ }
+
+ return r0, r1, r2
+}
+
+// GithubEntityOperations_CreateEntityRegistrationToken_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateEntityRegistrationToken'
+type GithubEntityOperations_CreateEntityRegistrationToken_Call struct {
+ *mock.Call
+}
+
+// CreateEntityRegistrationToken is a helper method to define mock.On call
+// - ctx context.Context
+func (_e *GithubEntityOperations_Expecter) CreateEntityRegistrationToken(ctx interface{}) *GithubEntityOperations_CreateEntityRegistrationToken_Call {
+ return &GithubEntityOperations_CreateEntityRegistrationToken_Call{Call: _e.mock.On("CreateEntityRegistrationToken", ctx)}
+}
+
+func (_c *GithubEntityOperations_CreateEntityRegistrationToken_Call) Run(run func(ctx context.Context)) *GithubEntityOperations_CreateEntityRegistrationToken_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context))
+ })
+ return _c
+}
+
+func (_c *GithubEntityOperations_CreateEntityRegistrationToken_Call) Return(_a0 *github.RegistrationToken, _a1 *github.Response, _a2 error) *GithubEntityOperations_CreateEntityRegistrationToken_Call {
+ _c.Call.Return(_a0, _a1, _a2)
+ return _c
+}
+
+func (_c *GithubEntityOperations_CreateEntityRegistrationToken_Call) RunAndReturn(run func(context.Context) (*github.RegistrationToken, *github.Response, error)) *GithubEntityOperations_CreateEntityRegistrationToken_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// DeleteEntityHook provides a mock function with given fields: ctx, id
+func (_m *GithubEntityOperations) DeleteEntityHook(ctx context.Context, id int64) (*github.Response, error) {
+ ret := _m.Called(ctx, id)
+
+ if len(ret) == 0 {
+ panic("no return value specified for DeleteEntityHook")
+ }
+
+ var r0 *github.Response
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, int64) (*github.Response, error)); ok {
+ return rf(ctx, id)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, int64) *github.Response); ok {
+ r0 = rf(ctx, id)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*github.Response)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, int64) error); ok {
+ r1 = rf(ctx, id)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// GithubEntityOperations_DeleteEntityHook_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteEntityHook'
+type GithubEntityOperations_DeleteEntityHook_Call struct {
+ *mock.Call
+}
+
+// DeleteEntityHook is a helper method to define mock.On call
+// - ctx context.Context
+// - id int64
+func (_e *GithubEntityOperations_Expecter) DeleteEntityHook(ctx interface{}, id interface{}) *GithubEntityOperations_DeleteEntityHook_Call {
+ return &GithubEntityOperations_DeleteEntityHook_Call{Call: _e.mock.On("DeleteEntityHook", ctx, id)}
+}
+
+func (_c *GithubEntityOperations_DeleteEntityHook_Call) Run(run func(ctx context.Context, id int64)) *GithubEntityOperations_DeleteEntityHook_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(int64))
+ })
+ return _c
+}
+
+func (_c *GithubEntityOperations_DeleteEntityHook_Call) Return(ret *github.Response, err error) *GithubEntityOperations_DeleteEntityHook_Call {
+ _c.Call.Return(ret, err)
+ return _c
+}
+
+func (_c *GithubEntityOperations_DeleteEntityHook_Call) RunAndReturn(run func(context.Context, int64) (*github.Response, error)) *GithubEntityOperations_DeleteEntityHook_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GetEntity provides a mock function with no fields
+func (_m *GithubEntityOperations) GetEntity() params.ForgeEntity {
+ ret := _m.Called()
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetEntity")
+ }
+
+ var r0 params.ForgeEntity
+ if rf, ok := ret.Get(0).(func() params.ForgeEntity); ok {
+ r0 = rf()
+ } else {
+ r0 = ret.Get(0).(params.ForgeEntity)
+ }
+
+ return r0
+}
+
+// GithubEntityOperations_GetEntity_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetEntity'
+type GithubEntityOperations_GetEntity_Call struct {
+ *mock.Call
+}
+
+// GetEntity is a helper method to define mock.On call
+func (_e *GithubEntityOperations_Expecter) GetEntity() *GithubEntityOperations_GetEntity_Call {
+ return &GithubEntityOperations_GetEntity_Call{Call: _e.mock.On("GetEntity")}
+}
+
+func (_c *GithubEntityOperations_GetEntity_Call) Run(run func()) *GithubEntityOperations_GetEntity_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run()
+ })
+ return _c
+}
+
+func (_c *GithubEntityOperations_GetEntity_Call) Return(_a0 params.ForgeEntity) *GithubEntityOperations_GetEntity_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *GithubEntityOperations_GetEntity_Call) RunAndReturn(run func() params.ForgeEntity) *GithubEntityOperations_GetEntity_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GetEntityHook provides a mock function with given fields: ctx, id
+func (_m *GithubEntityOperations) GetEntityHook(ctx context.Context, id int64) (*github.Hook, error) {
+ ret := _m.Called(ctx, id)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetEntityHook")
+ }
+
+ var r0 *github.Hook
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, int64) (*github.Hook, error)); ok {
+ return rf(ctx, id)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, int64) *github.Hook); ok {
+ r0 = rf(ctx, id)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*github.Hook)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, int64) error); ok {
+ r1 = rf(ctx, id)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// GithubEntityOperations_GetEntityHook_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetEntityHook'
+type GithubEntityOperations_GetEntityHook_Call struct {
+ *mock.Call
+}
+
+// GetEntityHook is a helper method to define mock.On call
+// - ctx context.Context
+// - id int64
+func (_e *GithubEntityOperations_Expecter) GetEntityHook(ctx interface{}, id interface{}) *GithubEntityOperations_GetEntityHook_Call {
+ return &GithubEntityOperations_GetEntityHook_Call{Call: _e.mock.On("GetEntityHook", ctx, id)}
+}
+
+func (_c *GithubEntityOperations_GetEntityHook_Call) Run(run func(ctx context.Context, id int64)) *GithubEntityOperations_GetEntityHook_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(int64))
+ })
+ return _c
+}
+
+func (_c *GithubEntityOperations_GetEntityHook_Call) Return(ret *github.Hook, err error) *GithubEntityOperations_GetEntityHook_Call {
+ _c.Call.Return(ret, err)
+ return _c
+}
+
+func (_c *GithubEntityOperations_GetEntityHook_Call) RunAndReturn(run func(context.Context, int64) (*github.Hook, error)) *GithubEntityOperations_GetEntityHook_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GetEntityJITConfig provides a mock function with given fields: ctx, instance, pool, labels
+func (_m *GithubEntityOperations) GetEntityJITConfig(ctx context.Context, instance string, pool params.Pool, labels []string) (map[string]string, *github.Runner, error) {
+ ret := _m.Called(ctx, instance, pool, labels)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetEntityJITConfig")
+ }
+
+ var r0 map[string]string
+ var r1 *github.Runner
+ var r2 error
+ if rf, ok := ret.Get(0).(func(context.Context, string, params.Pool, []string) (map[string]string, *github.Runner, error)); ok {
+ return rf(ctx, instance, pool, labels)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, string, params.Pool, []string) map[string]string); ok {
+ r0 = rf(ctx, instance, pool, labels)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(map[string]string)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, string, params.Pool, []string) *github.Runner); ok {
+ r1 = rf(ctx, instance, pool, labels)
+ } else {
+ if ret.Get(1) != nil {
+ r1 = ret.Get(1).(*github.Runner)
+ }
+ }
+
+ if rf, ok := ret.Get(2).(func(context.Context, string, params.Pool, []string) error); ok {
+ r2 = rf(ctx, instance, pool, labels)
+ } else {
+ r2 = ret.Error(2)
+ }
+
+ return r0, r1, r2
+}
+
+// GithubEntityOperations_GetEntityJITConfig_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetEntityJITConfig'
+type GithubEntityOperations_GetEntityJITConfig_Call struct {
+ *mock.Call
+}
+
+// GetEntityJITConfig is a helper method to define mock.On call
+// - ctx context.Context
+// - instance string
+// - pool params.Pool
+// - labels []string
+func (_e *GithubEntityOperations_Expecter) GetEntityJITConfig(ctx interface{}, instance interface{}, pool interface{}, labels interface{}) *GithubEntityOperations_GetEntityJITConfig_Call {
+ return &GithubEntityOperations_GetEntityJITConfig_Call{Call: _e.mock.On("GetEntityJITConfig", ctx, instance, pool, labels)}
+}
+
+func (_c *GithubEntityOperations_GetEntityJITConfig_Call) Run(run func(ctx context.Context, instance string, pool params.Pool, labels []string)) *GithubEntityOperations_GetEntityJITConfig_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string), args[2].(params.Pool), args[3].([]string))
+ })
+ return _c
+}
+
+func (_c *GithubEntityOperations_GetEntityJITConfig_Call) Return(jitConfigMap map[string]string, runner *github.Runner, err error) *GithubEntityOperations_GetEntityJITConfig_Call {
+ _c.Call.Return(jitConfigMap, runner, err)
+ return _c
+}
+
+func (_c *GithubEntityOperations_GetEntityJITConfig_Call) RunAndReturn(run func(context.Context, string, params.Pool, []string) (map[string]string, *github.Runner, error)) *GithubEntityOperations_GetEntityJITConfig_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GetEntityRunnerGroupIDByName provides a mock function with given fields: ctx, runnerGroupName
+func (_m *GithubEntityOperations) GetEntityRunnerGroupIDByName(ctx context.Context, runnerGroupName string) (int64, error) {
+ ret := _m.Called(ctx, runnerGroupName)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetEntityRunnerGroupIDByName")
+ }
+
+ var r0 int64
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, string) (int64, error)); ok {
+ return rf(ctx, runnerGroupName)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, string) int64); ok {
+ r0 = rf(ctx, runnerGroupName)
+ } else {
+ r0 = ret.Get(0).(int64)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
+ r1 = rf(ctx, runnerGroupName)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// GithubEntityOperations_GetEntityRunnerGroupIDByName_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetEntityRunnerGroupIDByName'
+type GithubEntityOperations_GetEntityRunnerGroupIDByName_Call struct {
+ *mock.Call
+}
+
+// GetEntityRunnerGroupIDByName is a helper method to define mock.On call
+// - ctx context.Context
+// - runnerGroupName string
+func (_e *GithubEntityOperations_Expecter) GetEntityRunnerGroupIDByName(ctx interface{}, runnerGroupName interface{}) *GithubEntityOperations_GetEntityRunnerGroupIDByName_Call {
+ return &GithubEntityOperations_GetEntityRunnerGroupIDByName_Call{Call: _e.mock.On("GetEntityRunnerGroupIDByName", ctx, runnerGroupName)}
+}
+
+func (_c *GithubEntityOperations_GetEntityRunnerGroupIDByName_Call) Run(run func(ctx context.Context, runnerGroupName string)) *GithubEntityOperations_GetEntityRunnerGroupIDByName_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string))
+ })
+ return _c
+}
+
+func (_c *GithubEntityOperations_GetEntityRunnerGroupIDByName_Call) Return(_a0 int64, _a1 error) *GithubEntityOperations_GetEntityRunnerGroupIDByName_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *GithubEntityOperations_GetEntityRunnerGroupIDByName_Call) RunAndReturn(run func(context.Context, string) (int64, error)) *GithubEntityOperations_GetEntityRunnerGroupIDByName_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GithubBaseURL provides a mock function with no fields
+func (_m *GithubEntityOperations) GithubBaseURL() *url.URL {
+ ret := _m.Called()
+
+ if len(ret) == 0 {
+ panic("no return value specified for GithubBaseURL")
+ }
+
+ var r0 *url.URL
+ if rf, ok := ret.Get(0).(func() *url.URL); ok {
+ r0 = rf()
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*url.URL)
+ }
+ }
+
+ return r0
+}
+
+// GithubEntityOperations_GithubBaseURL_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GithubBaseURL'
+type GithubEntityOperations_GithubBaseURL_Call struct {
+ *mock.Call
+}
+
+// GithubBaseURL is a helper method to define mock.On call
+func (_e *GithubEntityOperations_Expecter) GithubBaseURL() *GithubEntityOperations_GithubBaseURL_Call {
+ return &GithubEntityOperations_GithubBaseURL_Call{Call: _e.mock.On("GithubBaseURL")}
+}
+
+func (_c *GithubEntityOperations_GithubBaseURL_Call) Run(run func()) *GithubEntityOperations_GithubBaseURL_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run()
+ })
+ return _c
+}
+
+func (_c *GithubEntityOperations_GithubBaseURL_Call) Return(_a0 *url.URL) *GithubEntityOperations_GithubBaseURL_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *GithubEntityOperations_GithubBaseURL_Call) RunAndReturn(run func() *url.URL) *GithubEntityOperations_GithubBaseURL_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// ListEntityHooks provides a mock function with given fields: ctx, opts
+func (_m *GithubEntityOperations) ListEntityHooks(ctx context.Context, opts *github.ListOptions) ([]*github.Hook, *github.Response, error) {
+ ret := _m.Called(ctx, opts)
+
+ if len(ret) == 0 {
+ panic("no return value specified for ListEntityHooks")
+ }
+
+ var r0 []*github.Hook
+ var r1 *github.Response
+ var r2 error
+ if rf, ok := ret.Get(0).(func(context.Context, *github.ListOptions) ([]*github.Hook, *github.Response, error)); ok {
+ return rf(ctx, opts)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, *github.ListOptions) []*github.Hook); ok {
+ r0 = rf(ctx, opts)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]*github.Hook)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, *github.ListOptions) *github.Response); ok {
+ r1 = rf(ctx, opts)
+ } else {
+ if ret.Get(1) != nil {
+ r1 = ret.Get(1).(*github.Response)
+ }
+ }
+
+ if rf, ok := ret.Get(2).(func(context.Context, *github.ListOptions) error); ok {
+ r2 = rf(ctx, opts)
+ } else {
+ r2 = ret.Error(2)
+ }
+
+ return r0, r1, r2
+}
+
+// GithubEntityOperations_ListEntityHooks_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListEntityHooks'
+type GithubEntityOperations_ListEntityHooks_Call struct {
+ *mock.Call
+}
+
+// ListEntityHooks is a helper method to define mock.On call
+// - ctx context.Context
+// - opts *github.ListOptions
+func (_e *GithubEntityOperations_Expecter) ListEntityHooks(ctx interface{}, opts interface{}) *GithubEntityOperations_ListEntityHooks_Call {
+ return &GithubEntityOperations_ListEntityHooks_Call{Call: _e.mock.On("ListEntityHooks", ctx, opts)}
+}
+
+func (_c *GithubEntityOperations_ListEntityHooks_Call) Run(run func(ctx context.Context, opts *github.ListOptions)) *GithubEntityOperations_ListEntityHooks_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(*github.ListOptions))
+ })
+ return _c
+}
+
+func (_c *GithubEntityOperations_ListEntityHooks_Call) Return(ret []*github.Hook, response *github.Response, err error) *GithubEntityOperations_ListEntityHooks_Call {
+ _c.Call.Return(ret, response, err)
+ return _c
+}
+
+func (_c *GithubEntityOperations_ListEntityHooks_Call) RunAndReturn(run func(context.Context, *github.ListOptions) ([]*github.Hook, *github.Response, error)) *GithubEntityOperations_ListEntityHooks_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// ListEntityRunnerApplicationDownloads provides a mock function with given fields: ctx
+func (_m *GithubEntityOperations) ListEntityRunnerApplicationDownloads(ctx context.Context) ([]*github.RunnerApplicationDownload, *github.Response, error) {
+ ret := _m.Called(ctx)
+
+ if len(ret) == 0 {
+ panic("no return value specified for ListEntityRunnerApplicationDownloads")
+ }
+
+ var r0 []*github.RunnerApplicationDownload
+ var r1 *github.Response
+ var r2 error
+ if rf, ok := ret.Get(0).(func(context.Context) ([]*github.RunnerApplicationDownload, *github.Response, error)); ok {
+ return rf(ctx)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context) []*github.RunnerApplicationDownload); ok {
+ r0 = rf(ctx)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]*github.RunnerApplicationDownload)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context) *github.Response); ok {
+ r1 = rf(ctx)
+ } else {
+ if ret.Get(1) != nil {
+ r1 = ret.Get(1).(*github.Response)
+ }
+ }
+
+ if rf, ok := ret.Get(2).(func(context.Context) error); ok {
+ r2 = rf(ctx)
+ } else {
+ r2 = ret.Error(2)
+ }
+
+ return r0, r1, r2
+}
+
+// GithubEntityOperations_ListEntityRunnerApplicationDownloads_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListEntityRunnerApplicationDownloads'
+type GithubEntityOperations_ListEntityRunnerApplicationDownloads_Call struct {
+ *mock.Call
+}
+
+// ListEntityRunnerApplicationDownloads is a helper method to define mock.On call
+// - ctx context.Context
+func (_e *GithubEntityOperations_Expecter) ListEntityRunnerApplicationDownloads(ctx interface{}) *GithubEntityOperations_ListEntityRunnerApplicationDownloads_Call {
+ return &GithubEntityOperations_ListEntityRunnerApplicationDownloads_Call{Call: _e.mock.On("ListEntityRunnerApplicationDownloads", ctx)}
+}
+
+func (_c *GithubEntityOperations_ListEntityRunnerApplicationDownloads_Call) Run(run func(ctx context.Context)) *GithubEntityOperations_ListEntityRunnerApplicationDownloads_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context))
+ })
+ return _c
+}
+
+func (_c *GithubEntityOperations_ListEntityRunnerApplicationDownloads_Call) Return(_a0 []*github.RunnerApplicationDownload, _a1 *github.Response, _a2 error) *GithubEntityOperations_ListEntityRunnerApplicationDownloads_Call {
+ _c.Call.Return(_a0, _a1, _a2)
+ return _c
+}
+
+func (_c *GithubEntityOperations_ListEntityRunnerApplicationDownloads_Call) RunAndReturn(run func(context.Context) ([]*github.RunnerApplicationDownload, *github.Response, error)) *GithubEntityOperations_ListEntityRunnerApplicationDownloads_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// ListEntityRunners provides a mock function with given fields: ctx, opts
+func (_m *GithubEntityOperations) ListEntityRunners(ctx context.Context, opts *github.ListRunnersOptions) (*github.Runners, *github.Response, error) {
+ ret := _m.Called(ctx, opts)
+
+ if len(ret) == 0 {
+ panic("no return value specified for ListEntityRunners")
+ }
+
+ var r0 *github.Runners
+ var r1 *github.Response
+ var r2 error
+ if rf, ok := ret.Get(0).(func(context.Context, *github.ListRunnersOptions) (*github.Runners, *github.Response, error)); ok {
+ return rf(ctx, opts)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, *github.ListRunnersOptions) *github.Runners); ok {
+ r0 = rf(ctx, opts)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*github.Runners)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, *github.ListRunnersOptions) *github.Response); ok {
+ r1 = rf(ctx, opts)
+ } else {
+ if ret.Get(1) != nil {
+ r1 = ret.Get(1).(*github.Response)
+ }
+ }
+
+ if rf, ok := ret.Get(2).(func(context.Context, *github.ListRunnersOptions) error); ok {
+ r2 = rf(ctx, opts)
+ } else {
+ r2 = ret.Error(2)
+ }
+
+ return r0, r1, r2
+}
+
+// GithubEntityOperations_ListEntityRunners_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListEntityRunners'
+type GithubEntityOperations_ListEntityRunners_Call struct {
+ *mock.Call
+}
+
+// ListEntityRunners is a helper method to define mock.On call
+// - ctx context.Context
+// - opts *github.ListRunnersOptions
+func (_e *GithubEntityOperations_Expecter) ListEntityRunners(ctx interface{}, opts interface{}) *GithubEntityOperations_ListEntityRunners_Call {
+ return &GithubEntityOperations_ListEntityRunners_Call{Call: _e.mock.On("ListEntityRunners", ctx, opts)}
+}
+
+func (_c *GithubEntityOperations_ListEntityRunners_Call) Run(run func(ctx context.Context, opts *github.ListRunnersOptions)) *GithubEntityOperations_ListEntityRunners_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(*github.ListRunnersOptions))
+ })
+ return _c
+}
+
+func (_c *GithubEntityOperations_ListEntityRunners_Call) Return(_a0 *github.Runners, _a1 *github.Response, _a2 error) *GithubEntityOperations_ListEntityRunners_Call {
+ _c.Call.Return(_a0, _a1, _a2)
+ return _c
+}
+
+func (_c *GithubEntityOperations_ListEntityRunners_Call) RunAndReturn(run func(context.Context, *github.ListRunnersOptions) (*github.Runners, *github.Response, error)) *GithubEntityOperations_ListEntityRunners_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// PingEntityHook provides a mock function with given fields: ctx, id
+func (_m *GithubEntityOperations) PingEntityHook(ctx context.Context, id int64) (*github.Response, error) {
+ ret := _m.Called(ctx, id)
+
+ if len(ret) == 0 {
+ panic("no return value specified for PingEntityHook")
+ }
+
+ var r0 *github.Response
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, int64) (*github.Response, error)); ok {
+ return rf(ctx, id)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, int64) *github.Response); ok {
+ r0 = rf(ctx, id)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*github.Response)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, int64) error); ok {
+ r1 = rf(ctx, id)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// GithubEntityOperations_PingEntityHook_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'PingEntityHook'
+type GithubEntityOperations_PingEntityHook_Call struct {
+ *mock.Call
+}
+
+// PingEntityHook is a helper method to define mock.On call
+// - ctx context.Context
+// - id int64
+func (_e *GithubEntityOperations_Expecter) PingEntityHook(ctx interface{}, id interface{}) *GithubEntityOperations_PingEntityHook_Call {
+ return &GithubEntityOperations_PingEntityHook_Call{Call: _e.mock.On("PingEntityHook", ctx, id)}
+}
+
+func (_c *GithubEntityOperations_PingEntityHook_Call) Run(run func(ctx context.Context, id int64)) *GithubEntityOperations_PingEntityHook_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(int64))
+ })
+ return _c
+}
+
+func (_c *GithubEntityOperations_PingEntityHook_Call) Return(ret *github.Response, err error) *GithubEntityOperations_PingEntityHook_Call {
+ _c.Call.Return(ret, err)
+ return _c
+}
+
+func (_c *GithubEntityOperations_PingEntityHook_Call) RunAndReturn(run func(context.Context, int64) (*github.Response, error)) *GithubEntityOperations_PingEntityHook_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// RateLimit provides a mock function with given fields: ctx
+func (_m *GithubEntityOperations) RateLimit(ctx context.Context) (*github.RateLimits, error) {
+ ret := _m.Called(ctx)
+
+ if len(ret) == 0 {
+ panic("no return value specified for RateLimit")
+ }
+
+ var r0 *github.RateLimits
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context) (*github.RateLimits, error)); ok {
+ return rf(ctx)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context) *github.RateLimits); ok {
+ r0 = rf(ctx)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*github.RateLimits)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context) error); ok {
+ r1 = rf(ctx)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// GithubEntityOperations_RateLimit_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RateLimit'
+type GithubEntityOperations_RateLimit_Call struct {
+ *mock.Call
+}
+
+// RateLimit is a helper method to define mock.On call
+// - ctx context.Context
+func (_e *GithubEntityOperations_Expecter) RateLimit(ctx interface{}) *GithubEntityOperations_RateLimit_Call {
+ return &GithubEntityOperations_RateLimit_Call{Call: _e.mock.On("RateLimit", ctx)}
+}
+
+func (_c *GithubEntityOperations_RateLimit_Call) Run(run func(ctx context.Context)) *GithubEntityOperations_RateLimit_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context))
+ })
+ return _c
+}
+
+func (_c *GithubEntityOperations_RateLimit_Call) Return(_a0 *github.RateLimits, _a1 error) *GithubEntityOperations_RateLimit_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *GithubEntityOperations_RateLimit_Call) RunAndReturn(run func(context.Context) (*github.RateLimits, error)) *GithubEntityOperations_RateLimit_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// RemoveEntityRunner provides a mock function with given fields: ctx, runnerID
+func (_m *GithubEntityOperations) RemoveEntityRunner(ctx context.Context, runnerID int64) error {
+ ret := _m.Called(ctx, runnerID)
+
+ if len(ret) == 0 {
+ panic("no return value specified for RemoveEntityRunner")
+ }
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func(context.Context, int64) error); ok {
+ r0 = rf(ctx, runnerID)
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+// GithubEntityOperations_RemoveEntityRunner_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RemoveEntityRunner'
+type GithubEntityOperations_RemoveEntityRunner_Call struct {
+ *mock.Call
+}
+
+// RemoveEntityRunner is a helper method to define mock.On call
+// - ctx context.Context
+// - runnerID int64
+func (_e *GithubEntityOperations_Expecter) RemoveEntityRunner(ctx interface{}, runnerID interface{}) *GithubEntityOperations_RemoveEntityRunner_Call {
+ return &GithubEntityOperations_RemoveEntityRunner_Call{Call: _e.mock.On("RemoveEntityRunner", ctx, runnerID)}
+}
+
+func (_c *GithubEntityOperations_RemoveEntityRunner_Call) Run(run func(ctx context.Context, runnerID int64)) *GithubEntityOperations_RemoveEntityRunner_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(int64))
+ })
+ return _c
+}
+
+func (_c *GithubEntityOperations_RemoveEntityRunner_Call) Return(_a0 error) *GithubEntityOperations_RemoveEntityRunner_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *GithubEntityOperations_RemoveEntityRunner_Call) RunAndReturn(run func(context.Context, int64) error) *GithubEntityOperations_RemoveEntityRunner_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// NewGithubEntityOperations creates a new instance of GithubEntityOperations. It also registers a testing interface on the mock and a cleanup function to assert the mock's expectations.
+// The first argument is typically a *testing.T value.
+func NewGithubEntityOperations(t interface {
+ mock.TestingT
+ Cleanup(func())
+}) *GithubEntityOperations {
+ mock := &GithubEntityOperations{}
+ mock.Mock.Test(t)
+
+ t.Cleanup(func() { mock.AssertExpectations(t) })
+
+ return mock
+}
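A short hypothetical sketch of RunAndReturn on the new GithubEntityOperations mock, which computes the reply from the call's arguments instead of canning it up front (the page size is chosen arbitrarily):

package mocks_test

import (
	"context"
	"testing"

	github "github.com/google/go-github/v72/github"
	"github.com/stretchr/testify/mock"

	"github.com/cloudbase/garm/runner/common/mocks"
)

func TestListEntityRunners(t *testing.T) {
	ops := mocks.NewGithubEntityOperations(t)

	ops.EXPECT().
		ListEntityRunners(mock.Anything, mock.Anything).
		RunAndReturn(func(_ context.Context, opts *github.ListRunnersOptions) (*github.Runners, *github.Response, error) {
			// Echo the requested page size back as the total count,
			// just to show the reply can depend on the arguments.
			return &github.Runners{TotalCount: opts.PerPage}, &github.Response{}, nil
		})

	runners, _, err := ops.ListEntityRunners(context.Background(), &github.ListRunnersOptions{
		ListOptions: github.ListOptions{PerPage: 30},
	})
	if err != nil || runners.TotalCount != 30 {
		t.Fatalf("got %+v, %v", runners, err)
	}
}
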
diff --git a/runner/common/mocks/OrganizationHooks.go b/runner/common/mocks/OrganizationHooks.go
new file mode 100644
index 00000000..73528638
--- /dev/null
+++ b/runner/common/mocks/OrganizationHooks.go
@@ -0,0 +1,206 @@
+// Code generated by mockery v2.42.0. DO NOT EDIT.
+
+package mocks
+
+import (
+ context "context"
+
+ github "github.com/google/go-github/v72/github"
+ mock "github.com/stretchr/testify/mock"
+)
+
+// OrganizationHooks is an autogenerated mock type for the OrganizationHooks type
+type OrganizationHooks struct {
+ mock.Mock
+}
+
+// CreateOrgHook provides a mock function with given fields: ctx, org, hook
+func (_m *OrganizationHooks) CreateOrgHook(ctx context.Context, org string, hook *github.Hook) (*github.Hook, *github.Response, error) {
+ ret := _m.Called(ctx, org, hook)
+
+ if len(ret) == 0 {
+ panic("no return value specified for CreateOrgHook")
+ }
+
+ var r0 *github.Hook
+ var r1 *github.Response
+ var r2 error
+ if rf, ok := ret.Get(0).(func(context.Context, string, *github.Hook) (*github.Hook, *github.Response, error)); ok {
+ return rf(ctx, org, hook)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, string, *github.Hook) *github.Hook); ok {
+ r0 = rf(ctx, org, hook)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*github.Hook)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, string, *github.Hook) *github.Response); ok {
+ r1 = rf(ctx, org, hook)
+ } else {
+ if ret.Get(1) != nil {
+ r1 = ret.Get(1).(*github.Response)
+ }
+ }
+
+ if rf, ok := ret.Get(2).(func(context.Context, string, *github.Hook) error); ok {
+ r2 = rf(ctx, org, hook)
+ } else {
+ r2 = ret.Error(2)
+ }
+
+ return r0, r1, r2
+}
+
+// DeleteOrgHook provides a mock function with given fields: ctx, org, id
+func (_m *OrganizationHooks) DeleteOrgHook(ctx context.Context, org string, id int64) (*github.Response, error) {
+ ret := _m.Called(ctx, org, id)
+
+ if len(ret) == 0 {
+ panic("no return value specified for DeleteOrgHook")
+ }
+
+ var r0 *github.Response
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, string, int64) (*github.Response, error)); ok {
+ return rf(ctx, org, id)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, string, int64) *github.Response); ok {
+ r0 = rf(ctx, org, id)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*github.Response)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, string, int64) error); ok {
+ r1 = rf(ctx, org, id)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// GetOrgHook provides a mock function with given fields: ctx, org, id
+func (_m *OrganizationHooks) GetOrgHook(ctx context.Context, org string, id int64) (*github.Hook, *github.Response, error) {
+ ret := _m.Called(ctx, org, id)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetOrgHook")
+ }
+
+ var r0 *github.Hook
+ var r1 *github.Response
+ var r2 error
+ if rf, ok := ret.Get(0).(func(context.Context, string, int64) (*github.Hook, *github.Response, error)); ok {
+ return rf(ctx, org, id)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, string, int64) *github.Hook); ok {
+ r0 = rf(ctx, org, id)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*github.Hook)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, string, int64) *github.Response); ok {
+ r1 = rf(ctx, org, id)
+ } else {
+ if ret.Get(1) != nil {
+ r1 = ret.Get(1).(*github.Response)
+ }
+ }
+
+ if rf, ok := ret.Get(2).(func(context.Context, string, int64) error); ok {
+ r2 = rf(ctx, org, id)
+ } else {
+ r2 = ret.Error(2)
+ }
+
+ return r0, r1, r2
+}
+
+// ListOrgHooks provides a mock function with given fields: ctx, org, opts
+func (_m *OrganizationHooks) ListOrgHooks(ctx context.Context, org string, opts *github.ListOptions) ([]*github.Hook, *github.Response, error) {
+ ret := _m.Called(ctx, org, opts)
+
+ if len(ret) == 0 {
+ panic("no return value specified for ListOrgHooks")
+ }
+
+ var r0 []*github.Hook
+ var r1 *github.Response
+ var r2 error
+ if rf, ok := ret.Get(0).(func(context.Context, string, *github.ListOptions) ([]*github.Hook, *github.Response, error)); ok {
+ return rf(ctx, org, opts)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, string, *github.ListOptions) []*github.Hook); ok {
+ r0 = rf(ctx, org, opts)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]*github.Hook)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, string, *github.ListOptions) *github.Response); ok {
+ r1 = rf(ctx, org, opts)
+ } else {
+ if ret.Get(1) != nil {
+ r1 = ret.Get(1).(*github.Response)
+ }
+ }
+
+ if rf, ok := ret.Get(2).(func(context.Context, string, *github.ListOptions) error); ok {
+ r2 = rf(ctx, org, opts)
+ } else {
+ r2 = ret.Error(2)
+ }
+
+ return r0, r1, r2
+}
+
+// PingOrgHook provides a mock function with given fields: ctx, org, id
+func (_m *OrganizationHooks) PingOrgHook(ctx context.Context, org string, id int64) (*github.Response, error) {
+ ret := _m.Called(ctx, org, id)
+
+ if len(ret) == 0 {
+ panic("no return value specified for PingOrgHook")
+ }
+
+ var r0 *github.Response
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, string, int64) (*github.Response, error)); ok {
+ return rf(ctx, org, id)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, string, int64) *github.Response); ok {
+ r0 = rf(ctx, org, id)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*github.Response)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, string, int64) error); ok {
+ r1 = rf(ctx, org, id)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// NewOrganizationHooks creates a new instance of OrganizationHooks. It also registers a testing interface on the mock and a cleanup function to assert the mock's expectations.
+// The first argument is typically a *testing.T value.
+func NewOrganizationHooks(t interface {
+ mock.TestingT
+ Cleanup(func())
+}) *OrganizationHooks {
+ mock := &OrganizationHooks{}
+ mock.Mock.Test(t)
+
+ t.Cleanup(func() { mock.AssertExpectations(t) })
+
+ return mock
+}
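The OrganizationHooks mock above ships without an expecter, so tests drive it the classic way. A hypothetical sketch with a made-up org name and hook ID:

package mocks_test

import (
	"context"
	"testing"

	github "github.com/google/go-github/v72/github"
	"github.com/stretchr/testify/mock"

	"github.com/cloudbase/garm/runner/common/mocks"
)

func TestPingOrgHook(t *testing.T) {
	hooks := mocks.NewOrganizationHooks(t)

	hooks.On("PingOrgHook", mock.Anything, "my-org", int64(1234)).
		Return(&github.Response{}, nil).
		Once()

	if _, err := hooks.PingOrgHook(context.Background(), "my-org", 1234); err != nil {
		t.Fatalf("ping failed: %v", err)
	}
}
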
diff --git a/runner/common/mocks/PoolManager.go b/runner/common/mocks/PoolManager.go
index e817407c..a1a62f4f 100644
--- a/runner/common/mocks/PoolManager.go
+++ b/runner/common/mocks/PoolManager.go
@@ -1,8 +1,10 @@
-// Code generated by mockery v0.0.0-dev. DO NOT EDIT.
+// Code generated by mockery. DO NOT EDIT.
package mocks
import (
+ context "context"
+
params "github.com/cloudbase/garm/params"
mock "github.com/stretchr/testify/mock"
)
@@ -12,24 +14,78 @@ type PoolManager struct {
mock.Mock
}
-// ForceDeleteRunner provides a mock function with given fields: runner
-func (_m *PoolManager) ForceDeleteRunner(runner params.Instance) error {
- ret := _m.Called(runner)
-
- var r0 error
- if rf, ok := ret.Get(0).(func(params.Instance) error); ok {
- r0 = rf(runner)
- } else {
- r0 = ret.Error(0)
- }
-
- return r0
+type PoolManager_Expecter struct {
+ mock *mock.Mock
}
-// GithubRunnerRegistrationToken provides a mock function with given fields:
+func (_m *PoolManager) EXPECT() *PoolManager_Expecter {
+ return &PoolManager_Expecter{mock: &_m.Mock}
+}
+
+// GetWebhookInfo provides a mock function with given fields: ctx
+func (_m *PoolManager) GetWebhookInfo(ctx context.Context) (params.HookInfo, error) {
+ ret := _m.Called(ctx)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetWebhookInfo")
+ }
+
+ var r0 params.HookInfo
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context) (params.HookInfo, error)); ok {
+ return rf(ctx)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context) params.HookInfo); ok {
+ r0 = rf(ctx)
+ } else {
+ r0 = ret.Get(0).(params.HookInfo)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context) error); ok {
+ r1 = rf(ctx)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// PoolManager_GetWebhookInfo_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetWebhookInfo'
+type PoolManager_GetWebhookInfo_Call struct {
+ *mock.Call
+}
+
+// GetWebhookInfo is a helper method to define mock.On call
+// - ctx context.Context
+func (_e *PoolManager_Expecter) GetWebhookInfo(ctx interface{}) *PoolManager_GetWebhookInfo_Call {
+ return &PoolManager_GetWebhookInfo_Call{Call: _e.mock.On("GetWebhookInfo", ctx)}
+}
+
+func (_c *PoolManager_GetWebhookInfo_Call) Run(run func(ctx context.Context)) *PoolManager_GetWebhookInfo_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context))
+ })
+ return _c
+}
+
+func (_c *PoolManager_GetWebhookInfo_Call) Return(_a0 params.HookInfo, _a1 error) *PoolManager_GetWebhookInfo_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *PoolManager_GetWebhookInfo_Call) RunAndReturn(run func(context.Context) (params.HookInfo, error)) *PoolManager_GetWebhookInfo_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GithubRunnerRegistrationToken provides a mock function with no fields
func (_m *PoolManager) GithubRunnerRegistrationToken() (string, error) {
ret := _m.Called()
+ if len(ret) == 0 {
+ panic("no return value specified for GithubRunnerRegistrationToken")
+ }
+
var r0 string
var r1 error
if rf, ok := ret.Get(0).(func() (string, error)); ok {
@@ -50,10 +106,41 @@ func (_m *PoolManager) GithubRunnerRegistrationToken() (string, error) {
return r0, r1
}
+// PoolManager_GithubRunnerRegistrationToken_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GithubRunnerRegistrationToken'
+type PoolManager_GithubRunnerRegistrationToken_Call struct {
+ *mock.Call
+}
+
+// GithubRunnerRegistrationToken is a helper method to define mock.On call
+func (_e *PoolManager_Expecter) GithubRunnerRegistrationToken() *PoolManager_GithubRunnerRegistrationToken_Call {
+ return &PoolManager_GithubRunnerRegistrationToken_Call{Call: _e.mock.On("GithubRunnerRegistrationToken")}
+}
+
+func (_c *PoolManager_GithubRunnerRegistrationToken_Call) Run(run func()) *PoolManager_GithubRunnerRegistrationToken_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run()
+ })
+ return _c
+}
+
+func (_c *PoolManager_GithubRunnerRegistrationToken_Call) Return(_a0 string, _a1 error) *PoolManager_GithubRunnerRegistrationToken_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *PoolManager_GithubRunnerRegistrationToken_Call) RunAndReturn(run func() (string, error)) *PoolManager_GithubRunnerRegistrationToken_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// HandleWorkflowJob provides a mock function with given fields: job
func (_m *PoolManager) HandleWorkflowJob(job params.WorkflowJob) error {
ret := _m.Called(job)
+ if len(ret) == 0 {
+ panic("no return value specified for HandleWorkflowJob")
+ }
+
var r0 error
if rf, ok := ret.Get(0).(func(params.WorkflowJob) error); ok {
r0 = rf(job)
@@ -64,10 +151,42 @@ func (_m *PoolManager) HandleWorkflowJob(job params.WorkflowJob) error {
return r0
}
-// ID provides a mock function with given fields:
+// PoolManager_HandleWorkflowJob_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'HandleWorkflowJob'
+type PoolManager_HandleWorkflowJob_Call struct {
+ *mock.Call
+}
+
+// HandleWorkflowJob is a helper method to define mock.On call
+// - job params.WorkflowJob
+func (_e *PoolManager_Expecter) HandleWorkflowJob(job interface{}) *PoolManager_HandleWorkflowJob_Call {
+ return &PoolManager_HandleWorkflowJob_Call{Call: _e.mock.On("HandleWorkflowJob", job)}
+}
+
+func (_c *PoolManager_HandleWorkflowJob_Call) Run(run func(job params.WorkflowJob)) *PoolManager_HandleWorkflowJob_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(params.WorkflowJob))
+ })
+ return _c
+}
+
+func (_c *PoolManager_HandleWorkflowJob_Call) Return(_a0 error) *PoolManager_HandleWorkflowJob_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *PoolManager_HandleWorkflowJob_Call) RunAndReturn(run func(params.WorkflowJob) error) *PoolManager_HandleWorkflowJob_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// ID provides a mock function with no fields
func (_m *PoolManager) ID() string {
ret := _m.Called()
+ if len(ret) == 0 {
+ panic("no return value specified for ID")
+ }
+
var r0 string
if rf, ok := ret.Get(0).(func() string); ok {
r0 = rf()
@@ -78,24 +197,187 @@ func (_m *PoolManager) ID() string {
return r0
}
-// RefreshState provides a mock function with given fields: param
-func (_m *PoolManager) RefreshState(param params.UpdatePoolStateParams) error {
- ret := _m.Called(param)
-
- var r0 error
- if rf, ok := ret.Get(0).(func(params.UpdatePoolStateParams) error); ok {
- r0 = rf(param)
- } else {
- r0 = ret.Error(0)
- }
-
- return r0
+// PoolManager_ID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ID'
+type PoolManager_ID_Call struct {
+ *mock.Call
}
-// Start provides a mock function with given fields:
+// ID is a helper method to define mock.On call
+func (_e *PoolManager_Expecter) ID() *PoolManager_ID_Call {
+ return &PoolManager_ID_Call{Call: _e.mock.On("ID")}
+}
+
+func (_c *PoolManager_ID_Call) Run(run func()) *PoolManager_ID_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run()
+ })
+ return _c
+}
+
+func (_c *PoolManager_ID_Call) Return(_a0 string) *PoolManager_ID_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *PoolManager_ID_Call) RunAndReturn(run func() string) *PoolManager_ID_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// InstallWebhook provides a mock function with given fields: ctx, param
+func (_m *PoolManager) InstallWebhook(ctx context.Context, param params.InstallWebhookParams) (params.HookInfo, error) {
+ ret := _m.Called(ctx, param)
+
+ if len(ret) == 0 {
+ panic("no return value specified for InstallWebhook")
+ }
+
+ var r0 params.HookInfo
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, params.InstallWebhookParams) (params.HookInfo, error)); ok {
+ return rf(ctx, param)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, params.InstallWebhookParams) params.HookInfo); ok {
+ r0 = rf(ctx, param)
+ } else {
+ r0 = ret.Get(0).(params.HookInfo)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, params.InstallWebhookParams) error); ok {
+ r1 = rf(ctx, param)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// PoolManager_InstallWebhook_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'InstallWebhook'
+type PoolManager_InstallWebhook_Call struct {
+ *mock.Call
+}
+
+// InstallWebhook is a helper method to define mock.On call
+// - ctx context.Context
+// - param params.InstallWebhookParams
+func (_e *PoolManager_Expecter) InstallWebhook(ctx interface{}, param interface{}) *PoolManager_InstallWebhook_Call {
+ return &PoolManager_InstallWebhook_Call{Call: _e.mock.On("InstallWebhook", ctx, param)}
+}
+
+func (_c *PoolManager_InstallWebhook_Call) Run(run func(ctx context.Context, param params.InstallWebhookParams)) *PoolManager_InstallWebhook_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(params.InstallWebhookParams))
+ })
+ return _c
+}
+
+func (_c *PoolManager_InstallWebhook_Call) Return(_a0 params.HookInfo, _a1 error) *PoolManager_InstallWebhook_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *PoolManager_InstallWebhook_Call) RunAndReturn(run func(context.Context, params.InstallWebhookParams) (params.HookInfo, error)) *PoolManager_InstallWebhook_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// RootCABundle provides a mock function with no fields
+func (_m *PoolManager) RootCABundle() (params.CertificateBundle, error) {
+ ret := _m.Called()
+
+ if len(ret) == 0 {
+ panic("no return value specified for RootCABundle")
+ }
+
+ var r0 params.CertificateBundle
+ var r1 error
+ if rf, ok := ret.Get(0).(func() (params.CertificateBundle, error)); ok {
+ return rf()
+ }
+ if rf, ok := ret.Get(0).(func() params.CertificateBundle); ok {
+ r0 = rf()
+ } else {
+ r0 = ret.Get(0).(params.CertificateBundle)
+ }
+
+ if rf, ok := ret.Get(1).(func() error); ok {
+ r1 = rf()
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// PoolManager_RootCABundle_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RootCABundle'
+type PoolManager_RootCABundle_Call struct {
+ *mock.Call
+}
+
+// RootCABundle is a helper method to define mock.On call
+func (_e *PoolManager_Expecter) RootCABundle() *PoolManager_RootCABundle_Call {
+ return &PoolManager_RootCABundle_Call{Call: _e.mock.On("RootCABundle")}
+}
+
+func (_c *PoolManager_RootCABundle_Call) Run(run func()) *PoolManager_RootCABundle_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run()
+ })
+ return _c
+}
+
+func (_c *PoolManager_RootCABundle_Call) Return(_a0 params.CertificateBundle, _a1 error) *PoolManager_RootCABundle_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *PoolManager_RootCABundle_Call) RunAndReturn(run func() (params.CertificateBundle, error)) *PoolManager_RootCABundle_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// SetPoolRunningState provides a mock function with given fields: isRunning, failureReason
+func (_m *PoolManager) SetPoolRunningState(isRunning bool, failureReason string) {
+ _m.Called(isRunning, failureReason)
+}
+
+// PoolManager_SetPoolRunningState_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SetPoolRunningState'
+type PoolManager_SetPoolRunningState_Call struct {
+ *mock.Call
+}
+
+// SetPoolRunningState is a helper method to define mock.On call
+// - isRunning bool
+// - failureReason string
+func (_e *PoolManager_Expecter) SetPoolRunningState(isRunning interface{}, failureReason interface{}) *PoolManager_SetPoolRunningState_Call {
+ return &PoolManager_SetPoolRunningState_Call{Call: _e.mock.On("SetPoolRunningState", isRunning, failureReason)}
+}
+
+func (_c *PoolManager_SetPoolRunningState_Call) Run(run func(isRunning bool, failureReason string)) *PoolManager_SetPoolRunningState_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(bool), args[1].(string))
+ })
+ return _c
+}
+
+func (_c *PoolManager_SetPoolRunningState_Call) Return() *PoolManager_SetPoolRunningState_Call {
+ _c.Call.Return()
+ return _c
+}
+
+func (_c *PoolManager_SetPoolRunningState_Call) RunAndReturn(run func(bool, string)) *PoolManager_SetPoolRunningState_Call {
+ _c.Run(run)
+ return _c
+}
+
+// Start provides a mock function with no fields
func (_m *PoolManager) Start() error {
ret := _m.Called()
+ if len(ret) == 0 {
+ panic("no return value specified for Start")
+ }
+
var r0 error
if rf, ok := ret.Get(0).(func() error); ok {
r0 = rf()
@@ -106,10 +388,41 @@ func (_m *PoolManager) Start() error {
return r0
}
-// Status provides a mock function with given fields:
+// PoolManager_Start_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Start'
+type PoolManager_Start_Call struct {
+ *mock.Call
+}
+
+// Start is a helper method to define mock.On call
+func (_e *PoolManager_Expecter) Start() *PoolManager_Start_Call {
+ return &PoolManager_Start_Call{Call: _e.mock.On("Start")}
+}
+
+func (_c *PoolManager_Start_Call) Run(run func()) *PoolManager_Start_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run()
+ })
+ return _c
+}
+
+func (_c *PoolManager_Start_Call) Return(_a0 error) *PoolManager_Start_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *PoolManager_Start_Call) RunAndReturn(run func() error) *PoolManager_Start_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// Status provides a mock function with no fields
func (_m *PoolManager) Status() params.PoolManagerStatus {
ret := _m.Called()
+ if len(ret) == 0 {
+ panic("no return value specified for Status")
+ }
+
var r0 params.PoolManagerStatus
if rf, ok := ret.Get(0).(func() params.PoolManagerStatus); ok {
r0 = rf()
@@ -120,10 +433,41 @@ func (_m *PoolManager) Status() params.PoolManagerStatus {
return r0
}
-// Stop provides a mock function with given fields:
+// PoolManager_Status_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Status'
+type PoolManager_Status_Call struct {
+ *mock.Call
+}
+
+// Status is a helper method to define mock.On call
+func (_e *PoolManager_Expecter) Status() *PoolManager_Status_Call {
+ return &PoolManager_Status_Call{Call: _e.mock.On("Status")}
+}
+
+func (_c *PoolManager_Status_Call) Run(run func()) *PoolManager_Status_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run()
+ })
+ return _c
+}
+
+func (_c *PoolManager_Status_Call) Return(_a0 params.PoolManagerStatus) *PoolManager_Status_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *PoolManager_Status_Call) RunAndReturn(run func() params.PoolManagerStatus) *PoolManager_Status_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// Stop provides a mock function with no fields
func (_m *PoolManager) Stop() error {
ret := _m.Called()
+ if len(ret) == 0 {
+ panic("no return value specified for Stop")
+ }
+
var r0 error
if rf, ok := ret.Get(0).(func() error); ok {
r0 = rf()
@@ -134,10 +478,87 @@ func (_m *PoolManager) Stop() error {
return r0
}
-// Wait provides a mock function with given fields:
+// PoolManager_Stop_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Stop'
+type PoolManager_Stop_Call struct {
+ *mock.Call
+}
+
+// Stop is a helper method to define mock.On call
+func (_e *PoolManager_Expecter) Stop() *PoolManager_Stop_Call {
+ return &PoolManager_Stop_Call{Call: _e.mock.On("Stop")}
+}
+
+func (_c *PoolManager_Stop_Call) Run(run func()) *PoolManager_Stop_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run()
+ })
+ return _c
+}
+
+func (_c *PoolManager_Stop_Call) Return(_a0 error) *PoolManager_Stop_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *PoolManager_Stop_Call) RunAndReturn(run func() error) *PoolManager_Stop_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// UninstallWebhook provides a mock function with given fields: ctx
+func (_m *PoolManager) UninstallWebhook(ctx context.Context) error {
+ ret := _m.Called(ctx)
+
+ if len(ret) == 0 {
+ panic("no return value specified for UninstallWebhook")
+ }
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func(context.Context) error); ok {
+ r0 = rf(ctx)
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+// PoolManager_UninstallWebhook_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UninstallWebhook'
+type PoolManager_UninstallWebhook_Call struct {
+ *mock.Call
+}
+
+// UninstallWebhook is a helper method to define mock.On call
+// - ctx context.Context
+func (_e *PoolManager_Expecter) UninstallWebhook(ctx interface{}) *PoolManager_UninstallWebhook_Call {
+ return &PoolManager_UninstallWebhook_Call{Call: _e.mock.On("UninstallWebhook", ctx)}
+}
+
+func (_c *PoolManager_UninstallWebhook_Call) Run(run func(ctx context.Context)) *PoolManager_UninstallWebhook_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context))
+ })
+ return _c
+}
+
+func (_c *PoolManager_UninstallWebhook_Call) Return(_a0 error) *PoolManager_UninstallWebhook_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *PoolManager_UninstallWebhook_Call) RunAndReturn(run func(context.Context) error) *PoolManager_UninstallWebhook_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// Wait provides a mock function with no fields
func (_m *PoolManager) Wait() error {
ret := _m.Called()
+ if len(ret) == 0 {
+ panic("no return value specified for Wait")
+ }
+
var r0 error
if rf, ok := ret.Get(0).(func() error); ok {
r0 = rf()
@@ -148,10 +569,41 @@ func (_m *PoolManager) Wait() error {
return r0
}
-// WebhookSecret provides a mock function with given fields:
+// PoolManager_Wait_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Wait'
+type PoolManager_Wait_Call struct {
+ *mock.Call
+}
+
+// Wait is a helper method to define mock.On call
+func (_e *PoolManager_Expecter) Wait() *PoolManager_Wait_Call {
+ return &PoolManager_Wait_Call{Call: _e.mock.On("Wait")}
+}
+
+func (_c *PoolManager_Wait_Call) Run(run func()) *PoolManager_Wait_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run()
+ })
+ return _c
+}
+
+func (_c *PoolManager_Wait_Call) Return(_a0 error) *PoolManager_Wait_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *PoolManager_Wait_Call) RunAndReturn(run func() error) *PoolManager_Wait_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// WebhookSecret provides a mock function with no fields
func (_m *PoolManager) WebhookSecret() string {
ret := _m.Called()
+ if len(ret) == 0 {
+ panic("no return value specified for WebhookSecret")
+ }
+
var r0 string
if rf, ok := ret.Get(0).(func() string); ok {
r0 = rf()
@@ -162,6 +614,33 @@ func (_m *PoolManager) WebhookSecret() string {
return r0
}
+// PoolManager_WebhookSecret_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'WebhookSecret'
+type PoolManager_WebhookSecret_Call struct {
+ *mock.Call
+}
+
+// WebhookSecret is a helper method to define mock.On call
+func (_e *PoolManager_Expecter) WebhookSecret() *PoolManager_WebhookSecret_Call {
+ return &PoolManager_WebhookSecret_Call{Call: _e.mock.On("WebhookSecret")}
+}
+
+func (_c *PoolManager_WebhookSecret_Call) Run(run func()) *PoolManager_WebhookSecret_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run()
+ })
+ return _c
+}
+
+func (_c *PoolManager_WebhookSecret_Call) Return(_a0 string) *PoolManager_WebhookSecret_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *PoolManager_WebhookSecret_Call) RunAndReturn(run func() string) *PoolManager_WebhookSecret_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// NewPoolManager creates a new instance of PoolManager. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewPoolManager(t interface {
diff --git a/runner/common/mocks/Provider.go b/runner/common/mocks/Provider.go
index e5157e0f..5bf94a10 100644
--- a/runner/common/mocks/Provider.go
+++ b/runner/common/mocks/Provider.go
@@ -1,11 +1,14 @@
-// Code generated by mockery v0.0.0-dev. DO NOT EDIT.
+// Code generated by mockery. DO NOT EDIT.
package mocks
import (
context "context"
+ common "github.com/cloudbase/garm/runner/common"
+
garm_provider_commonparams "github.com/cloudbase/garm-provider-common/params"
+
mock "github.com/stretchr/testify/mock"
params "github.com/cloudbase/garm/params"
@@ -16,10 +19,22 @@ type Provider struct {
mock.Mock
}
-// AsParams provides a mock function with given fields:
+type Provider_Expecter struct {
+ mock *mock.Mock
+}
+
+func (_m *Provider) EXPECT() *Provider_Expecter {
+ return &Provider_Expecter{mock: &_m.Mock}
+}
+
+// AsParams provides a mock function with no fields
func (_m *Provider) AsParams() params.Provider {
ret := _m.Called()
+ if len(ret) == 0 {
+ panic("no return value specified for AsParams")
+ }
+
var r0 params.Provider
if rf, ok := ret.Get(0).(func() params.Provider); ok {
r0 = rf()
@@ -30,23 +45,54 @@ func (_m *Provider) AsParams() params.Provider {
return r0
}
-// CreateInstance provides a mock function with given fields: ctx, bootstrapParams
-func (_m *Provider) CreateInstance(ctx context.Context, bootstrapParams garm_provider_commonparams.BootstrapInstance) (garm_provider_commonparams.ProviderInstance, error) {
- ret := _m.Called(ctx, bootstrapParams)
+// Provider_AsParams_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AsParams'
+type Provider_AsParams_Call struct {
+ *mock.Call
+}
+
+// AsParams is a helper method to define mock.On call
+func (_e *Provider_Expecter) AsParams() *Provider_AsParams_Call {
+ return &Provider_AsParams_Call{Call: _e.mock.On("AsParams")}
+}
+
+func (_c *Provider_AsParams_Call) Run(run func()) *Provider_AsParams_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run()
+ })
+ return _c
+}
+
+func (_c *Provider_AsParams_Call) Return(_a0 params.Provider) *Provider_AsParams_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *Provider_AsParams_Call) RunAndReturn(run func() params.Provider) *Provider_AsParams_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// CreateInstance provides a mock function with given fields: ctx, bootstrapParams, createInstanceParams
+func (_m *Provider) CreateInstance(ctx context.Context, bootstrapParams garm_provider_commonparams.BootstrapInstance, createInstanceParams common.CreateInstanceParams) (garm_provider_commonparams.ProviderInstance, error) {
+ ret := _m.Called(ctx, bootstrapParams, createInstanceParams)
+
+ if len(ret) == 0 {
+ panic("no return value specified for CreateInstance")
+ }
var r0 garm_provider_commonparams.ProviderInstance
var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, garm_provider_commonparams.BootstrapInstance) (garm_provider_commonparams.ProviderInstance, error)); ok {
- return rf(ctx, bootstrapParams)
+ if rf, ok := ret.Get(0).(func(context.Context, garm_provider_commonparams.BootstrapInstance, common.CreateInstanceParams) (garm_provider_commonparams.ProviderInstance, error)); ok {
+ return rf(ctx, bootstrapParams, createInstanceParams)
}
- if rf, ok := ret.Get(0).(func(context.Context, garm_provider_commonparams.BootstrapInstance) garm_provider_commonparams.ProviderInstance); ok {
- r0 = rf(ctx, bootstrapParams)
+ if rf, ok := ret.Get(0).(func(context.Context, garm_provider_commonparams.BootstrapInstance, common.CreateInstanceParams) garm_provider_commonparams.ProviderInstance); ok {
+ r0 = rf(ctx, bootstrapParams, createInstanceParams)
} else {
r0 = ret.Get(0).(garm_provider_commonparams.ProviderInstance)
}
- if rf, ok := ret.Get(1).(func(context.Context, garm_provider_commonparams.BootstrapInstance) error); ok {
- r1 = rf(ctx, bootstrapParams)
+ if rf, ok := ret.Get(1).(func(context.Context, garm_provider_commonparams.BootstrapInstance, common.CreateInstanceParams) error); ok {
+ r1 = rf(ctx, bootstrapParams, createInstanceParams)
} else {
r1 = ret.Error(1)
}
@@ -54,13 +100,47 @@ func (_m *Provider) CreateInstance(ctx context.Context, bootstrapParams garm_pro
return r0, r1
}
-// DeleteInstance provides a mock function with given fields: ctx, instance
-func (_m *Provider) DeleteInstance(ctx context.Context, instance string) error {
- ret := _m.Called(ctx, instance)
+// Provider_CreateInstance_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateInstance'
+type Provider_CreateInstance_Call struct {
+ *mock.Call
+}
+
+// CreateInstance is a helper method to define mock.On call
+// - ctx context.Context
+// - bootstrapParams garm_provider_commonparams.BootstrapInstance
+// - createInstanceParams common.CreateInstanceParams
+func (_e *Provider_Expecter) CreateInstance(ctx interface{}, bootstrapParams interface{}, createInstanceParams interface{}) *Provider_CreateInstance_Call {
+ return &Provider_CreateInstance_Call{Call: _e.mock.On("CreateInstance", ctx, bootstrapParams, createInstanceParams)}
+}
+
+func (_c *Provider_CreateInstance_Call) Run(run func(ctx context.Context, bootstrapParams garm_provider_commonparams.BootstrapInstance, createInstanceParams common.CreateInstanceParams)) *Provider_CreateInstance_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(garm_provider_commonparams.BootstrapInstance), args[2].(common.CreateInstanceParams))
+ })
+ return _c
+}
+
+func (_c *Provider_CreateInstance_Call) Return(_a0 garm_provider_commonparams.ProviderInstance, _a1 error) *Provider_CreateInstance_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Provider_CreateInstance_Call) RunAndReturn(run func(context.Context, garm_provider_commonparams.BootstrapInstance, common.CreateInstanceParams) (garm_provider_commonparams.ProviderInstance, error)) *Provider_CreateInstance_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// DeleteInstance provides a mock function with given fields: ctx, instance, deleteInstanceParams
+func (_m *Provider) DeleteInstance(ctx context.Context, instance string, deleteInstanceParams common.DeleteInstanceParams) error {
+ ret := _m.Called(ctx, instance, deleteInstanceParams)
+
+ if len(ret) == 0 {
+ panic("no return value specified for DeleteInstance")
+ }
var r0 error
- if rf, ok := ret.Get(0).(func(context.Context, string) error); ok {
- r0 = rf(ctx, instance)
+ if rf, ok := ret.Get(0).(func(context.Context, string, common.DeleteInstanceParams) error); ok {
+ r0 = rf(ctx, instance, deleteInstanceParams)
} else {
r0 = ret.Error(0)
}
@@ -68,23 +148,102 @@ func (_m *Provider) DeleteInstance(ctx context.Context, instance string) error {
return r0
}
-// GetInstance provides a mock function with given fields: ctx, instance
-func (_m *Provider) GetInstance(ctx context.Context, instance string) (garm_provider_commonparams.ProviderInstance, error) {
- ret := _m.Called(ctx, instance)
+// Provider_DeleteInstance_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteInstance'
+type Provider_DeleteInstance_Call struct {
+ *mock.Call
+}
+
+// DeleteInstance is a helper method to define mock.On call
+// - ctx context.Context
+// - instance string
+// - deleteInstanceParams common.DeleteInstanceParams
+func (_e *Provider_Expecter) DeleteInstance(ctx interface{}, instance interface{}, deleteInstanceParams interface{}) *Provider_DeleteInstance_Call {
+ return &Provider_DeleteInstance_Call{Call: _e.mock.On("DeleteInstance", ctx, instance, deleteInstanceParams)}
+}
+
+func (_c *Provider_DeleteInstance_Call) Run(run func(ctx context.Context, instance string, deleteInstanceParams common.DeleteInstanceParams)) *Provider_DeleteInstance_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string), args[2].(common.DeleteInstanceParams))
+ })
+ return _c
+}
+
+func (_c *Provider_DeleteInstance_Call) Return(_a0 error) *Provider_DeleteInstance_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *Provider_DeleteInstance_Call) RunAndReturn(run func(context.Context, string, common.DeleteInstanceParams) error) *Provider_DeleteInstance_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// DisableJITConfig provides a mock function with no fields
+func (_m *Provider) DisableJITConfig() bool {
+ ret := _m.Called()
+
+ if len(ret) == 0 {
+ panic("no return value specified for DisableJITConfig")
+ }
+
+ var r0 bool
+ if rf, ok := ret.Get(0).(func() bool); ok {
+ r0 = rf()
+ } else {
+ r0 = ret.Get(0).(bool)
+ }
+
+ return r0
+}
+
+// Provider_DisableJITConfig_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DisableJITConfig'
+type Provider_DisableJITConfig_Call struct {
+ *mock.Call
+}
+
+// DisableJITConfig is a helper method to define mock.On call
+func (_e *Provider_Expecter) DisableJITConfig() *Provider_DisableJITConfig_Call {
+ return &Provider_DisableJITConfig_Call{Call: _e.mock.On("DisableJITConfig")}
+}
+
+func (_c *Provider_DisableJITConfig_Call) Run(run func()) *Provider_DisableJITConfig_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run()
+ })
+ return _c
+}
+
+func (_c *Provider_DisableJITConfig_Call) Return(_a0 bool) *Provider_DisableJITConfig_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *Provider_DisableJITConfig_Call) RunAndReturn(run func() bool) *Provider_DisableJITConfig_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GetInstance provides a mock function with given fields: ctx, instance, getInstanceParams
+func (_m *Provider) GetInstance(ctx context.Context, instance string, getInstanceParams common.GetInstanceParams) (garm_provider_commonparams.ProviderInstance, error) {
+ ret := _m.Called(ctx, instance, getInstanceParams)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetInstance")
+ }
var r0 garm_provider_commonparams.ProviderInstance
var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, string) (garm_provider_commonparams.ProviderInstance, error)); ok {
- return rf(ctx, instance)
+ if rf, ok := ret.Get(0).(func(context.Context, string, common.GetInstanceParams) (garm_provider_commonparams.ProviderInstance, error)); ok {
+ return rf(ctx, instance, getInstanceParams)
}
- if rf, ok := ret.Get(0).(func(context.Context, string) garm_provider_commonparams.ProviderInstance); ok {
- r0 = rf(ctx, instance)
+ if rf, ok := ret.Get(0).(func(context.Context, string, common.GetInstanceParams) garm_provider_commonparams.ProviderInstance); ok {
+ r0 = rf(ctx, instance, getInstanceParams)
} else {
r0 = ret.Get(0).(garm_provider_commonparams.ProviderInstance)
}
- if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
- r1 = rf(ctx, instance)
+ if rf, ok := ret.Get(1).(func(context.Context, string, common.GetInstanceParams) error); ok {
+ r1 = rf(ctx, instance, getInstanceParams)
} else {
r1 = ret.Error(1)
}
@@ -92,25 +251,59 @@ func (_m *Provider) GetInstance(ctx context.Context, instance string) (garm_prov
return r0, r1
}
-// ListInstances provides a mock function with given fields: ctx, poolID
-func (_m *Provider) ListInstances(ctx context.Context, poolID string) ([]garm_provider_commonparams.ProviderInstance, error) {
- ret := _m.Called(ctx, poolID)
+// Provider_GetInstance_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetInstance'
+type Provider_GetInstance_Call struct {
+ *mock.Call
+}
+
+// GetInstance is a helper method to define mock.On call
+// - ctx context.Context
+// - instance string
+// - getInstanceParams common.GetInstanceParams
+func (_e *Provider_Expecter) GetInstance(ctx interface{}, instance interface{}, getInstanceParams interface{}) *Provider_GetInstance_Call {
+ return &Provider_GetInstance_Call{Call: _e.mock.On("GetInstance", ctx, instance, getInstanceParams)}
+}
+
+func (_c *Provider_GetInstance_Call) Run(run func(ctx context.Context, instance string, getInstanceParams common.GetInstanceParams)) *Provider_GetInstance_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string), args[2].(common.GetInstanceParams))
+ })
+ return _c
+}
+
+func (_c *Provider_GetInstance_Call) Return(_a0 garm_provider_commonparams.ProviderInstance, _a1 error) *Provider_GetInstance_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Provider_GetInstance_Call) RunAndReturn(run func(context.Context, string, common.GetInstanceParams) (garm_provider_commonparams.ProviderInstance, error)) *Provider_GetInstance_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// ListInstances provides a mock function with given fields: ctx, poolID, listInstancesParams
+func (_m *Provider) ListInstances(ctx context.Context, poolID string, listInstancesParams common.ListInstancesParams) ([]garm_provider_commonparams.ProviderInstance, error) {
+ ret := _m.Called(ctx, poolID, listInstancesParams)
+
+ if len(ret) == 0 {
+ panic("no return value specified for ListInstances")
+ }
var r0 []garm_provider_commonparams.ProviderInstance
var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, string) ([]garm_provider_commonparams.ProviderInstance, error)); ok {
- return rf(ctx, poolID)
+ if rf, ok := ret.Get(0).(func(context.Context, string, common.ListInstancesParams) ([]garm_provider_commonparams.ProviderInstance, error)); ok {
+ return rf(ctx, poolID, listInstancesParams)
}
- if rf, ok := ret.Get(0).(func(context.Context, string) []garm_provider_commonparams.ProviderInstance); ok {
- r0 = rf(ctx, poolID)
+ if rf, ok := ret.Get(0).(func(context.Context, string, common.ListInstancesParams) []garm_provider_commonparams.ProviderInstance); ok {
+ r0 = rf(ctx, poolID, listInstancesParams)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]garm_provider_commonparams.ProviderInstance)
}
}
- if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
- r1 = rf(ctx, poolID)
+ if rf, ok := ret.Get(1).(func(context.Context, string, common.ListInstancesParams) error); ok {
+ r1 = rf(ctx, poolID, listInstancesParams)
} else {
r1 = ret.Error(1)
}
@@ -118,13 +311,47 @@ func (_m *Provider) ListInstances(ctx context.Context, poolID string) ([]garm_pr
return r0, r1
}
-// RemoveAllInstances provides a mock function with given fields: ctx
-func (_m *Provider) RemoveAllInstances(ctx context.Context) error {
- ret := _m.Called(ctx)
+// Provider_ListInstances_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListInstances'
+type Provider_ListInstances_Call struct {
+ *mock.Call
+}
+
+// ListInstances is a helper method to define mock.On call
+// - ctx context.Context
+// - poolID string
+// - listInstancesParams common.ListInstancesParams
+func (_e *Provider_Expecter) ListInstances(ctx interface{}, poolID interface{}, listInstancesParams interface{}) *Provider_ListInstances_Call {
+ return &Provider_ListInstances_Call{Call: _e.mock.On("ListInstances", ctx, poolID, listInstancesParams)}
+}
+
+func (_c *Provider_ListInstances_Call) Run(run func(ctx context.Context, poolID string, listInstancesParams common.ListInstancesParams)) *Provider_ListInstances_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string), args[2].(common.ListInstancesParams))
+ })
+ return _c
+}
+
+func (_c *Provider_ListInstances_Call) Return(_a0 []garm_provider_commonparams.ProviderInstance, _a1 error) *Provider_ListInstances_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Provider_ListInstances_Call) RunAndReturn(run func(context.Context, string, common.ListInstancesParams) ([]garm_provider_commonparams.ProviderInstance, error)) *Provider_ListInstances_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// RemoveAllInstances provides a mock function with given fields: ctx, removeAllInstancesParams
+func (_m *Provider) RemoveAllInstances(ctx context.Context, removeAllInstancesParams common.RemoveAllInstancesParams) error {
+ ret := _m.Called(ctx, removeAllInstancesParams)
+
+ if len(ret) == 0 {
+ panic("no return value specified for RemoveAllInstances")
+ }
var r0 error
- if rf, ok := ret.Get(0).(func(context.Context) error); ok {
- r0 = rf(ctx)
+ if rf, ok := ret.Get(0).(func(context.Context, common.RemoveAllInstancesParams) error); ok {
+ r0 = rf(ctx, removeAllInstancesParams)
} else {
r0 = ret.Error(0)
}
@@ -132,13 +359,46 @@ func (_m *Provider) RemoveAllInstances(ctx context.Context) error {
return r0
}
-// Start provides a mock function with given fields: ctx, instance
-func (_m *Provider) Start(ctx context.Context, instance string) error {
- ret := _m.Called(ctx, instance)
+// Provider_RemoveAllInstances_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RemoveAllInstances'
+type Provider_RemoveAllInstances_Call struct {
+ *mock.Call
+}
+
+// RemoveAllInstances is a helper method to define mock.On call
+// - ctx context.Context
+// - removeAllInstancesParams common.RemoveAllInstancesParams
+func (_e *Provider_Expecter) RemoveAllInstances(ctx interface{}, removeAllInstancesParams interface{}) *Provider_RemoveAllInstances_Call {
+ return &Provider_RemoveAllInstances_Call{Call: _e.mock.On("RemoveAllInstances", ctx, removeAllInstancesParams)}
+}
+
+func (_c *Provider_RemoveAllInstances_Call) Run(run func(ctx context.Context, removeAllInstancesParams common.RemoveAllInstancesParams)) *Provider_RemoveAllInstances_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(common.RemoveAllInstancesParams))
+ })
+ return _c
+}
+
+func (_c *Provider_RemoveAllInstances_Call) Return(_a0 error) *Provider_RemoveAllInstances_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *Provider_RemoveAllInstances_Call) RunAndReturn(run func(context.Context, common.RemoveAllInstancesParams) error) *Provider_RemoveAllInstances_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// Start provides a mock function with given fields: ctx, instance, startParams
+func (_m *Provider) Start(ctx context.Context, instance string, startParams common.StartParams) error {
+ ret := _m.Called(ctx, instance, startParams)
+
+ if len(ret) == 0 {
+ panic("no return value specified for Start")
+ }
var r0 error
- if rf, ok := ret.Get(0).(func(context.Context, string) error); ok {
- r0 = rf(ctx, instance)
+ if rf, ok := ret.Get(0).(func(context.Context, string, common.StartParams) error); ok {
+ r0 = rf(ctx, instance, startParams)
} else {
r0 = ret.Error(0)
}
@@ -146,13 +406,47 @@ func (_m *Provider) Start(ctx context.Context, instance string) error {
return r0
}
-// Stop provides a mock function with given fields: ctx, instance, force
-func (_m *Provider) Stop(ctx context.Context, instance string, force bool) error {
- ret := _m.Called(ctx, instance, force)
+// Provider_Start_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Start'
+type Provider_Start_Call struct {
+ *mock.Call
+}
+
+// Start is a helper method to define mock.On call
+// - ctx context.Context
+// - instance string
+// - startParams common.StartParams
+func (_e *Provider_Expecter) Start(ctx interface{}, instance interface{}, startParams interface{}) *Provider_Start_Call {
+ return &Provider_Start_Call{Call: _e.mock.On("Start", ctx, instance, startParams)}
+}
+
+func (_c *Provider_Start_Call) Run(run func(ctx context.Context, instance string, startParams common.StartParams)) *Provider_Start_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string), args[2].(common.StartParams))
+ })
+ return _c
+}
+
+func (_c *Provider_Start_Call) Return(_a0 error) *Provider_Start_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *Provider_Start_Call) RunAndReturn(run func(context.Context, string, common.StartParams) error) *Provider_Start_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// Stop provides a mock function with given fields: ctx, instance, stopParams
+func (_m *Provider) Stop(ctx context.Context, instance string, stopParams common.StopParams) error {
+ ret := _m.Called(ctx, instance, stopParams)
+
+ if len(ret) == 0 {
+ panic("no return value specified for Stop")
+ }
var r0 error
- if rf, ok := ret.Get(0).(func(context.Context, string, bool) error); ok {
- r0 = rf(ctx, instance, force)
+ if rf, ok := ret.Get(0).(func(context.Context, string, common.StopParams) error); ok {
+ r0 = rf(ctx, instance, stopParams)
} else {
r0 = ret.Error(0)
}
@@ -160,6 +454,36 @@ func (_m *Provider) Stop(ctx context.Context, instance string, force bool) error
return r0
}
+// Provider_Stop_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Stop'
+type Provider_Stop_Call struct {
+ *mock.Call
+}
+
+// Stop is a helper method to define mock.On call
+// - ctx context.Context
+// - instance string
+// - stopParams common.StopParams
+func (_e *Provider_Expecter) Stop(ctx interface{}, instance interface{}, stopParams interface{}) *Provider_Stop_Call {
+ return &Provider_Stop_Call{Call: _e.mock.On("Stop", ctx, instance, stopParams)}
+}
+
+func (_c *Provider_Stop_Call) Run(run func(ctx context.Context, instance string, stopParams common.StopParams)) *Provider_Stop_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string), args[2].(common.StopParams))
+ })
+ return _c
+}
+
+func (_c *Provider_Stop_Call) Return(_a0 error) *Provider_Stop_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *Provider_Stop_Call) RunAndReturn(run func(context.Context, string, common.StopParams) error) *Provider_Stop_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// NewProvider creates a new instance of Provider. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewProvider(t interface {
diff --git a/runner/common/mocks/RateLimitClient.go b/runner/common/mocks/RateLimitClient.go
new file mode 100644
index 00000000..b7e52f71
--- /dev/null
+++ b/runner/common/mocks/RateLimitClient.go
@@ -0,0 +1,95 @@
+// Code generated by mockery. DO NOT EDIT.
+
+package mocks
+
+import (
+ context "context"
+
+ github "github.com/google/go-github/v72/github"
+ mock "github.com/stretchr/testify/mock"
+)
+
+// RateLimitClient is an autogenerated mock type for the RateLimitClient type
+type RateLimitClient struct {
+ mock.Mock
+}
+
+type RateLimitClient_Expecter struct {
+ mock *mock.Mock
+}
+
+func (_m *RateLimitClient) EXPECT() *RateLimitClient_Expecter {
+ return &RateLimitClient_Expecter{mock: &_m.Mock}
+}
+
+// RateLimit provides a mock function with given fields: ctx
+func (_m *RateLimitClient) RateLimit(ctx context.Context) (*github.RateLimits, error) {
+ ret := _m.Called(ctx)
+
+ if len(ret) == 0 {
+ panic("no return value specified for RateLimit")
+ }
+
+ var r0 *github.RateLimits
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context) (*github.RateLimits, error)); ok {
+ return rf(ctx)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context) *github.RateLimits); ok {
+ r0 = rf(ctx)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*github.RateLimits)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context) error); ok {
+ r1 = rf(ctx)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// RateLimitClient_RateLimit_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RateLimit'
+type RateLimitClient_RateLimit_Call struct {
+ *mock.Call
+}
+
+// RateLimit is a helper method to define mock.On call
+// - ctx context.Context
+func (_e *RateLimitClient_Expecter) RateLimit(ctx interface{}) *RateLimitClient_RateLimit_Call {
+ return &RateLimitClient_RateLimit_Call{Call: _e.mock.On("RateLimit", ctx)}
+}
+
+func (_c *RateLimitClient_RateLimit_Call) Run(run func(ctx context.Context)) *RateLimitClient_RateLimit_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context))
+ })
+ return _c
+}
+
+func (_c *RateLimitClient_RateLimit_Call) Return(_a0 *github.RateLimits, _a1 error) *RateLimitClient_RateLimit_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *RateLimitClient_RateLimit_Call) RunAndReturn(run func(context.Context) (*github.RateLimits, error)) *RateLimitClient_RateLimit_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// NewRateLimitClient creates a new instance of RateLimitClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewRateLimitClient(t interface {
+ mock.TestingT
+ Cleanup(func())
+}) *RateLimitClient {
+ mock := &RateLimitClient{}
+ mock.Mock.Test(t)
+
+ t.Cleanup(func() { mock.AssertExpectations(t) })
+
+ return mock
+}
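
For orientation, this is how the generated Expecter API above is typically consumed in a test. The test name and returned values below are illustrative only; the import path follows the file path in this diff:

package mocks_test

import (
	"context"
	"testing"

	github "github.com/google/go-github/v72/github"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	"github.com/cloudbase/garm/runner/common/mocks"
)

func TestRateLimitExpecter(t *testing.T) {
	// NewRateLimitClient registers a cleanup that asserts all expectations were met.
	m := mocks.NewRateLimitClient(t)

	// EXPECT() returns the typed expecter; arguments are still interface{} matchers.
	m.EXPECT().RateLimit(mock.Anything).Return(&github.RateLimits{}, nil)

	limits, err := m.RateLimit(context.Background())
	require.NoError(t, err)
	require.NotNil(t, limits)
}
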
diff --git a/runner/common/mocks/RepositoryHooks.go b/runner/common/mocks/RepositoryHooks.go
new file mode 100644
index 00000000..3f38915e
--- /dev/null
+++ b/runner/common/mocks/RepositoryHooks.go
@@ -0,0 +1,206 @@
+// Code generated by mockery v2.42.0. DO NOT EDIT.
+
+package mocks
+
+import (
+ context "context"
+
+ github "github.com/google/go-github/v72/github"
+ mock "github.com/stretchr/testify/mock"
+)
+
+// RepositoryHooks is an autogenerated mock type for the RepositoryHooks type
+type RepositoryHooks struct {
+ mock.Mock
+}
+
+// CreateRepoHook provides a mock function with given fields: ctx, owner, repo, hook
+func (_m *RepositoryHooks) CreateRepoHook(ctx context.Context, owner string, repo string, hook *github.Hook) (*github.Hook, *github.Response, error) {
+ ret := _m.Called(ctx, owner, repo, hook)
+
+ if len(ret) == 0 {
+ panic("no return value specified for CreateRepoHook")
+ }
+
+ var r0 *github.Hook
+ var r1 *github.Response
+ var r2 error
+ if rf, ok := ret.Get(0).(func(context.Context, string, string, *github.Hook) (*github.Hook, *github.Response, error)); ok {
+ return rf(ctx, owner, repo, hook)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, string, string, *github.Hook) *github.Hook); ok {
+ r0 = rf(ctx, owner, repo, hook)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*github.Hook)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, string, string, *github.Hook) *github.Response); ok {
+ r1 = rf(ctx, owner, repo, hook)
+ } else {
+ if ret.Get(1) != nil {
+ r1 = ret.Get(1).(*github.Response)
+ }
+ }
+
+ if rf, ok := ret.Get(2).(func(context.Context, string, string, *github.Hook) error); ok {
+ r2 = rf(ctx, owner, repo, hook)
+ } else {
+ r2 = ret.Error(2)
+ }
+
+ return r0, r1, r2
+}
+
+// DeleteRepoHook provides a mock function with given fields: ctx, owner, repo, id
+func (_m *RepositoryHooks) DeleteRepoHook(ctx context.Context, owner string, repo string, id int64) (*github.Response, error) {
+ ret := _m.Called(ctx, owner, repo, id)
+
+ if len(ret) == 0 {
+ panic("no return value specified for DeleteRepoHook")
+ }
+
+ var r0 *github.Response
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, string, string, int64) (*github.Response, error)); ok {
+ return rf(ctx, owner, repo, id)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, string, string, int64) *github.Response); ok {
+ r0 = rf(ctx, owner, repo, id)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*github.Response)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, string, string, int64) error); ok {
+ r1 = rf(ctx, owner, repo, id)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// GetRepoHook provides a mock function with given fields: ctx, owner, repo, id
+func (_m *RepositoryHooks) GetRepoHook(ctx context.Context, owner string, repo string, id int64) (*github.Hook, *github.Response, error) {
+ ret := _m.Called(ctx, owner, repo, id)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetRepoHook")
+ }
+
+ var r0 *github.Hook
+ var r1 *github.Response
+ var r2 error
+ if rf, ok := ret.Get(0).(func(context.Context, string, string, int64) (*github.Hook, *github.Response, error)); ok {
+ return rf(ctx, owner, repo, id)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, string, string, int64) *github.Hook); ok {
+ r0 = rf(ctx, owner, repo, id)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*github.Hook)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, string, string, int64) *github.Response); ok {
+ r1 = rf(ctx, owner, repo, id)
+ } else {
+ if ret.Get(1) != nil {
+ r1 = ret.Get(1).(*github.Response)
+ }
+ }
+
+ if rf, ok := ret.Get(2).(func(context.Context, string, string, int64) error); ok {
+ r2 = rf(ctx, owner, repo, id)
+ } else {
+ r2 = ret.Error(2)
+ }
+
+ return r0, r1, r2
+}
+
+// ListRepoHooks provides a mock function with given fields: ctx, owner, repo, opts
+func (_m *RepositoryHooks) ListRepoHooks(ctx context.Context, owner string, repo string, opts *github.ListOptions) ([]*github.Hook, *github.Response, error) {
+ ret := _m.Called(ctx, owner, repo, opts)
+
+ if len(ret) == 0 {
+ panic("no return value specified for ListRepoHooks")
+ }
+
+ var r0 []*github.Hook
+ var r1 *github.Response
+ var r2 error
+ if rf, ok := ret.Get(0).(func(context.Context, string, string, *github.ListOptions) ([]*github.Hook, *github.Response, error)); ok {
+ return rf(ctx, owner, repo, opts)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, string, string, *github.ListOptions) []*github.Hook); ok {
+ r0 = rf(ctx, owner, repo, opts)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]*github.Hook)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, string, string, *github.ListOptions) *github.Response); ok {
+ r1 = rf(ctx, owner, repo, opts)
+ } else {
+ if ret.Get(1) != nil {
+ r1 = ret.Get(1).(*github.Response)
+ }
+ }
+
+ if rf, ok := ret.Get(2).(func(context.Context, string, string, *github.ListOptions) error); ok {
+ r2 = rf(ctx, owner, repo, opts)
+ } else {
+ r2 = ret.Error(2)
+ }
+
+ return r0, r1, r2
+}
+
+// PingRepoHook provides a mock function with given fields: ctx, owner, repo, id
+func (_m *RepositoryHooks) PingRepoHook(ctx context.Context, owner string, repo string, id int64) (*github.Response, error) {
+ ret := _m.Called(ctx, owner, repo, id)
+
+ if len(ret) == 0 {
+ panic("no return value specified for PingRepoHook")
+ }
+
+ var r0 *github.Response
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, string, string, int64) (*github.Response, error)); ok {
+ return rf(ctx, owner, repo, id)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, string, string, int64) *github.Response); ok {
+ r0 = rf(ctx, owner, repo, id)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*github.Response)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, string, string, int64) error); ok {
+ r1 = rf(ctx, owner, repo, id)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// NewRepositoryHooks creates a new instance of RepositoryHooks. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewRepositoryHooks(t interface {
+ mock.TestingT
+ Cleanup(func())
+}) *RepositoryHooks {
+ mock := &RepositoryHooks{}
+ mock.Mock.Test(t)
+
+ t.Cleanup(func() { mock.AssertExpectations(t) })
+
+ return mock
+}
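
Unlike the other mocks touched in this change, RepositoryHooks is generated without the Expecter helpers, so expectations are set with the classic string-based mock.On API. A minimal sketch, with illustrative owner/repo values:

package mocks_test

import (
	"context"
	"testing"

	github "github.com/google/go-github/v72/github"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	"github.com/cloudbase/garm/runner/common/mocks"
)

func TestRepositoryHooksOn(t *testing.T) {
	m := mocks.NewRepositoryHooks(t)

	// Method names are plain strings here, so a typo only surfaces at runtime.
	m.On("ListRepoHooks", mock.Anything, "cloudbase", "garm", mock.Anything).
		Return([]*github.Hook{}, &github.Response{}, nil)

	hooks, _, err := m.ListRepoHooks(context.Background(), "cloudbase", "garm", nil)
	require.NoError(t, err)
	require.Empty(t, hooks)
}
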
diff --git a/runner/common/params.go b/runner/common/params.go
new file mode 100644
index 00000000..fdf73dbc
--- /dev/null
+++ b/runner/common/params.go
@@ -0,0 +1,88 @@
+// Copyright 2022 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package common
+
+import "github.com/cloudbase/garm/params"
+
+// Constants used for the provider interface version.
+const (
+ Version010 = "v0.1.0"
+ Version011 = "v0.1.1"
+)
+
+// Each struct is a wrapper for the actual parameters struct for a specific version.
+// Version 0.1.0 doesn't have any specific parameters, so there is no need for a struct for it.
+type CreateInstanceParams struct {
+ CreateInstanceV011 CreateInstanceV011Params
+}
+
+type DeleteInstanceParams struct {
+ DeleteInstanceV011 DeleteInstanceV011Params
+}
+
+type GetInstanceParams struct {
+ GetInstanceV011 GetInstanceV011Params
+}
+
+type ListInstancesParams struct {
+ ListInstancesV011 ListInstancesV011Params
+}
+
+type RemoveAllInstancesParams struct {
+ RemoveAllInstancesV011 RemoveAllInstancesV011Params
+}
+
+type StopParams struct {
+ StopV011 StopV011Params
+}
+
+type StartParams struct {
+ StartV011 StartV011Params
+}
+
+// Struct for the base provider parameters.
+type ProviderBaseParams struct {
+ PoolInfo params.Pool
+ ControllerInfo params.ControllerInfo
+}
+
+// Structs for version v0.1.1.
+type CreateInstanceV011Params struct {
+ ProviderBaseParams
+}
+
+type DeleteInstanceV011Params struct {
+ ProviderBaseParams
+}
+
+type GetInstanceV011Params struct {
+ ProviderBaseParams
+}
+
+type ListInstancesV011Params struct {
+ ProviderBaseParams
+}
+
+type RemoveAllInstancesV011Params struct {
+ ProviderBaseParams
+}
+
+type StopV011Params struct {
+ ProviderBaseParams
+}
+
+type StartV011Params struct {
+ ProviderBaseParams
+}
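
A sketch of how these wrappers are meant to be populated: v0.1.0 callers pass a zero-valued struct, while v0.1.1 callers fill in the shared base params. The helper below is hypothetical, added only to illustrate the shape:

package common

import "github.com/cloudbase/garm/params"

// newStopParams is an illustrative helper: v0.1.0 providers ignore the wrapper
// entirely, while v0.1.1 providers read the pool and controller info from it.
func newStopParams(pool params.Pool, ci params.ControllerInfo) StopParams {
	return StopParams{
		StopV011: StopV011Params{
			ProviderBaseParams: ProviderBaseParams{
				PoolInfo:       pool,
				ControllerInfo: ci,
			},
		},
	}
}
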
diff --git a/runner/common/pool.go b/runner/common/pool.go
index 39d8d3fa..4cb86a62 100644
--- a/runner/common/pool.go
+++ b/runner/common/pool.go
@@ -15,6 +15,7 @@
package common
import (
+ "context"
"time"
"github.com/cloudbase/garm/params"
@@ -26,28 +27,54 @@ const (
PoolReapTimeoutInterval = 5 * time.Minute
// Temporary tools download token is valid for 1 hour by default.
// There is no point in making an API call to get available tools, for every runner
- // we spin up. We cache the tools for one minute. This should save us a lot of API calls
+ // we spin up. We cache the tools for 5 minutes. This should save us a lot of API calls
// in cases where we have a lot of runners spin up at the same time.
- PoolToolUpdateInterval = 1 * time.Minute
+ PoolToolUpdateInterval = 5 * time.Minute
// BackoffTimer is the time we wait before attempting to make another request
// to the github API.
BackoffTimer = 1 * time.Minute
)
-//go:generate mockery --all
+//go:generate go run github.com/vektra/mockery/v2@latest
type PoolManager interface {
+	// ID returns the ID of the entity (repo, org, enterprise).
ID() string
+ // WebhookSecret returns the unencrypted webhook secret associated with the webhook installed
+ // in GitHub for GARM. For GARM to receive webhook events for an entity, either the operator or
+ // GARM will have to create a webhook in GitHub which points to the GARM API server. To authenticate
+ // the webhook, a webhook secret is used. This function returns that secret.
WebhookSecret() string
+ // GithubRunnerRegistrationToken returns a new registration token for a github runner. This is used
+ // for GHES installations that have not yet upgraded to a version >= 3.10. Starting with 3.10, we use
+ // just-in-time runners, which no longer require exposing a runner registration token.
GithubRunnerRegistrationToken() (string, error)
+ // HandleWorkflowJob handles a workflow job meant for a particular entity. When a webhook is fired for
+ // a repo, org or enterprise, we determine the destination of that webhook, retrieve the pool manager
+ // for it and call this function with the WorkflowJob as a parameter.
HandleWorkflowJob(job params.WorkflowJob) error
- RefreshState(param params.UpdatePoolStateParams) error
- ForceDeleteRunner(runner params.Instance) error
- // AddPool(ctx context.Context, pool params.Pool) error
- // PoolManager lifecycle functions. Start/stop pool.
+ // InstallWebhook will create a webhook in github for the entity associated with this pool manager.
+ InstallWebhook(ctx context.Context, param params.InstallWebhookParams) (params.HookInfo, error)
+	// GetWebhookInfo will return information about the webhook installed in github for the entity associated with this pool manager.
+ GetWebhookInfo(ctx context.Context) (params.HookInfo, error)
+ // UninstallWebhook will remove the webhook installed in github for the entity associated with this pool manager.
+ UninstallWebhook(ctx context.Context) error
+
+ // RootCABundle will return a CA bundle that must be installed on all runners in order to properly validate
+ // x509 certificates used by various systems involved. This CA bundle is defined in the GARM config file and
+	// can include multiple CA certificates for the GARM API server, GHES server and any provider API endpoint that
+	// may use internal or self-signed certificates.
+ RootCABundle() (params.CertificateBundle, error)
+
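+	// SetPoolRunningState records whether the pool manager is running and, when it is not, the failure reason.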
+ SetPoolRunningState(isRunning bool, failureReason string)
+
+ // Start will start the pool manager and all associated workers.
Start() error
+ // Stop will stop the pool manager and all associated workers.
Stop() error
+ // Status will return the current status of the pool manager.
Status() params.PoolManagerStatus
+ // Wait will block until the pool manager has stopped.
Wait() error
}
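
A condensed sketch of the lifecycle this interface implies. Real callers live in the pool manager controller; error handling is shortened and InstallWebhookParams is left zero-valued purely for brevity:

package runner

import (
	"context"
	"fmt"

	"github.com/cloudbase/garm/params"
	"github.com/cloudbase/garm/runner/common"
)

func runPoolManager(ctx context.Context, poolMgr common.PoolManager) error {
	if err := poolMgr.Start(); err != nil {
		return fmt.Errorf("error starting pool manager: %w", err)
	}
	defer func() {
		_ = poolMgr.Stop()
		_ = poolMgr.Wait() // block until all workers have shut down
	}()

	// Optionally let GARM manage the webhook for this entity.
	if _, err := poolMgr.InstallWebhook(ctx, params.InstallWebhookParams{}); err != nil {
		return fmt.Errorf("error installing webhook: %w", err)
	}

	fmt.Printf("pool manager %s status: %+v\n", poolMgr.ID(), poolMgr.Status())
	return nil
}
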
diff --git a/runner/common/provider.go b/runner/common/provider.go
index 1cdb7fbe..a5d0db66 100644
--- a/runner/common/provider.go
+++ b/runner/common/provider.go
@@ -21,22 +21,26 @@ import (
"github.com/cloudbase/garm/params"
)
-//go:generate mockery --all
+//go:generate go run github.com/vektra/mockery/v2@latest
type Provider interface {
// CreateInstance creates a new compute instance in the provider.
- CreateInstance(ctx context.Context, bootstrapParams commonParams.BootstrapInstance) (commonParams.ProviderInstance, error)
+ CreateInstance(ctx context.Context, bootstrapParams commonParams.BootstrapInstance, createInstanceParams CreateInstanceParams) (commonParams.ProviderInstance, error)
// Delete instance will delete the instance in a provider.
- DeleteInstance(ctx context.Context, instance string) error
+ DeleteInstance(ctx context.Context, instance string, deleteInstanceParams DeleteInstanceParams) error
// GetInstance will return details about one instance.
- GetInstance(ctx context.Context, instance string) (commonParams.ProviderInstance, error)
+ GetInstance(ctx context.Context, instance string, getInstanceParams GetInstanceParams) (commonParams.ProviderInstance, error)
// ListInstances will list all instances for a provider.
- ListInstances(ctx context.Context, poolID string) ([]commonParams.ProviderInstance, error)
+ ListInstances(ctx context.Context, poolID string, listInstancesParams ListInstancesParams) ([]commonParams.ProviderInstance, error)
// RemoveAllInstances will remove all instances created by this provider.
- RemoveAllInstances(ctx context.Context) error
+ RemoveAllInstances(ctx context.Context, removeAllInstancesParams RemoveAllInstancesParams) error
// Stop shuts down the instance.
- Stop(ctx context.Context, instance string, force bool) error
+ Stop(ctx context.Context, instance string, stopParams StopParams) error
// Start boots up an instance.
- Start(ctx context.Context, instance string) error
+ Start(ctx context.Context, instance string, startParams StartParams) error
+ // DisableJITConfig tells us if the provider explicitly disables JIT configuration and
+ // forces runner registration tokens to be used. This may happen if a provider has not yet
+ // been updated to support JIT configuration.
+ DisableJITConfig() bool
AsParams() params.Provider
}
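
A sketch of a caller threading the versioned params through the updated interface. bootstrapParams is assumed to be prepared elsewhere, and the JIT check mirrors the intent of DisableJITConfig rather than copying GARM's actual pool code:

package runner

import (
	"context"

	commonParams "github.com/cloudbase/garm-provider-common/params"

	"github.com/cloudbase/garm/runner/common"
)

func createRunnerInstance(ctx context.Context, prov common.Provider, bootstrapParams commonParams.BootstrapInstance, base common.ProviderBaseParams) (commonParams.ProviderInstance, error) {
	if prov.DisableJITConfig() {
		// Providers that predate JIT support signal it here; callers would fall
		// back to a runner registration token instead of a JIT config.
	}

	return prov.CreateInstance(ctx, bootstrapParams, common.CreateInstanceParams{
		CreateInstanceV011: common.CreateInstanceV011Params{ProviderBaseParams: base},
	})
}
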
diff --git a/runner/common/util.go b/runner/common/util.go
index b23295e3..5130dcfd 100644
--- a/runner/common/util.go
+++ b/runner/common/util.go
@@ -1,47 +1,59 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
package common
import (
"context"
+ "net/url"
- "github.com/google/go-github/v53/github"
+ "github.com/google/go-github/v72/github"
+
+ "github.com/cloudbase/garm/params"
)
+type GithubEntityOperations interface {
+ ListEntityHooks(ctx context.Context, opts *github.ListOptions) (ret []*github.Hook, response *github.Response, err error)
+ GetEntityHook(ctx context.Context, id int64) (ret *github.Hook, err error)
+ CreateEntityHook(ctx context.Context, hook *github.Hook) (ret *github.Hook, err error)
+ DeleteEntityHook(ctx context.Context, id int64) (ret *github.Response, err error)
+ PingEntityHook(ctx context.Context, id int64) (ret *github.Response, err error)
+ ListEntityRunners(ctx context.Context, opts *github.ListRunnersOptions) (*github.Runners, *github.Response, error)
+ ListEntityRunnerApplicationDownloads(ctx context.Context) ([]*github.RunnerApplicationDownload, *github.Response, error)
+ RemoveEntityRunner(ctx context.Context, runnerID int64) error
+ RateLimit(ctx context.Context) (*github.RateLimits, error)
+ CreateEntityRegistrationToken(ctx context.Context) (*github.RegistrationToken, *github.Response, error)
+ GetEntityJITConfig(ctx context.Context, instance string, pool params.Pool, labels []string) (jitConfigMap map[string]string, runner *github.Runner, err error)
+ GetEntityRunnerGroupIDByName(ctx context.Context, runnerGroupName string) (int64, error)
+
+	// GetEntity returns the GitHub entity for which the github client was instantiated.
+ GetEntity() params.ForgeEntity
+ // GithubBaseURL returns the base URL for the github or GHES API.
+ GithubBaseURL() *url.URL
+}
+
+type RateLimitClient interface {
+ RateLimit(ctx context.Context) (*github.RateLimits, error)
+}
+
// GithubClient that describes the minimum list of functions we need to interact with github.
// Allows for easier testing.
//
-//go:generate mockery --all
+//go:generate go run github.com/vektra/mockery/v2@latest
type GithubClient interface {
+ GithubEntityOperations
+
// GetWorkflowJobByID gets details about a single workflow job.
GetWorkflowJobByID(ctx context.Context, owner, repo string, jobID int64) (*github.WorkflowJob, *github.Response, error)
- // ListRunners lists all runners within a repository.
- ListRunners(ctx context.Context, owner, repo string, opts *github.ListOptions) (*github.Runners, *github.Response, error)
- // ListRunnerApplicationDownloads returns a list of github runner application downloads for the
- // various supported operating systems and architectures.
- ListRunnerApplicationDownloads(ctx context.Context, owner, repo string) ([]*github.RunnerApplicationDownload, *github.Response, error)
- // RemoveRunner removes one runner from a repository.
- RemoveRunner(ctx context.Context, owner, repo string, runnerID int64) (*github.Response, error)
- // CreateRegistrationToken creates a runner registration token for one repository.
- CreateRegistrationToken(ctx context.Context, owner, repo string) (*github.RegistrationToken, *github.Response, error)
-
- // ListOrganizationRunners lists all runners within an organization.
- ListOrganizationRunners(ctx context.Context, owner string, opts *github.ListOptions) (*github.Runners, *github.Response, error)
- // ListOrganizationRunnerApplicationDownloads returns a list of github runner application downloads for the
- // various supported operating systems and architectures.
- ListOrganizationRunnerApplicationDownloads(ctx context.Context, owner string) ([]*github.RunnerApplicationDownload, *github.Response, error)
- // RemoveOrganizationRunner removes one github runner from an organization.
- RemoveOrganizationRunner(ctx context.Context, owner string, runnerID int64) (*github.Response, error)
- // CreateOrganizationRegistrationToken creates a runner registration token for an organization.
- CreateOrganizationRegistrationToken(ctx context.Context, owner string) (*github.RegistrationToken, *github.Response, error)
-}
-
-type GithubEnterpriseClient interface {
- // ListRunners lists all runners within a repository.
- ListRunners(ctx context.Context, enterprise string, opts *github.ListOptions) (*github.Runners, *github.Response, error)
- // RemoveRunner removes one runner from an enterprise.
- RemoveRunner(ctx context.Context, enterprise string, runnerID int64) (*github.Response, error)
- // CreateRegistrationToken creates a runner registration token for an enterprise.
- CreateRegistrationToken(ctx context.Context, enterprise string) (*github.RegistrationToken, *github.Response, error)
- // ListRunnerApplicationDownloads returns a list of github runner application downloads for the
- // various supported operating systems and architectures.
- ListRunnerApplicationDownloads(ctx context.Context, enterprise string) ([]*github.RunnerApplicationDownload, *github.Response, error)
}
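
The entity-scoped hook listing follows go-github's usual pagination contract; a sketch of draining all pages through the new interface:

package common

import (
	"context"

	"github.com/google/go-github/v72/github"
)

// allEntityHooks collects every webhook for an entity across all result pages.
func allEntityHooks(ctx context.Context, cli GithubEntityOperations) ([]*github.Hook, error) {
	opts := &github.ListOptions{PerPage: 100}
	var hooks []*github.Hook
	for {
		page, resp, err := cli.ListEntityHooks(ctx, opts)
		if err != nil {
			return nil, err
		}
		hooks = append(hooks, page...)
		// go-github sets NextPage to 0 on the last page.
		if resp.NextPage == 0 {
			return hooks, nil
		}
		opts.Page = resp.NextPage
	}
}
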
diff --git a/runner/common_test.go b/runner/common_test.go
new file mode 100644
index 00000000..247b5ab1
--- /dev/null
+++ b/runner/common_test.go
@@ -0,0 +1,23 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package runner
+
+const (
+ // nolint: gosec
+ notExistingCredentialsName = "not-existent-creds-name"
+ // nolint: gosec
+ invalidCredentialsName = "invalid-creds-name"
+ notExistingProviderName = "not-existent-provider-name"
+)
diff --git a/runner/enterprises.go b/runner/enterprises.go
index b86a4284..6b393abd 100644
--- a/runner/enterprises.go
+++ b/runner/enterprises.go
@@ -1,9 +1,24 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
package runner
import (
"context"
+ "errors"
"fmt"
- "log"
+ "log/slog"
"strings"
runnerErrors "github.com/cloudbase/garm-provider-common/errors"
@@ -11,8 +26,6 @@ import (
"github.com/cloudbase/garm/params"
"github.com/cloudbase/garm/runner/common"
"github.com/cloudbase/garm/util/appdefaults"
-
- "github.com/pkg/errors"
)
func (r *Runner) CreateEnterprise(ctx context.Context, param params.CreateEnterpriseParams) (enterprise params.Enterprise, err error) {
@@ -22,58 +35,64 @@ func (r *Runner) CreateEnterprise(ctx context.Context, param params.CreateEnterp
err = param.Validate()
if err != nil {
- return params.Enterprise{}, errors.Wrap(err, "validating params")
+ return params.Enterprise{}, fmt.Errorf("error validating params: %w", err)
}
- creds, ok := r.credentials[param.CredentialsName]
- if !ok {
+ creds, err := r.store.GetGithubCredentialsByName(ctx, param.CredentialsName, true)
+ if err != nil {
return params.Enterprise{}, runnerErrors.NewBadRequestError("credentials %s not defined", param.CredentialsName)
}
- _, err = r.store.GetEnterprise(ctx, param.Name)
+ _, err = r.store.GetEnterprise(ctx, param.Name, creds.Endpoint.Name)
if err != nil {
if !errors.Is(err, runnerErrors.ErrNotFound) {
- return params.Enterprise{}, errors.Wrap(err, "fetching enterprise")
+ return params.Enterprise{}, fmt.Errorf("error fetching enterprise: %w", err)
}
} else {
return params.Enterprise{}, runnerErrors.NewConflictError("enterprise %s already exists", param.Name)
}
- enterprise, err = r.store.CreateEnterprise(ctx, param.Name, creds.Name, param.WebhookSecret)
+ enterprise, err = r.store.CreateEnterprise(ctx, param.Name, creds, param.WebhookSecret, param.PoolBalancerType)
if err != nil {
- return params.Enterprise{}, errors.Wrap(err, "creating enterprise")
+ return params.Enterprise{}, fmt.Errorf("error creating enterprise: %w", err)
}
defer func() {
if err != nil {
if deleteErr := r.store.DeleteEnterprise(ctx, enterprise.ID); deleteErr != nil {
- log.Printf("failed to delete enterprise: %s", deleteErr)
+ slog.With(slog.Any("error", deleteErr)).ErrorContext(
+ ctx, "failed to delete enterprise",
+ "enterprise_id", enterprise.ID)
}
}
}()
+ // Use the admin context in the pool manager. Any access control is already done above when
+ // updating the store.
var poolMgr common.PoolManager
poolMgr, err = r.poolManagerCtrl.CreateEnterprisePoolManager(r.ctx, enterprise, r.providers, r.store)
if err != nil {
- return params.Enterprise{}, errors.Wrap(err, "creating enterprise pool manager")
+ return params.Enterprise{}, fmt.Errorf("error creating enterprise pool manager: %w", err)
}
if err := poolMgr.Start(); err != nil {
if deleteErr := r.poolManagerCtrl.DeleteEnterprisePoolManager(enterprise); deleteErr != nil {
- log.Printf("failed to cleanup pool manager for enterprise %s", enterprise.ID)
+ slog.With(slog.Any("error", deleteErr)).ErrorContext(
+ ctx, "failed to cleanup pool manager for enterprise",
+ "enterprise_id", enterprise.ID)
}
- return params.Enterprise{}, errors.Wrap(err, "starting enterprise pool manager")
+ return params.Enterprise{}, fmt.Errorf("error starting enterprise pool manager: %w", err)
}
return enterprise, nil
}
-func (r *Runner) ListEnterprises(ctx context.Context) ([]params.Enterprise, error) {
+func (r *Runner) ListEnterprises(ctx context.Context, filter params.EnterpriseFilter) ([]params.Enterprise, error) {
if !auth.IsAdmin(ctx) {
return nil, runnerErrors.ErrUnauthorized
}
- enterprises, err := r.store.ListEnterprises(ctx)
+ enterprises, err := r.store.ListEnterprises(ctx, filter)
if err != nil {
- return nil, errors.Wrap(err, "listing enterprises")
+ return nil, fmt.Errorf("error listing enterprises: %w", err)
}
var allEnterprises []params.Enterprise
@@ -99,7 +118,7 @@ func (r *Runner) GetEnterpriseByID(ctx context.Context, enterpriseID string) (pa
enterprise, err := r.store.GetEnterpriseByID(ctx, enterpriseID)
if err != nil {
- return params.Enterprise{}, errors.Wrap(err, "fetching enterprise")
+ return params.Enterprise{}, fmt.Errorf("error fetching enterprise: %w", err)
}
poolMgr, err := r.poolManagerCtrl.GetEnterprisePoolManager(enterprise)
if err != nil {
@@ -117,29 +136,43 @@ func (r *Runner) DeleteEnterprise(ctx context.Context, enterpriseID string) erro
enterprise, err := r.store.GetEnterpriseByID(ctx, enterpriseID)
if err != nil {
- return errors.Wrap(err, "fetching enterprise")
+ return fmt.Errorf("error fetching enterprise: %w", err)
}
- pools, err := r.store.ListEnterprisePools(ctx, enterpriseID)
+ entity, err := enterprise.GetEntity()
if err != nil {
- return errors.Wrap(err, "fetching enterprise pools")
+ return fmt.Errorf("error getting entity: %w", err)
+ }
+
+ pools, err := r.store.ListEntityPools(ctx, entity)
+ if err != nil {
+ return fmt.Errorf("error fetching enterprise pools: %w", err)
}
if len(pools) > 0 {
- poolIds := []string{}
+ poolIDs := []string{}
for _, pool := range pools {
- poolIds = append(poolIds, pool.ID)
+ poolIDs = append(poolIDs, pool.ID)
}
- return runnerErrors.NewBadRequestError("enterprise has pools defined (%s)", strings.Join(poolIds, ", "))
+ return runnerErrors.NewBadRequestError("enterprise has pools defined (%s)", strings.Join(poolIDs, ", "))
+ }
+
+ scaleSets, err := r.store.ListEntityScaleSets(ctx, entity)
+ if err != nil {
+ return fmt.Errorf("error fetching enterprise scale sets: %w", err)
+ }
+
+ if len(scaleSets) > 0 {
+ return runnerErrors.NewBadRequestError("enterprise has scale sets defined; delete them first")
}
if err := r.poolManagerCtrl.DeleteEnterprisePoolManager(enterprise); err != nil {
- return errors.Wrap(err, "deleting enterprise pool manager")
+ return fmt.Errorf("error deleting enterprise pool manager: %w", err)
}
if err := r.store.DeleteEnterprise(ctx, enterpriseID); err != nil {
- return errors.Wrapf(err, "removing enterprise %s", enterpriseID)
+ return fmt.Errorf("error removing enterprise %s: %w", enterpriseID, err)
}
return nil
}
@@ -152,26 +185,20 @@ func (r *Runner) UpdateEnterprise(ctx context.Context, enterpriseID string, para
r.mux.Lock()
defer r.mux.Unlock()
- enterprise, err := r.store.GetEnterpriseByID(ctx, enterpriseID)
- if err != nil {
- return params.Enterprise{}, errors.Wrap(err, "fetching enterprise")
+ switch param.PoolBalancerType {
+ case params.PoolBalancerTypeRoundRobin, params.PoolBalancerTypePack, params.PoolBalancerTypeNone:
+ default:
+ return params.Enterprise{}, runnerErrors.NewBadRequestError("invalid pool balancer type: %s", param.PoolBalancerType)
}
- if param.CredentialsName != "" {
- // Check that credentials are set before saving to db
- if _, ok := r.credentials[param.CredentialsName]; !ok {
- return params.Enterprise{}, runnerErrors.NewBadRequestError("invalid credentials (%s) for enterprise %s", param.CredentialsName, enterprise.Name)
- }
+ enterprise, err := r.store.UpdateEnterprise(ctx, enterpriseID, param)
+ if err != nil {
+ return params.Enterprise{}, fmt.Errorf("error updating enterprise: %w", err)
}
- enterprise, err = r.store.UpdateEnterprise(ctx, enterpriseID, param)
+ poolMgr, err := r.poolManagerCtrl.GetEnterprisePoolManager(enterprise)
if err != nil {
- return params.Enterprise{}, errors.Wrap(err, "updating enterprise")
- }
-
- poolMgr, err := r.poolManagerCtrl.UpdateEnterprisePoolManager(r.ctx, enterprise)
- if err != nil {
- return params.Enterprise{}, fmt.Errorf("failed to update enterprise pool manager: %w", err)
+ return params.Enterprise{}, fmt.Errorf("failed to get enterprise pool manager: %w", err)
}
enterprise.PoolManagerStatus = poolMgr.Status()
@@ -183,30 +210,23 @@ func (r *Runner) CreateEnterprisePool(ctx context.Context, enterpriseID string,
return params.Pool{}, runnerErrors.ErrUnauthorized
}
- r.mux.Lock()
- defer r.mux.Unlock()
-
- enterprise, err := r.store.GetEnterpriseByID(ctx, enterpriseID)
- if err != nil {
- return params.Pool{}, errors.Wrap(err, "fetching enterprise")
- }
-
- if _, err := r.poolManagerCtrl.GetEnterprisePoolManager(enterprise); err != nil {
- return params.Pool{}, runnerErrors.ErrNotFound
- }
-
createPoolParams, err := r.appendTagsToCreatePoolParams(param)
if err != nil {
- return params.Pool{}, errors.Wrap(err, "fetching pool params")
+ return params.Pool{}, fmt.Errorf("failed to append tags to create pool params: %w", err)
}
if param.RunnerBootstrapTimeout == 0 {
param.RunnerBootstrapTimeout = appdefaults.DefaultRunnerBootstrapTimeout
}
- pool, err := r.store.CreateEnterprisePool(ctx, enterpriseID, createPoolParams)
+ entity := params.ForgeEntity{
+ ID: enterpriseID,
+ EntityType: params.ForgeEntityTypeEnterprise,
+ }
+
+ pool, err := r.store.CreateEntityPool(ctx, entity, createPoolParams)
if err != nil {
- return params.Pool{}, errors.Wrap(err, "creating pool")
+ return params.Pool{}, fmt.Errorf("failed to create enterprise pool: %w", err)
}
return pool, nil
@@ -216,10 +236,13 @@ func (r *Runner) GetEnterprisePoolByID(ctx context.Context, enterpriseID, poolID
if !auth.IsAdmin(ctx) {
return params.Pool{}, runnerErrors.ErrUnauthorized
}
-
- pool, err := r.store.GetEnterprisePool(ctx, enterpriseID, poolID)
+ entity := params.ForgeEntity{
+ ID: enterpriseID,
+ EntityType: params.ForgeEntityTypeEnterprise,
+ }
+ pool, err := r.store.GetEntityPool(ctx, entity, poolID)
if err != nil {
- return params.Pool{}, errors.Wrap(err, "fetching pool")
+ return params.Pool{}, fmt.Errorf("error fetching pool: %w", err)
}
return pool, nil
}
@@ -229,28 +252,28 @@ func (r *Runner) DeleteEnterprisePool(ctx context.Context, enterpriseID, poolID
return runnerErrors.ErrUnauthorized
}
- // TODO: dedup instance count verification
- pool, err := r.store.GetEnterprisePool(ctx, enterpriseID, poolID)
- if err != nil {
- return errors.Wrap(err, "fetching pool")
+ entity := params.ForgeEntity{
+ ID: enterpriseID,
+ EntityType: params.ForgeEntityTypeEnterprise,
}
- instances, err := r.store.ListPoolInstances(ctx, pool.ID)
+ pool, err := r.store.GetEntityPool(ctx, entity, poolID)
if err != nil {
- return errors.Wrap(err, "fetching instances")
+ return fmt.Errorf("error fetching pool: %w", err)
}
+ // nolint:golangci-lint,godox
// TODO: implement a count function
- if len(instances) > 0 {
+ if len(pool.Instances) > 0 {
runnerIDs := []string{}
- for _, run := range instances {
+ for _, run := range pool.Instances {
runnerIDs = append(runnerIDs, run.ID)
}
return runnerErrors.NewBadRequestError("pool has runners: %s", strings.Join(runnerIDs, ", "))
}
- if err := r.store.DeleteEnterprisePool(ctx, enterpriseID, poolID); err != nil {
- return errors.Wrap(err, "deleting pool")
+ if err := r.store.DeleteEntityPool(ctx, entity, poolID); err != nil {
+ return fmt.Errorf("error deleting pool: %w", err)
}
return nil
}
@@ -260,9 +283,13 @@ func (r *Runner) ListEnterprisePools(ctx context.Context, enterpriseID string) (
return []params.Pool{}, runnerErrors.ErrUnauthorized
}
- pools, err := r.store.ListEnterprisePools(ctx, enterpriseID)
+ entity := params.ForgeEntity{
+ ID: enterpriseID,
+ EntityType: params.ForgeEntityTypeEnterprise,
+ }
+ pools, err := r.store.ListEntityPools(ctx, entity)
if err != nil {
- return nil, errors.Wrap(err, "fetching pools")
+ return nil, fmt.Errorf("error fetching pools: %w", err)
}
return pools, nil
}
@@ -272,9 +299,13 @@ func (r *Runner) UpdateEnterprisePool(ctx context.Context, enterpriseID, poolID
return params.Pool{}, runnerErrors.ErrUnauthorized
}
- pool, err := r.store.GetEnterprisePool(ctx, enterpriseID, poolID)
+ entity := params.ForgeEntity{
+ ID: enterpriseID,
+ EntityType: params.ForgeEntityTypeEnterprise,
+ }
+ pool, err := r.store.GetEntityPool(ctx, entity, poolID)
if err != nil {
- return params.Pool{}, errors.Wrap(err, "fetching pool")
+ return params.Pool{}, fmt.Errorf("error fetching pool: %w", err)
}
maxRunners := pool.MaxRunners
@@ -291,9 +322,9 @@ func (r *Runner) UpdateEnterprisePool(ctx context.Context, enterpriseID, poolID
return params.Pool{}, runnerErrors.NewBadRequestError("min_idle_runners cannot be larger than max_runners")
}
- newPool, err := r.store.UpdateEnterprisePool(ctx, enterpriseID, poolID, param)
+ newPool, err := r.store.UpdateEntityPool(ctx, entity, poolID, param)
if err != nil {
- return params.Pool{}, errors.Wrap(err, "updating pool")
+ return params.Pool{}, fmt.Errorf("error updating pool: %w", err)
}
return newPool, nil
}
@@ -302,26 +333,29 @@ func (r *Runner) ListEnterpriseInstances(ctx context.Context, enterpriseID strin
if !auth.IsAdmin(ctx) {
return nil, runnerErrors.ErrUnauthorized
}
-
- instances, err := r.store.ListEnterpriseInstances(ctx, enterpriseID)
+ entity := params.ForgeEntity{
+ ID: enterpriseID,
+ EntityType: params.ForgeEntityTypeEnterprise,
+ }
+ instances, err := r.store.ListEntityInstances(ctx, entity)
if err != nil {
- return []params.Instance{}, errors.Wrap(err, "fetching instances")
+ return []params.Instance{}, fmt.Errorf("error fetching instances: %w", err)
}
return instances, nil
}
-func (r *Runner) findEnterprisePoolManager(name string) (common.PoolManager, error) {
+func (r *Runner) findEnterprisePoolManager(name, endpointName string) (common.PoolManager, error) {
r.mux.Lock()
defer r.mux.Unlock()
- enterprise, err := r.store.GetEnterprise(r.ctx, name)
+ enterprise, err := r.store.GetEnterprise(r.ctx, name, endpointName)
if err != nil {
- return nil, errors.Wrap(err, "fetching enterprise")
+ return nil, fmt.Errorf("error fetching enterprise: %w", err)
}
poolManager, err := r.poolManagerCtrl.GetEnterprisePoolManager(enterprise)
if err != nil {
- return nil, errors.Wrap(err, "fetching pool manager for enterprise")
+ return nil, fmt.Errorf("error fetching pool manager for enterprise: %w", err)
}
return poolManager, nil
}
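+
+// Note: wrapping with fmt.Errorf and the %w verb keeps the underlying error
+// in the chain, so sentinel checks such as the errors.Is call in
+// CreateEnterprise still match through the wrap. A minimal sketch, using only
+// identifiers already imported in this file:
+//
+//	err := fmt.Errorf("error fetching enterprise: %w", runnerErrors.ErrNotFound)
+//	if errors.Is(err, runnerErrors.ErrNotFound) {
+//		// the sentinel survives wrapping; %s or %v would not preserve it
+//	}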
diff --git a/runner/enterprises_test.go b/runner/enterprises_test.go
index 809577a7..0724ccf9 100644
--- a/runner/enterprises_test.go
+++ b/runner/enterprises_test.go
@@ -16,22 +16,21 @@ package runner
import (
"context"
+ "errors"
"fmt"
"testing"
+ "github.com/stretchr/testify/mock"
+ "github.com/stretchr/testify/suite"
+
runnerErrors "github.com/cloudbase/garm-provider-common/errors"
- "github.com/cloudbase/garm/auth"
- "github.com/cloudbase/garm/config"
"github.com/cloudbase/garm/database"
dbCommon "github.com/cloudbase/garm/database/common"
- garmTesting "github.com/cloudbase/garm/internal/testing"
+ garmTesting "github.com/cloudbase/garm/internal/testing" //nolint:typecheck
"github.com/cloudbase/garm/params"
"github.com/cloudbase/garm/runner/common"
runnerCommonMocks "github.com/cloudbase/garm/runner/common/mocks"
runnerMocks "github.com/cloudbase/garm/runner/mocks"
-
- "github.com/stretchr/testify/mock"
- "github.com/stretchr/testify/suite"
)
type EnterpriseTestFixtures struct {
@@ -40,13 +39,12 @@ type EnterpriseTestFixtures struct {
Store dbCommon.Store
StoreEnterprises map[string]params.Enterprise
Providers map[string]common.Provider
- Credentials map[string]config.Github
+ Credentials map[string]params.ForgeCredentials
CreateEnterpriseParams params.CreateEnterpriseParams
CreatePoolParams params.CreatePoolParams
CreateInstanceParams params.CreateInstanceParams
UpdateRepoParams params.UpdateEntityParams
UpdatePoolParams params.UpdatePoolParams
- UpdatePoolStateParams params.UpdatePoolStateParams
ErrMock error
ProviderMock *runnerCommonMocks.Provider
PoolMgrMock *runnerCommonMocks.PoolManager
@@ -57,18 +55,29 @@ type EnterpriseTestSuite struct {
suite.Suite
Fixtures *EnterpriseTestFixtures
Runner *Runner
+
+ testCreds params.ForgeCredentials
+ secondaryTestCreds params.ForgeCredentials
+ forgeEndpoint params.ForgeEndpoint
+ ghesEndpoint params.ForgeEndpoint
+ ghesCreds params.ForgeCredentials
}
func (s *EnterpriseTestSuite) SetupTest() {
- adminCtx := auth.GetAdminContext()
-
// create testing sqlite database
dbCfg := garmTesting.GetTestSqliteDBConfig(s.T())
- db, err := database.NewDatabase(adminCtx, dbCfg)
+ db, err := database.NewDatabase(context.Background(), dbCfg)
if err != nil {
s.FailNow(fmt.Sprintf("failed to create db connection: %s", err))
}
+ adminCtx := garmTesting.ImpersonateAdminContext(context.Background(), db, s.T())
+ s.forgeEndpoint = garmTesting.CreateDefaultGithubEndpoint(adminCtx, db, s.T())
+ s.ghesEndpoint = garmTesting.CreateGHESEndpoint(adminCtx, db, s.T())
+ s.testCreds = garmTesting.CreateTestGithubCredentials(adminCtx, "new-creds", db, s.T(), s.forgeEndpoint)
+ s.secondaryTestCreds = garmTesting.CreateTestGithubCredentials(adminCtx, "secondary-creds", db, s.T(), s.forgeEndpoint)
+ s.ghesCreds = garmTesting.CreateTestGithubCredentials(adminCtx, "ghes-creds", db, s.T(), s.ghesEndpoint)
+
// create some organization objects in the database, for testing purposes
enterprises := map[string]params.Enterprise{}
for i := 1; i <= 3; i++ {
@@ -76,11 +85,12 @@ func (s *EnterpriseTestSuite) SetupTest() {
enterprise, err := db.CreateEnterprise(
adminCtx,
name,
- fmt.Sprintf("test-creds-%v", i),
+ s.testCreds,
fmt.Sprintf("test-webhook-secret-%v", i),
+ params.PoolBalancerTypeRoundRobin,
)
if err != nil {
- s.FailNow(fmt.Sprintf("failed to create database object (test-enterprise-%v)", i))
+ s.FailNow(fmt.Sprintf("failed to create database object (test-enterprise-%v): %+v", i, err))
}
enterprises[name] = enterprise
}
@@ -97,16 +107,13 @@ func (s *EnterpriseTestSuite) SetupTest() {
Providers: map[string]common.Provider{
"test-provider": providerMock,
},
- Credentials: map[string]config.Github{
- "test-creds": {
- Name: "test-creds-name",
- Description: "test-creds-description",
- OAuth2Token: "test-creds-oauth2-token",
- },
+ Credentials: map[string]params.ForgeCredentials{
+ s.testCreds.Name: s.testCreds,
+ s.secondaryTestCreds.Name: s.secondaryTestCreds,
},
CreateEnterpriseParams: params.CreateEnterpriseParams{
Name: "test-enterprise-create",
- CredentialsName: "test-creds",
+ CredentialsName: s.testCreds.Name,
WebhookSecret: "test-create-enterprise-webhook-secret",
},
CreatePoolParams: params.CreatePoolParams{
@@ -117,7 +124,7 @@ func (s *EnterpriseTestSuite) SetupTest() {
Flavor: "test",
OSType: "linux",
OSArch: "arm64",
- Tags: []string{"self-hosted", "arm64", "linux"},
+ Tags: []string{"arm64-linux-runner"},
RunnerBootstrapTimeout: 0,
},
CreateInstanceParams: params.CreateInstanceParams{
@@ -125,7 +132,7 @@ func (s *EnterpriseTestSuite) SetupTest() {
OSType: "linux",
},
UpdateRepoParams: params.UpdateEntityParams{
- CredentialsName: "test-creds",
+ CredentialsName: s.testCreds.Name,
WebhookSecret: "test-update-repo-webhook-secret",
},
UpdatePoolParams: params.UpdatePoolParams{
@@ -134,9 +141,6 @@ func (s *EnterpriseTestSuite) SetupTest() {
Image: "test-images-updated",
Flavor: "test-flavor-updated",
},
- UpdatePoolStateParams: params.UpdatePoolStateParams{
- WebhookSecret: "test-update-repo-webhook-secret",
- },
ErrMock: fmt.Errorf("mock error"),
ProviderMock: providerMock,
PoolMgrMock: runnerCommonMocks.NewPoolManager(s.T()),
@@ -147,7 +151,6 @@ func (s *EnterpriseTestSuite) SetupTest() {
// setup test runner
runner := &Runner{
providers: fixtures.Providers,
- credentials: fixtures.Credentials,
ctx: fixtures.AdminContext,
store: fixtures.Store,
poolManagerCtrl: fixtures.PoolMgrCtrlMock,
@@ -163,12 +166,13 @@ func (s *EnterpriseTestSuite) TestCreateEnterprise() {
// call tested function
enterprise, err := s.Runner.CreateEnterprise(s.Fixtures.AdminContext, s.Fixtures.CreateEnterpriseParams)
+ s.Require().Nil(err)
+ s.Require().Equal(s.Fixtures.CreateEnterpriseParams.Name, enterprise.Name)
+ s.Require().Equal(s.Fixtures.Credentials[s.Fixtures.CreateEnterpriseParams.CredentialsName].Name, enterprise.Credentials.Name)
+ s.Require().Equal(params.PoolBalancerTypeRoundRobin, enterprise.PoolBalancerType)
// assertions
s.Fixtures.PoolMgrMock.AssertExpectations(s.T())
s.Fixtures.PoolMgrCtrlMock.AssertExpectations(s.T())
- s.Require().Nil(err)
- s.Require().Equal(s.Fixtures.CreateEnterpriseParams.Name, enterprise.Name)
- s.Require().Equal(s.Fixtures.Credentials[s.Fixtures.CreateEnterpriseParams.CredentialsName].Name, enterprise.CredentialsName)
}
func (s *EnterpriseTestSuite) TestCreateEnterpriseErrUnauthorized() {
@@ -184,7 +188,7 @@ func (s *EnterpriseTestSuite) TestCreateEnterpriseEmptyParams() {
}
func (s *EnterpriseTestSuite) TestCreateEnterpriseMissingCredentials() {
- s.Fixtures.CreateEnterpriseParams.CredentialsName = "not-existent-creds-name"
+ s.Fixtures.CreateEnterpriseParams.CredentialsName = notExistingCredentialsName
_, err := s.Runner.CreateEnterprise(s.Fixtures.AdminContext, s.Fixtures.CreateEnterpriseParams)
@@ -206,7 +210,7 @@ func (s *EnterpriseTestSuite) TestCreateEnterprisePoolMgrFailed() {
s.Fixtures.PoolMgrMock.AssertExpectations(s.T())
s.Fixtures.PoolMgrCtrlMock.AssertExpectations(s.T())
- s.Require().Equal(fmt.Sprintf("creating enterprise pool manager: %s", s.Fixtures.ErrMock.Error()), err.Error())
+ s.Require().Equal(fmt.Sprintf("error creating enterprise pool manager: %s", s.Fixtures.ErrMock.Error()), err.Error())
}
func (s *EnterpriseTestSuite) TestCreateEnterpriseStartPoolMgrFailed() {
@@ -218,20 +222,80 @@ func (s *EnterpriseTestSuite) TestCreateEnterpriseStartPoolMgrFailed() {
s.Fixtures.PoolMgrMock.AssertExpectations(s.T())
s.Fixtures.PoolMgrCtrlMock.AssertExpectations(s.T())
- s.Require().Equal(fmt.Sprintf("starting enterprise pool manager: %s", s.Fixtures.ErrMock.Error()), err.Error())
+ s.Require().Equal(fmt.Sprintf("error starting enterprise pool manager: %s", s.Fixtures.ErrMock.Error()), err.Error())
}
func (s *EnterpriseTestSuite) TestListEnterprises() {
s.Fixtures.PoolMgrCtrlMock.On("GetEnterprisePoolManager", mock.AnythingOfType("params.Enterprise")).Return(s.Fixtures.PoolMgrMock, nil)
s.Fixtures.PoolMgrMock.On("Status").Return(params.PoolManagerStatus{IsRunning: true}, nil)
- orgs, err := s.Runner.ListEnterprises(s.Fixtures.AdminContext)
+ orgs, err := s.Runner.ListEnterprises(s.Fixtures.AdminContext, params.EnterpriseFilter{})
s.Require().Nil(err)
garmTesting.EqualDBEntityByName(s.T(), garmTesting.DBEntityMapToSlice(s.Fixtures.StoreEnterprises), orgs)
}
+func (s *EnterpriseTestSuite) TestListEnterprisesWithFilters() {
+ s.Fixtures.PoolMgrCtrlMock.On("GetEnterprisePoolManager", mock.AnythingOfType("params.Enterprise")).Return(s.Fixtures.PoolMgrMock, nil)
+ s.Fixtures.PoolMgrMock.On("Status").Return(params.PoolManagerStatus{IsRunning: true}, nil)
+
+ enterprise, err := s.Fixtures.Store.CreateEnterprise(
+ s.Fixtures.AdminContext,
+ "test-enterprise",
+ s.testCreds,
+ "super secret",
+ params.PoolBalancerTypeRoundRobin,
+ )
+ s.Require().NoError(err)
+ enterprise2, err := s.Fixtures.Store.CreateEnterprise(
+ s.Fixtures.AdminContext,
+ "test-enterprise2",
+ s.testCreds,
+ "super secret",
+ params.PoolBalancerTypeRoundRobin,
+ )
+ s.Require().NoError(err)
+ enterprise3, err := s.Fixtures.Store.CreateEnterprise(
+ s.Fixtures.AdminContext,
+ "test-enterprise",
+ s.ghesCreds,
+ "super secret",
+ params.PoolBalancerTypeRoundRobin,
+ )
+ s.Require().NoError(err)
+ orgs, err := s.Runner.ListEnterprises(
+ s.Fixtures.AdminContext,
+ params.EnterpriseFilter{
+ Name: "test-enterprise",
+ },
+ )
+
+ s.Require().Nil(err)
+ garmTesting.EqualDBEntityByName(s.T(), []params.Enterprise{enterprise, enterprise3}, orgs)
+
+ orgs, err = s.Runner.ListEnterprises(
+ s.Fixtures.AdminContext,
+ params.EnterpriseFilter{
+ Name: "test-enterprise",
+ Endpoint: s.ghesEndpoint.Name,
+ },
+ )
+
+ s.Require().Nil(err)
+ garmTesting.EqualDBEntityByName(s.T(), []params.Enterprise{enterprise3}, orgs)
+
+ orgs, err = s.Runner.ListEnterprises(
+ s.Fixtures.AdminContext,
+ params.EnterpriseFilter{
+ Name: "test-enterprise2",
+ },
+ )
+
+ s.Require().Nil(err)
+ garmTesting.EqualDBEntityByName(s.T(), []params.Enterprise{enterprise2}, orgs)
+}
+
func (s *EnterpriseTestSuite) TestListEnterprisesErrUnauthorized() {
- _, err := s.Runner.ListEnterprises(context.Background())
+ _, err := s.Runner.ListEnterprises(context.Background(), params.EnterpriseFilter{})
s.Require().Equal(runnerErrors.ErrUnauthorized, err)
}
@@ -260,7 +324,7 @@ func (s *EnterpriseTestSuite) TestDeleteEnterprise() {
s.Require().Nil(err)
_, err = s.Fixtures.Store.GetEnterpriseByID(s.Fixtures.AdminContext, s.Fixtures.StoreEnterprises["test-enterprise-3"].ID)
- s.Require().Equal("fetching enterprise: not found", err.Error())
+ s.Require().Equal("error fetching enterprise: not found", err.Error())
}
func (s *EnterpriseTestSuite) TestDeleteEnterpriseErrUnauthorized() {
@@ -270,7 +334,11 @@ func (s *EnterpriseTestSuite) TestDeleteEnterpriseErrUnauthorized() {
}
func (s *EnterpriseTestSuite) TestDeleteEnterprisePoolDefinedFailed() {
- pool, err := s.Fixtures.Store.CreateEnterprisePool(s.Fixtures.AdminContext, s.Fixtures.StoreEnterprises["test-enterprise-1"].ID, s.Fixtures.CreatePoolParams)
+ entity := params.ForgeEntity{
+ ID: s.Fixtures.StoreEnterprises["test-enterprise-1"].ID,
+ EntityType: params.ForgeEntityTypeEnterprise,
+ }
+ pool, err := s.Fixtures.Store.CreateEntityPool(s.Fixtures.AdminContext, entity, s.Fixtures.CreatePoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create store enterprises pool: %v", err))
}
@@ -286,20 +354,23 @@ func (s *EnterpriseTestSuite) TestDeleteEnterprisePoolMgrFailed() {
err := s.Runner.DeleteEnterprise(s.Fixtures.AdminContext, s.Fixtures.StoreEnterprises["test-enterprise-1"].ID)
s.Fixtures.PoolMgrCtrlMock.AssertExpectations(s.T())
- s.Require().Equal(fmt.Sprintf("deleting enterprise pool manager: %s", s.Fixtures.ErrMock.Error()), err.Error())
+ s.Require().Equal(fmt.Sprintf("error deleting enterprise pool manager: %s", s.Fixtures.ErrMock.Error()), err.Error())
}
func (s *EnterpriseTestSuite) TestUpdateEnterprise() {
- s.Fixtures.PoolMgrCtrlMock.On("UpdateEnterprisePoolManager", s.Fixtures.AdminContext, mock.AnythingOfType("params.Enterprise")).Return(s.Fixtures.PoolMgrMock, nil)
+ s.Fixtures.PoolMgrCtrlMock.On("GetEnterprisePoolManager", mock.AnythingOfType("params.Enterprise")).Return(s.Fixtures.PoolMgrMock, nil)
s.Fixtures.PoolMgrMock.On("Status").Return(params.PoolManagerStatus{IsRunning: true}, nil)
- org, err := s.Runner.UpdateEnterprise(s.Fixtures.AdminContext, s.Fixtures.StoreEnterprises["test-enterprise-1"].ID, s.Fixtures.UpdateRepoParams)
+ param := s.Fixtures.UpdateRepoParams
+ param.PoolBalancerType = params.PoolBalancerTypePack
+ org, err := s.Runner.UpdateEnterprise(s.Fixtures.AdminContext, s.Fixtures.StoreEnterprises["test-enterprise-1"].ID, param)
s.Fixtures.PoolMgrMock.AssertExpectations(s.T())
s.Fixtures.PoolMgrCtrlMock.AssertExpectations(s.T())
s.Require().Nil(err)
- s.Require().Equal(s.Fixtures.UpdateRepoParams.CredentialsName, org.CredentialsName)
+ s.Require().Equal(s.Fixtures.UpdateRepoParams.CredentialsName, org.Credentials.Name)
s.Require().Equal(s.Fixtures.UpdateRepoParams.WebhookSecret, org.WebhookSecret)
+ s.Require().Equal(params.PoolBalancerTypePack, org.PoolBalancerType)
}
func (s *EnterpriseTestSuite) TestUpdateEnterpriseErrUnauthorized() {
@@ -309,34 +380,34 @@ func (s *EnterpriseTestSuite) TestUpdateEnterpriseErrUnauthorized() {
}
func (s *EnterpriseTestSuite) TestUpdateEnterpriseInvalidCreds() {
- s.Fixtures.UpdateRepoParams.CredentialsName = "invalid-creds-name"
+ s.Fixtures.UpdateRepoParams.CredentialsName = invalidCredentialsName
_, err := s.Runner.UpdateEnterprise(s.Fixtures.AdminContext, s.Fixtures.StoreEnterprises["test-enterprise-1"].ID, s.Fixtures.UpdateRepoParams)
- s.Require().Equal(runnerErrors.NewBadRequestError("invalid credentials (%s) for enterprise %s", s.Fixtures.UpdateRepoParams.CredentialsName, s.Fixtures.StoreEnterprises["test-enterprise-1"].Name), err)
+ if !errors.Is(err, runnerErrors.ErrNotFound) {
+ s.FailNow(fmt.Sprintf("expected error: %v", runnerErrors.ErrNotFound))
+ }
}
func (s *EnterpriseTestSuite) TestUpdateEnterprisePoolMgrFailed() {
- s.Fixtures.PoolMgrCtrlMock.On("UpdateEnterprisePoolManager", s.Fixtures.AdminContext, mock.AnythingOfType("params.Enterprise")).Return(s.Fixtures.PoolMgrMock, s.Fixtures.ErrMock)
+ s.Fixtures.PoolMgrCtrlMock.On("GetEnterprisePoolManager", mock.AnythingOfType("params.Enterprise")).Return(s.Fixtures.PoolMgrMock, s.Fixtures.ErrMock)
_, err := s.Runner.UpdateEnterprise(s.Fixtures.AdminContext, s.Fixtures.StoreEnterprises["test-enterprise-1"].ID, s.Fixtures.UpdateRepoParams)
s.Fixtures.PoolMgrCtrlMock.AssertExpectations(s.T())
- s.Require().Equal(fmt.Sprintf("failed to update enterprise pool manager: %s", s.Fixtures.ErrMock.Error()), err.Error())
+ s.Require().Equal(fmt.Sprintf("failed to get enterprise pool manager: %s", s.Fixtures.ErrMock.Error()), err.Error())
}
func (s *EnterpriseTestSuite) TestUpdateEnterpriseCreateEnterprisePoolMgrFailed() {
- s.Fixtures.PoolMgrCtrlMock.On("UpdateEnterprisePoolManager", s.Fixtures.AdminContext, mock.AnythingOfType("params.Enterprise")).Return(s.Fixtures.PoolMgrMock, s.Fixtures.ErrMock)
+ s.Fixtures.PoolMgrCtrlMock.On("GetEnterprisePoolManager", mock.AnythingOfType("params.Enterprise")).Return(s.Fixtures.PoolMgrMock, s.Fixtures.ErrMock)
_, err := s.Runner.UpdateEnterprise(s.Fixtures.AdminContext, s.Fixtures.StoreEnterprises["test-enterprise-1"].ID, s.Fixtures.UpdateRepoParams)
s.Fixtures.PoolMgrCtrlMock.AssertExpectations(s.T())
- s.Require().Equal(fmt.Sprintf("failed to update enterprise pool manager: %s", s.Fixtures.ErrMock.Error()), err.Error())
+ s.Require().Equal(fmt.Sprintf("failed to get enterprise pool manager: %s", s.Fixtures.ErrMock.Error()), err.Error())
}
func (s *EnterpriseTestSuite) TestCreateEnterprisePool() {
- s.Fixtures.PoolMgrCtrlMock.On("GetEnterprisePoolManager", mock.AnythingOfType("params.Enterprise")).Return(s.Fixtures.PoolMgrMock, nil)
-
pool, err := s.Runner.CreateEnterprisePool(s.Fixtures.AdminContext, s.Fixtures.StoreEnterprises["test-enterprise-1"].ID, s.Fixtures.CreatePoolParams)
s.Fixtures.PoolMgrMock.AssertExpectations(s.T())
@@ -360,30 +431,21 @@ func (s *EnterpriseTestSuite) TestCreateEnterprisePoolErrUnauthorized() {
s.Require().Equal(runnerErrors.ErrUnauthorized, err)
}
-func (s *EnterpriseTestSuite) TestCreateEnterprisePoolErrNotFound() {
- s.Fixtures.PoolMgrCtrlMock.On("GetEnterprisePoolManager", mock.AnythingOfType("params.Enterprise")).Return(s.Fixtures.PoolMgrMock, runnerErrors.ErrNotFound)
-
- _, err := s.Runner.CreateEnterprisePool(s.Fixtures.AdminContext, s.Fixtures.StoreEnterprises["test-enterprise-1"].ID, s.Fixtures.CreatePoolParams)
-
- s.Fixtures.PoolMgrMock.AssertExpectations(s.T())
- s.Fixtures.PoolMgrCtrlMock.AssertExpectations(s.T())
- s.Require().Equal(runnerErrors.ErrNotFound, err)
-}
-
func (s *EnterpriseTestSuite) TestCreateEnterprisePoolFetchPoolParamsFailed() {
- s.Fixtures.CreatePoolParams.ProviderName = "not-existent-provider-name"
-
- s.Fixtures.PoolMgrCtrlMock.On("GetEnterprisePoolManager", mock.AnythingOfType("params.Enterprise")).Return(s.Fixtures.PoolMgrMock, nil)
-
+ s.Fixtures.CreatePoolParams.ProviderName = notExistingProviderName
_, err := s.Runner.CreateEnterprisePool(s.Fixtures.AdminContext, s.Fixtures.StoreEnterprises["test-enterprise-1"].ID, s.Fixtures.CreatePoolParams)
s.Fixtures.PoolMgrMock.AssertExpectations(s.T())
s.Fixtures.PoolMgrCtrlMock.AssertExpectations(s.T())
- s.Require().Regexp("fetching pool params: no such provider", err.Error())
+ s.Require().Regexp("failed to append tags to create pool params: no such provider not-existent-provider-name", err.Error())
}
func (s *EnterpriseTestSuite) TestGetEnterprisePoolByID() {
- enterprisePool, err := s.Fixtures.Store.CreateEnterprisePool(s.Fixtures.AdminContext, s.Fixtures.StoreEnterprises["test-enterprise-1"].ID, s.Fixtures.CreatePoolParams)
+ entity := params.ForgeEntity{
+ ID: s.Fixtures.StoreEnterprises["test-enterprise-1"].ID,
+ EntityType: params.ForgeEntityTypeEnterprise,
+ }
+ enterprisePool, err := s.Fixtures.Store.CreateEntityPool(s.Fixtures.AdminContext, entity, s.Fixtures.CreatePoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create enterprise pool: %s", err))
}
@@ -401,7 +463,11 @@ func (s *EnterpriseTestSuite) TestGetEnterprisePoolByIDErrUnauthorized() {
}
func (s *EnterpriseTestSuite) TestDeleteEnterprisePool() {
- pool, err := s.Fixtures.Store.CreateEnterprisePool(s.Fixtures.AdminContext, s.Fixtures.StoreEnterprises["test-enterprise-1"].ID, s.Fixtures.CreatePoolParams)
+ entity := params.ForgeEntity{
+ ID: s.Fixtures.StoreEnterprises["test-enterprise-1"].ID,
+ EntityType: params.ForgeEntityTypeEnterprise,
+ }
+ pool, err := s.Fixtures.Store.CreateEntityPool(s.Fixtures.AdminContext, entity, s.Fixtures.CreatePoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create enterprise pool: %s", err))
}
@@ -410,8 +476,8 @@ func (s *EnterpriseTestSuite) TestDeleteEnterprisePool() {
s.Require().Nil(err)
- _, err = s.Fixtures.Store.GetEnterprisePool(s.Fixtures.AdminContext, s.Fixtures.StoreEnterprises["test-enterprise-1"].ID, pool.ID)
- s.Require().Equal("fetching pool: finding pool: not found", err.Error())
+ _, err = s.Fixtures.Store.GetEntityPool(s.Fixtures.AdminContext, entity, pool.ID)
+ s.Require().Equal("fetching pool: error finding pool: not found", err.Error())
}
func (s *EnterpriseTestSuite) TestDeleteEnterprisePoolErrUnauthorized() {
@@ -421,7 +487,11 @@ func (s *EnterpriseTestSuite) TestDeleteEnterprisePoolErrUnauthorized() {
}
func (s *EnterpriseTestSuite) TestDeleteEnterprisePoolRunnersFailed() {
- pool, err := s.Fixtures.Store.CreateEnterprisePool(s.Fixtures.AdminContext, s.Fixtures.StoreEnterprises["test-enterprise-1"].ID, s.Fixtures.CreatePoolParams)
+ entity := params.ForgeEntity{
+ ID: s.Fixtures.StoreEnterprises["test-enterprise-1"].ID,
+ EntityType: params.ForgeEntityTypeEnterprise,
+ }
+ pool, err := s.Fixtures.Store.CreateEntityPool(s.Fixtures.AdminContext, entity, s.Fixtures.CreatePoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create enterprise pool: %v", err))
}
@@ -436,10 +506,14 @@ func (s *EnterpriseTestSuite) TestDeleteEnterprisePoolRunnersFailed() {
}
func (s *EnterpriseTestSuite) TestListEnterprisePools() {
+ entity := params.ForgeEntity{
+ ID: s.Fixtures.StoreEnterprises["test-enterprise-1"].ID,
+ EntityType: params.ForgeEntityTypeEnterprise,
+ }
enterprisePools := []params.Pool{}
for i := 1; i <= 2; i++ {
s.Fixtures.CreatePoolParams.Image = fmt.Sprintf("test-enterprise-%v", i)
- pool, err := s.Fixtures.Store.CreateEnterprisePool(s.Fixtures.AdminContext, s.Fixtures.StoreEnterprises["test-enterprise-1"].ID, s.Fixtures.CreatePoolParams)
+ pool, err := s.Fixtures.Store.CreateEntityPool(s.Fixtures.AdminContext, entity, s.Fixtures.CreatePoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create org pool: %v", err))
}
@@ -459,7 +533,11 @@ func (s *EnterpriseTestSuite) TestListOrgPoolsErrUnauthorized() {
}
func (s *EnterpriseTestSuite) TestUpdateEnterprisePool() {
- enterprisePool, err := s.Fixtures.Store.CreateEnterprisePool(s.Fixtures.AdminContext, s.Fixtures.StoreEnterprises["test-enterprise-1"].ID, s.Fixtures.CreatePoolParams)
+ entity := params.ForgeEntity{
+ ID: s.Fixtures.StoreEnterprises["test-enterprise-1"].ID,
+ EntityType: params.ForgeEntityTypeEnterprise,
+ }
+ enterprisePool, err := s.Fixtures.Store.CreateEntityPool(s.Fixtures.AdminContext, entity, s.Fixtures.CreatePoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create enterprise pool: %s", err))
}
@@ -478,7 +556,11 @@ func (s *EnterpriseTestSuite) TestUpdateEnterprisePoolErrUnauthorized() {
}
func (s *EnterpriseTestSuite) TestUpdateEnterprisePoolMinIdleGreaterThanMax() {
- pool, err := s.Fixtures.Store.CreateEnterprisePool(s.Fixtures.AdminContext, s.Fixtures.StoreEnterprises["test-enterprise-1"].ID, s.Fixtures.CreatePoolParams)
+ entity := params.ForgeEntity{
+ ID: s.Fixtures.StoreEnterprises["test-enterprise-1"].ID,
+ EntityType: params.ForgeEntityTypeEnterprise,
+ }
+ pool, err := s.Fixtures.Store.CreateEntityPool(s.Fixtures.AdminContext, entity, s.Fixtures.CreatePoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create enterprise pool: %s", err))
}
@@ -493,7 +575,11 @@ func (s *EnterpriseTestSuite) TestUpdateEnterprisePoolMinIdleGreaterThanMax() {
}
func (s *EnterpriseTestSuite) TestListEnterpriseInstances() {
- pool, err := s.Fixtures.Store.CreateEnterprisePool(s.Fixtures.AdminContext, s.Fixtures.StoreEnterprises["test-enterprise-1"].ID, s.Fixtures.CreatePoolParams)
+ entity := params.ForgeEntity{
+ ID: s.Fixtures.StoreEnterprises["test-enterprise-1"].ID,
+ EntityType: params.ForgeEntityTypeEnterprise,
+ }
+ pool, err := s.Fixtures.Store.CreateEntityPool(s.Fixtures.AdminContext, entity, s.Fixtures.CreatePoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create enterprise pool: %v", err))
}
@@ -522,7 +608,7 @@ func (s *EnterpriseTestSuite) TestListEnterpriseInstancesErrUnauthorized() {
func (s *EnterpriseTestSuite) TestFindEnterprisePoolManager() {
s.Fixtures.PoolMgrCtrlMock.On("GetEnterprisePoolManager", mock.AnythingOfType("params.Enterprise")).Return(s.Fixtures.PoolMgrMock, nil)
- poolManager, err := s.Runner.findEnterprisePoolManager(s.Fixtures.StoreEnterprises["test-enterprise-1"].Name)
+ poolManager, err := s.Runner.findEnterprisePoolManager(s.Fixtures.StoreEnterprises["test-enterprise-1"].Name, s.Fixtures.StoreEnterprises["test-enterprise-1"].Endpoint.Name)
s.Fixtures.PoolMgrMock.AssertExpectations(s.T())
s.Fixtures.PoolMgrCtrlMock.AssertExpectations(s.T())
@@ -533,7 +619,7 @@ func (s *EnterpriseTestSuite) TestFindEnterprisePoolManager() {
func (s *EnterpriseTestSuite) TestFindEnterprisePoolManagerFetchPoolMgrFailed() {
s.Fixtures.PoolMgrCtrlMock.On("GetEnterprisePoolManager", mock.AnythingOfType("params.Enterprise")).Return(s.Fixtures.PoolMgrMock, s.Fixtures.ErrMock)
- _, err := s.Runner.findEnterprisePoolManager(s.Fixtures.StoreEnterprises["test-enterprise-1"].Name)
+ _, err := s.Runner.findEnterprisePoolManager(s.Fixtures.StoreEnterprises["test-enterprise-1"].Name, s.Fixtures.StoreEnterprises["test-enterprise-1"].Endpoint.Name)
s.Fixtures.PoolMgrMock.AssertExpectations(s.T())
s.Fixtures.PoolMgrCtrlMock.AssertExpectations(s.T())
diff --git a/runner/gitea_credentials.go b/runner/gitea_credentials.go
new file mode 100644
index 00000000..d66212f9
--- /dev/null
+++ b/runner/gitea_credentials.go
@@ -0,0 +1,99 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package runner
+
+import (
+ "context"
+ "fmt"
+
+ runnerErrors "github.com/cloudbase/garm-provider-common/errors"
+ "github.com/cloudbase/garm/auth"
+ "github.com/cloudbase/garm/params"
+)
+
+func (r *Runner) ListGiteaCredentials(ctx context.Context) ([]params.ForgeCredentials, error) {
+ if !auth.IsAdmin(ctx) {
+ return nil, runnerErrors.ErrUnauthorized
+ }
+
+ // Get the credentials from the store. The cache is always updated after the database successfully
+ // commits the transaction that created/updated the credentials.
+ // If we create a set of credentials and then immediately call ListGiteaCredentials,
+ // there is a possibility that not all credentials will be in the cache yet.
+ creds, err := r.store.ListGiteaCredentials(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("error fetching gitea credentials: %w", err)
+ }
+ return creds, nil
+}
+
+func (r *Runner) CreateGiteaCredentials(ctx context.Context, param params.CreateGiteaCredentialsParams) (params.ForgeCredentials, error) {
+ if !auth.IsAdmin(ctx) {
+ return params.ForgeCredentials{}, runnerErrors.ErrUnauthorized
+ }
+
+ if err := param.Validate(); err != nil {
+ return params.ForgeCredentials{}, fmt.Errorf("error failed to validate gitea credentials params: %w", err)
+ }
+
+ creds, err := r.store.CreateGiteaCredentials(ctx, param)
+ if err != nil {
+ return params.ForgeCredentials{}, fmt.Errorf("error failed to create gitea credentials: %w", err)
+ }
+
+ return creds, nil
+}
+
+func (r *Runner) GetGiteaCredentials(ctx context.Context, id uint) (params.ForgeCredentials, error) {
+ if !auth.IsAdmin(ctx) {
+ return params.ForgeCredentials{}, runnerErrors.ErrUnauthorized
+ }
+
+ creds, err := r.store.GetGiteaCredentials(ctx, id, true)
+ if err != nil {
+ return params.ForgeCredentials{}, fmt.Errorf("error failed to get gitea credentials: %w", err)
+ }
+
+ return creds, nil
+}
+
+func (r *Runner) DeleteGiteaCredentials(ctx context.Context, id uint) error {
+ if !auth.IsAdmin(ctx) {
+ return runnerErrors.ErrUnauthorized
+ }
+
+ if err := r.store.DeleteGiteaCredentials(ctx, id); err != nil {
+ return fmt.Errorf("error failed to delete gitea credentials: %w", err)
+ }
+
+ return nil
+}
+
+func (r *Runner) UpdateGiteaCredentials(ctx context.Context, id uint, param params.UpdateGiteaCredentialsParams) (params.ForgeCredentials, error) {
+ if !auth.IsAdmin(ctx) {
+ return params.ForgeCredentials{}, runnerErrors.ErrUnauthorized
+ }
+
+ if err := param.Validate(); err != nil {
+ return params.ForgeCredentials{}, fmt.Errorf("error failed to validate gitea credentials params: %w", err)
+ }
+
+ newCreds, err := r.store.UpdateGiteaCredentials(ctx, id, param)
+ if err != nil {
+ return params.ForgeCredentials{}, fmt.Errorf("error failed to update gitea credentials: %w", err)
+ }
+
+ return newCreds, nil
+}
diff --git a/runner/gitea_endpoints.go b/runner/gitea_endpoints.go
new file mode 100644
index 00000000..4a7e32d9
--- /dev/null
+++ b/runner/gitea_endpoints.go
@@ -0,0 +1,95 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package runner
+
+import (
+ "context"
+ "fmt"
+
+ runnerErrors "github.com/cloudbase/garm-provider-common/errors"
+ "github.com/cloudbase/garm/auth"
+ "github.com/cloudbase/garm/params"
+)
+
+func (r *Runner) CreateGiteaEndpoint(ctx context.Context, param params.CreateGiteaEndpointParams) (params.ForgeEndpoint, error) {
+ if !auth.IsAdmin(ctx) {
+ return params.ForgeEndpoint{}, runnerErrors.ErrUnauthorized
+ }
+
+ if err := param.Validate(); err != nil {
+ return params.ForgeEndpoint{}, fmt.Errorf("failed to validate gitea endpoint params: %w", err)
+ }
+
+ ep, err := r.store.CreateGiteaEndpoint(ctx, param)
+ if err != nil {
+ return params.ForgeEndpoint{}, fmt.Errorf("failed to create gitea endpoint: %w", err)
+ }
+
+ return ep, nil
+}
+
+func (r *Runner) GetGiteaEndpoint(ctx context.Context, name string) (params.ForgeEndpoint, error) {
+ if !auth.IsAdmin(ctx) {
+ return params.ForgeEndpoint{}, runnerErrors.ErrUnauthorized
+ }
+ endpoint, err := r.store.GetGiteaEndpoint(ctx, name)
+ if err != nil {
+ return params.ForgeEndpoint{}, fmt.Errorf("failed to get gitea endpoint: %w", err)
+ }
+
+ return endpoint, nil
+}
+
+func (r *Runner) DeleteGiteaEndpoint(ctx context.Context, name string) error {
+ if !auth.IsAdmin(ctx) {
+ return runnerErrors.ErrUnauthorized
+ }
+
+ err := r.store.DeleteGiteaEndpoint(ctx, name)
+ if err != nil {
+ return fmt.Errorf("failed to delete gitea endpoint: %w", err)
+ }
+
+ return nil
+}
+
+func (r *Runner) UpdateGiteaEndpoint(ctx context.Context, name string, param params.UpdateGiteaEndpointParams) (params.ForgeEndpoint, error) {
+ if !auth.IsAdmin(ctx) {
+ return params.ForgeEndpoint{}, runnerErrors.ErrUnauthorized
+ }
+
+ if err := param.Validate(); err != nil {
+ return params.ForgeEndpoint{}, fmt.Errorf("failed to validate gitea endpoint params: %w", err)
+ }
+
+ newEp, err := r.store.UpdateGiteaEndpoint(ctx, name, param)
+ if err != nil {
+ return params.ForgeEndpoint{}, fmt.Errorf("failed to update gitea endpoint: %w", err)
+ }
+ return newEp, nil
+}
+
+func (r *Runner) ListGiteaEndpoints(ctx context.Context) ([]params.ForgeEndpoint, error) {
+ if !auth.IsAdmin(ctx) {
+ return nil, runnerErrors.ErrUnauthorized
+ }
+
+ endpoints, err := r.store.ListGiteaEndpoints(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("failed to list gitea endpoints: %w", err)
+ }
+
+ return endpoints, nil
+}
diff --git a/runner/github_credentials.go b/runner/github_credentials.go
new file mode 100644
index 00000000..5e1291ff
--- /dev/null
+++ b/runner/github_credentials.go
@@ -0,0 +1,115 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package runner
+
+import (
+ "context"
+ "fmt"
+
+ runnerErrors "github.com/cloudbase/garm-provider-common/errors"
+ "github.com/cloudbase/garm/auth"
+ "github.com/cloudbase/garm/cache"
+ "github.com/cloudbase/garm/params"
+)
+
+func (r *Runner) ListCredentials(ctx context.Context) ([]params.ForgeCredentials, error) {
+ if !auth.IsAdmin(ctx) {
+ return nil, runnerErrors.ErrUnauthorized
+ }
+
+ // Get the credentials from the store. The cache is always updated after the database successfully
+ // commits the transaction that created/updated the credentials.
+ // If we create a set of credentials and then immediately call ListCredentials,
+ // there is a possibility that not all credentials will be in the cache yet.
+ creds, err := r.store.ListGithubCredentials(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("error fetching github credentials: %w", err)
+ }
+
+ // If cache entries exist, update the rate limit for each credential. Rate limits
+ // are queried every 30 seconds and stored in the cache.
+ credsCache := cache.GetAllGithubCredentialsAsMap()
+ for idx, cred := range creds {
+ inCache, ok := credsCache[cred.ID]
+ if ok {
+ creds[idx].RateLimit = inCache.RateLimit
+ }
+ }
+ return creds, nil
+}
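+
+// Because rate limits are filled in from the cache, the returned data is
+// eventually consistent: a credential created moments ago may report an empty
+// RateLimit until the periodic (roughly 30 second) refresh stores a value for
+// it. A sketch of the observable behavior, under that assumption:
+//
+//	creds, _ := r.CreateGithubCredentials(ctx, param)
+//	listed, _ := r.ListCredentials(ctx)
+//	// the entry in listed matching creds.ID may still have a zero-value
+//	// RateLimit; it is populated on a later call, after the cache refresh.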
+
+func (r *Runner) CreateGithubCredentials(ctx context.Context, param params.CreateGithubCredentialsParams) (params.ForgeCredentials, error) {
+ if !auth.IsAdmin(ctx) {
+ return params.ForgeCredentials{}, runnerErrors.ErrUnauthorized
+ }
+
+ if err := param.Validate(); err != nil {
+ return params.ForgeCredentials{}, fmt.Errorf("failed to validate github credentials params: %w", err)
+ }
+
+ creds, err := r.store.CreateGithubCredentials(ctx, param)
+ if err != nil {
+ return params.ForgeCredentials{}, fmt.Errorf("failed to create github credentials: %w", err)
+ }
+
+ return creds, nil
+}
+
+func (r *Runner) GetGithubCredentials(ctx context.Context, id uint) (params.ForgeCredentials, error) {
+ if !auth.IsAdmin(ctx) {
+ return params.ForgeCredentials{}, runnerErrors.ErrUnauthorized
+ }
+
+ creds, err := r.store.GetGithubCredentials(ctx, id, true)
+ if err != nil {
+ return params.ForgeCredentials{}, fmt.Errorf("failed to get github credentials: %w", err)
+ }
+
+ cached, ok := cache.GetGithubCredentials(creds.ID)
+ if ok {
+ creds.RateLimit = cached.RateLimit
+ }
+
+ return creds, nil
+}
+
+func (r *Runner) DeleteGithubCredentials(ctx context.Context, id uint) error {
+ if !auth.IsAdmin(ctx) {
+ return runnerErrors.ErrUnauthorized
+ }
+
+ if err := r.store.DeleteGithubCredentials(ctx, id); err != nil {
+ return fmt.Errorf("failed to delete github credentials: %w", err)
+ }
+
+ return nil
+}
+
+func (r *Runner) UpdateGithubCredentials(ctx context.Context, id uint, param params.UpdateGithubCredentialsParams) (params.ForgeCredentials, error) {
+ if !auth.IsAdmin(ctx) {
+ return params.ForgeCredentials{}, runnerErrors.ErrUnauthorized
+ }
+
+ if err := param.Validate(); err != nil {
+ return params.ForgeCredentials{}, fmt.Errorf("failed to validate github credentials params: %w", err)
+ }
+
+ newCreds, err := r.store.UpdateGithubCredentials(ctx, id, param)
+ if err != nil {
+ return params.ForgeCredentials{}, fmt.Errorf("failed to update github credentials: %w", err)
+ }
+
+ return newCreds, nil
+}
diff --git a/runner/github_endpoints.go b/runner/github_endpoints.go
new file mode 100644
index 00000000..29965081
--- /dev/null
+++ b/runner/github_endpoints.go
@@ -0,0 +1,95 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package runner
+
+import (
+ "context"
+ "fmt"
+
+ runnerErrors "github.com/cloudbase/garm-provider-common/errors"
+ "github.com/cloudbase/garm/auth"
+ "github.com/cloudbase/garm/params"
+)
+
+func (r *Runner) CreateGithubEndpoint(ctx context.Context, param params.CreateGithubEndpointParams) (params.ForgeEndpoint, error) {
+ if !auth.IsAdmin(ctx) {
+ return params.ForgeEndpoint{}, runnerErrors.ErrUnauthorized
+ }
+
+ if err := param.Validate(); err != nil {
+ return params.ForgeEndpoint{}, fmt.Errorf("error failed to validate github endpoint params: %w", err)
+ }
+
+ ep, err := r.store.CreateGithubEndpoint(ctx, param)
+ if err != nil {
+ return params.ForgeEndpoint{}, fmt.Errorf("failed to create github endpoint: %w", err)
+ }
+
+ return ep, nil
+}
+
+func (r *Runner) GetGithubEndpoint(ctx context.Context, name string) (params.ForgeEndpoint, error) {
+ if !auth.IsAdmin(ctx) {
+ return params.ForgeEndpoint{}, runnerErrors.ErrUnauthorized
+ }
+ endpoint, err := r.store.GetGithubEndpoint(ctx, name)
+ if err != nil {
+ return params.ForgeEndpoint{}, fmt.Errorf("failed to get github endpoint: %w", err)
+ }
+
+ return endpoint, nil
+}
+
+func (r *Runner) DeleteGithubEndpoint(ctx context.Context, name string) error {
+ if !auth.IsAdmin(ctx) {
+ return runnerErrors.ErrUnauthorized
+ }
+
+ err := r.store.DeleteGithubEndpoint(ctx, name)
+ if err != nil {
+ return fmt.Errorf("failed to delete github endpoint: %w", err)
+ }
+
+ return nil
+}
+
+func (r *Runner) UpdateGithubEndpoint(ctx context.Context, name string, param params.UpdateGithubEndpointParams) (params.ForgeEndpoint, error) {
+ if !auth.IsAdmin(ctx) {
+ return params.ForgeEndpoint{}, runnerErrors.ErrUnauthorized
+ }
+
+ if err := param.Validate(); err != nil {
+ return params.ForgeEndpoint{}, fmt.Errorf("failed to validate github endpoint params: %w", err)
+ }
+
+ newEp, err := r.store.UpdateGithubEndpoint(ctx, name, param)
+ if err != nil {
+ return params.ForgeEndpoint{}, fmt.Errorf("failed to update github endpoint: %w", err)
+ }
+ return newEp, nil
+}
+
+func (r *Runner) ListGithubEndpoints(ctx context.Context) ([]params.ForgeEndpoint, error) {
+ if !auth.IsAdmin(ctx) {
+ return nil, runnerErrors.ErrUnauthorized
+ }
+
+ endpoints, err := r.store.ListGithubEndpoints(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("failed to list github endpoints: %w", err)
+ }
+
+ return endpoints, nil
+}
diff --git a/runner/interfaces.go b/runner/interfaces.go
index 05ae9c0f..3d4703f7 100644
--- a/runner/interfaces.go
+++ b/runner/interfaces.go
@@ -24,7 +24,6 @@ import (
type RepoPoolManager interface {
CreateRepoPoolManager(ctx context.Context, repo params.Repository, providers map[string]common.Provider, store dbCommon.Store) (common.PoolManager, error)
- UpdateRepoPoolManager(ctx context.Context, repo params.Repository) (common.PoolManager, error)
GetRepoPoolManager(repo params.Repository) (common.PoolManager, error)
DeleteRepoPoolManager(repo params.Repository) error
GetRepoPoolManagers() (map[string]common.PoolManager, error)
@@ -32,7 +31,6 @@ type RepoPoolManager interface {
type OrgPoolManager interface {
CreateOrgPoolManager(ctx context.Context, org params.Organization, providers map[string]common.Provider, store dbCommon.Store) (common.PoolManager, error)
- UpdateOrgPoolManager(ctx context.Context, org params.Organization) (common.PoolManager, error)
GetOrgPoolManager(org params.Organization) (common.PoolManager, error)
DeleteOrgPoolManager(org params.Organization) error
GetOrgPoolManagers() (map[string]common.PoolManager, error)
@@ -40,13 +38,12 @@ type OrgPoolManager interface {
type EnterprisePoolManager interface {
CreateEnterprisePoolManager(ctx context.Context, enterprise params.Enterprise, providers map[string]common.Provider, store dbCommon.Store) (common.PoolManager, error)
- UpdateEnterprisePoolManager(ctx context.Context, enterprise params.Enterprise) (common.PoolManager, error)
GetEnterprisePoolManager(enterprise params.Enterprise) (common.PoolManager, error)
DeleteEnterprisePoolManager(enterprise params.Enterprise) error
GetEnterprisePoolManagers() (map[string]common.PoolManager, error)
}
-//go:generate mockery --name=PoolManagerController
+//go:generate go run github.com/vektra/mockery/v2@latest
type PoolManagerController interface {
RepoPoolManager
diff --git a/runner/metadata.go b/runner/metadata.go
new file mode 100644
index 00000000..b309b96e
--- /dev/null
+++ b/runner/metadata.go
@@ -0,0 +1,300 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package runner
+
+import (
+ "bytes"
+ "context"
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "html/template"
+ "log/slog"
+
+ "github.com/cloudbase/garm-provider-common/defaults"
+ runnerErrors "github.com/cloudbase/garm-provider-common/errors"
+ "github.com/cloudbase/garm/auth"
+ "github.com/cloudbase/garm/params"
+)
+
+var githubSystemdUnitTemplate = `[Unit]
+Description=GitHub Actions Runner ({{.ServiceName}})
+After=network.target
+
+[Service]
+ExecStart=/home/{{.RunAsUser}}/actions-runner/runsvc.sh
+User={{.RunAsUser}}
+WorkingDirectory=/home/{{.RunAsUser}}/actions-runner
+KillMode=process
+KillSignal=SIGTERM
+TimeoutStopSec=5min
+
+[Install]
+WantedBy=multi-user.target
+`
+
+var giteaSystemdUnitTemplate = `[Unit]
+Description=Act Runner ({{.ServiceName}})
+After=network.target
+
+[Service]
+ExecStart=/home/{{.RunAsUser}}/act-runner/act_runner daemon --once
+User={{.RunAsUser}}
+WorkingDirectory=/home/{{.RunAsUser}}/act-runner
+KillMode=process
+KillSignal=SIGTERM
+TimeoutStopSec=5min
+Restart=always
+
+[Install]
+WantedBy=multi-user.target
+`
+
+func validateInstanceState(ctx context.Context) (params.Instance, error) {
+ status := auth.InstanceRunnerStatus(ctx)
+ if status != params.RunnerPending && status != params.RunnerInstalling {
+ return params.Instance{}, runnerErrors.ErrUnauthorized
+ }
+
+ instance, err := auth.InstanceParams(ctx)
+ if err != nil {
+ return params.Instance{}, runnerErrors.ErrUnauthorized
+ }
+ return instance, nil
+}
+
+func (r *Runner) getForgeEntityFromInstance(ctx context.Context, instance params.Instance) (params.ForgeEntity, error) {
+ var entityGetter params.EntityGetter
+ var err error
+ switch {
+ case instance.PoolID != "":
+ entityGetter, err = r.store.GetPoolByID(r.ctx, instance.PoolID)
+ case instance.ScaleSetID != 0:
+ entityGetter, err = r.store.GetScaleSetByID(r.ctx, instance.ScaleSetID)
+ default:
+ return params.ForgeEntity{}, errors.New("instance not associated with a pool or scale set")
+ }
+
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(
+ ctx, "failed to get entity getter",
+ "instance", instance.Name)
+ return params.ForgeEntity{}, fmt.Errorf("error fetching entity getter: %w", err)
+ }
+
+ poolEntity, err := entityGetter.GetEntity()
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(
+ ctx, "failed to get entity",
+ "instance", instance.Name)
+ return params.ForgeEntity{}, fmt.Errorf("error fetching entity: %w", err)
+ }
+
+ entity, err := r.store.GetForgeEntity(r.ctx, poolEntity.EntityType, poolEntity.ID)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(
+ ctx, "failed to get entity",
+ "instance", instance.Name)
+ return params.ForgeEntity{}, fmt.Errorf("error fetching entity: %w", err)
+ }
+ return entity, nil
+}
+
+func (r *Runner) getServiceNameForEntity(entity params.ForgeEntity) (string, error) {
+ switch entity.EntityType {
+ case params.ForgeEntityTypeEnterprise:
+ return fmt.Sprintf("actions.runner.%s.%s", entity.Owner, entity.Name), nil
+ case params.ForgeEntityTypeOrganization:
+ return fmt.Sprintf("actions.runner.%s.%s", entity.Owner, entity.Name), nil
+ case params.ForgeEntityTypeRepository:
+ return fmt.Sprintf("actions.runner.%s-%s.%s", entity.Owner, entity.Name, entity.Name), nil
+ default:
+ return "", errors.New("unknown entity type")
+ }
+}
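+
+// For illustration, the format strings above produce service names of the
+// following shape (placeholder owner/name values):
+//
+//	enterprise:   actions.runner.<owner>.<name>
+//	organization: actions.runner.<owner>.<name>
+//	repository:   actions.runner.<owner>-<name>.<name>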
+
+func (r *Runner) GetRunnerServiceName(ctx context.Context) (string, error) {
+ instance, err := validateInstanceState(ctx)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(
+ ctx, "failed to get instance params")
+ return "", runnerErrors.ErrUnauthorized
+ }
+ entity, err := r.getForgeEntityFromInstance(ctx, instance)
+ if err != nil {
+ slog.ErrorContext(r.ctx, "failed to get entity", "error", err)
+ return "", fmt.Errorf("error fetching entity: %w", err)
+ }
+
+ serviceName, err := r.getServiceNameForEntity(entity)
+ if err != nil {
+ slog.ErrorContext(r.ctx, "failed to get service name", "error", err)
+ return "", fmt.Errorf("error fetching service name: %w", err)
+ }
+ return serviceName, nil
+}
+
+func (r *Runner) GenerateSystemdUnitFile(ctx context.Context, runAsUser string) ([]byte, error) {
+ instance, err := validateInstanceState(ctx)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(
+ ctx, "failed to get instance params")
+ return nil, runnerErrors.ErrUnauthorized
+ }
+ entity, err := r.getForgeEntityFromInstance(ctx, instance)
+ if err != nil {
+ slog.ErrorContext(r.ctx, "failed to get entity", "error", err)
+ return nil, fmt.Errorf("error fetching entity: %w", err)
+ }
+
+ serviceName, err := r.getServiceNameForEntity(entity)
+ if err != nil {
+ slog.ErrorContext(r.ctx, "failed to get service name", "error", err)
+ return nil, fmt.Errorf("error fetching service name: %w", err)
+ }
+
+ var unitTemplate *template.Template
+ switch entity.Credentials.ForgeType {
+ case params.GithubEndpointType:
+ unitTemplate, err = template.New("").Parse(githubSystemdUnitTemplate)
+ case params.GiteaEndpointType:
+ unitTemplate, err = template.New("").Parse(giteaSystemdUnitTemplate)
+ default:
+ slog.ErrorContext(r.ctx, "unknown forge type", "forge_type", entity.Credentials.ForgeType)
+ return nil, errors.New("unknown forge type")
+ }
+ if err != nil {
+ slog.ErrorContext(r.ctx, "failed to parse template", "error", err)
+ return nil, fmt.Errorf("error parsing template: %w", err)
+ }
+
+ if runAsUser == "" {
+ runAsUser = defaults.DefaultUser
+ }
+
+ data := struct {
+ ServiceName string
+ RunAsUser string
+ }{
+ ServiceName: serviceName,
+ RunAsUser: runAsUser,
+ }
+
+ var unitFile bytes.Buffer
+ if err := unitTemplate.Execute(&unitFile, data); err != nil {
+ slog.ErrorContext(r.ctx, "failed to execute template", "error", err)
+ return nil, fmt.Errorf("error executing template: %w", err)
+ }
+ return unitFile.Bytes(), nil
+}
+
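+// A minimal sketch of the template input (entity values hypothetical): for a
+// GitHub organization entity with runAsUser left empty, the selected unit
+// template is executed with ServiceName "actions.runner.acme.eng" and
+// RunAsUser set to defaults.DefaultUser, and the rendered unit file is
+// returned verbatim to the caller.
+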
+func (r *Runner) GetJITConfigFile(ctx context.Context, file string) ([]byte, error) {
+ if !auth.InstanceHasJITConfig(ctx) {
+ return nil, runnerErrors.NewNotFoundError("instance not configured for JIT")
+ }
+
+ instance, err := validateInstanceState(ctx)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(
+ ctx, "failed to get instance params")
+ return nil, runnerErrors.ErrUnauthorized
+ }
+ jitConfig := instance.JitConfiguration
+ contents, ok := jitConfig[file]
+ if !ok {
+ return nil, runnerErrors.NewNotFoundError("could not find file %q", file)
+ }
+
+ decoded, err := base64.StdEncoding.DecodeString(contents)
+ if err != nil {
+ return nil, fmt.Errorf("error decoding file contents: %w", err)
+ }
+
+ return decoded, nil
+}
+
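+// Usage sketch (file name hypothetical): a runner fetching its JIT
+// credentials would request the ".runner" file; the handler looks that key
+// up in the instance's JitConfiguration map and returns the base64-decoded
+// contents, or a not-found error if the key is absent.
+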
+func (r *Runner) GetInstanceGithubRegistrationToken(ctx context.Context) (string, error) {
+ // Check if this instance already fetched a registration token or if it was configured using
+ // the new Just In Time runner feature. If we're still using the old way of configuring a runner,
+ // we only allow an instance to fetch one token. If the instance fails to bootstrap after a token
+ // is fetched, we reset the token fetched field when re-queueing the instance.
+ if auth.InstanceTokenFetched(ctx) || auth.InstanceHasJITConfig(ctx) {
+ return "", runnerErrors.ErrUnauthorized
+ }
+
+ status := auth.InstanceRunnerStatus(ctx)
+ if status != params.RunnerPending && status != params.RunnerInstalling {
+ return "", runnerErrors.ErrUnauthorized
+ }
+
+ instance, err := auth.InstanceParams(ctx)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(
+ ctx, "failed to get instance params")
+ return "", runnerErrors.ErrUnauthorized
+ }
+
+ poolMgr, err := r.getPoolManagerFromInstance(ctx, instance)
+ if err != nil {
+ return "", fmt.Errorf("error fetching pool manager for instance: %w", err)
+ }
+
+ token, err := poolMgr.GithubRunnerRegistrationToken()
+ if err != nil {
+ return "", fmt.Errorf("error fetching runner token: %w", err)
+ }
+
+ tokenFetched := true
+ updateParams := params.UpdateInstanceParams{
+ TokenFetched: &tokenFetched,
+ }
+
+ if _, err := r.store.UpdateInstance(r.ctx, instance.Name, updateParams); err != nil {
+ return "", fmt.Errorf("error setting token_fetched for instance: %w", err)
+ }
+
+ if err := r.store.AddInstanceEvent(ctx, instance.Name, params.FetchTokenEvent, params.EventInfo, "runner registration token was retrieved"); err != nil {
+ return "", fmt.Errorf("error recording event: %w", err)
+ }
+
+ return token, nil
+}
+
+func (r *Runner) GetRootCertificateBundle(ctx context.Context) (params.CertificateBundle, error) {
+ instance, err := auth.InstanceParams(ctx)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(
+ ctx, "failed to get instance params")
+ return params.CertificateBundle{}, runnerErrors.ErrUnauthorized
+ }
+
+ poolMgr, err := r.getPoolManagerFromInstance(ctx, instance)
+ if err != nil {
+ return params.CertificateBundle{}, fmt.Errorf("error fetching pool manager for instance: %w", err)
+ }
+
+ bundle, err := poolMgr.RootCABundle()
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(
+ ctx, "failed to get root CA bundle",
+ "instance", instance.Name,
+ "pool_manager", poolMgr.ID())
+ // The root CA bundle is invalid. Return an empty bundle to the runner and log the event.
+ return params.CertificateBundle{
+ RootCertificates: make(map[string][]byte),
+ }, nil
+ }
+ return bundle, nil
+}
diff --git a/runner/metrics/enterprise.go b/runner/metrics/enterprise.go
new file mode 100644
index 00000000..be6eba66
--- /dev/null
+++ b/runner/metrics/enterprise.go
@@ -0,0 +1,50 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package metrics
+
+import (
+ "context"
+ "strconv"
+
+ "github.com/cloudbase/garm/metrics"
+ "github.com/cloudbase/garm/params"
+ "github.com/cloudbase/garm/runner" //nolint:typecheck
+)
+
+// CollectEnterpriseMetric collects the metrics for the enterprise objects
+func CollectEnterpriseMetric(ctx context.Context, r *runner.Runner) error {
+ // reset metrics
+ metrics.EnterpriseInfo.Reset()
+ metrics.EnterprisePoolManagerStatus.Reset()
+
+ enterprises, err := r.ListEnterprises(ctx, params.EnterpriseFilter{})
+ if err != nil {
+ return err
+ }
+
+ for _, enterprise := range enterprises {
+ metrics.EnterpriseInfo.WithLabelValues(
+ enterprise.Name, // label: name
+ enterprise.ID, // label: id
+ ).Set(1)
+
+ metrics.EnterprisePoolManagerStatus.WithLabelValues(
+ enterprise.Name, // label: name
+ enterprise.ID, // label: id
+ strconv.FormatBool(enterprise.PoolManagerStatus.IsRunning), // label: running
+ ).Set(metrics.Bool2float64(enterprise.PoolManagerStatus.IsRunning))
+ }
+ return nil
+}
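+
+// Sketch of the emitted series, assuming the gauges in the metrics package
+// follow the usual garm_enterprise_* naming (label values hypothetical):
+//
+//	garm_enterprise_info{name="acme",id="<uuid>"} 1
+//	garm_enterprise_pool_manager_status{name="acme",id="<uuid>",running="true"} 1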
diff --git a/runner/metrics/health.go b/runner/metrics/health.go
new file mode 100644
index 00000000..fcd254df
--- /dev/null
+++ b/runner/metrics/health.go
@@ -0,0 +1,31 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package metrics
+
+import (
+ "github.com/cloudbase/garm/metrics"
+ "github.com/cloudbase/garm/params"
+)
+
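+// CollectHealthMetric sets the garm health gauge, labeled with the
+// controller's URLs and ID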
+func CollectHealthMetric(controllerInfo params.ControllerInfo) error {
+ metrics.GarmHealth.WithLabelValues(
+ controllerInfo.MetadataURL, // label: metadata_url
+ controllerInfo.CallbackURL, // label: callback_url
+ controllerInfo.WebhookURL, // label: webhook_url
+ controllerInfo.ControllerWebhookURL, // label: controller_webhook_url
+ controllerInfo.ControllerID.String(), // label: controller_id
+ ).Set(1)
+ return nil
+}
diff --git a/runner/metrics/instance.go b/runner/metrics/instance.go
new file mode 100644
index 00000000..bc6bed0a
--- /dev/null
+++ b/runner/metrics/instance.go
@@ -0,0 +1,79 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package metrics
+
+import (
+ "context"
+
+ "github.com/cloudbase/garm/metrics"
+ "github.com/cloudbase/garm/runner"
+)
+
+// CollectInstanceMetric collects the metrics for the runner instances,
+// reflecting their status and the pool they belong to.
+func CollectInstanceMetric(ctx context.Context, r *runner.Runner) error {
+ // reset metrics
+ metrics.InstanceStatus.Reset()
+
+ instances, err := r.ListAllInstances(ctx)
+ if err != nil {
+ return err
+ }
+
+ pools, err := r.ListAllPools(ctx)
+ if err != nil {
+ return err
+ }
+
+ type poolInfo struct {
+ Name string
+ Type string
+ ProviderName string
+ }
+
+ poolNames := make(map[string]poolInfo)
+ for _, pool := range pools {
+ switch {
+ case pool.OrgName != "":
+ poolNames[pool.ID] = poolInfo{
+ Name: pool.OrgName,
+ Type: string(pool.PoolType()),
+ ProviderName: pool.ProviderName,
+ }
+ case pool.EnterpriseName != "":
+ poolNames[pool.ID] = poolInfo{
+ Name: pool.EnterpriseName,
+ Type: string(pool.PoolType()),
+ ProviderName: pool.ProviderName,
+ }
+ default:
+ poolNames[pool.ID] = poolInfo{
+ Name: pool.RepoName,
+ Type: string(pool.PoolType()),
+ ProviderName: pool.ProviderName,
+ }
+ }
+ }
+
+ for _, instance := range instances {
+ metrics.InstanceStatus.WithLabelValues(
+ instance.Name, // label: name
+ string(instance.Status), // label: status
+ string(instance.RunnerStatus), // label: runner_status
+ poolNames[instance.PoolID].Name, // label: pool_owner
+ poolNames[instance.PoolID].Type, // label: pool_type
+ instance.PoolID, // label: pool_id
+ poolNames[instance.PoolID].ProviderName, // label: provider
+ ).Set(1)
+ }
+ return nil
+}
diff --git a/runner/metrics/metrics.go b/runner/metrics/metrics.go
new file mode 100644
index 00000000..772ba86a
--- /dev/null
+++ b/runner/metrics/metrics.go
@@ -0,0 +1,104 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package metrics
+
+import (
+ "context"
+ "log/slog"
+ "time"
+
+ "github.com/cloudbase/garm/auth"
+ "github.com/cloudbase/garm/params"
+ "github.com/cloudbase/garm/runner"
+)
+
+func CollectObjectMetric(ctx context.Context, r *runner.Runner, duration time.Duration) {
+ ctx = auth.GetAdminContext(ctx)
+
+ // get controller info for health metrics
+ controllerInfo, err := r.GetControllerInfo(ctx)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "cannot get controller info")
+ }
+
+ // We do not want to wait for the first tick to fire, so we run an
+ // initial collection immediately.
+ slog.DebugContext(ctx, "collecting metrics")
+ if err := collectMetrics(ctx, r, controllerInfo); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "cannot collect metrics")
+ }
+
+ go func() {
+ ticker := time.NewTicker(duration)
+ defer ticker.Stop()
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case <-ticker.C:
+ slog.DebugContext(ctx, "collecting metrics")
+
+ if err := collectMetrics(ctx, r, controllerInfo); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "cannot collect metrics")
+ }
+ }
+ }
+ }()
+}
+
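+// A minimal usage sketch (import alias, runner instance and interval are
+// hypothetical), typically wired up once at API server startup:
+//
+//	runnermetrics.CollectObjectMetric(ctx, runnerInstance, 60*time.Second)
+//
+// The call collects once synchronously, then keeps refreshing from a
+// goroutine on every tick until ctx is cancelled.
+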
+func collectMetrics(ctx context.Context, r *runner.Runner, controllerInfo params.ControllerInfo) error {
+ slog.DebugContext(ctx, "collecting organization metrics")
+ err := CollectOrganizationMetric(ctx, r)
+ if err != nil {
+ return err
+ }
+
+ slog.DebugContext(ctx, "collecting enterprise metrics")
+ err = CollectEnterpriseMetric(ctx, r)
+ if err != nil {
+ return err
+ }
+
+ slog.DebugContext(ctx, "collecting repository metrics")
+ err = CollectRepositoryMetric(ctx, r)
+ if err != nil {
+ return err
+ }
+
+ slog.DebugContext(ctx, "collecting provider metrics")
+ err = CollectProviderMetric(ctx, r)
+ if err != nil {
+ return err
+ }
+
+ slog.DebugContext(ctx, "collecting pool metrics")
+ err = CollectPoolMetric(ctx, r)
+ if err != nil {
+ return err
+ }
+
+ slog.DebugContext(ctx, "collecting instance metrics")
+ err = CollectInstanceMetric(ctx, r)
+ if err != nil {
+ return err
+ }
+
+ slog.DebugContext(ctx, "collecting health metrics")
+ err = CollectHealthMetric(controllerInfo)
+ if err != nil {
+ return err
+ }
+ return nil
+}
diff --git a/runner/metrics/organization.go b/runner/metrics/organization.go
new file mode 100644
index 00000000..6bf6d9e5
--- /dev/null
+++ b/runner/metrics/organization.go
@@ -0,0 +1,50 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package metrics
+
+import (
+ "context"
+ "strconv"
+
+ "github.com/cloudbase/garm/metrics"
+ "github.com/cloudbase/garm/params"
+ "github.com/cloudbase/garm/runner"
+)
+
+// CollectOrganizationMetric collects the metrics for the organization objects
+func CollectOrganizationMetric(ctx context.Context, r *runner.Runner) error {
+ // reset metrics
+ metrics.OrganizationInfo.Reset()
+ metrics.OrganizationPoolManagerStatus.Reset()
+
+ organizations, err := r.ListOrganizations(ctx, params.OrganizationFilter{})
+ if err != nil {
+ return err
+ }
+
+ for _, organization := range organizations {
+ metrics.OrganizationInfo.WithLabelValues(
+ organization.Name, // label: name
+ organization.ID, // label: id
+ ).Set(1)
+
+ metrics.OrganizationPoolManagerStatus.WithLabelValues(
+ organization.Name, // label: name
+ organization.ID, // label: id
+ strconv.FormatBool(organization.PoolManagerStatus.IsRunning), // label: running
+ ).Set(metrics.Bool2float64(organization.PoolManagerStatus.IsRunning))
+ }
+ return nil
+}
diff --git a/runner/metrics/pool.go b/runner/metrics/pool.go
new file mode 100644
index 00000000..6b06a8b9
--- /dev/null
+++ b/runner/metrics/pool.go
@@ -0,0 +1,101 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package metrics
+
+import (
+ "context"
+ "strconv"
+ "strings"
+
+ "github.com/cloudbase/garm/metrics"
+ "github.com/cloudbase/garm/runner"
+)
+
+// CollectPoolMetric collects the metrics for the pool objects
+func CollectPoolMetric(ctx context.Context, r *runner.Runner) error {
+ // reset metrics
+ metrics.PoolInfo.Reset()
+ metrics.PoolStatus.Reset()
+ metrics.PoolMaxRunners.Reset()
+ metrics.PoolMinIdleRunners.Reset()
+ metrics.PoolBootstrapTimeout.Reset()
+
+ pools, err := r.ListAllPools(ctx)
+ if err != nil {
+ return err
+ }
+
+ type poolInfo struct {
+ Name string
+ Type string
+ }
+
+ poolNames := make(map[string]poolInfo)
+ for _, pool := range pools {
+ switch {
+ case pool.OrgName != "":
+ poolNames[pool.ID] = poolInfo{
+ Name: pool.OrgName,
+ Type: string(pool.PoolType()),
+ }
+ case pool.EnterpriseName != "":
+ poolNames[pool.ID] = poolInfo{
+ Name: pool.EnterpriseName,
+ Type: string(pool.PoolType()),
+ }
+ default:
+ poolNames[pool.ID] = poolInfo{
+ Name: pool.RepoName,
+ Type: string(pool.PoolType()),
+ }
+ }
+
+ var poolTags []string
+ for _, tag := range pool.Tags {
+ poolTags = append(poolTags, tag.Name)
+ }
+
+ metrics.PoolInfo.WithLabelValues(
+ pool.ID, // label: id
+ pool.Image, // label: image
+ pool.Flavor, // label: flavor
+ pool.Prefix, // label: prefix
+ string(pool.OSType), // label: os_type
+ string(pool.OSArch), // label: os_arch
+ strings.Join(poolTags, ","), // label: tags
+ pool.ProviderName, // label: provider
+ poolNames[pool.ID].Name, // label: pool_owner
+ poolNames[pool.ID].Type, // label: pool_type
+ ).Set(1)
+
+ metrics.PoolStatus.WithLabelValues(
+ pool.ID, // label: id
+ strconv.FormatBool(pool.Enabled), // label: enabled
+ ).Set(metrics.Bool2float64(pool.Enabled))
+
+ metrics.PoolMaxRunners.WithLabelValues(
+ pool.ID, // label: id
+ ).Set(float64(pool.MaxRunners))
+
+ metrics.PoolMinIdleRunners.WithLabelValues(
+ pool.ID, // label: id
+ ).Set(float64(pool.MinIdleRunners))
+
+ metrics.PoolBootstrapTimeout.WithLabelValues(
+ pool.ID, // label: id
+ ).Set(float64(pool.RunnerBootstrapTimeout))
+ }
+ return nil
+}
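+
+// Sketch of one emitted pool info series (metric name assumed from the
+// metrics package, label values hypothetical):
+//
+//	garm_pool_info{id="<uuid>",image="ubuntu:22.04",flavor="default",prefix="garm",os_type="linux",os_arch="amd64",tags="self-hosted,x64",provider="lxd",pool_owner="acme",pool_type="organization"} 1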
diff --git a/runner/metrics/provider.go b/runner/metrics/provider.go
new file mode 100644
index 00000000..1d7a065d
--- /dev/null
+++ b/runner/metrics/provider.go
@@ -0,0 +1,40 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package metrics
+
+import (
+ "context"
+
+ "github.com/cloudbase/garm/metrics"
+ "github.com/cloudbase/garm/runner"
+)
+
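+// CollectProviderMetric collects the metrics for the provider objects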
+func CollectProviderMetric(ctx context.Context, r *runner.Runner) error {
+ // reset metrics
+ metrics.ProviderInfo.Reset()
+
+ providers, err := r.ListProviders(ctx)
+ if err != nil {
+ return err
+ }
+ for _, provider := range providers {
+ metrics.ProviderInfo.WithLabelValues(
+ provider.Name, // label: name
+ string(provider.ProviderType), // label: type
+ provider.Description, // label: description
+ ).Set(1)
+ }
+ return nil
+}
diff --git a/runner/metrics/repository.go b/runner/metrics/repository.go
new file mode 100644
index 00000000..a2e8fa57
--- /dev/null
+++ b/runner/metrics/repository.go
@@ -0,0 +1,49 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package metrics
+
+import (
+ "context"
+ "strconv"
+
+ "github.com/cloudbase/garm/metrics"
+ "github.com/cloudbase/garm/params"
+ "github.com/cloudbase/garm/runner"
+)
+
+// CollectRepositoryMetric collects the metrics for the repository objects
+func CollectRepositoryMetric(ctx context.Context, r *runner.Runner) error {
+ // reset metrics
+ metrics.RepositoryInfo.Reset()
+ metrics.RepositoryPoolManagerStatus.Reset()
+
+ repositories, err := r.ListRepositories(ctx, params.RepositoryFilter{})
+ if err != nil {
+ return err
+ }
+
+ for _, repository := range repositories {
+ metrics.RepositoryInfo.WithLabelValues(
+ repository.Name, // label: name
+ repository.ID, // label: id
+ ).Set(1)
+
+ metrics.RepositoryPoolManagerStatus.WithLabelValues(
+ repository.Name, // label: name
+ repository.ID, // label: id
+ strconv.FormatBool(repository.PoolManagerStatus.IsRunning), // label: running
+ ).Set(metrics.Bool2float64(repository.PoolManagerStatus.IsRunning))
+ }
+ return nil
+}
diff --git a/runner/mocks/PoolManagerController.go b/runner/mocks/PoolManagerController.go
index d422508a..b17196ec 100644
--- a/runner/mocks/PoolManagerController.go
+++ b/runner/mocks/PoolManagerController.go
@@ -1,4 +1,4 @@
-// Code generated by mockery v0.0.0-dev. DO NOT EDIT.
+// Code generated by mockery. DO NOT EDIT.
package mocks
@@ -19,10 +19,22 @@ type PoolManagerController struct {
mock.Mock
}
+type PoolManagerController_Expecter struct {
+ mock *mock.Mock
+}
+
+func (_m *PoolManagerController) EXPECT() *PoolManagerController_Expecter {
+ return &PoolManagerController_Expecter{mock: &_m.Mock}
+}
+
// CreateEnterprisePoolManager provides a mock function with given fields: ctx, enterprise, providers, store
func (_m *PoolManagerController) CreateEnterprisePoolManager(ctx context.Context, enterprise params.Enterprise, providers map[string]common.Provider, store databasecommon.Store) (common.PoolManager, error) {
ret := _m.Called(ctx, enterprise, providers, store)
+ if len(ret) == 0 {
+ panic("no return value specified for CreateEnterprisePoolManager")
+ }
+
var r0 common.PoolManager
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, params.Enterprise, map[string]common.Provider, databasecommon.Store) (common.PoolManager, error)); ok {
@@ -45,10 +57,45 @@ func (_m *PoolManagerController) CreateEnterprisePoolManager(ctx context.Context
return r0, r1
}
+// PoolManagerController_CreateEnterprisePoolManager_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateEnterprisePoolManager'
+type PoolManagerController_CreateEnterprisePoolManager_Call struct {
+ *mock.Call
+}
+
+// CreateEnterprisePoolManager is a helper method to define mock.On call
+// - ctx context.Context
+// - enterprise params.Enterprise
+// - providers map[string]common.Provider
+// - store databasecommon.Store
+func (_e *PoolManagerController_Expecter) CreateEnterprisePoolManager(ctx interface{}, enterprise interface{}, providers interface{}, store interface{}) *PoolManagerController_CreateEnterprisePoolManager_Call {
+ return &PoolManagerController_CreateEnterprisePoolManager_Call{Call: _e.mock.On("CreateEnterprisePoolManager", ctx, enterprise, providers, store)}
+}
+
+func (_c *PoolManagerController_CreateEnterprisePoolManager_Call) Run(run func(ctx context.Context, enterprise params.Enterprise, providers map[string]common.Provider, store databasecommon.Store)) *PoolManagerController_CreateEnterprisePoolManager_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(params.Enterprise), args[2].(map[string]common.Provider), args[3].(databasecommon.Store))
+ })
+ return _c
+}
+
+func (_c *PoolManagerController_CreateEnterprisePoolManager_Call) Return(_a0 common.PoolManager, _a1 error) *PoolManagerController_CreateEnterprisePoolManager_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *PoolManagerController_CreateEnterprisePoolManager_Call) RunAndReturn(run func(context.Context, params.Enterprise, map[string]common.Provider, databasecommon.Store) (common.PoolManager, error)) *PoolManagerController_CreateEnterprisePoolManager_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
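+// A hedged test-usage sketch for the expecter API generated above (variable
+// names hypothetical; mock.Anything comes from testify):
+//
+//	ctrl := mocks.NewPoolManagerController(t)
+//	ctrl.EXPECT().
+//		CreateEnterprisePoolManager(mock.Anything, mock.Anything, mock.Anything, mock.Anything).
+//		Return(poolMgr, nil).
+//		Once()
+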
// CreateOrgPoolManager provides a mock function with given fields: ctx, org, providers, store
func (_m *PoolManagerController) CreateOrgPoolManager(ctx context.Context, org params.Organization, providers map[string]common.Provider, store databasecommon.Store) (common.PoolManager, error) {
ret := _m.Called(ctx, org, providers, store)
+ if len(ret) == 0 {
+ panic("no return value specified for CreateOrgPoolManager")
+ }
+
var r0 common.PoolManager
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, params.Organization, map[string]common.Provider, databasecommon.Store) (common.PoolManager, error)); ok {
@@ -71,10 +118,45 @@ func (_m *PoolManagerController) CreateOrgPoolManager(ctx context.Context, org p
return r0, r1
}
+// PoolManagerController_CreateOrgPoolManager_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateOrgPoolManager'
+type PoolManagerController_CreateOrgPoolManager_Call struct {
+ *mock.Call
+}
+
+// CreateOrgPoolManager is a helper method to define mock.On call
+// - ctx context.Context
+// - org params.Organization
+// - providers map[string]common.Provider
+// - store databasecommon.Store
+func (_e *PoolManagerController_Expecter) CreateOrgPoolManager(ctx interface{}, org interface{}, providers interface{}, store interface{}) *PoolManagerController_CreateOrgPoolManager_Call {
+ return &PoolManagerController_CreateOrgPoolManager_Call{Call: _e.mock.On("CreateOrgPoolManager", ctx, org, providers, store)}
+}
+
+func (_c *PoolManagerController_CreateOrgPoolManager_Call) Run(run func(ctx context.Context, org params.Organization, providers map[string]common.Provider, store databasecommon.Store)) *PoolManagerController_CreateOrgPoolManager_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(params.Organization), args[2].(map[string]common.Provider), args[3].(databasecommon.Store))
+ })
+ return _c
+}
+
+func (_c *PoolManagerController_CreateOrgPoolManager_Call) Return(_a0 common.PoolManager, _a1 error) *PoolManagerController_CreateOrgPoolManager_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *PoolManagerController_CreateOrgPoolManager_Call) RunAndReturn(run func(context.Context, params.Organization, map[string]common.Provider, databasecommon.Store) (common.PoolManager, error)) *PoolManagerController_CreateOrgPoolManager_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// CreateRepoPoolManager provides a mock function with given fields: ctx, repo, providers, store
func (_m *PoolManagerController) CreateRepoPoolManager(ctx context.Context, repo params.Repository, providers map[string]common.Provider, store databasecommon.Store) (common.PoolManager, error) {
ret := _m.Called(ctx, repo, providers, store)
+ if len(ret) == 0 {
+ panic("no return value specified for CreateRepoPoolManager")
+ }
+
var r0 common.PoolManager
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, params.Repository, map[string]common.Provider, databasecommon.Store) (common.PoolManager, error)); ok {
@@ -97,10 +179,45 @@ func (_m *PoolManagerController) CreateRepoPoolManager(ctx context.Context, repo
return r0, r1
}
+// PoolManagerController_CreateRepoPoolManager_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateRepoPoolManager'
+type PoolManagerController_CreateRepoPoolManager_Call struct {
+ *mock.Call
+}
+
+// CreateRepoPoolManager is a helper method to define mock.On call
+// - ctx context.Context
+// - repo params.Repository
+// - providers map[string]common.Provider
+// - store databasecommon.Store
+func (_e *PoolManagerController_Expecter) CreateRepoPoolManager(ctx interface{}, repo interface{}, providers interface{}, store interface{}) *PoolManagerController_CreateRepoPoolManager_Call {
+ return &PoolManagerController_CreateRepoPoolManager_Call{Call: _e.mock.On("CreateRepoPoolManager", ctx, repo, providers, store)}
+}
+
+func (_c *PoolManagerController_CreateRepoPoolManager_Call) Run(run func(ctx context.Context, repo params.Repository, providers map[string]common.Provider, store databasecommon.Store)) *PoolManagerController_CreateRepoPoolManager_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(params.Repository), args[2].(map[string]common.Provider), args[3].(databasecommon.Store))
+ })
+ return _c
+}
+
+func (_c *PoolManagerController_CreateRepoPoolManager_Call) Return(_a0 common.PoolManager, _a1 error) *PoolManagerController_CreateRepoPoolManager_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *PoolManagerController_CreateRepoPoolManager_Call) RunAndReturn(run func(context.Context, params.Repository, map[string]common.Provider, databasecommon.Store) (common.PoolManager, error)) *PoolManagerController_CreateRepoPoolManager_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// DeleteEnterprisePoolManager provides a mock function with given fields: enterprise
func (_m *PoolManagerController) DeleteEnterprisePoolManager(enterprise params.Enterprise) error {
ret := _m.Called(enterprise)
+ if len(ret) == 0 {
+ panic("no return value specified for DeleteEnterprisePoolManager")
+ }
+
var r0 error
if rf, ok := ret.Get(0).(func(params.Enterprise) error); ok {
r0 = rf(enterprise)
@@ -111,10 +228,42 @@ func (_m *PoolManagerController) DeleteEnterprisePoolManager(enterprise params.E
return r0
}
+// PoolManagerController_DeleteEnterprisePoolManager_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteEnterprisePoolManager'
+type PoolManagerController_DeleteEnterprisePoolManager_Call struct {
+ *mock.Call
+}
+
+// DeleteEnterprisePoolManager is a helper method to define mock.On call
+// - enterprise params.Enterprise
+func (_e *PoolManagerController_Expecter) DeleteEnterprisePoolManager(enterprise interface{}) *PoolManagerController_DeleteEnterprisePoolManager_Call {
+ return &PoolManagerController_DeleteEnterprisePoolManager_Call{Call: _e.mock.On("DeleteEnterprisePoolManager", enterprise)}
+}
+
+func (_c *PoolManagerController_DeleteEnterprisePoolManager_Call) Run(run func(enterprise params.Enterprise)) *PoolManagerController_DeleteEnterprisePoolManager_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(params.Enterprise))
+ })
+ return _c
+}
+
+func (_c *PoolManagerController_DeleteEnterprisePoolManager_Call) Return(_a0 error) *PoolManagerController_DeleteEnterprisePoolManager_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *PoolManagerController_DeleteEnterprisePoolManager_Call) RunAndReturn(run func(params.Enterprise) error) *PoolManagerController_DeleteEnterprisePoolManager_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// DeleteOrgPoolManager provides a mock function with given fields: org
func (_m *PoolManagerController) DeleteOrgPoolManager(org params.Organization) error {
ret := _m.Called(org)
+ if len(ret) == 0 {
+ panic("no return value specified for DeleteOrgPoolManager")
+ }
+
var r0 error
if rf, ok := ret.Get(0).(func(params.Organization) error); ok {
r0 = rf(org)
@@ -125,10 +274,42 @@ func (_m *PoolManagerController) DeleteOrgPoolManager(org params.Organization) e
return r0
}
+// PoolManagerController_DeleteOrgPoolManager_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteOrgPoolManager'
+type PoolManagerController_DeleteOrgPoolManager_Call struct {
+ *mock.Call
+}
+
+// DeleteOrgPoolManager is a helper method to define mock.On call
+// - org params.Organization
+func (_e *PoolManagerController_Expecter) DeleteOrgPoolManager(org interface{}) *PoolManagerController_DeleteOrgPoolManager_Call {
+ return &PoolManagerController_DeleteOrgPoolManager_Call{Call: _e.mock.On("DeleteOrgPoolManager", org)}
+}
+
+func (_c *PoolManagerController_DeleteOrgPoolManager_Call) Run(run func(org params.Organization)) *PoolManagerController_DeleteOrgPoolManager_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(params.Organization))
+ })
+ return _c
+}
+
+func (_c *PoolManagerController_DeleteOrgPoolManager_Call) Return(_a0 error) *PoolManagerController_DeleteOrgPoolManager_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *PoolManagerController_DeleteOrgPoolManager_Call) RunAndReturn(run func(params.Organization) error) *PoolManagerController_DeleteOrgPoolManager_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// DeleteRepoPoolManager provides a mock function with given fields: repo
func (_m *PoolManagerController) DeleteRepoPoolManager(repo params.Repository) error {
ret := _m.Called(repo)
+ if len(ret) == 0 {
+ panic("no return value specified for DeleteRepoPoolManager")
+ }
+
var r0 error
if rf, ok := ret.Get(0).(func(params.Repository) error); ok {
r0 = rf(repo)
@@ -139,10 +320,42 @@ func (_m *PoolManagerController) DeleteRepoPoolManager(repo params.Repository) e
return r0
}
+// PoolManagerController_DeleteRepoPoolManager_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteRepoPoolManager'
+type PoolManagerController_DeleteRepoPoolManager_Call struct {
+ *mock.Call
+}
+
+// DeleteRepoPoolManager is a helper method to define mock.On call
+// - repo params.Repository
+func (_e *PoolManagerController_Expecter) DeleteRepoPoolManager(repo interface{}) *PoolManagerController_DeleteRepoPoolManager_Call {
+ return &PoolManagerController_DeleteRepoPoolManager_Call{Call: _e.mock.On("DeleteRepoPoolManager", repo)}
+}
+
+func (_c *PoolManagerController_DeleteRepoPoolManager_Call) Run(run func(repo params.Repository)) *PoolManagerController_DeleteRepoPoolManager_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(params.Repository))
+ })
+ return _c
+}
+
+func (_c *PoolManagerController_DeleteRepoPoolManager_Call) Return(_a0 error) *PoolManagerController_DeleteRepoPoolManager_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *PoolManagerController_DeleteRepoPoolManager_Call) RunAndReturn(run func(params.Repository) error) *PoolManagerController_DeleteRepoPoolManager_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// GetEnterprisePoolManager provides a mock function with given fields: enterprise
func (_m *PoolManagerController) GetEnterprisePoolManager(enterprise params.Enterprise) (common.PoolManager, error) {
ret := _m.Called(enterprise)
+ if len(ret) == 0 {
+ panic("no return value specified for GetEnterprisePoolManager")
+ }
+
var r0 common.PoolManager
var r1 error
if rf, ok := ret.Get(0).(func(params.Enterprise) (common.PoolManager, error)); ok {
@@ -165,10 +378,42 @@ func (_m *PoolManagerController) GetEnterprisePoolManager(enterprise params.Ente
return r0, r1
}
-// GetEnterprisePoolManagers provides a mock function with given fields:
+// PoolManagerController_GetEnterprisePoolManager_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetEnterprisePoolManager'
+type PoolManagerController_GetEnterprisePoolManager_Call struct {
+ *mock.Call
+}
+
+// GetEnterprisePoolManager is a helper method to define mock.On call
+// - enterprise params.Enterprise
+func (_e *PoolManagerController_Expecter) GetEnterprisePoolManager(enterprise interface{}) *PoolManagerController_GetEnterprisePoolManager_Call {
+ return &PoolManagerController_GetEnterprisePoolManager_Call{Call: _e.mock.On("GetEnterprisePoolManager", enterprise)}
+}
+
+func (_c *PoolManagerController_GetEnterprisePoolManager_Call) Run(run func(enterprise params.Enterprise)) *PoolManagerController_GetEnterprisePoolManager_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(params.Enterprise))
+ })
+ return _c
+}
+
+func (_c *PoolManagerController_GetEnterprisePoolManager_Call) Return(_a0 common.PoolManager, _a1 error) *PoolManagerController_GetEnterprisePoolManager_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *PoolManagerController_GetEnterprisePoolManager_Call) RunAndReturn(run func(params.Enterprise) (common.PoolManager, error)) *PoolManagerController_GetEnterprisePoolManager_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GetEnterprisePoolManagers provides a mock function with no fields
func (_m *PoolManagerController) GetEnterprisePoolManagers() (map[string]common.PoolManager, error) {
ret := _m.Called()
+ if len(ret) == 0 {
+ panic("no return value specified for GetEnterprisePoolManagers")
+ }
+
var r0 map[string]common.PoolManager
var r1 error
if rf, ok := ret.Get(0).(func() (map[string]common.PoolManager, error)); ok {
@@ -191,10 +436,41 @@ func (_m *PoolManagerController) GetEnterprisePoolManagers() (map[string]common.
return r0, r1
}
+// PoolManagerController_GetEnterprisePoolManagers_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetEnterprisePoolManagers'
+type PoolManagerController_GetEnterprisePoolManagers_Call struct {
+ *mock.Call
+}
+
+// GetEnterprisePoolManagers is a helper method to define mock.On call
+func (_e *PoolManagerController_Expecter) GetEnterprisePoolManagers() *PoolManagerController_GetEnterprisePoolManagers_Call {
+ return &PoolManagerController_GetEnterprisePoolManagers_Call{Call: _e.mock.On("GetEnterprisePoolManagers")}
+}
+
+func (_c *PoolManagerController_GetEnterprisePoolManagers_Call) Run(run func()) *PoolManagerController_GetEnterprisePoolManagers_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run()
+ })
+ return _c
+}
+
+func (_c *PoolManagerController_GetEnterprisePoolManagers_Call) Return(_a0 map[string]common.PoolManager, _a1 error) *PoolManagerController_GetEnterprisePoolManagers_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *PoolManagerController_GetEnterprisePoolManagers_Call) RunAndReturn(run func() (map[string]common.PoolManager, error)) *PoolManagerController_GetEnterprisePoolManagers_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// GetOrgPoolManager provides a mock function with given fields: org
func (_m *PoolManagerController) GetOrgPoolManager(org params.Organization) (common.PoolManager, error) {
ret := _m.Called(org)
+ if len(ret) == 0 {
+ panic("no return value specified for GetOrgPoolManager")
+ }
+
var r0 common.PoolManager
var r1 error
if rf, ok := ret.Get(0).(func(params.Organization) (common.PoolManager, error)); ok {
@@ -217,10 +493,42 @@ func (_m *PoolManagerController) GetOrgPoolManager(org params.Organization) (com
return r0, r1
}
-// GetOrgPoolManagers provides a mock function with given fields:
+// PoolManagerController_GetOrgPoolManager_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetOrgPoolManager'
+type PoolManagerController_GetOrgPoolManager_Call struct {
+ *mock.Call
+}
+
+// GetOrgPoolManager is a helper method to define mock.On call
+// - org params.Organization
+func (_e *PoolManagerController_Expecter) GetOrgPoolManager(org interface{}) *PoolManagerController_GetOrgPoolManager_Call {
+ return &PoolManagerController_GetOrgPoolManager_Call{Call: _e.mock.On("GetOrgPoolManager", org)}
+}
+
+func (_c *PoolManagerController_GetOrgPoolManager_Call) Run(run func(org params.Organization)) *PoolManagerController_GetOrgPoolManager_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(params.Organization))
+ })
+ return _c
+}
+
+func (_c *PoolManagerController_GetOrgPoolManager_Call) Return(_a0 common.PoolManager, _a1 error) *PoolManagerController_GetOrgPoolManager_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *PoolManagerController_GetOrgPoolManager_Call) RunAndReturn(run func(params.Organization) (common.PoolManager, error)) *PoolManagerController_GetOrgPoolManager_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GetOrgPoolManagers provides a mock function with no fields
func (_m *PoolManagerController) GetOrgPoolManagers() (map[string]common.PoolManager, error) {
ret := _m.Called()
+ if len(ret) == 0 {
+ panic("no return value specified for GetOrgPoolManagers")
+ }
+
var r0 map[string]common.PoolManager
var r1 error
if rf, ok := ret.Get(0).(func() (map[string]common.PoolManager, error)); ok {
@@ -243,10 +551,41 @@ func (_m *PoolManagerController) GetOrgPoolManagers() (map[string]common.PoolMan
return r0, r1
}
+// PoolManagerController_GetOrgPoolManagers_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetOrgPoolManagers'
+type PoolManagerController_GetOrgPoolManagers_Call struct {
+ *mock.Call
+}
+
+// GetOrgPoolManagers is a helper method to define mock.On call
+func (_e *PoolManagerController_Expecter) GetOrgPoolManagers() *PoolManagerController_GetOrgPoolManagers_Call {
+ return &PoolManagerController_GetOrgPoolManagers_Call{Call: _e.mock.On("GetOrgPoolManagers")}
+}
+
+func (_c *PoolManagerController_GetOrgPoolManagers_Call) Run(run func()) *PoolManagerController_GetOrgPoolManagers_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run()
+ })
+ return _c
+}
+
+func (_c *PoolManagerController_GetOrgPoolManagers_Call) Return(_a0 map[string]common.PoolManager, _a1 error) *PoolManagerController_GetOrgPoolManagers_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *PoolManagerController_GetOrgPoolManagers_Call) RunAndReturn(run func() (map[string]common.PoolManager, error)) *PoolManagerController_GetOrgPoolManagers_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// GetRepoPoolManager provides a mock function with given fields: repo
func (_m *PoolManagerController) GetRepoPoolManager(repo params.Repository) (common.PoolManager, error) {
ret := _m.Called(repo)
+ if len(ret) == 0 {
+ panic("no return value specified for GetRepoPoolManager")
+ }
+
var r0 common.PoolManager
var r1 error
if rf, ok := ret.Get(0).(func(params.Repository) (common.PoolManager, error)); ok {
@@ -269,10 +608,42 @@ func (_m *PoolManagerController) GetRepoPoolManager(repo params.Repository) (com
return r0, r1
}
-// GetRepoPoolManagers provides a mock function with given fields:
+// PoolManagerController_GetRepoPoolManager_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetRepoPoolManager'
+type PoolManagerController_GetRepoPoolManager_Call struct {
+ *mock.Call
+}
+
+// GetRepoPoolManager is a helper method to define mock.On call
+// - repo params.Repository
+func (_e *PoolManagerController_Expecter) GetRepoPoolManager(repo interface{}) *PoolManagerController_GetRepoPoolManager_Call {
+ return &PoolManagerController_GetRepoPoolManager_Call{Call: _e.mock.On("GetRepoPoolManager", repo)}
+}
+
+func (_c *PoolManagerController_GetRepoPoolManager_Call) Run(run func(repo params.Repository)) *PoolManagerController_GetRepoPoolManager_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(params.Repository))
+ })
+ return _c
+}
+
+func (_c *PoolManagerController_GetRepoPoolManager_Call) Return(_a0 common.PoolManager, _a1 error) *PoolManagerController_GetRepoPoolManager_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *PoolManagerController_GetRepoPoolManager_Call) RunAndReturn(run func(params.Repository) (common.PoolManager, error)) *PoolManagerController_GetRepoPoolManager_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GetRepoPoolManagers provides a mock function with no fields
func (_m *PoolManagerController) GetRepoPoolManagers() (map[string]common.PoolManager, error) {
ret := _m.Called()
+ if len(ret) == 0 {
+ panic("no return value specified for GetRepoPoolManagers")
+ }
+
var r0 map[string]common.PoolManager
var r1 error
if rf, ok := ret.Get(0).(func() (map[string]common.PoolManager, error)); ok {
@@ -295,82 +666,31 @@ func (_m *PoolManagerController) GetRepoPoolManagers() (map[string]common.PoolMa
return r0, r1
}
-// UpdateEnterprisePoolManager provides a mock function with given fields: ctx, enterprise
-func (_m *PoolManagerController) UpdateEnterprisePoolManager(ctx context.Context, enterprise params.Enterprise) (common.PoolManager, error) {
- ret := _m.Called(ctx, enterprise)
-
- var r0 common.PoolManager
- var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, params.Enterprise) (common.PoolManager, error)); ok {
- return rf(ctx, enterprise)
- }
- if rf, ok := ret.Get(0).(func(context.Context, params.Enterprise) common.PoolManager); ok {
- r0 = rf(ctx, enterprise)
- } else {
- if ret.Get(0) != nil {
- r0 = ret.Get(0).(common.PoolManager)
- }
- }
-
- if rf, ok := ret.Get(1).(func(context.Context, params.Enterprise) error); ok {
- r1 = rf(ctx, enterprise)
- } else {
- r1 = ret.Error(1)
- }
-
- return r0, r1
+// PoolManagerController_GetRepoPoolManagers_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetRepoPoolManagers'
+type PoolManagerController_GetRepoPoolManagers_Call struct {
+ *mock.Call
}
-// UpdateOrgPoolManager provides a mock function with given fields: ctx, org
-func (_m *PoolManagerController) UpdateOrgPoolManager(ctx context.Context, org params.Organization) (common.PoolManager, error) {
- ret := _m.Called(ctx, org)
-
- var r0 common.PoolManager
- var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, params.Organization) (common.PoolManager, error)); ok {
- return rf(ctx, org)
- }
- if rf, ok := ret.Get(0).(func(context.Context, params.Organization) common.PoolManager); ok {
- r0 = rf(ctx, org)
- } else {
- if ret.Get(0) != nil {
- r0 = ret.Get(0).(common.PoolManager)
- }
- }
-
- if rf, ok := ret.Get(1).(func(context.Context, params.Organization) error); ok {
- r1 = rf(ctx, org)
- } else {
- r1 = ret.Error(1)
- }
-
- return r0, r1
+// GetRepoPoolManagers is a helper method to define mock.On call
+func (_e *PoolManagerController_Expecter) GetRepoPoolManagers() *PoolManagerController_GetRepoPoolManagers_Call {
+ return &PoolManagerController_GetRepoPoolManagers_Call{Call: _e.mock.On("GetRepoPoolManagers")}
}
-// UpdateRepoPoolManager provides a mock function with given fields: ctx, repo
-func (_m *PoolManagerController) UpdateRepoPoolManager(ctx context.Context, repo params.Repository) (common.PoolManager, error) {
- ret := _m.Called(ctx, repo)
+func (_c *PoolManagerController_GetRepoPoolManagers_Call) Run(run func()) *PoolManagerController_GetRepoPoolManagers_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run()
+ })
+ return _c
+}
- var r0 common.PoolManager
- var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, params.Repository) (common.PoolManager, error)); ok {
- return rf(ctx, repo)
- }
- if rf, ok := ret.Get(0).(func(context.Context, params.Repository) common.PoolManager); ok {
- r0 = rf(ctx, repo)
- } else {
- if ret.Get(0) != nil {
- r0 = ret.Get(0).(common.PoolManager)
- }
- }
+func (_c *PoolManagerController_GetRepoPoolManagers_Call) Return(_a0 map[string]common.PoolManager, _a1 error) *PoolManagerController_GetRepoPoolManagers_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
- if rf, ok := ret.Get(1).(func(context.Context, params.Repository) error); ok {
- r1 = rf(ctx, repo)
- } else {
- r1 = ret.Error(1)
- }
-
- return r0, r1
+func (_c *PoolManagerController_GetRepoPoolManagers_Call) RunAndReturn(run func() (map[string]common.PoolManager, error)) *PoolManagerController_GetRepoPoolManagers_Call {
+ _c.Call.Return(run)
+ return _c
}
// NewPoolManagerController creates a new instance of PoolManagerController. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
diff --git a/runner/organizations.go b/runner/organizations.go
index 61ebcc6a..ffdd1c6c 100644
--- a/runner/organizations.go
+++ b/runner/organizations.go
@@ -16,8 +16,9 @@ package runner
import (
"context"
+ "errors"
"fmt"
- "log"
+ "log/slog"
"strings"
runnerErrors "github.com/cloudbase/garm-provider-common/errors"
@@ -25,8 +26,6 @@ import (
"github.com/cloudbase/garm/params"
"github.com/cloudbase/garm/runner/common"
"github.com/cloudbase/garm/util/appdefaults"
-
- "github.com/pkg/errors"
)
func (r *Runner) CreateOrganization(ctx context.Context, param params.CreateOrgParams) (org params.Organization, err error) {
@@ -35,57 +34,74 @@ func (r *Runner) CreateOrganization(ctx context.Context, param params.CreateOrgP
}
if err := param.Validate(); err != nil {
- return params.Organization{}, errors.Wrap(err, "validating params")
+ return params.Organization{}, fmt.Errorf("error validating params: %w", err)
}
- creds, ok := r.credentials[param.CredentialsName]
- if !ok {
+ var creds params.ForgeCredentials
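+ // Resolve credentials based on the forge type requested by the caller.
+ // When no forge type is set, fall back to resolving the credentials by
+ // name across all configured forges.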
+ switch param.ForgeType {
+ case params.GithubEndpointType:
+ slog.DebugContext(ctx, "getting github credentials")
+ creds, err = r.store.GetGithubCredentialsByName(ctx, param.CredentialsName, true)
+ case params.GiteaEndpointType:
+ slog.DebugContext(ctx, "getting gitea credentials")
+ creds, err = r.store.GetGiteaCredentialsByName(ctx, param.CredentialsName, true)
+ default:
+ creds, err = r.ResolveForgeCredentialByName(ctx, param.CredentialsName)
+ }
+
+ if err != nil {
return params.Organization{}, runnerErrors.NewBadRequestError("credentials %s not defined", param.CredentialsName)
}
- _, err = r.store.GetOrganization(ctx, param.Name)
+ _, err = r.store.GetOrganization(ctx, param.Name, creds.Endpoint.Name)
if err != nil {
if !errors.Is(err, runnerErrors.ErrNotFound) {
- return params.Organization{}, errors.Wrap(err, "fetching org")
+ return params.Organization{}, fmt.Errorf("error fetching org: %w", err)
}
} else {
return params.Organization{}, runnerErrors.NewConflictError("organization %s already exists", param.Name)
}
- org, err = r.store.CreateOrganization(ctx, param.Name, creds.Name, param.WebhookSecret)
+ org, err = r.store.CreateOrganization(ctx, param.Name, creds, param.WebhookSecret, param.PoolBalancerType)
if err != nil {
- return params.Organization{}, errors.Wrap(err, "creating organization")
+ return params.Organization{}, fmt.Errorf("error creating organization: %w", err)
}
defer func() {
if err != nil {
if deleteErr := r.store.DeleteOrganization(ctx, org.ID); deleteErr != nil {
- log.Printf("failed to delete org: %s", deleteErr)
+ slog.With(slog.Any("error", deleteErr)).ErrorContext(
+ ctx, "failed to delete org",
+ "org_id", org.ID)
}
}
}()
+ // Use the admin context in the pool manager. Any access control is already done above when
+ // updating the store.
poolMgr, err := r.poolManagerCtrl.CreateOrgPoolManager(r.ctx, org, r.providers, r.store)
if err != nil {
- return params.Organization{}, errors.Wrap(err, "creating org pool manager")
+ return params.Organization{}, fmt.Errorf("error creating org pool manager: %w", err)
}
if err := poolMgr.Start(); err != nil {
if deleteErr := r.poolManagerCtrl.DeleteOrgPoolManager(org); deleteErr != nil {
- log.Printf("failed to cleanup pool manager for org %s", org.ID)
+ slog.With(slog.Any("error", deleteErr)).ErrorContext(
+ ctx, "failed to cleanup pool manager for org",
+ "org_id", org.ID)
}
- return params.Organization{}, errors.Wrap(err, "starting org pool manager")
+ return params.Organization{}, fmt.Errorf("error starting org pool manager: %w", err)
}
return org, nil
}
-func (r *Runner) ListOrganizations(ctx context.Context) ([]params.Organization, error) {
+func (r *Runner) ListOrganizations(ctx context.Context, filter params.OrganizationFilter) ([]params.Organization, error) {
if !auth.IsAdmin(ctx) {
return nil, runnerErrors.ErrUnauthorized
}
- orgs, err := r.store.ListOrganizations(ctx)
+ orgs, err := r.store.ListOrganizations(ctx, filter)
if err != nil {
- return nil, errors.Wrap(err, "listing organizations")
+ return nil, fmt.Errorf("error listing organizations: %w", err)
}
var allOrgs []params.Organization
@@ -112,7 +128,7 @@ func (r *Runner) GetOrganizationByID(ctx context.Context, orgID string) (params.
org, err := r.store.GetOrganizationByID(ctx, orgID)
if err != nil {
- return params.Organization{}, errors.Wrap(err, "fetching organization")
+ return params.Organization{}, fmt.Errorf("error fetching organization: %w", err)
}
poolMgr, err := r.poolManagerCtrl.GetOrgPoolManager(org)
@@ -124,36 +140,65 @@ func (r *Runner) GetOrganizationByID(ctx context.Context, orgID string) (params.
return org, nil
}
-func (r *Runner) DeleteOrganization(ctx context.Context, orgID string) error {
+func (r *Runner) DeleteOrganization(ctx context.Context, orgID string, keepWebhook bool) error {
if !auth.IsAdmin(ctx) {
return runnerErrors.ErrUnauthorized
}
org, err := r.store.GetOrganizationByID(ctx, orgID)
if err != nil {
- return errors.Wrap(err, "fetching org")
+ return fmt.Errorf("error fetching org: %w", err)
}
- pools, err := r.store.ListOrgPools(ctx, orgID)
+ entity, err := org.GetEntity()
if err != nil {
- return errors.Wrap(err, "fetching org pools")
+ return fmt.Errorf("error getting entity: %w", err)
+ }
+
+ pools, err := r.store.ListEntityPools(ctx, entity)
+ if err != nil {
+ return fmt.Errorf("error fetching org pools: %w", err)
}
if len(pools) > 0 {
- poolIds := []string{}
+ poolIDs := []string{}
for _, pool := range pools {
- poolIds = append(poolIds, pool.ID)
+ poolIDs = append(poolIDs, pool.ID)
}
- return runnerErrors.NewBadRequestError("org has pools defined (%s)", strings.Join(poolIds, ", "))
+ return runnerErrors.NewBadRequestError("org has pools defined (%s)", strings.Join(poolIDs, ", "))
+ }
+
+ scaleSets, err := r.store.ListEntityScaleSets(ctx, entity)
+ if err != nil {
+ return fmt.Errorf("error fetching organization scale sets: %w", err)
+ }
+
+ if len(scaleSets) > 0 {
+ return runnerErrors.NewBadRequestError("organization has scale sets defined; delete them first")
+ }
+
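+ // When webhook management is enabled and the caller did not ask to keep
+ // the webhook, make a best-effort attempt to remove it; failures are
+ // logged but do not abort the delete.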
+ if !keepWebhook && r.config.Default.EnableWebhookManagement {
+ poolMgr, err := r.poolManagerCtrl.GetOrgPoolManager(org)
+ if err != nil {
+ return fmt.Errorf("error fetching pool manager: %w", err)
+ }
+
+ if err := poolMgr.UninstallWebhook(ctx); err != nil {
+ // nolint:golangci-lint,godox
+ // TODO(gabriel-samfira): Should we error out here?
+ slog.With(slog.Any("error", err)).ErrorContext(
+ ctx, "failed to uninstall webhook",
+ "org_id", org.ID)
+ }
}
if err := r.poolManagerCtrl.DeleteOrgPoolManager(org); err != nil {
- return errors.Wrap(err, "deleting org pool manager")
+ return fmt.Errorf("error deleting org pool manager: %w", err)
}
if err := r.store.DeleteOrganization(ctx, orgID); err != nil {
- return errors.Wrapf(err, "removing organization %s", orgID)
+ return fmt.Errorf("error removing organization %s: %w", orgID, err)
}
return nil
}
@@ -166,26 +211,20 @@ func (r *Runner) UpdateOrganization(ctx context.Context, orgID string, param par
r.mux.Lock()
defer r.mux.Unlock()
- org, err := r.store.GetOrganizationByID(ctx, orgID)
- if err != nil {
- return params.Organization{}, errors.Wrap(err, "fetching org")
+ switch param.PoolBalancerType {
+ case params.PoolBalancerTypeRoundRobin, params.PoolBalancerTypePack, params.PoolBalancerTypeNone:
+ default:
+ return params.Organization{}, runnerErrors.NewBadRequestError("invalid pool balancer type: %s", param.PoolBalancerType)
}
- if param.CredentialsName != "" {
- // Check that credentials are set before saving to db
- if _, ok := r.credentials[param.CredentialsName]; !ok {
- return params.Organization{}, runnerErrors.NewBadRequestError("invalid credentials (%s) for org %s", param.CredentialsName, org.Name)
- }
+ org, err := r.store.UpdateOrganization(ctx, orgID, param)
+ if err != nil {
+ return params.Organization{}, fmt.Errorf("error updating org: %w", err)
}
- org, err = r.store.UpdateOrganization(ctx, orgID, param)
+ poolMgr, err := r.poolManagerCtrl.GetOrgPoolManager(org)
if err != nil {
- return params.Organization{}, errors.Wrap(err, "updating org")
- }
-
- poolMgr, err := r.poolManagerCtrl.UpdateOrgPoolManager(r.ctx, org)
- if err != nil {
- return params.Organization{}, fmt.Errorf("updating org pool manager: %w", err)
+ return params.Organization{}, fmt.Errorf("failed to get org pool manager: %w", err)
}
org.PoolManagerStatus = poolMgr.Status()
@@ -197,30 +236,23 @@ func (r *Runner) CreateOrgPool(ctx context.Context, orgID string, param params.C
return params.Pool{}, runnerErrors.ErrUnauthorized
}
- r.mux.Lock()
- defer r.mux.Unlock()
-
- org, err := r.store.GetOrganizationByID(ctx, orgID)
- if err != nil {
- return params.Pool{}, errors.Wrap(err, "fetching org")
- }
-
- if _, err := r.poolManagerCtrl.GetOrgPoolManager(org); err != nil {
- return params.Pool{}, runnerErrors.ErrNotFound
- }
-
createPoolParams, err := r.appendTagsToCreatePoolParams(param)
if err != nil {
- return params.Pool{}, errors.Wrap(err, "fetching pool params")
+ return params.Pool{}, fmt.Errorf("error fetching pool params: %w", err)
}
if param.RunnerBootstrapTimeout == 0 {
param.RunnerBootstrapTimeout = appdefaults.DefaultRunnerBootstrapTimeout
}
- pool, err := r.store.CreateOrganizationPool(ctx, orgID, createPoolParams)
+ entity := params.ForgeEntity{
+ ID: orgID,
+ EntityType: params.ForgeEntityTypeOrganization,
+ }
+
+ pool, err := r.store.CreateEntityPool(ctx, entity, createPoolParams)
if err != nil {
- return params.Pool{}, errors.Wrap(err, "creating pool")
+ return params.Pool{}, fmt.Errorf("error creating pool: %w", err)
}
return pool, nil
@@ -231,10 +263,16 @@ func (r *Runner) GetOrgPoolByID(ctx context.Context, orgID, poolID string) (para
return params.Pool{}, runnerErrors.ErrUnauthorized
}
- pool, err := r.store.GetOrganizationPool(ctx, orgID, poolID)
- if err != nil {
- return params.Pool{}, errors.Wrap(err, "fetching pool")
+ entity := params.ForgeEntity{
+ ID: orgID,
+ EntityType: params.ForgeEntityTypeOrganization,
}
+
+ pool, err := r.store.GetEntityPool(ctx, entity, poolID)
+ if err != nil {
+ return params.Pool{}, fmt.Errorf("error fetching pool: %w", err)
+ }
+
return pool, nil
}
@@ -243,28 +281,31 @@ func (r *Runner) DeleteOrgPool(ctx context.Context, orgID, poolID string) error
return runnerErrors.ErrUnauthorized
}
- // TODO: dedup instance count verification
- pool, err := r.store.GetOrganizationPool(ctx, orgID, poolID)
- if err != nil {
- return errors.Wrap(err, "fetching pool")
+ entity := params.ForgeEntity{
+ ID: orgID,
+ EntityType: params.ForgeEntityTypeOrganization,
}
- instances, err := r.store.ListPoolInstances(ctx, pool.ID)
+ pool, err := r.store.GetEntityPool(ctx, entity, poolID)
if err != nil {
- return errors.Wrap(err, "fetching instances")
+ if !errors.Is(err, runnerErrors.ErrNotFound) {
+ return fmt.Errorf("error fetching pool: %w", err)
+ }
+		// The pool is already gone; treat deletion as idempotent.
+		return nil
}
+ // nolint:golangci-lint,godox
// TODO: implement a count function
- if len(instances) > 0 {
+ if len(pool.Instances) > 0 {
runnerIDs := []string{}
- for _, run := range instances {
+ for _, run := range pool.Instances {
runnerIDs = append(runnerIDs, run.ID)
}
return runnerErrors.NewBadRequestError("pool has runners: %s", strings.Join(runnerIDs, ", "))
}
- if err := r.store.DeleteOrganizationPool(ctx, orgID, poolID); err != nil {
- return errors.Wrap(err, "deleting pool")
+ if err := r.store.DeleteEntityPool(ctx, entity, poolID); err != nil {
+ return fmt.Errorf("error deleting pool: %w", err)
}
return nil
}
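
The same two-field ForgeEntity literal is rebuilt in every handler in this file (see below for one way to factor it out). A hypothetical helper — the name and placement are assumptions, it does not exist in this change — could centralize the construction:

// orgEntity builds the ForgeEntity used by the entity-scoped store calls.
// Illustrative only.
func orgEntity(orgID string) params.ForgeEntity {
	return params.ForgeEntity{
		ID:         orgID,
		EntityType: params.ForgeEntityTypeOrganization,
	}
}

Each call site would then reduce to, for example, r.store.GetEntityPool(ctx, orgEntity(orgID), poolID).
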
@@ -273,10 +314,13 @@ func (r *Runner) ListOrgPools(ctx context.Context, orgID string) ([]params.Pool,
if !auth.IsAdmin(ctx) {
return []params.Pool{}, runnerErrors.ErrUnauthorized
}
-
- pools, err := r.store.ListOrgPools(ctx, orgID)
+ entity := params.ForgeEntity{
+ ID: orgID,
+ EntityType: params.ForgeEntityTypeOrganization,
+ }
+ pools, err := r.store.ListEntityPools(ctx, entity)
if err != nil {
- return nil, errors.Wrap(err, "fetching pools")
+ return nil, fmt.Errorf("error fetching pools: %w", err)
}
return pools, nil
}
@@ -286,9 +330,14 @@ func (r *Runner) UpdateOrgPool(ctx context.Context, orgID, poolID string, param
return params.Pool{}, runnerErrors.ErrUnauthorized
}
- pool, err := r.store.GetOrganizationPool(ctx, orgID, poolID)
+ entity := params.ForgeEntity{
+ ID: orgID,
+ EntityType: params.ForgeEntityTypeOrganization,
+ }
+
+ pool, err := r.store.GetEntityPool(ctx, entity, poolID)
if err != nil {
- return params.Pool{}, errors.Wrap(err, "fetching pool")
+ return params.Pool{}, fmt.Errorf("error fetching pool: %w", err)
}
maxRunners := pool.MaxRunners
@@ -305,9 +354,9 @@ func (r *Runner) UpdateOrgPool(ctx context.Context, orgID, poolID string, param
return params.Pool{}, runnerErrors.NewBadRequestError("min_idle_runners cannot be larger than max_runners")
}
- newPool, err := r.store.UpdateOrganizationPool(ctx, orgID, poolID, param)
+ newPool, err := r.store.UpdateEntityPool(ctx, entity, poolID, param)
if err != nil {
- return params.Pool{}, errors.Wrap(err, "updating pool")
+ return params.Pool{}, fmt.Errorf("error updating pool: %w", err)
}
return newPool, nil
}
@@ -317,25 +366,95 @@ func (r *Runner) ListOrgInstances(ctx context.Context, orgID string) ([]params.I
return nil, runnerErrors.ErrUnauthorized
}
- instances, err := r.store.ListOrgInstances(ctx, orgID)
+ entity := params.ForgeEntity{
+ ID: orgID,
+ EntityType: params.ForgeEntityTypeOrganization,
+ }
+
+ instances, err := r.store.ListEntityInstances(ctx, entity)
if err != nil {
- return []params.Instance{}, errors.Wrap(err, "fetching instances")
+ return []params.Instance{}, fmt.Errorf("error fetching instances: %w", err)
}
return instances, nil
}
-func (r *Runner) findOrgPoolManager(name string) (common.PoolManager, error) {
+func (r *Runner) findOrgPoolManager(name, endpointName string) (common.PoolManager, error) {
r.mux.Lock()
defer r.mux.Unlock()
- org, err := r.store.GetOrganization(r.ctx, name)
+ org, err := r.store.GetOrganization(r.ctx, name, endpointName)
if err != nil {
- return nil, errors.Wrap(err, "fetching org")
+ return nil, fmt.Errorf("error fetching org: %w", err)
}
poolManager, err := r.poolManagerCtrl.GetOrgPoolManager(org)
if err != nil {
- return nil, errors.Wrap(err, "fetching pool manager for org")
+ return nil, fmt.Errorf("error fetching pool manager for org: %w", err)
}
return poolManager, nil
}
+
+func (r *Runner) InstallOrgWebhook(ctx context.Context, orgID string, param params.InstallWebhookParams) (params.HookInfo, error) {
+ if !auth.IsAdmin(ctx) {
+ return params.HookInfo{}, runnerErrors.ErrUnauthorized
+ }
+
+ org, err := r.store.GetOrganizationByID(ctx, orgID)
+ if err != nil {
+ return params.HookInfo{}, fmt.Errorf("error fetching org: %w", err)
+ }
+
+ poolMgr, err := r.poolManagerCtrl.GetOrgPoolManager(org)
+ if err != nil {
+ return params.HookInfo{}, fmt.Errorf("error fetching pool manager for org: %w", err)
+ }
+
+ info, err := poolMgr.InstallWebhook(ctx, param)
+ if err != nil {
+ return params.HookInfo{}, fmt.Errorf("error installing webhook: %w", err)
+ }
+ return info, nil
+}
+
+func (r *Runner) UninstallOrgWebhook(ctx context.Context, orgID string) error {
+ if !auth.IsAdmin(ctx) {
+ return runnerErrors.ErrUnauthorized
+ }
+
+ org, err := r.store.GetOrganizationByID(ctx, orgID)
+ if err != nil {
+ return fmt.Errorf("error fetching org: %w", err)
+ }
+
+ poolMgr, err := r.poolManagerCtrl.GetOrgPoolManager(org)
+ if err != nil {
+ return fmt.Errorf("error fetching pool manager for org: %w", err)
+ }
+
+ if err := poolMgr.UninstallWebhook(ctx); err != nil {
+ return fmt.Errorf("error uninstalling webhook: %w", err)
+ }
+ return nil
+}
+
+func (r *Runner) GetOrgWebhookInfo(ctx context.Context, orgID string) (params.HookInfo, error) {
+ if !auth.IsAdmin(ctx) {
+ return params.HookInfo{}, runnerErrors.ErrUnauthorized
+ }
+
+ org, err := r.store.GetOrganizationByID(ctx, orgID)
+ if err != nil {
+ return params.HookInfo{}, fmt.Errorf("error fetching org: %w", err)
+ }
+
+ poolMgr, err := r.poolManagerCtrl.GetOrgPoolManager(org)
+ if err != nil {
+ return params.HookInfo{}, fmt.Errorf("error fetching pool manager for org: %w", err)
+ }
+
+ info, err := poolMgr.GetWebhookInfo(ctx)
+ if err != nil {
+ return params.HookInfo{}, fmt.Errorf("error fetching webhook info: %w", err)
+ }
+ return info, nil
+}
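
InstallOrgWebhook, UninstallOrgWebhook and GetOrgWebhookInfo share the same fetch-org, resolve-pool-manager prologue. A possible deduplication, sketched under the assumption that the helper name and closure shape are free choices (neither exists in this change):

// withOrgPoolManager is an illustrative helper that factors out the shared
// fetch-org / resolve-pool-manager steps used by the webhook handlers above.
func (r *Runner) withOrgPoolManager(ctx context.Context, orgID string, fn func(common.PoolManager) error) error {
	org, err := r.store.GetOrganizationByID(ctx, orgID)
	if err != nil {
		return fmt.Errorf("error fetching org: %w", err)
	}
	poolMgr, err := r.poolManagerCtrl.GetOrgPoolManager(org)
	if err != nil {
		return fmt.Errorf("error fetching pool manager for org: %w", err)
	}
	return fn(poolMgr)
}

UninstallOrgWebhook, for instance, would then become a single withOrgPoolManager call wrapping poolMgr.UninstallWebhook(ctx).
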
diff --git a/runner/organizations_test.go b/runner/organizations_test.go
index f6c1e170..8d2aa3f6 100644
--- a/runner/organizations_test.go
+++ b/runner/organizations_test.go
@@ -16,12 +16,14 @@ package runner
import (
"context"
+ "errors"
"fmt"
"testing"
+ "github.com/stretchr/testify/mock"
+ "github.com/stretchr/testify/suite"
+
runnerErrors "github.com/cloudbase/garm-provider-common/errors"
- "github.com/cloudbase/garm/auth"
- "github.com/cloudbase/garm/config"
"github.com/cloudbase/garm/database"
dbCommon "github.com/cloudbase/garm/database/common"
garmTesting "github.com/cloudbase/garm/internal/testing"
@@ -29,46 +31,54 @@ import (
"github.com/cloudbase/garm/runner/common"
runnerCommonMocks "github.com/cloudbase/garm/runner/common/mocks"
runnerMocks "github.com/cloudbase/garm/runner/mocks"
-
- "github.com/stretchr/testify/mock"
- "github.com/stretchr/testify/suite"
)
type OrgTestFixtures struct {
- AdminContext context.Context
- DBFile string
- Store dbCommon.Store
- StoreOrgs map[string]params.Organization
- Providers map[string]common.Provider
- Credentials map[string]config.Github
- CreateOrgParams params.CreateOrgParams
- CreatePoolParams params.CreatePoolParams
- CreateInstanceParams params.CreateInstanceParams
- UpdateRepoParams params.UpdateEntityParams
- UpdatePoolParams params.UpdatePoolParams
- UpdatePoolStateParams params.UpdatePoolStateParams
- ErrMock error
- ProviderMock *runnerCommonMocks.Provider
- PoolMgrMock *runnerCommonMocks.PoolManager
- PoolMgrCtrlMock *runnerMocks.PoolManagerController
+ AdminContext context.Context
+ DBFile string
+ Store dbCommon.Store
+ StoreOrgs map[string]params.Organization
+ Providers map[string]common.Provider
+ Credentials map[string]params.ForgeCredentials
+ CreateOrgParams params.CreateOrgParams
+ CreatePoolParams params.CreatePoolParams
+ CreateInstanceParams params.CreateInstanceParams
+ UpdateRepoParams params.UpdateEntityParams
+ UpdatePoolParams params.UpdatePoolParams
+ ErrMock error
+ ProviderMock *runnerCommonMocks.Provider
+ PoolMgrMock *runnerCommonMocks.PoolManager
+ PoolMgrCtrlMock *runnerMocks.PoolManagerController
}
type OrgTestSuite struct {
suite.Suite
Fixtures *OrgTestFixtures
Runner *Runner
+
+ testCreds params.ForgeCredentials
+ secondaryTestCreds params.ForgeCredentials
+ giteaTestCreds params.ForgeCredentials
+ githubEndpoint params.ForgeEndpoint
+ giteaEndpoint params.ForgeEndpoint
}
func (s *OrgTestSuite) SetupTest() {
- adminCtx := auth.GetAdminContext()
-
// create testing sqlite database
dbCfg := garmTesting.GetTestSqliteDBConfig(s.T())
- db, err := database.NewDatabase(adminCtx, dbCfg)
+ db, err := database.NewDatabase(context.Background(), dbCfg)
if err != nil {
s.FailNow(fmt.Sprintf("failed to create db connection: %s", err))
}
+ adminCtx := garmTesting.ImpersonateAdminContext(context.Background(), db, s.T())
+
+ s.githubEndpoint = garmTesting.CreateDefaultGithubEndpoint(adminCtx, db, s.T())
+ s.giteaEndpoint = garmTesting.CreateDefaultGiteaEndpoint(adminCtx, db, s.T())
+ s.testCreds = garmTesting.CreateTestGithubCredentials(adminCtx, "new-creds", db, s.T(), s.githubEndpoint)
+ s.giteaTestCreds = garmTesting.CreateTestGiteaCredentials(adminCtx, "gitea-creds", db, s.T(), s.giteaEndpoint)
+ s.secondaryTestCreds = garmTesting.CreateTestGithubCredentials(adminCtx, "secondary-creds", db, s.T(), s.githubEndpoint)
+
// create some organization objects in the database, for testing purposes
orgs := map[string]params.Organization{}
for i := 1; i <= 3; i++ {
@@ -76,8 +86,9 @@ func (s *OrgTestSuite) SetupTest() {
org, err := db.CreateOrganization(
adminCtx,
name,
- fmt.Sprintf("test-creds-%v", i),
+ s.testCreds,
fmt.Sprintf("test-webhook-secret-%v", i),
+ params.PoolBalancerTypeRoundRobin,
)
if err != nil {
s.FailNow(fmt.Sprintf("failed to create database object (test-org-%v)", i))
@@ -97,16 +108,13 @@ func (s *OrgTestSuite) SetupTest() {
Providers: map[string]common.Provider{
"test-provider": providerMock,
},
- Credentials: map[string]config.Github{
- "test-creds": {
- Name: "test-creds-name",
- Description: "test-creds-description",
- OAuth2Token: "test-creds-oauth2-token",
- },
+ Credentials: map[string]params.ForgeCredentials{
+ s.testCreds.Name: s.testCreds,
+ s.secondaryTestCreds.Name: s.secondaryTestCreds,
},
CreateOrgParams: params.CreateOrgParams{
Name: "test-org-create",
- CredentialsName: "test-creds",
+ CredentialsName: s.testCreds.Name,
WebhookSecret: "test-create-org-webhook-secret",
},
CreatePoolParams: params.CreatePoolParams{
@@ -117,7 +125,7 @@ func (s *OrgTestSuite) SetupTest() {
Flavor: "test",
OSType: "linux",
OSArch: "arm64",
- Tags: []string{"self-hosted", "arm64", "linux"},
+ Tags: []string{"arm64-linux-runner"},
RunnerBootstrapTimeout: 0,
},
CreateInstanceParams: params.CreateInstanceParams{
@@ -125,7 +133,7 @@ func (s *OrgTestSuite) SetupTest() {
OSType: "linux",
},
UpdateRepoParams: params.UpdateEntityParams{
- CredentialsName: "test-creds",
+ CredentialsName: s.testCreds.Name,
WebhookSecret: "test-update-repo-webhook-secret",
},
UpdatePoolParams: params.UpdatePoolParams{
@@ -134,9 +142,6 @@ func (s *OrgTestSuite) SetupTest() {
Image: "test-images-updated",
Flavor: "test-flavor-updated",
},
- UpdatePoolStateParams: params.UpdatePoolStateParams{
- WebhookSecret: "test-update-repo-webhook-secret",
- },
ErrMock: fmt.Errorf("mock error"),
ProviderMock: providerMock,
PoolMgrMock: runnerCommonMocks.NewPoolManager(s.T()),
@@ -147,7 +152,6 @@ func (s *OrgTestSuite) SetupTest() {
// setup test runner
runner := &Runner{
providers: fixtures.Providers,
- credentials: fixtures.Credentials,
ctx: fixtures.AdminContext,
store: fixtures.Store,
poolManagerCtrl: fixtures.PoolMgrCtrlMock,
@@ -168,7 +172,21 @@ func (s *OrgTestSuite) TestCreateOrganization() {
s.Fixtures.PoolMgrCtrlMock.AssertExpectations(s.T())
s.Require().Nil(err)
s.Require().Equal(s.Fixtures.CreateOrgParams.Name, org.Name)
- s.Require().Equal(s.Fixtures.Credentials[s.Fixtures.CreateOrgParams.CredentialsName].Name, org.CredentialsName)
+ s.Require().Equal(s.Fixtures.Credentials[s.Fixtures.CreateOrgParams.CredentialsName].Name, org.Credentials.Name)
+ s.Require().Equal(params.PoolBalancerTypeRoundRobin, org.PoolBalancerType)
+}
+
+func (s *OrgTestSuite) TestCreateOrganizationPoolBalancerTypePack() {
+ s.Fixtures.CreateOrgParams.PoolBalancerType = params.PoolBalancerTypePack
+ s.Fixtures.PoolMgrMock.On("Start").Return(nil)
+ s.Fixtures.PoolMgrCtrlMock.On("CreateOrgPoolManager", s.Fixtures.AdminContext, mock.AnythingOfType("params.Organization"), s.Fixtures.Providers, s.Fixtures.Store).Return(s.Fixtures.PoolMgrMock, nil)
+
+ org, err := s.Runner.CreateOrganization(s.Fixtures.AdminContext, s.Fixtures.CreateOrgParams)
+
+ s.Fixtures.PoolMgrMock.AssertExpectations(s.T())
+ s.Fixtures.PoolMgrCtrlMock.AssertExpectations(s.T())
+ s.Require().Nil(err)
+ s.Require().Equal(params.PoolBalancerTypePack, org.PoolBalancerType)
}
func (s *OrgTestSuite) TestCreateOrganizationErrUnauthorized() {
@@ -184,7 +202,7 @@ func (s *OrgTestSuite) TestCreateOrganizationEmptyParams() {
}
func (s *OrgTestSuite) TestCreateOrganizationMissingCredentials() {
- s.Fixtures.CreateOrgParams.CredentialsName = "not-existent-creds-name"
+ s.Fixtures.CreateOrgParams.CredentialsName = notExistingCredentialsName
_, err := s.Runner.CreateOrganization(s.Fixtures.AdminContext, s.Fixtures.CreateOrgParams)
@@ -206,7 +224,7 @@ func (s *OrgTestSuite) TestCreateOrganizationPoolMgrFailed() {
s.Fixtures.PoolMgrMock.AssertExpectations(s.T())
s.Fixtures.PoolMgrCtrlMock.AssertExpectations(s.T())
- s.Require().Equal(fmt.Sprintf("creating org pool manager: %s", s.Fixtures.ErrMock.Error()), err.Error())
+ s.Require().Equal(fmt.Sprintf("error creating org pool manager: %s", s.Fixtures.ErrMock.Error()), err.Error())
}
func (s *OrgTestSuite) TestCreateOrganizationStartPoolMgrFailed() {
@@ -218,20 +236,80 @@ func (s *OrgTestSuite) TestCreateOrganizationStartPoolMgrFailed() {
s.Fixtures.PoolMgrMock.AssertExpectations(s.T())
s.Fixtures.PoolMgrCtrlMock.AssertExpectations(s.T())
- s.Require().Equal(fmt.Sprintf("starting org pool manager: %s", s.Fixtures.ErrMock.Error()), err.Error())
+ s.Require().Equal(fmt.Sprintf("error starting org pool manager: %s", s.Fixtures.ErrMock.Error()), err.Error())
}
func (s *OrgTestSuite) TestListOrganizations() {
s.Fixtures.PoolMgrCtrlMock.On("GetOrgPoolManager", mock.AnythingOfType("params.Organization")).Return(s.Fixtures.PoolMgrMock, nil)
s.Fixtures.PoolMgrMock.On("Status").Return(params.PoolManagerStatus{IsRunning: true}, nil)
- orgs, err := s.Runner.ListOrganizations(s.Fixtures.AdminContext)
+ orgs, err := s.Runner.ListOrganizations(s.Fixtures.AdminContext, params.OrganizationFilter{})
s.Require().Nil(err)
garmTesting.EqualDBEntityByName(s.T(), garmTesting.DBEntityMapToSlice(s.Fixtures.StoreOrgs), orgs)
}
+func (s *OrgTestSuite) TestListOrganizationsWithFilter() {
+ s.Fixtures.PoolMgrCtrlMock.On("GetOrgPoolManager", mock.AnythingOfType("params.Organization")).Return(s.Fixtures.PoolMgrMock, nil)
+ s.Fixtures.PoolMgrMock.On("Status").Return(params.PoolManagerStatus{IsRunning: true}, nil)
+
+ org, err := s.Fixtures.Store.CreateOrganization(
+ s.Fixtures.AdminContext,
+ "test-org",
+ s.testCreds,
+ "super-secret",
+ params.PoolBalancerTypeRoundRobin)
+ s.Require().NoError(err)
+
+ org2, err := s.Fixtures.Store.CreateOrganization(
+ s.Fixtures.AdminContext,
+ "test-org",
+ s.giteaTestCreds,
+ "super-secret",
+ params.PoolBalancerTypeRoundRobin)
+ s.Require().NoError(err)
+
+ org3, err := s.Fixtures.Store.CreateOrganization(
+ s.Fixtures.AdminContext,
+ "test-org2",
+ s.giteaTestCreds,
+ "super-secret",
+ params.PoolBalancerTypeRoundRobin)
+ s.Require().NoError(err)
+
+ orgs, err := s.Runner.ListOrganizations(
+ s.Fixtures.AdminContext,
+ params.OrganizationFilter{
+ Name: "test-org",
+ },
+ )
+
+ s.Require().Nil(err)
+ garmTesting.EqualDBEntityByName(s.T(), []params.Organization{org, org2}, orgs)
+
+ orgs, err = s.Runner.ListOrganizations(
+ s.Fixtures.AdminContext,
+ params.OrganizationFilter{
+ Name: "test-org",
+ Endpoint: s.giteaEndpoint.Name,
+ },
+ )
+
+ s.Require().Nil(err)
+ garmTesting.EqualDBEntityByName(s.T(), []params.Organization{org2}, orgs)
+
+ orgs, err = s.Runner.ListOrganizations(
+ s.Fixtures.AdminContext,
+ params.OrganizationFilter{
+ Name: "test-org2",
+ },
+ )
+
+ s.Require().Nil(err)
+ garmTesting.EqualDBEntityByName(s.T(), []params.Organization{org3}, orgs)
+}
+
func (s *OrgTestSuite) TestListOrganizationsErrUnauthorized() {
- _, err := s.Runner.ListOrganizations(context.Background())
+ _, err := s.Runner.ListOrganizations(context.Background(), params.OrganizationFilter{})
s.Require().Equal(runnerErrors.ErrUnauthorized, err)
}
@@ -254,28 +332,32 @@ func (s *OrgTestSuite) TestGetOrganizationByIDErrUnauthorized() {
func (s *OrgTestSuite) TestDeleteOrganization() {
s.Fixtures.PoolMgrCtrlMock.On("DeleteOrgPoolManager", mock.AnythingOfType("params.Organization")).Return(nil)
- err := s.Runner.DeleteOrganization(s.Fixtures.AdminContext, s.Fixtures.StoreOrgs["test-org-3"].ID)
+ err := s.Runner.DeleteOrganization(s.Fixtures.AdminContext, s.Fixtures.StoreOrgs["test-org-3"].ID, true)
s.Fixtures.PoolMgrCtrlMock.AssertExpectations(s.T())
s.Require().Nil(err)
_, err = s.Fixtures.Store.GetOrganizationByID(s.Fixtures.AdminContext, s.Fixtures.StoreOrgs["test-org-3"].ID)
- s.Require().Equal("fetching org: not found", err.Error())
+ s.Require().Equal("error fetching org: not found", err.Error())
}
func (s *OrgTestSuite) TestDeleteOrganizationErrUnauthorized() {
- err := s.Runner.DeleteOrganization(context.Background(), "dummy-org-id")
+ err := s.Runner.DeleteOrganization(context.Background(), "dummy-org-id", true)
s.Require().Equal(runnerErrors.ErrUnauthorized, err)
}
func (s *OrgTestSuite) TestDeleteOrganizationPoolDefinedFailed() {
- pool, err := s.Fixtures.Store.CreateOrganizationPool(s.Fixtures.AdminContext, s.Fixtures.StoreOrgs["test-org-1"].ID, s.Fixtures.CreatePoolParams)
+ entity := params.ForgeEntity{
+ ID: s.Fixtures.StoreOrgs["test-org-1"].ID,
+ EntityType: params.ForgeEntityTypeOrganization,
+ }
+ pool, err := s.Fixtures.Store.CreateEntityPool(s.Fixtures.AdminContext, entity, s.Fixtures.CreatePoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create store organizations pool: %v", err))
}
- err = s.Runner.DeleteOrganization(s.Fixtures.AdminContext, s.Fixtures.StoreOrgs["test-org-1"].ID)
+ err = s.Runner.DeleteOrganization(s.Fixtures.AdminContext, s.Fixtures.StoreOrgs["test-org-1"].ID, true)
s.Require().Equal(runnerErrors.NewBadRequestError("org has pools defined (%s)", pool.ID), err)
}
@@ -283,14 +365,14 @@ func (s *OrgTestSuite) TestDeleteOrganizationPoolDefinedFailed() {
func (s *OrgTestSuite) TestDeleteOrganizationPoolMgrFailed() {
s.Fixtures.PoolMgrCtrlMock.On("DeleteOrgPoolManager", mock.AnythingOfType("params.Organization")).Return(s.Fixtures.ErrMock)
- err := s.Runner.DeleteOrganization(s.Fixtures.AdminContext, s.Fixtures.StoreOrgs["test-org-1"].ID)
+ err := s.Runner.DeleteOrganization(s.Fixtures.AdminContext, s.Fixtures.StoreOrgs["test-org-1"].ID, true)
s.Fixtures.PoolMgrCtrlMock.AssertExpectations(s.T())
- s.Require().Equal(fmt.Sprintf("deleting org pool manager: %s", s.Fixtures.ErrMock.Error()), err.Error())
+ s.Require().Equal(fmt.Sprintf("error deleting org pool manager: %s", s.Fixtures.ErrMock.Error()), err.Error())
}
func (s *OrgTestSuite) TestUpdateOrganization() {
- s.Fixtures.PoolMgrCtrlMock.On("UpdateOrgPoolManager", s.Fixtures.AdminContext, mock.AnythingOfType("params.Organization")).Return(s.Fixtures.PoolMgrMock, nil)
+ s.Fixtures.PoolMgrCtrlMock.On("GetOrgPoolManager", mock.AnythingOfType("params.Organization")).Return(s.Fixtures.PoolMgrMock, nil)
s.Fixtures.PoolMgrMock.On("Status").Return(params.PoolManagerStatus{IsRunning: true}, nil)
org, err := s.Runner.UpdateOrganization(s.Fixtures.AdminContext, s.Fixtures.StoreOrgs["test-org-1"].ID, s.Fixtures.UpdateRepoParams)
@@ -298,10 +380,25 @@ func (s *OrgTestSuite) TestUpdateOrganization() {
s.Fixtures.PoolMgrMock.AssertExpectations(s.T())
s.Fixtures.PoolMgrCtrlMock.AssertExpectations(s.T())
s.Require().Nil(err)
- s.Require().Equal(s.Fixtures.UpdateRepoParams.CredentialsName, org.CredentialsName)
+ s.Require().Equal(s.Fixtures.UpdateRepoParams.CredentialsName, org.Credentials.Name)
s.Require().Equal(s.Fixtures.UpdateRepoParams.WebhookSecret, org.WebhookSecret)
}
+func (s *OrgTestSuite) TestUpdateOrganizationBalancingType() {
+ s.Fixtures.PoolMgrCtrlMock.On("GetOrgPoolManager", mock.AnythingOfType("params.Organization")).Return(s.Fixtures.PoolMgrMock, nil)
+ s.Fixtures.PoolMgrMock.On("Status").Return(params.PoolManagerStatus{IsRunning: true}, nil)
+
+ param := s.Fixtures.UpdateRepoParams
+ param.PoolBalancerType = params.PoolBalancerTypePack
+ org, err := s.Runner.UpdateOrganization(s.Fixtures.AdminContext, s.Fixtures.StoreOrgs["test-org-1"].ID, param)
+
+ s.Fixtures.PoolMgrMock.AssertExpectations(s.T())
+ s.Fixtures.PoolMgrCtrlMock.AssertExpectations(s.T())
+ s.Require().Nil(err)
+ s.Require().Equal(params.PoolBalancerTypePack, org.PoolBalancerType)
+}
+
func (s *OrgTestSuite) TestUpdateOrganizationErrUnauthorized() {
_, err := s.Runner.UpdateOrganization(context.Background(), "dummy-org-id", s.Fixtures.UpdateRepoParams)
@@ -309,34 +406,33 @@ func (s *OrgTestSuite) TestUpdateOrganizationErrUnauthorized() {
}
func (s *OrgTestSuite) TestUpdateOrganizationInvalidCreds() {
- s.Fixtures.UpdateRepoParams.CredentialsName = "invalid-creds-name"
+ s.Fixtures.UpdateRepoParams.CredentialsName = invalidCredentialsName
_, err := s.Runner.UpdateOrganization(s.Fixtures.AdminContext, s.Fixtures.StoreOrgs["test-org-1"].ID, s.Fixtures.UpdateRepoParams)
-
- s.Require().Equal(runnerErrors.NewBadRequestError("invalid credentials (%s) for org %s", s.Fixtures.UpdateRepoParams.CredentialsName, s.Fixtures.StoreOrgs["test-org-1"].Name), err)
+ if !errors.Is(err, runnerErrors.ErrNotFound) {
+ s.FailNow(fmt.Sprintf("expected error: %v", runnerErrors.ErrNotFound))
+ }
}
func (s *OrgTestSuite) TestUpdateOrganizationPoolMgrFailed() {
- s.Fixtures.PoolMgrCtrlMock.On("UpdateOrgPoolManager", s.Fixtures.AdminContext, mock.AnythingOfType("params.Organization")).Return(s.Fixtures.PoolMgrMock, s.Fixtures.ErrMock)
+ s.Fixtures.PoolMgrCtrlMock.On("GetOrgPoolManager", mock.AnythingOfType("params.Organization")).Return(s.Fixtures.PoolMgrMock, s.Fixtures.ErrMock)
_, err := s.Runner.UpdateOrganization(s.Fixtures.AdminContext, s.Fixtures.StoreOrgs["test-org-1"].ID, s.Fixtures.UpdateRepoParams)
s.Fixtures.PoolMgrCtrlMock.AssertExpectations(s.T())
- s.Require().Equal(fmt.Sprintf("updating org pool manager: %s", s.Fixtures.ErrMock.Error()), err.Error())
+ s.Require().Equal(fmt.Sprintf("failed to get org pool manager: %s", s.Fixtures.ErrMock.Error()), err.Error())
}
func (s *OrgTestSuite) TestUpdateOrganizationCreateOrgPoolMgrFailed() {
- s.Fixtures.PoolMgrCtrlMock.On("UpdateOrgPoolManager", s.Fixtures.AdminContext, mock.AnythingOfType("params.Organization")).Return(s.Fixtures.PoolMgrMock, s.Fixtures.ErrMock)
+ s.Fixtures.PoolMgrCtrlMock.On("GetOrgPoolManager", mock.AnythingOfType("params.Organization")).Return(s.Fixtures.PoolMgrMock, s.Fixtures.ErrMock)
_, err := s.Runner.UpdateOrganization(s.Fixtures.AdminContext, s.Fixtures.StoreOrgs["test-org-1"].ID, s.Fixtures.UpdateRepoParams)
s.Fixtures.PoolMgrCtrlMock.AssertExpectations(s.T())
- s.Require().Equal(fmt.Sprintf("updating org pool manager: %s", s.Fixtures.ErrMock.Error()), err.Error())
+ s.Require().Equal(fmt.Sprintf("failed to get org pool manager: %s", s.Fixtures.ErrMock.Error()), err.Error())
}
func (s *OrgTestSuite) TestCreateOrgPool() {
- s.Fixtures.PoolMgrCtrlMock.On("GetOrgPoolManager", mock.AnythingOfType("params.Organization")).Return(s.Fixtures.PoolMgrMock, nil)
-
pool, err := s.Runner.CreateOrgPool(s.Fixtures.AdminContext, s.Fixtures.StoreOrgs["test-org-1"].ID, s.Fixtures.CreatePoolParams)
s.Fixtures.PoolMgrMock.AssertExpectations(s.T())
@@ -360,21 +456,8 @@ func (s *OrgTestSuite) TestCreateOrgPoolErrUnauthorized() {
s.Require().Equal(runnerErrors.ErrUnauthorized, err)
}
-func (s *OrgTestSuite) TestCreateOrgPoolErrNotFound() {
- s.Fixtures.PoolMgrCtrlMock.On("GetOrgPoolManager", mock.AnythingOfType("params.Organization")).Return(s.Fixtures.PoolMgrMock, runnerErrors.ErrNotFound)
-
- _, err := s.Runner.CreateOrgPool(s.Fixtures.AdminContext, s.Fixtures.StoreOrgs["test-org-1"].ID, s.Fixtures.CreatePoolParams)
-
- s.Fixtures.PoolMgrMock.AssertExpectations(s.T())
- s.Fixtures.PoolMgrCtrlMock.AssertExpectations(s.T())
- s.Require().Equal(runnerErrors.ErrNotFound, err)
-}
-
func (s *OrgTestSuite) TestCreateOrgPoolFetchPoolParamsFailed() {
- s.Fixtures.CreatePoolParams.ProviderName = "not-existent-provider-name"
-
- s.Fixtures.PoolMgrCtrlMock.On("GetOrgPoolManager", mock.AnythingOfType("params.Organization")).Return(s.Fixtures.PoolMgrMock, nil)
-
+ s.Fixtures.CreatePoolParams.ProviderName = notExistingProviderName
_, err := s.Runner.CreateOrgPool(s.Fixtures.AdminContext, s.Fixtures.StoreOrgs["test-org-1"].ID, s.Fixtures.CreatePoolParams)
s.Fixtures.PoolMgrMock.AssertExpectations(s.T())
@@ -383,7 +466,11 @@ func (s *OrgTestSuite) TestCreateOrgPoolFetchPoolParamsFailed() {
}
func (s *OrgTestSuite) TestGetOrgPoolByID() {
- orgPool, err := s.Fixtures.Store.CreateOrganizationPool(s.Fixtures.AdminContext, s.Fixtures.StoreOrgs["test-org-1"].ID, s.Fixtures.CreatePoolParams)
+ entity := params.ForgeEntity{
+ ID: s.Fixtures.StoreOrgs["test-org-1"].ID,
+ EntityType: params.ForgeEntityTypeOrganization,
+ }
+ orgPool, err := s.Fixtures.Store.CreateEntityPool(s.Fixtures.AdminContext, entity, s.Fixtures.CreatePoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create org pool: %s", err))
}
@@ -401,7 +488,11 @@ func (s *OrgTestSuite) TestGetOrgPoolByIDErrUnauthorized() {
}
func (s *OrgTestSuite) TestDeleteOrgPool() {
- pool, err := s.Fixtures.Store.CreateOrganizationPool(s.Fixtures.AdminContext, s.Fixtures.StoreOrgs["test-org-1"].ID, s.Fixtures.CreatePoolParams)
+ entity := params.ForgeEntity{
+ ID: s.Fixtures.StoreOrgs["test-org-1"].ID,
+ EntityType: params.ForgeEntityTypeOrganization,
+ }
+ pool, err := s.Fixtures.Store.CreateEntityPool(s.Fixtures.AdminContext, entity, s.Fixtures.CreatePoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create org pool: %s", err))
}
@@ -410,8 +501,8 @@ func (s *OrgTestSuite) TestDeleteOrgPool() {
s.Require().Nil(err)
- _, err = s.Fixtures.Store.GetOrganizationPool(s.Fixtures.AdminContext, s.Fixtures.StoreOrgs["test-org-1"].ID, pool.ID)
- s.Require().Equal("fetching pool: finding pool: not found", err.Error())
+ _, err = s.Fixtures.Store.GetEntityPool(s.Fixtures.AdminContext, entity, pool.ID)
+ s.Require().Equal("fetching pool: error finding pool: not found", err.Error())
}
func (s *OrgTestSuite) TestDeleteOrgPoolErrUnauthorized() {
@@ -421,7 +512,11 @@ func (s *OrgTestSuite) TestDeleteOrgPoolErrUnauthorized() {
}
func (s *OrgTestSuite) TestDeleteOrgPoolRunnersFailed() {
- pool, err := s.Fixtures.Store.CreateOrganizationPool(s.Fixtures.AdminContext, s.Fixtures.StoreOrgs["test-org-1"].ID, s.Fixtures.CreatePoolParams)
+ entity := params.ForgeEntity{
+ ID: s.Fixtures.StoreOrgs["test-org-1"].ID,
+ EntityType: params.ForgeEntityTypeOrganization,
+ }
+ pool, err := s.Fixtures.Store.CreateEntityPool(s.Fixtures.AdminContext, entity, s.Fixtures.CreatePoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create org pool: %v", err))
}
@@ -436,10 +531,14 @@ func (s *OrgTestSuite) TestDeleteOrgPoolRunnersFailed() {
}
func (s *OrgTestSuite) TestListOrgPools() {
+ entity := params.ForgeEntity{
+ ID: s.Fixtures.StoreOrgs["test-org-1"].ID,
+ EntityType: params.ForgeEntityTypeOrganization,
+ }
orgPools := []params.Pool{}
for i := 1; i <= 2; i++ {
s.Fixtures.CreatePoolParams.Image = fmt.Sprintf("test-org-%v", i)
- pool, err := s.Fixtures.Store.CreateOrganizationPool(s.Fixtures.AdminContext, s.Fixtures.StoreOrgs["test-org-1"].ID, s.Fixtures.CreatePoolParams)
+ pool, err := s.Fixtures.Store.CreateEntityPool(s.Fixtures.AdminContext, entity, s.Fixtures.CreatePoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create org pool: %v", err))
}
@@ -459,7 +558,11 @@ func (s *OrgTestSuite) TestListOrgPoolsErrUnauthorized() {
}
func (s *OrgTestSuite) TestUpdateOrgPool() {
- orgPool, err := s.Fixtures.Store.CreateOrganizationPool(s.Fixtures.AdminContext, s.Fixtures.StoreOrgs["test-org-1"].ID, s.Fixtures.CreatePoolParams)
+ entity := params.ForgeEntity{
+ ID: s.Fixtures.StoreOrgs["test-org-1"].ID,
+ EntityType: params.ForgeEntityTypeOrganization,
+ }
+ orgPool, err := s.Fixtures.Store.CreateEntityPool(s.Fixtures.AdminContext, entity, s.Fixtures.CreatePoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create org pool: %s", err))
}
@@ -478,7 +581,11 @@ func (s *OrgTestSuite) TestUpdateOrgPoolErrUnauthorized() {
}
func (s *OrgTestSuite) TestUpdateOrgPoolMinIdleGreaterThanMax() {
- pool, err := s.Fixtures.Store.CreateOrganizationPool(s.Fixtures.AdminContext, s.Fixtures.StoreOrgs["test-org-1"].ID, s.Fixtures.CreatePoolParams)
+ entity := params.ForgeEntity{
+ ID: s.Fixtures.StoreOrgs["test-org-1"].ID,
+ EntityType: params.ForgeEntityTypeOrganization,
+ }
+ pool, err := s.Fixtures.Store.CreateEntityPool(s.Fixtures.AdminContext, entity, s.Fixtures.CreatePoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create org pool: %s", err))
}
@@ -493,7 +600,11 @@ func (s *OrgTestSuite) TestUpdateOrgPoolMinIdleGreaterThanMax() {
}
func (s *OrgTestSuite) TestListOrgInstances() {
- pool, err := s.Fixtures.Store.CreateOrganizationPool(s.Fixtures.AdminContext, s.Fixtures.StoreOrgs["test-org-1"].ID, s.Fixtures.CreatePoolParams)
+ entity := params.ForgeEntity{
+ ID: s.Fixtures.StoreOrgs["test-org-1"].ID,
+ EntityType: params.ForgeEntityTypeOrganization,
+ }
+ pool, err := s.Fixtures.Store.CreateEntityPool(s.Fixtures.AdminContext, entity, s.Fixtures.CreatePoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create org pool: %v", err))
}
@@ -522,7 +633,7 @@ func (s *OrgTestSuite) TestListOrgInstancesErrUnauthorized() {
func (s *OrgTestSuite) TestFindOrgPoolManager() {
s.Fixtures.PoolMgrCtrlMock.On("GetOrgPoolManager", mock.AnythingOfType("params.Organization")).Return(s.Fixtures.PoolMgrMock, nil)
- poolManager, err := s.Runner.findOrgPoolManager(s.Fixtures.StoreOrgs["test-org-1"].Name)
+ poolManager, err := s.Runner.findOrgPoolManager(s.Fixtures.StoreOrgs["test-org-1"].Name, s.Fixtures.StoreOrgs["test-org-1"].Endpoint.Name)
s.Fixtures.PoolMgrMock.AssertExpectations(s.T())
s.Fixtures.PoolMgrCtrlMock.AssertExpectations(s.T())
@@ -533,7 +644,7 @@ func (s *OrgTestSuite) TestFindOrgPoolManager() {
func (s *OrgTestSuite) TestFindOrgPoolManagerFetchPoolMgrFailed() {
s.Fixtures.PoolMgrCtrlMock.On("GetOrgPoolManager", mock.AnythingOfType("params.Organization")).Return(s.Fixtures.PoolMgrMock, s.Fixtures.ErrMock)
- _, err := s.Runner.findOrgPoolManager(s.Fixtures.StoreOrgs["test-org-1"].Name)
+ _, err := s.Runner.findOrgPoolManager(s.Fixtures.StoreOrgs["test-org-1"].Name, s.Fixtures.StoreOrgs["test-org-1"].Endpoint.Name)
s.Fixtures.PoolMgrMock.AssertExpectations(s.T())
s.Fixtures.PoolMgrCtrlMock.AssertExpectations(s.T())
diff --git a/runner/pool/cache.go b/runner/pool/cache.go
new file mode 100644
index 00000000..5a3a3c8c
--- /dev/null
+++ b/runner/pool/cache.go
@@ -0,0 +1,75 @@
+package pool
+
+import (
+ "sort"
+ "strings"
+ "sync"
+ "sync/atomic"
+
+ runnerErrors "github.com/cloudbase/garm-provider-common/errors"
+ "github.com/cloudbase/garm/params"
+)
+
+// poolCacheStore hands out pools for a given tag set according to the
+// configured balancing strategy.
+type poolCacheStore interface {
+ Next() (params.Pool, error)
+ Reset()
+ Len() int
+}
+
+type poolRoundRobin struct {
+ pools []params.Pool
+ next uint32
+}
+
+// Next returns the next pool in round-robin order. The counter is advanced
+// atomically, so Next is safe for concurrent use.
+func (p *poolRoundRobin) Next() (params.Pool, error) {
+ if len(p.pools) == 0 {
+ return params.Pool{}, runnerErrors.ErrNoPoolsAvailable
+ }
+
+ n := atomic.AddUint32(&p.next, 1)
+ return p.pools[(int(n)-1)%len(p.pools)], nil
+}
+
+func (p *poolRoundRobin) Len() int {
+ return len(p.pools)
+}
+
+func (p *poolRoundRobin) Reset() {
+ atomic.StoreUint32(&p.next, 0)
+}
+
+type poolsForTags struct {
+ pools sync.Map
+ poolCacheType params.PoolBalancerType
+}
+
+// Get returns the cached pool store for the given tag set. The tags are
+// sorted in place to derive a canonical cache key.
+func (p *poolsForTags) Get(tags []string) (poolCacheStore, bool) {
+ sort.Strings(tags)
+ key := strings.Join(tags, "^")
+
+ v, ok := p.pools.Load(key)
+ if !ok {
+ return nil, false
+ }
+ poolCache := v.(*poolRoundRobin)
+ if p.poolCacheType == params.PoolBalancerTypePack {
+ // When we service a list of jobs, we want to try each pool in turn
+ // for each job. Pools are sorted by priority so we always start from the
+ // highest priority pool and move on to the next if the first one is full.
+ poolCache.Reset()
+ }
+ return poolCache, true
+}
+
+// Add caches the given pools, sorted by descending priority, under the
+// canonical key for tags. If an entry already exists for that key, the
+// existing entry is returned unchanged (LoadOrStore semantics).
+func (p *poolsForTags) Add(tags []string, pools []params.Pool) poolCacheStore {
+ sort.Slice(pools, func(i, j int) bool {
+ return pools[i].Priority > pools[j].Priority
+ })
+
+ sort.Strings(tags)
+ key := strings.Join(tags, "^")
+
+ poolRR := &poolRoundRobin{pools: pools}
+ v, _ := p.pools.LoadOrStore(key, poolRR)
+ return v.(*poolRoundRobin)
+}
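
A minimal, illustrative test of the behavior above (not part of this change; it assumes a _test.go file in package pool, and that params.Pool carries the string ID and numeric Priority fields referenced elsewhere in this diff): with the round-robin balancer, Add sorts pools by descending priority and Next cycles through them.

package pool

import (
	"testing"

	"github.com/cloudbase/garm/params"
)

func TestPoolRoundRobinOrder(t *testing.T) {
	p := &poolsForTags{poolCacheType: params.PoolBalancerTypeRoundRobin}
	pools := []params.Pool{
		{ID: "low", Priority: 1},
		{ID: "high", Priority: 10},
	}
	store := p.Add([]string{"linux", "arm64"}, pools)

	var got []string
	for i := 0; i < 3; i++ {
		pool, err := store.Next()
		if err != nil {
			t.Fatalf("unexpected error: %v", err)
		}
		got = append(got, pool.ID)
	}
	// Pools are sorted by priority on Add, so the cycle starts at "high".
	if got[0] != "high" || got[1] != "low" || got[2] != "high" {
		t.Fatalf("unexpected order: %v", got)
	}
}
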
diff --git a/runner/pool/common.go b/runner/pool/common.go
new file mode 100644
index 00000000..a41e034d
--- /dev/null
+++ b/runner/pool/common.go
@@ -0,0 +1,28 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package pool
+
+// RunnerLabels describes a single label attached to a runner, as returned
+// by the forge API.
+type RunnerLabels struct {
+ ID int64 `json:"id,omitempty"`
+ Name string `json:"name,omitempty"`
+ Type string `json:"type,omitempty"`
+}
+
+// forgeRunner is a minimal, forge-agnostic view of a self-hosted runner.
+type forgeRunner struct {
+ ID int64 `json:"id,omitempty"`
+ Name string `json:"name,omitempty"`
+ Status string `json:"status,omitempty"`
+ Labels []RunnerLabels `json:"labels,omitempty"`
+}
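
For reference, a sketch of decoding a runner payload into the type above; the payload values and the helper itself are illustrative only, and encoding/json and fmt are assumed to be imported:

// Illustrative only: decode a forge runner payload, e.g.
// {"id": 1, "name": "garm-abc", "status": "online",
//  "labels": [{"name": "self-hosted", "type": "read-only"}]}.
func decodeRunner(data []byte) (forgeRunner, error) {
	var runner forgeRunner
	if err := json.Unmarshal(data, &runner); err != nil {
		return forgeRunner{}, fmt.Errorf("error decoding runner: %w", err)
	}
	return runner, nil
}
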
diff --git a/runner/pool/enterprise.go b/runner/pool/enterprise.go
deleted file mode 100644
index 025c3415..00000000
--- a/runner/pool/enterprise.go
+++ /dev/null
@@ -1,233 +0,0 @@
-package pool
-
-import (
- "context"
- "fmt"
- "net/http"
- "strings"
- "sync"
-
- runnerErrors "github.com/cloudbase/garm-provider-common/errors"
- dbCommon "github.com/cloudbase/garm/database/common"
- "github.com/cloudbase/garm/params"
- "github.com/cloudbase/garm/runner/common"
- "github.com/cloudbase/garm/util"
-
- "github.com/google/go-github/v53/github"
- "github.com/pkg/errors"
-)
-
-// test that we implement PoolManager
-var _ poolHelper = &enterprise{}
-
-func NewEnterprisePoolManager(ctx context.Context, cfg params.Enterprise, cfgInternal params.Internal, providers map[string]common.Provider, store dbCommon.Store) (common.PoolManager, error) {
- ghc, ghEnterpriseClient, err := util.GithubClient(ctx, cfgInternal.OAuth2Token, cfgInternal.GithubCredentialsDetails)
- if err != nil {
- return nil, errors.Wrap(err, "getting github client")
- }
-
- wg := &sync.WaitGroup{}
- keyMuxes := &keyMutex{}
-
- helper := &enterprise{
- cfg: cfg,
- cfgInternal: cfgInternal,
- ctx: ctx,
- ghcli: ghc,
- ghcEnterpriseCli: ghEnterpriseClient,
- id: cfg.ID,
- store: store,
- }
-
- repo := &basePoolManager{
- ctx: ctx,
- store: store,
- providers: providers,
- controllerID: cfgInternal.ControllerID,
- quit: make(chan struct{}),
- helper: helper,
- credsDetails: cfgInternal.GithubCredentialsDetails,
- wg: wg,
- keyMux: keyMuxes,
- }
- return repo, nil
-}
-
-type enterprise struct {
- cfg params.Enterprise
- cfgInternal params.Internal
- ctx context.Context
- ghcli common.GithubClient
- ghcEnterpriseCli common.GithubEnterpriseClient
- id string
- store dbCommon.Store
-
- mux sync.Mutex
-}
-
-func (r *enterprise) GithubCLI() common.GithubClient {
- return r.ghcli
-}
-
-func (e *enterprise) PoolType() params.PoolType {
- return params.EnterprisePool
-}
-
-func (r *enterprise) GetRunnerInfoFromWorkflow(job params.WorkflowJob) (params.RunnerInfo, error) {
- if err := r.ValidateOwner(job); err != nil {
- return params.RunnerInfo{}, errors.Wrap(err, "validating owner")
- }
- workflow, ghResp, err := r.ghcli.GetWorkflowJobByID(r.ctx, job.Repository.Owner.Login, job.Repository.Name, job.WorkflowJob.ID)
- if err != nil {
- if ghResp != nil && ghResp.StatusCode == http.StatusUnauthorized {
- return params.RunnerInfo{}, errors.Wrap(runnerErrors.ErrUnauthorized, "fetching workflow info")
- }
- return params.RunnerInfo{}, errors.Wrap(err, "fetching workflow info")
- }
-
- if workflow.RunnerName != nil {
- return params.RunnerInfo{
- Name: *workflow.RunnerName,
- Labels: workflow.Labels,
- }, nil
- }
- return params.RunnerInfo{}, fmt.Errorf("failed to find runner name from workflow")
-}
-
-func (r *enterprise) UpdateState(param params.UpdatePoolStateParams) error {
- r.mux.Lock()
- defer r.mux.Unlock()
-
- r.cfg.WebhookSecret = param.WebhookSecret
- if param.InternalConfig != nil {
- r.cfgInternal = *param.InternalConfig
- }
-
- ghc, ghcEnterprise, err := util.GithubClient(r.ctx, r.GetGithubToken(), r.cfgInternal.GithubCredentialsDetails)
- if err != nil {
- return errors.Wrap(err, "getting github client")
- }
- r.ghcli = ghc
- r.ghcEnterpriseCli = ghcEnterprise
- return nil
-}
-
-func (r *enterprise) GetGithubToken() string {
- return r.cfgInternal.OAuth2Token
-}
-
-func (r *enterprise) GetGithubRunners() ([]*github.Runner, error) {
- opts := github.ListOptions{
- PerPage: 100,
- }
-
- var allRunners []*github.Runner
- for {
- runners, ghResp, err := r.ghcEnterpriseCli.ListRunners(r.ctx, r.cfg.Name, &opts)
- if err != nil {
- if ghResp != nil && ghResp.StatusCode == http.StatusUnauthorized {
- return nil, errors.Wrap(runnerErrors.ErrUnauthorized, "fetching runners")
- }
- return nil, errors.Wrap(err, "fetching runners")
- }
- allRunners = append(allRunners, runners.Runners...)
- if ghResp.NextPage == 0 {
- break
- }
- opts.Page = ghResp.NextPage
- }
- return allRunners, nil
-}
-
-func (r *enterprise) FetchTools() ([]*github.RunnerApplicationDownload, error) {
- r.mux.Lock()
- defer r.mux.Unlock()
- tools, ghResp, err := r.ghcEnterpriseCli.ListRunnerApplicationDownloads(r.ctx, r.cfg.Name)
- if err != nil {
- if ghResp != nil && ghResp.StatusCode == http.StatusUnauthorized {
- return nil, errors.Wrap(runnerErrors.ErrUnauthorized, "fetching runners")
- }
- return nil, errors.Wrap(err, "fetching runner tools")
- }
-
- return tools, nil
-}
-
-func (r *enterprise) FetchDbInstances() ([]params.Instance, error) {
- return r.store.ListEnterpriseInstances(r.ctx, r.id)
-}
-
-func (r *enterprise) RemoveGithubRunner(runnerID int64) (*github.Response, error) {
- return r.ghcEnterpriseCli.RemoveRunner(r.ctx, r.cfg.Name, runnerID)
-}
-
-func (r *enterprise) ListPools() ([]params.Pool, error) {
- pools, err := r.store.ListEnterprisePools(r.ctx, r.id)
- if err != nil {
- return nil, errors.Wrap(err, "fetching pools")
- }
- return pools, nil
-}
-
-func (r *enterprise) GithubURL() string {
- return fmt.Sprintf("%s/enterprises/%s", r.cfgInternal.GithubCredentialsDetails.BaseURL, r.cfg.Name)
-}
-
-func (r *enterprise) JwtToken() string {
- return r.cfgInternal.JWTSecret
-}
-
-func (r *enterprise) GetGithubRegistrationToken() (string, error) {
- tk, ghResp, err := r.ghcEnterpriseCli.CreateRegistrationToken(r.ctx, r.cfg.Name)
-
- if err != nil {
- if ghResp != nil && ghResp.StatusCode == http.StatusUnauthorized {
- return "", errors.Wrap(runnerErrors.ErrUnauthorized, "fetching registration token")
- }
- return "", errors.Wrap(err, "creating runner token")
- }
- return *tk.Token, nil
-}
-
-func (r *enterprise) String() string {
- return r.cfg.Name
-}
-
-func (r *enterprise) WebhookSecret() string {
- return r.cfg.WebhookSecret
-}
-
-func (r *enterprise) GetCallbackURL() string {
- return r.cfgInternal.InstanceCallbackURL
-}
-
-func (r *enterprise) GetMetadataURL() string {
- return r.cfgInternal.InstanceMetadataURL
-}
-
-func (r *enterprise) FindPoolByTags(labels []string) (params.Pool, error) {
- pool, err := r.store.FindEnterprisePoolByTags(r.ctx, r.id, labels)
- if err != nil {
- return params.Pool{}, errors.Wrap(err, "fetching suitable pool")
- }
- return pool, nil
-}
-
-func (r *enterprise) GetPoolByID(poolID string) (params.Pool, error) {
- pool, err := r.store.GetEnterprisePool(r.ctx, r.id, poolID)
- if err != nil {
- return params.Pool{}, errors.Wrap(err, "fetching pool")
- }
- return pool, nil
-}
-
-func (r *enterprise) ValidateOwner(job params.WorkflowJob) error {
- if !strings.EqualFold(job.Enterprise.Slug, r.cfg.Name) {
- return runnerErrors.NewBadRequestError("job not meant for this pool manager")
- }
- return nil
-}
-
-func (r *enterprise) ID() string {
- return r.id
-}
diff --git a/runner/pool/interfaces.go b/runner/pool/interfaces.go
deleted file mode 100644
index e707cdae..00000000
--- a/runner/pool/interfaces.go
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2022 Cloudbase Solutions SRL
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package pool
-
-import (
- "github.com/cloudbase/garm/params"
- "github.com/cloudbase/garm/runner/common"
-
- "github.com/google/go-github/v53/github"
-)
-
-type poolHelper interface {
- GetGithubToken() string
- GetGithubRunners() ([]*github.Runner, error)
- GetGithubRegistrationToken() (string, error)
- GetRunnerInfoFromWorkflow(job params.WorkflowJob) (params.RunnerInfo, error)
- RemoveGithubRunner(runnerID int64) (*github.Response, error)
- FetchTools() ([]*github.RunnerApplicationDownload, error)
-
- GithubCLI() common.GithubClient
-
- FetchDbInstances() ([]params.Instance, error)
- ListPools() ([]params.Pool, error)
- GithubURL() string
- JwtToken() string
- String() string
- GetCallbackURL() string
- GetMetadataURL() string
- FindPoolByTags(labels []string) (params.Pool, error)
- GetPoolByID(poolID string) (params.Pool, error)
- ValidateOwner(job params.WorkflowJob) error
- UpdateState(param params.UpdatePoolStateParams) error
- WebhookSecret() string
- ID() string
- PoolType() params.PoolType
-}
diff --git a/runner/pool/organization.go b/runner/pool/organization.go
deleted file mode 100644
index a8a6ed9d..00000000
--- a/runner/pool/organization.go
+++ /dev/null
@@ -1,246 +0,0 @@
-// Copyright 2022 Cloudbase Solutions SRL
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package pool
-
-import (
- "context"
- "fmt"
- "net/http"
- "strings"
- "sync"
-
- runnerErrors "github.com/cloudbase/garm-provider-common/errors"
- dbCommon "github.com/cloudbase/garm/database/common"
- "github.com/cloudbase/garm/params"
- "github.com/cloudbase/garm/runner/common"
- "github.com/cloudbase/garm/util"
-
- "github.com/google/go-github/v53/github"
- "github.com/pkg/errors"
-)
-
-// test that we implement PoolManager
-var _ poolHelper = &organization{}
-
-func NewOrganizationPoolManager(ctx context.Context, cfg params.Organization, cfgInternal params.Internal, providers map[string]common.Provider, store dbCommon.Store) (common.PoolManager, error) {
- ghc, _, err := util.GithubClient(ctx, cfgInternal.OAuth2Token, cfgInternal.GithubCredentialsDetails)
- if err != nil {
- return nil, errors.Wrap(err, "getting github client")
- }
-
- wg := &sync.WaitGroup{}
- keyMuxes := &keyMutex{}
-
- helper := &organization{
- cfg: cfg,
- cfgInternal: cfgInternal,
- ctx: ctx,
- ghcli: ghc,
- id: cfg.ID,
- store: store,
- }
-
- repo := &basePoolManager{
- ctx: ctx,
- store: store,
- providers: providers,
- controllerID: cfgInternal.ControllerID,
- quit: make(chan struct{}),
- helper: helper,
- credsDetails: cfgInternal.GithubCredentialsDetails,
- wg: wg,
- keyMux: keyMuxes,
- }
- return repo, nil
-}
-
-type organization struct {
- cfg params.Organization
- cfgInternal params.Internal
- ctx context.Context
- ghcli common.GithubClient
- id string
- store dbCommon.Store
-
- mux sync.Mutex
-}
-
-func (r *organization) GithubCLI() common.GithubClient {
- return r.ghcli
-}
-
-func (o *organization) PoolType() params.PoolType {
- return params.OrganizationPool
-}
-
-func (r *organization) GetRunnerInfoFromWorkflow(job params.WorkflowJob) (params.RunnerInfo, error) {
- if err := r.ValidateOwner(job); err != nil {
- return params.RunnerInfo{}, errors.Wrap(err, "validating owner")
- }
- workflow, ghResp, err := r.ghcli.GetWorkflowJobByID(r.ctx, job.Organization.Login, job.Repository.Name, job.WorkflowJob.ID)
- if err != nil {
- if ghResp != nil && ghResp.StatusCode == http.StatusUnauthorized {
- return params.RunnerInfo{}, errors.Wrap(runnerErrors.ErrUnauthorized, "fetching workflow info")
- }
- return params.RunnerInfo{}, errors.Wrap(err, "fetching workflow info")
- }
-
- if workflow.RunnerName != nil {
- return params.RunnerInfo{
- Name: *workflow.RunnerName,
- Labels: workflow.Labels,
- }, nil
- }
- return params.RunnerInfo{}, fmt.Errorf("failed to find runner name from workflow")
-}
-
-func (r *organization) UpdateState(param params.UpdatePoolStateParams) error {
- r.mux.Lock()
- defer r.mux.Unlock()
-
- r.cfg.WebhookSecret = param.WebhookSecret
- if param.InternalConfig != nil {
- r.cfgInternal = *param.InternalConfig
- }
-
- ghc, _, err := util.GithubClient(r.ctx, r.GetGithubToken(), r.cfgInternal.GithubCredentialsDetails)
- if err != nil {
- return errors.Wrap(err, "getting github client")
- }
- r.ghcli = ghc
- return nil
-}
-
-func (r *organization) GetGithubToken() string {
- return r.cfgInternal.OAuth2Token
-}
-
-func (r *organization) GetGithubRunners() ([]*github.Runner, error) {
- opts := github.ListOptions{
- PerPage: 100,
- }
-
- var allRunners []*github.Runner
- for {
- runners, ghResp, err := r.ghcli.ListOrganizationRunners(r.ctx, r.cfg.Name, &opts)
- if err != nil {
- if ghResp != nil && ghResp.StatusCode == http.StatusUnauthorized {
- return nil, errors.Wrap(runnerErrors.ErrUnauthorized, "fetching runners")
- }
- return nil, errors.Wrap(err, "fetching runners")
- }
- allRunners = append(allRunners, runners.Runners...)
- if ghResp.NextPage == 0 {
- break
- }
- opts.Page = ghResp.NextPage
- }
-
- return allRunners, nil
-}
-
-func (r *organization) FetchTools() ([]*github.RunnerApplicationDownload, error) {
- r.mux.Lock()
- defer r.mux.Unlock()
- tools, ghResp, err := r.ghcli.ListOrganizationRunnerApplicationDownloads(r.ctx, r.cfg.Name)
- if err != nil {
- if ghResp != nil && ghResp.StatusCode == http.StatusUnauthorized {
- return nil, errors.Wrap(runnerErrors.ErrUnauthorized, "fetching tools")
- }
- return nil, errors.Wrap(err, "fetching runner tools")
- }
-
- return tools, nil
-}
-
-func (r *organization) FetchDbInstances() ([]params.Instance, error) {
- return r.store.ListOrgInstances(r.ctx, r.id)
-}
-
-func (r *organization) RemoveGithubRunner(runnerID int64) (*github.Response, error) {
- return r.ghcli.RemoveOrganizationRunner(r.ctx, r.cfg.Name, runnerID)
-}
-
-func (r *organization) ListPools() ([]params.Pool, error) {
- pools, err := r.store.ListOrgPools(r.ctx, r.id)
- if err != nil {
- return nil, errors.Wrap(err, "fetching pools")
- }
- return pools, nil
-}
-
-func (r *organization) GithubURL() string {
- return fmt.Sprintf("%s/%s", r.cfgInternal.GithubCredentialsDetails.BaseURL, r.cfg.Name)
-}
-
-func (r *organization) JwtToken() string {
- return r.cfgInternal.JWTSecret
-}
-
-func (r *organization) GetGithubRegistrationToken() (string, error) {
- tk, ghResp, err := r.ghcli.CreateOrganizationRegistrationToken(r.ctx, r.cfg.Name)
-
- if err != nil {
- if ghResp != nil && ghResp.StatusCode == http.StatusUnauthorized {
- return "", errors.Wrap(runnerErrors.ErrUnauthorized, "fetching token")
- }
-
- return "", errors.Wrap(err, "creating runner token")
- }
- return *tk.Token, nil
-}
-
-func (r *organization) String() string {
- return r.cfg.Name
-}
-
-func (r *organization) WebhookSecret() string {
- return r.cfg.WebhookSecret
-}
-
-func (r *organization) GetCallbackURL() string {
- return r.cfgInternal.InstanceCallbackURL
-}
-
-func (r *organization) GetMetadataURL() string {
- return r.cfgInternal.InstanceMetadataURL
-}
-
-func (r *organization) FindPoolByTags(labels []string) (params.Pool, error) {
- pool, err := r.store.FindOrganizationPoolByTags(r.ctx, r.id, labels)
- if err != nil {
- return params.Pool{}, errors.Wrap(err, "fetching suitable pool")
- }
- return pool, nil
-}
-
-func (r *organization) GetPoolByID(poolID string) (params.Pool, error) {
- pool, err := r.store.GetOrganizationPool(r.ctx, r.id, poolID)
- if err != nil {
- return params.Pool{}, errors.Wrap(err, "fetching pool")
- }
- return pool, nil
-}
-
-func (r *organization) ValidateOwner(job params.WorkflowJob) error {
- if !strings.EqualFold(job.Organization.Login, r.cfg.Name) {
- return runnerErrors.NewBadRequestError("job not meant for this pool manager")
- }
- return nil
-}
-
-func (r *organization) ID() string {
- return r.id
-}
diff --git a/runner/pool/pool.go b/runner/pool/pool.go
index a2516605..eecb500a 100644
--- a/runner/pool/pool.go
+++ b/runner/pool/pool.go
@@ -16,142 +16,215 @@ package pool
import (
"context"
+ "crypto/rand"
+ "errors"
"fmt"
- "log"
+ "log/slog"
"math"
+ "math/big"
"net/http"
"strconv"
"strings"
"sync"
"time"
- commonParams "github.com/cloudbase/garm-provider-common/params"
+ "github.com/google/go-github/v72/github"
+ "github.com/google/uuid"
+ "golang.org/x/sync/errgroup"
runnerErrors "github.com/cloudbase/garm-provider-common/errors"
+ commonParams "github.com/cloudbase/garm-provider-common/params"
"github.com/cloudbase/garm-provider-common/util"
"github.com/cloudbase/garm/auth"
+ "github.com/cloudbase/garm/cache"
dbCommon "github.com/cloudbase/garm/database/common"
+ "github.com/cloudbase/garm/database/watcher"
+ "github.com/cloudbase/garm/locking"
"github.com/cloudbase/garm/params"
"github.com/cloudbase/garm/runner/common"
-
- "github.com/google/go-github/v53/github"
- "github.com/google/uuid"
- "github.com/pkg/errors"
- "golang.org/x/sync/errgroup"
+ garmUtil "github.com/cloudbase/garm/util"
+ ghClient "github.com/cloudbase/garm/util/github"
+ "github.com/cloudbase/garm/util/github/scalesets"
)
var (
- poolIDLabelprefix = "runner-pool-id:"
- controllerLabelPrefix = "runner-controller-id:"
+ poolIDLabelprefix = "runner-pool-id"
+ controllerLabelPrefix = "runner-controller-id"
// We tag runners that have been spawned as a result of a queued job with the job ID
// that spawned them. There is no way to guarantee that the runner spawned in response to a particular
// job will be picked up by that job. We mark them so that, in the very likely event that the runner
// has picked up a different job, we can clear the lock on the job that spawned it.
// The job it picked up would already be transitioned to in_progress so it will be ignored by the
// consume loop.
- jobLabelPrefix = "in_response_to_job:"
+ jobLabelPrefix = "in_response_to_job"
)
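
Note that the trailing ":" was dropped from these prefixes, so the separator is now applied wherever the label is composed; those call sites are outside this hunk. A hypothetical composer, purely to illustrate the shape (the helper name and the separator choice are assumptions):

// labelWithPrefix is illustrative only; the real composition sites and
// separator are not shown in this hunk.
func labelWithPrefix(prefix, value string) string {
	return fmt.Sprintf("%s=%s", prefix, value)
}
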
const (
// maxCreateAttempts is the number of times we will attempt to create an instance
// before we give up.
+ //
+ // nolint:golangci-lint,godox
// TODO: make this configurable(?)
maxCreateAttempts = 5
)
-type keyMutex struct {
- muxes sync.Map
-}
-
-func (k *keyMutex) TryLock(key string) bool {
- mux, _ := k.muxes.LoadOrStore(key, &sync.Mutex{})
- keyMux := mux.(*sync.Mutex)
- return keyMux.TryLock()
-}
-
-func (k *keyMutex) Unlock(key string, remove bool) {
- mux, ok := k.muxes.Load(key)
- if !ok {
- return
+func NewEntityPoolManager(ctx context.Context, entity params.ForgeEntity, instanceTokenGetter auth.InstanceTokenGetter, providers map[string]common.Provider, store dbCommon.Store) (common.PoolManager, error) {
+ ctx = garmUtil.WithSlogContext(
+ ctx,
+ slog.Any("pool_mgr", entity.String()),
+ slog.Any("endpoint", entity.Credentials.Endpoint.Name),
+ slog.Any("pool_type", entity.EntityType),
+ )
+ ghc, err := ghClient.Client(ctx, entity)
+ if err != nil {
+ return nil, fmt.Errorf("error getting github client: %w", err)
}
- keyMux := mux.(*sync.Mutex)
- if remove {
- k.Delete(key)
- }
- keyMux.Unlock()
-}
-func (k *keyMutex) Delete(key string) {
- k.muxes.Delete(key)
+ if entity.WebhookSecret == "" {
+ return nil, fmt.Errorf("webhook secret is empty")
+ }
+
+ controllerInfo, err := store.ControllerInfo()
+ if err != nil {
+ return nil, fmt.Errorf("error getting controller info: %w", err)
+ }
+
+ consumerID := fmt.Sprintf("pool-manager-%s-%s", entity.String(), entity.Credentials.Endpoint.Name)
+ slog.InfoContext(ctx, "registering consumer", "consumer_id", consumerID)
+ consumer, err := watcher.RegisterConsumer(
+ ctx, consumerID,
+ composeWatcherFilters(entity),
+ )
+ if err != nil {
+ return nil, fmt.Errorf("error registering consumer: %w", err)
+ }
+
+ wg := &sync.WaitGroup{}
+ backoff, err := locking.NewInstanceDeleteBackoff(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("error creating backoff: %w", err)
+ }
+
+ var scaleSetCli *scalesets.ScaleSetClient
+ if entity.Credentials.ForgeType == params.GithubEndpointType {
+ scaleSetCli, err = scalesets.NewClient(ghc)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get scalesets client: %w", err)
+ }
+ }
+ repo := &basePoolManager{
+ ctx: ctx,
+ consumerID: consumerID,
+ entity: entity,
+ ghcli: ghc,
+ scaleSetClient: scaleSetCli,
+ controllerInfo: controllerInfo,
+ instanceTokenGetter: instanceTokenGetter,
+
+ store: store,
+ providers: providers,
+ quit: make(chan struct{}),
+ wg: wg,
+ backoff: backoff,
+ consumer: consumer,
+ }
+ return repo, nil
}
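
Based on the pool-manager methods exercised elsewhere in this change (Start returning an error, Status returning params.PoolManagerStatus), a caller sketch for the constructor above; the wrapper function is illustrative and its arguments are assumed to be supplied by the caller:

func startPoolManager(ctx context.Context, entity params.ForgeEntity, tokenGetter auth.InstanceTokenGetter, providers map[string]common.Provider, store dbCommon.Store) error {
	mgr, err := NewEntityPoolManager(ctx, entity, tokenGetter, providers, store)
	if err != nil {
		return fmt.Errorf("error creating pool manager: %w", err)
	}
	if err := mgr.Start(); err != nil {
		return fmt.Errorf("error starting pool manager: %w", err)
	}
	slog.InfoContext(ctx, "pool manager started", "running", mgr.Status().IsRunning)
	return nil
}
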
type basePoolManager struct {
- ctx context.Context
- controllerID string
+ ctx context.Context
+ consumerID string
+ entity params.ForgeEntity
+ ghcli common.GithubClient
+ scaleSetClient *scalesets.ScaleSetClient
+ controllerInfo params.ControllerInfo
+ instanceTokenGetter auth.InstanceTokenGetter
+ consumer dbCommon.Consumer
store dbCommon.Store
providers map[string]common.Provider
- tools []*github.RunnerApplicationDownload
+ tools []commonParams.RunnerApplicationDownload
quit chan struct{}
- helper poolHelper
- credsDetails params.GithubCredentials
-
managerIsRunning bool
managerErrorReason string
- mux sync.Mutex
- wg *sync.WaitGroup
- keyMux *keyMutex
+ mux sync.Mutex
+ wg *sync.WaitGroup
+ backoff locking.InstanceDeleteBackoff
+}
+
+func (r *basePoolManager) getProviderBaseParams(pool params.Pool) common.ProviderBaseParams {
+ r.mux.Lock()
+ defer r.mux.Unlock()
+
+ return common.ProviderBaseParams{
+ PoolInfo: pool,
+ ControllerInfo: r.controllerInfo,
+ }
}
func (r *basePoolManager) HandleWorkflowJob(job params.WorkflowJob) error {
- if err := r.helper.ValidateOwner(job); err != nil {
- return errors.Wrap(err, "validating owner")
+ if err := r.ValidateOwner(job); err != nil {
+ slog.ErrorContext(r.ctx, "failed to validate owner", "error", err)
+ return fmt.Errorf("error validating owner: %w", err)
+ }
+
+	// We see events where the labels seem to be missing. We should ignore these,
+	// as we can't know whether we should handle them or not.
+ if len(job.WorkflowJob.Labels) == 0 {
+ slog.WarnContext(r.ctx, "job has no labels", "workflow_job", job.WorkflowJob.Name)
+ return nil
+ }
+
+ jobParams, err := r.paramsWorkflowJobToParamsJob(job)
+ if err != nil {
+ slog.ErrorContext(r.ctx, "failed to convert job to params", "error", err)
+ return fmt.Errorf("error converting job to params: %w", err)
}
- var jobParams params.Job
- var err error
var triggeredBy int64
defer func() {
+ if jobParams.WorkflowJobID == 0 {
+ return
+ }
 // we're updating the job in the database, regardless of whether it was successful
 // or whether it was meant for this pool. Github will send the same job data to all hierarchies
// that have been configured to work with garm. Updating the job at all levels should yield the same
// outcome in the db.
- if jobParams.ID == 0 {
- return
- }
-
- _, err := r.store.GetJobByID(r.ctx, jobParams.ID)
+ _, err := r.store.GetJobByID(r.ctx, jobParams.WorkflowJobID)
if err != nil {
if !errors.Is(err, runnerErrors.ErrNotFound) {
- r.log("failed to get job %d: %s", jobParams.ID, err)
+ slog.With(slog.Any("error", err)).ErrorContext(
+ r.ctx, "failed to get job",
+ "job_id", jobParams.WorkflowJobID)
return
}
// This job is new to us. Check if we have a pool that can handle it.
- potentialPools, err := r.store.FindPoolsMatchingAllTags(r.ctx, r.helper.PoolType(), r.helper.ID(), jobParams.Labels)
- if err != nil {
- r.log("failed to find pools matching tags %s: %s; not recording job", strings.Join(jobParams.Labels, ", "), err)
- return
- }
+ potentialPools := cache.FindPoolsMatchingAllTags(r.entity.ID, jobParams.Labels)
if len(potentialPools) == 0 {
- r.log("no pools matching tags %s; not recording job", strings.Join(jobParams.Labels, ", "))
+ slog.WarnContext(
+ r.ctx, "no pools matching tags; not recording job",
+ "requested_tags", strings.Join(jobParams.Labels, ", "))
return
}
}
if _, jobErr := r.store.CreateOrUpdateJob(r.ctx, jobParams); jobErr != nil {
- r.log("failed to update job %d: %s", jobParams.ID, jobErr)
+ slog.With(slog.Any("error", jobErr)).ErrorContext(
+ r.ctx, "failed to update job", "job_id", jobParams.WorkflowJobID)
}
- if triggeredBy != 0 && jobParams.ID != triggeredBy {
+ if triggeredBy != 0 && jobParams.WorkflowJobID != triggeredBy {
// The triggeredBy value is only set by the "in_progress" webhook. The runner that
// transitioned to in_progress was created as a result of a different queued job. If that job is
// still queued and we don't remove the lock, it will linger until the lock timeout is reached.
// That may take a long time, so we break the lock here and allow it to be scheduled again.
if err := r.store.BreakLockJobIsQueued(r.ctx, triggeredBy); err != nil {
- r.log("failed to break lock for job %d: %s", triggeredBy, err)
+ slog.With(slog.Any("error", err)).ErrorContext(
+ r.ctx, "failed to break lock for job",
+ "job_id", triggeredBy)
}
}
}()
@@ -160,20 +233,22 @@ func (r *basePoolManager) HandleWorkflowJob(job params.WorkflowJob) error {
case "queued":
// Record the job in the database. Queued jobs will be picked up by the consumeQueuedJobs() method
// when reconciling.
- jobParams, err = r.paramsWorkflowJobToParamsJob(job)
- if err != nil {
- return errors.Wrap(err, "converting job to params")
- }
case "completed":
- jobParams, err = r.paramsWorkflowJobToParamsJob(job)
- if err != nil {
- if errors.Is(err, runnerErrors.ErrNotFound) {
- // Unassigned jobs will have an empty runner_name.
- // We also need to ignore not found errors, as we may get a webhook regarding
- // a workflow that is handled by a runner at a different hierarchy level.
- return nil
- }
- return errors.Wrap(err, "converting job to params")
+ // If job was not assigned to a runner, we can ignore it.
+ if jobParams.RunnerName == "" {
+ slog.InfoContext(
+ r.ctx, "job never got assigned to a runner, ignoring")
+ return nil
+ }
+
+ fromCache, ok := cache.GetInstanceCache(jobParams.RunnerName)
+ if !ok {
+ return nil
+ }
+
+ if _, ok := cache.GetEntityPool(r.entity.ID, fromCache.PoolID); !ok {
+ slog.DebugContext(r.ctx, "instance belongs to a pool not managed by this entity", "pool_id", fromCache.PoolID)
+ return nil
}
// update instance workload state.
@@ -181,76 +256,85 @@ func (r *basePoolManager) HandleWorkflowJob(job params.WorkflowJob) error {
if errors.Is(err, runnerErrors.ErrNotFound) {
return nil
}
- r.log("failed to update runner %s status: %s", util.SanitizeLogEntry(jobParams.RunnerName), err)
- return errors.Wrap(err, "updating runner")
+ slog.With(slog.Any("error", err)).ErrorContext(
+ r.ctx, "failed to update runner status",
+ "runner_name", util.SanitizeLogEntry(jobParams.RunnerName))
+ return fmt.Errorf("error updating runner: %w", err)
}
- r.log("marking instance %s as pending_delete", util.SanitizeLogEntry(jobParams.RunnerName))
+ slog.DebugContext(
+ r.ctx, "marking instance as pending_delete",
+ "runner_name", util.SanitizeLogEntry(jobParams.RunnerName))
if _, err := r.setInstanceStatus(jobParams.RunnerName, commonParams.InstancePendingDelete, nil); err != nil {
if errors.Is(err, runnerErrors.ErrNotFound) {
return nil
}
- r.log("failed to update runner %s status: %s", util.SanitizeLogEntry(jobParams.RunnerName), err)
- return errors.Wrap(err, "updating runner")
+ slog.With(slog.Any("error", err)).ErrorContext(
+ r.ctx, "failed to update runner status",
+ "runner_name", util.SanitizeLogEntry(jobParams.RunnerName))
+ return fmt.Errorf("error updating runner: %w", err)
}
case "in_progress":
- jobParams, err = r.paramsWorkflowJobToParamsJob(job)
- if err != nil {
- if errors.Is(err, runnerErrors.ErrNotFound) {
- // This is most likely a runner we're not managing. If we define a repo from within an org
- // and also define that same org, we will get a hook from github from both the repo and the org
- // regarding the same workflow. We look for the runner in the database, and make sure it exists and is
- // part of a pool that this manager is responsible for. A not found error here will most likely mean
- // that we are not responsible for that runner, and we should ignore it.
- return nil
- }
- return errors.Wrap(err, "converting job to params")
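+ // Only act on runners we know about and that belong to a pool managed by
+ // this entity. Webhooks for the same workflow may arrive at multiple
+ // hierarchy levels (repo and org); events for runners we don't manage
+ // are ignored.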
+ fromCache, ok := cache.GetInstanceCache(jobParams.RunnerName)
+ if !ok {
+ slog.DebugContext(r.ctx, "instance not found in cache", "runner_name", jobParams.RunnerName)
+ return nil
}
+ pool, ok := cache.GetEntityPool(r.entity.ID, fromCache.PoolID)
+ if !ok {
+ slog.DebugContext(r.ctx, "instance belongs to a pool not managed by this entity", "pool_id", fromCache.PoolID)
+ return nil
+ }
// update instance workload state.
instance, err := r.setInstanceRunnerStatus(jobParams.RunnerName, params.RunnerActive)
if err != nil {
if errors.Is(err, runnerErrors.ErrNotFound) {
return nil
}
- r.log("failed to update runner %s status: %s", util.SanitizeLogEntry(jobParams.RunnerName), err)
- return errors.Wrap(err, "updating runner")
+ slog.With(slog.Any("error", err)).ErrorContext(
+ r.ctx, "failed to update runner status",
+ "runner_name", util.SanitizeLogEntry(jobParams.RunnerName))
+ return fmt.Errorf("error updating runner: %w", err)
}
// Set triggeredBy here so we break the lock on any potential queued job.
- triggeredBy = jobIdFromLabels(instance.AditionalLabels)
+ triggeredBy = jobIDFromLabels(instance.AditionalLabels)
// A runner has picked up the job, and is now running it. It may need to be replaced if the pool has
// a minimum number of idle runners configured.
- pool, err := r.store.GetPoolByID(r.ctx, instance.PoolID)
- if err != nil {
- return errors.Wrap(err, "getting pool")
- }
if err := r.ensureIdleRunnersForOnePool(pool); err != nil {
- r.log("error ensuring idle runners for pool %s: %s", pool.ID, err)
+ slog.With(slog.Any("error", err)).ErrorContext(
+ r.ctx, "error ensuring idle runners for pool",
+ "pool_id", pool.ID)
}
}
return nil
}
-func jobIdFromLabels(labels []string) int64 {
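+// jobIDFromLabels extracts the job ID from a runner label of the form
+// "<jobLabelPrefix><separator><id>". It strips the prefix plus one separator
+// character and parses the remainder as a base-10 int64, returning 0 if no
+// matching label exists or the value cannot be parsed.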
+func jobIDFromLabels(labels []string) int64 {
for _, lbl := range labels {
if strings.HasPrefix(lbl, jobLabelPrefix) {
- jobId, err := strconv.ParseInt(lbl[len(jobLabelPrefix):], 10, 64)
+ trimLength := min(len(jobLabelPrefix)+1, len(lbl))
+ jobID, err := strconv.ParseInt(lbl[trimLength:], 10, 64)
if err != nil {
return 0
}
- return jobId
+ return jobID
}
}
return 0
}
func (r *basePoolManager) startLoopForFunction(f func() error, interval time.Duration, name string, alwaysRun bool) {
- r.log("starting %s loop for %s", name, r.helper.String())
+ slog.InfoContext(
+ r.ctx, "starting loop for entity",
+ "loop_name", name)
ticker := time.NewTicker(interval)
r.wg.Add(1)
defer func() {
- r.log("%s loop exited for pool %s", name, r.helper.String())
+ slog.InfoContext(
+ r.ctx, "pool loop exited",
+ "loop_name", name)
ticker.Stop()
r.wg.Done()
}()
@@ -265,9 +349,11 @@ func (r *basePoolManager) startLoopForFunction(f func() error, interval time.Dur
select {
case <-ticker.C:
if err := f(); err != nil {
- r.log("error in loop %s: %q", name, err)
+ slog.With(slog.Any("error", err)).ErrorContext(
+ r.ctx, "error in loop",
+ "loop_name", name)
if errors.Is(err, runnerErrors.ErrUnauthorized) {
- r.setPoolRunningState(false, err.Error())
+ r.SetPoolRunningState(false, err.Error())
}
}
case <-r.ctx.Done():
@@ -286,59 +372,28 @@ func (r *basePoolManager) startLoopForFunction(f func() error, interval time.Dur
// this worker was stopped.
return
default:
- r.waitForTimeoutOrCanceled(common.BackoffTimer)
+ r.waitForTimeoutOrCancelled(common.BackoffTimer)
}
}
}
}
func (r *basePoolManager) updateTools() error {
- // Update tools cache.
- tools, err := r.helper.FetchTools()
+ tools, err := cache.GetGithubToolsCache(r.entity.ID)
if err != nil {
- r.log("failed to update tools for repo %s: %s", r.helper.String(), err)
- r.setPoolRunningState(false, err.Error())
- r.waitForTimeoutOrCanceled(common.BackoffTimer)
- return fmt.Errorf("failed to update tools for repo %s: %w", r.helper.String(), err)
+ slog.With(slog.Any("error", err)).ErrorContext(
+ r.ctx, "failed to update tools for entity", "entity", r.entity.String())
+ r.SetPoolRunningState(false, err.Error())
+ return fmt.Errorf("failed to update tools for entity %s: %w", r.entity.String(), err)
}
+
r.mux.Lock()
r.tools = tools
r.mux.Unlock()
- r.log("successfully updated tools")
- r.setPoolRunningState(true, "")
- return err
-}
-
-func controllerIDFromLabels(labels []string) string {
- for _, lbl := range labels {
- if strings.HasPrefix(lbl, controllerLabelPrefix) {
- return lbl[len(controllerLabelPrefix):]
- }
- }
- return ""
-}
-
-func labelsFromRunner(runner *github.Runner) []string {
- if runner == nil || runner.Labels == nil {
- return []string{}
- }
-
- var labels []string
- for _, val := range runner.Labels {
- if val == nil {
- continue
- }
- labels = append(labels, val.GetName())
- }
- return labels
-}
-
-// isManagedRunner returns true if labels indicate the runner belongs to a pool
-// this manager is responsible for.
-func (r *basePoolManager) isManagedRunner(labels []string) bool {
- runnerControllerID := controllerIDFromLabels(labels)
- return runnerControllerID == r.controllerID
+ slog.DebugContext(r.ctx, "successfully updated tools")
+ r.SetPoolRunningState(true, "")
+ return nil
}
// cleanupOrphanedProviderRunners compares runners in github with local runners and removes
@@ -348,56 +403,77 @@ func (r *basePoolManager) isManagedRunner(labels []string) bool {
// happens, github will remove the ephemeral worker and send a webhook our way.
// If we were offline and did not process the webhook, the instance will linger.
// We need to remove it from the provider and database.
-func (r *basePoolManager) cleanupOrphanedProviderRunners(runners []*github.Runner) error {
- dbInstances, err := r.helper.FetchDbInstances()
+func (r *basePoolManager) cleanupOrphanedProviderRunners(runners []forgeRunner) error {
+ dbInstances, err := r.store.ListEntityInstances(r.ctx, r.entity)
if err != nil {
- return errors.Wrap(err, "fetching instances from db")
+ return fmt.Errorf("error fetching instances from db: %w", err)
}
runnerNames := map[string]bool{}
for _, run := range runners {
- if !r.isManagedRunner(labelsFromRunner(run)) {
- r.log("runner %s is not managed by a pool belonging to %s", *run.Name, r.helper.String())
+ if !isManagedRunner(labelsFromRunner(run), r.controllerInfo.ControllerID.String()) {
+ slog.DebugContext(
+ r.ctx, "runner is not managed by a pool we manage",
+ "runner_name", run.Name)
continue
}
- runnerNames[*run.Name] = true
+ runnerNames[run.Name] = true
}
for _, instance := range dbInstances {
- lockAcquired := r.keyMux.TryLock(instance.Name)
- if !lockAcquired {
- r.log("failed to acquire lock for instance %s", instance.Name)
+ if instance.ScaleSetID != 0 {
+ // ignore scale set instances.
continue
}
- defer r.keyMux.Unlock(instance.Name, false)
- switch commonParams.InstanceStatus(instance.Status) {
+ lockAcquired := locking.TryLock(instance.Name, r.consumerID)
+ if !lockAcquired {
+ slog.DebugContext(
+ r.ctx, "failed to acquire lock for instance",
+ "runner_name", instance.Name)
+ continue
+ }
+ defer locking.Unlock(instance.Name, false)
+
+ switch instance.Status {
case commonParams.InstancePendingCreate,
- commonParams.InstancePendingDelete:
+ commonParams.InstancePendingDelete, commonParams.InstancePendingForceDelete:
// this instance is in the process of being created or is awaiting deletion.
 // Instances in pending_create did not get a chance to register themselves in
 // github, so we let them be for now.
continue
}
+ pool, err := r.store.GetEntityPool(r.ctx, r.entity, instance.PoolID)
+ if err != nil {
+ return fmt.Errorf("error fetching instance pool info: %w", err)
+ }
switch instance.RunnerStatus {
case params.RunnerPending, params.RunnerInstalling:
- // runner is still installing. We give it a chance to finish.
- r.log("runner %s is still installing, give it a chance to finish", instance.Name)
- continue
+ if time.Since(instance.UpdatedAt).Minutes() < float64(pool.RunnerTimeout()) {
+ // runner is still installing. We give it a chance to finish.
+ slog.DebugContext(
+ r.ctx, "runner is still installing, give it a chance to finish",
+ "runner_name", instance.Name)
+ continue
+ }
}
if time.Since(instance.UpdatedAt).Minutes() < 5 {
// instance was updated recently. We give it a chance to register itself in github.
- r.log("instance %s was updated recently, skipping check", instance.Name)
+ slog.DebugContext(
+ r.ctx, "instance was updated recently, skipping check",
+ "runner_name", instance.Name)
continue
}
if ok := runnerNames[instance.Name]; !ok {
// Set pending_delete on DB field. Allow consolidate() to remove it.
if _, err := r.setInstanceStatus(instance.Name, commonParams.InstancePendingDelete, nil); err != nil {
- r.log("failed to update runner %s status: %s", instance.Name, err)
- return errors.Wrap(err, "updating runner")
+ slog.With(slog.Any("error", err)).ErrorContext(
+ r.ctx, "failed to update runner",
+ "runner_name", instance.Name)
+ return fmt.Errorf("error updating runner: %w", err)
}
}
}
@@ -407,145 +483,171 @@ func (r *basePoolManager) cleanupOrphanedProviderRunners(runners []*github.Runne
// reapTimedOutRunners will mark as pending_delete any runner that has a status
// of "running" in the provider, but that has not registered with Github, and has
// received no new updates in the configured timeout interval.
-func (r *basePoolManager) reapTimedOutRunners(runners []*github.Runner) error {
- dbInstances, err := r.helper.FetchDbInstances()
+func (r *basePoolManager) reapTimedOutRunners(runners []forgeRunner) error {
+ dbInstances, err := r.store.ListEntityInstances(r.ctx, r.entity)
if err != nil {
- return errors.Wrap(err, "fetching instances from db")
+ return fmt.Errorf("error fetching instances from db: %w", err)
}
- runnersByName := map[string]*github.Runner{}
+ runnersByName := map[string]forgeRunner{}
for _, run := range runners {
- if !r.isManagedRunner(labelsFromRunner(run)) {
- r.log("runner %s is not managed by a pool belonging to %s", *run.Name, r.helper.String())
+ if !isManagedRunner(labelsFromRunner(run), r.controllerInfo.ControllerID.String()) {
+ slog.DebugContext(
+ r.ctx, "runner is not managed by a pool we manage",
+ "runner_name", run.Name)
continue
}
- runnersByName[*run.Name] = run
+ runnersByName[run.Name] = run
}
for _, instance := range dbInstances {
- r.log("attempting to lock instance %s", instance.Name)
- lockAcquired := r.keyMux.TryLock(instance.Name)
- if !lockAcquired {
- r.log("failed to acquire lock for instance %s", instance.Name)
+ if instance.ScaleSetID != 0 {
+ // ignore scale set instances.
continue
}
- defer r.keyMux.Unlock(instance.Name, false)
- pool, err := r.store.GetPoolByID(r.ctx, instance.PoolID)
+ slog.DebugContext(
+ r.ctx, "attempting to lock instance",
+ "runner_name", instance.Name)
+ lockAcquired := locking.TryLock(instance.Name, r.consumerID)
+ if !lockAcquired {
+ slog.DebugContext(
+ r.ctx, "failed to acquire lock for instance",
+ "runner_name", instance.Name)
+ continue
+ }
+ defer locking.Unlock(instance.Name, false)
+
+ pool, err := r.store.GetEntityPool(r.ctx, r.entity, instance.PoolID)
if err != nil {
- return errors.Wrap(err, "fetching instance pool info")
+ return fmt.Errorf("error fetching instance pool info: %w", err)
}
if time.Since(instance.UpdatedAt).Minutes() < float64(pool.RunnerTimeout()) {
continue
}
- // There are 2 cases (currently) where we consider a runner as timed out:
+ // There are 3 cases (currently) where we consider a runner as timed out:
// * The runner never joined github within the pool timeout
// * The runner managed to join github, but the setup process failed later and the runner
// never started on the instance.
- //
- // There are several steps in the user data that sets up the runner:
- // * Download and unarchive the runner from github (or used the cached version)
- // * Configure runner (connects to github). At this point the runner is seen as offline.
- // * Install the service
- // * Set SELinux context (if SELinux is enabled)
- // * Start the service (if successful, the runner will transition to "online")
- // * Get the runner ID
- //
- // If we fail getting the runner ID after it's started, garm will set the runner status to "failed",
- // even though, technically the runner is online and fully functional. This is why we check here for
- // both the runner status as reported by GitHub and the runner status as reported by the provider.
- // If the runner is "offline" and marked as "failed", it should be safe to reap it.
- if runner, ok := runnersByName[instance.Name]; !ok || (runner.GetStatus() == "offline" && instance.RunnerStatus == params.RunnerFailed) {
- r.log("reaping timed-out/failed runner %s", instance.Name)
- if err := r.ForceDeleteRunner(instance); err != nil {
- r.log("failed to update runner %s status: %s", instance.Name, err)
- return errors.Wrap(err, "updating runner")
+ // * A JIT config was created, but the runner never joined github.
+ if runner, ok := runnersByName[instance.Name]; !ok || runner.Status == "offline" {
+ slog.InfoContext(
+ r.ctx, "reaping timed-out/failed runner",
+ "runner_name", instance.Name)
+ if err := r.DeleteRunner(instance, false, false); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(
+ r.ctx, "failed to update runner status",
+ "runner_name", instance.Name)
+ return fmt.Errorf("error updating runner: %w", err)
}
}
}
return nil
}
-func instanceInList(instanceName string, instances []commonParams.ProviderInstance) (commonParams.ProviderInstance, bool) {
- for _, val := range instances {
- if val.Name == instanceName {
- return val, true
- }
- }
- return commonParams.ProviderInstance{}, false
-}
-
// cleanupOrphanedGithubRunners will forcefully remove any github runners that appear
// as offline and for which we no longer have a local instance.
// This may happen if someone manually deletes the instance in the provider. We need to
// first remove the instance from github, and then from our database.
-func (r *basePoolManager) cleanupOrphanedGithubRunners(runners []*github.Runner) error {
+func (r *basePoolManager) cleanupOrphanedGithubRunners(runners []forgeRunner) error {
poolInstanceCache := map[string][]commonParams.ProviderInstance{}
g, ctx := errgroup.WithContext(r.ctx)
for _, runner := range runners {
- if !r.isManagedRunner(labelsFromRunner(runner)) {
- r.log("runner %s is not managed by a pool belonging to %s", *runner.Name, r.helper.String())
+ if !isManagedRunner(labelsFromRunner(runner), r.controllerInfo.ControllerID.String()) {
+ slog.DebugContext(
+ r.ctx, "runner is not managed by a pool we manage",
+ "runner_name", runner.Name)
continue
}
- status := runner.GetStatus()
+ status := runner.Status
if status != "offline" {
// Runner is online. Ignore it.
continue
}
- dbInstance, err := r.store.GetInstanceByName(r.ctx, *runner.Name)
+ dbInstance, err := r.store.GetInstance(r.ctx, runner.Name)
if err != nil {
if !errors.Is(err, runnerErrors.ErrNotFound) {
- return errors.Wrap(err, "fetching instance from DB")
+ return fmt.Errorf("error fetching instance from DB: %w", err)
}
// We no longer have a DB entry for this instance, and the runner appears offline in github.
// Previous forceful removal may have failed?
- r.log("Runner %s has no database entry in garm, removing from github", *runner.Name)
- resp, err := r.helper.RemoveGithubRunner(*runner.ID)
- if err != nil {
+ slog.InfoContext(
+ r.ctx, "Runner has no database entry in garm, removing from github",
+ "runner_name", runner.Name)
+ if err := r.ghcli.RemoveEntityRunner(r.ctx, runner.ID); err != nil {
// Removed in the meantime?
- if resp != nil && resp.StatusCode == http.StatusNotFound {
+ if errors.Is(err, runnerErrors.ErrNotFound) {
continue
}
- return errors.Wrap(err, "removing runner")
+ return fmt.Errorf("error removing runner: %w", err)
}
continue
}
+ if dbInstance.ScaleSetID != 0 {
+ // ignore scale set instances.
+ continue
+ }
- switch commonParams.InstanceStatus(dbInstance.Status) {
+ switch dbInstance.Status {
case commonParams.InstancePendingDelete, commonParams.InstanceDeleting:
// already marked for deletion or is in the process of being deleted.
// Let consolidate take care of it.
continue
+ case commonParams.InstancePendingCreate, commonParams.InstanceCreating:
+ // instance is still being created. We give it a chance to finish.
+ slog.DebugContext(
+ r.ctx, "instance is still being created, give it a chance to finish",
+ "runner_name", dbInstance.Name)
+ continue
+ case commonParams.InstanceRunning:
+ // this check is not strictly needed, but can help avoid unnecessary strain on the provider.
+ // At worst, we will have a runner that is offline in github for 5 minutes before we reap it.
+ if time.Since(dbInstance.UpdatedAt).Minutes() < 5 {
+ // instance was updated recently. We give it a chance to register itself in github.
+ slog.DebugContext(
+ r.ctx, "instance was updated recently, skipping check",
+ "runner_name", dbInstance.Name)
+ continue
+ }
}
- pool, err := r.helper.GetPoolByID(dbInstance.PoolID)
+ pool, err := r.store.GetEntityPool(r.ctx, r.entity, dbInstance.PoolID)
if err != nil {
- return errors.Wrap(err, "fetching pool")
+ return fmt.Errorf("error fetching pool: %w", err)
}
// check if the provider still has the instance.
- provider, ok := r.providers[pool.ProviderName]
+ provider, ok := r.providers[dbInstance.ProviderName]
if !ok {
- return fmt.Errorf("unknown provider %s for pool %s", pool.ProviderName, pool.ID)
+ return fmt.Errorf("unknown provider %s for pool %s", dbInstance.ProviderName, dbInstance.PoolID)
}
var poolInstances []commonParams.ProviderInstance
- poolInstances, ok = poolInstanceCache[pool.ID]
+ poolInstances, ok = poolInstanceCache[dbInstance.PoolID]
if !ok {
- r.log("updating instances cache for pool %s", pool.ID)
- poolInstances, err = provider.ListInstances(r.ctx, pool.ID)
- if err != nil {
- return errors.Wrapf(err, "fetching instances for pool %s", pool.ID)
+ slog.DebugContext(
+ r.ctx, "updating instances cache for pool",
+ "pool_id", pool.ID)
+ listInstancesParams := common.ListInstancesParams{
+ ListInstancesV011: common.ListInstancesV011Params{
+ ProviderBaseParams: r.getProviderBaseParams(pool),
+ },
}
- poolInstanceCache[pool.ID] = poolInstances
+ poolInstances, err = provider.ListInstances(r.ctx, pool.ID, listInstancesParams)
+ if err != nil {
+ return fmt.Errorf("error fetching instances for pool %s: %w", dbInstance.PoolID, err)
+ }
+ poolInstanceCache[dbInstance.PoolID] = poolInstances
}
- lockAcquired := r.keyMux.TryLock(dbInstance.Name)
+ lockAcquired := locking.TryLock(dbInstance.Name, r.consumerID)
if !lockAcquired {
- r.log("failed to acquire lock for instance %s", dbInstance.Name)
+ slog.DebugContext(
+ r.ctx, "failed to acquire lock for instance",
+ "runner_name", dbInstance.Name)
continue
}
@@ -554,26 +656,31 @@ func (r *basePoolManager) cleanupOrphanedGithubRunners(runners []*github.Runner)
g.Go(func() error {
deleteMux := false
defer func() {
- r.keyMux.Unlock(dbInstance.Name, deleteMux)
+ locking.Unlock(dbInstance.Name, deleteMux)
}()
providerInstance, ok := instanceInList(dbInstance.Name, poolInstances)
if !ok {
// The runner instance is no longer on the provider, and it appears offline in github.
// It should be safe to force remove it.
- r.log("Runner instance for %s is no longer on the provider, removing from github", dbInstance.Name)
- resp, err := r.helper.RemoveGithubRunner(*runner.ID)
- if err != nil {
+ slog.InfoContext(
+ r.ctx, "Runner instance is no longer on the provider, removing from github",
+ "runner_name", dbInstance.Name)
+ if err := r.ghcli.RemoveEntityRunner(r.ctx, runner.ID); err != nil {
// Removed in the meantime?
- if resp != nil && resp.StatusCode == http.StatusNotFound {
- r.log("runner dissapeared from github")
+ if errors.Is(err, runnerErrors.ErrNotFound) {
+ slog.DebugContext(
+ r.ctx, "runner disappeared from github",
+ "runner_name", dbInstance.Name)
} else {
- return errors.Wrap(err, "removing runner from github")
+ return fmt.Errorf("error removing runner from github: %w", err)
}
}
// Remove the database entry for the runner.
- r.log("Removing %s from database", dbInstance.Name)
+ slog.InfoContext(
+ r.ctx, "Removing from database",
+ "runner_name", dbInstance.Name)
if err := r.store.DeleteInstance(ctx, dbInstance.PoolID, dbInstance.Name); err != nil {
- return errors.Wrap(err, "removing runner from database")
+ return fmt.Errorf("error removing runner from database: %w", err)
}
deleteMux = true
return nil
@@ -583,20 +690,29 @@ func (r *basePoolManager) cleanupOrphanedGithubRunners(runners []*github.Runner)
// instance is running, but github reports runner as offline. Log the event.
// This scenario may require manual intervention.
 // Perhaps it just came online and github did not yet change its status?
- r.log("instance %s is online but github reports runner as offline", dbInstance.Name)
+ slog.WarnContext(
+ r.ctx, "instance is online but github reports runner as offline",
+ "runner_name", dbInstance.Name)
return nil
- } else {
- r.log("instance %s was found in stopped state; starting", dbInstance.Name)
- //start the instance
- if err := provider.Start(r.ctx, dbInstance.ProviderID); err != nil {
- return errors.Wrapf(err, "starting instance %s", dbInstance.ProviderID)
- }
+ }
+
+ slog.InfoContext(
+ r.ctx, "instance was found in stopped state; starting",
+ "runner_name", dbInstance.Name)
+
+ startParams := common.StartParams{
+ StartV011: common.StartV011Params{
+ ProviderBaseParams: r.getProviderBaseParams(pool),
+ },
+ }
+ if err := provider.Start(r.ctx, dbInstance.ProviderID, startParams); err != nil {
+ return fmt.Errorf("error starting instance %s: %w", dbInstance.ProviderID, err)
}
return nil
})
}
if err := r.waitForErrorGroupOrContextCancelled(g); err != nil {
- return errors.Wrap(err, "removing orphaned github runners")
+ return fmt.Errorf("error removing orphaned github runners: %w", err)
}
return nil
}
@@ -620,41 +736,13 @@ func (r *basePoolManager) waitForErrorGroupOrContextCancelled(g *errgroup.Group)
}
}
-func (r *basePoolManager) fetchInstance(runnerName string) (params.Instance, error) {
- runner, err := r.store.GetInstanceByName(r.ctx, runnerName)
- if err != nil {
- return params.Instance{}, errors.Wrap(err, "fetching instance")
- }
-
- _, err = r.helper.GetPoolByID(runner.PoolID)
- if err != nil {
- return params.Instance{}, errors.Wrap(err, "fetching pool")
- }
-
- return runner, nil
-}
-
func (r *basePoolManager) setInstanceRunnerStatus(runnerName string, status params.RunnerStatus) (params.Instance, error) {
updateParams := params.UpdateInstanceParams{
RunnerStatus: status,
}
-
- instance, err := r.updateInstance(runnerName, updateParams)
+ instance, err := r.store.UpdateInstance(r.ctx, runnerName, updateParams)
if err != nil {
- return params.Instance{}, errors.Wrap(err, "updating runner state")
- }
- return instance, nil
-}
-
-func (r *basePoolManager) updateInstance(runnerName string, update params.UpdateInstanceParams) (params.Instance, error) {
- runner, err := r.fetchInstance(runnerName)
- if err != nil {
- return params.Instance{}, errors.Wrap(err, "fetching instance")
- }
-
- instance, err := r.store.UpdateInstance(r.ctx, runner.ID, update)
- if err != nil {
- return params.Instance{}, errors.Wrap(err, "updating runner state")
+ return params.Instance{}, fmt.Errorf("error updating runner state: %w", err)
}
return instance, nil
}
@@ -665,20 +753,36 @@ func (r *basePoolManager) setInstanceStatus(runnerName string, status commonPara
ProviderFault: providerFault,
}
- instance, err := r.updateInstance(runnerName, updateParams)
+ instance, err := r.store.UpdateInstance(r.ctx, runnerName, updateParams)
if err != nil {
- return params.Instance{}, errors.Wrap(err, "updating runner state")
+ return params.Instance{}, fmt.Errorf("error updating runner state: %w", err)
}
return instance, nil
}
-func (r *basePoolManager) AddRunner(ctx context.Context, poolID string, aditionalLabels []string) error {
- pool, err := r.helper.GetPoolByID(poolID)
+func (r *basePoolManager) AddRunner(ctx context.Context, poolID string, aditionalLabels []string) (err error) {
+ pool, err := r.store.GetEntityPool(r.ctx, r.entity, poolID)
if err != nil {
- return errors.Wrap(err, "fetching pool")
+ return fmt.Errorf("error fetching pool: %w", err)
+ }
+
+ provider, ok := r.providers[pool.ProviderName]
+ if !ok {
+ return fmt.Errorf("unknown provider %s for pool %s", pool.ProviderName, pool.ID)
}
name := fmt.Sprintf("%s-%s", pool.GetRunnerPrefix(), util.NewID())
+ labels := r.getLabelsForInstance(pool)
+
+ jitConfig := make(map[string]string)
+ var runner *github.Runner
+
+ if !provider.DisableJITConfig() && r.entity.Credentials.ForgeType != params.GiteaEndpointType {
+ jitConfig, runner, err = r.ghcli.GetEntityJITConfig(ctx, name, pool, labels)
+ if err != nil {
+ return fmt.Errorf("failed to generate JIT config: %w", err)
+ }
+ }
createParams := params.CreateInstanceParams{
Name: name,
@@ -686,18 +790,44 @@ func (r *basePoolManager) AddRunner(ctx context.Context, poolID string, aditiona
RunnerStatus: params.RunnerPending,
OSArch: pool.OSArch,
OSType: pool.OSType,
- CallbackURL: r.helper.GetCallbackURL(),
- MetadataURL: r.helper.GetMetadataURL(),
+ CallbackURL: r.controllerInfo.CallbackURL,
+ MetadataURL: r.controllerInfo.MetadataURL,
CreateAttempt: 1,
GitHubRunnerGroup: pool.GitHubRunnerGroup,
AditionalLabels: aditionalLabels,
+ JitConfiguration: jitConfig,
}
- _, err = r.store.CreateInstance(r.ctx, poolID, createParams)
- if err != nil {
- return errors.Wrap(err, "creating instance")
+ if runner != nil {
+ createParams.AgentID = runner.GetID()
}
+ instance, err := r.store.CreateInstance(r.ctx, poolID, createParams)
+ if err != nil {
+ return fmt.Errorf("error creating instance: %w", err)
+ }
+
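+ // Best effort cleanup: if we end up returning an error, remove the
+ // instance we created above and deregister any JIT runner from the forge.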
+ defer func() {
+ if err != nil {
+ if instance.ID != "" {
+ if err := r.DeleteRunner(instance, false, false); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(
+ ctx, "failed to cleanup instance",
+ "runner_name", instance.Name)
+ }
+ }
+
+ if runner != nil {
+ runnerCleanupErr := r.ghcli.RemoveEntityRunner(r.ctx, runner.GetID())
+ if runnerCleanupErr != nil {
+ slog.With(slog.Any("error", runnerCleanupErr)).ErrorContext(
+ ctx, "failed to remove runner",
+ "gh_runner_id", runner.GetID())
+ }
+ }
+ }
+ }()
+
return nil
}
@@ -710,26 +840,39 @@ func (r *basePoolManager) Status() params.PoolManagerStatus {
}
}
-func (r *basePoolManager) waitForTimeoutOrCanceled(timeout time.Duration) {
- r.log("sleeping for %.2f minutes", timeout.Minutes())
+func (r *basePoolManager) waitForTimeoutOrCancelled(timeout time.Duration) {
+ slog.DebugContext(
+ r.ctx, fmt.Sprintf("sleeping for %.2f minutes", timeout.Minutes()))
+ timer := time.NewTimer(timeout)
+ defer timer.Stop()
select {
- case <-time.After(timeout):
+ case <-timer.C:
case <-r.ctx.Done():
case <-r.quit:
}
}
-func (r *basePoolManager) setPoolRunningState(isRunning bool, failureReason string) {
+func (r *basePoolManager) SetPoolRunningState(isRunning bool, failureReason string) {
r.mux.Lock()
r.managerErrorReason = failureReason
r.managerIsRunning = isRunning
r.mux.Unlock()
}
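+// getLabelsForInstance returns the labels a new runner will be registered
+// with: the pool tags plus the controller and pool ID labels that GARM uses
+// to identify runners it manages.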
+func (r *basePoolManager) getLabelsForInstance(pool params.Pool) []string {
+ labels := []string{}
+ for _, tag := range pool.Tags {
+ labels = append(labels, tag.Name)
+ }
+ labels = append(labels, r.controllerLabel())
+ labels = append(labels, r.poolLabel(pool.ID))
+ return labels
+}
+
func (r *basePoolManager) addInstanceToProvider(instance params.Instance) error {
- pool, err := r.helper.GetPoolByID(instance.PoolID)
+ pool, err := r.store.GetEntityPool(r.ctx, r.entity, instance.PoolID)
if err != nil {
- return errors.Wrap(err, "fetching pool")
+ return fmt.Errorf("error fetching pool: %w", err)
}
provider, ok := r.providers[pool.ProviderName]
@@ -737,25 +880,19 @@ func (r *basePoolManager) addInstanceToProvider(instance params.Instance) error
return fmt.Errorf("unknown provider %s for pool %s", pool.ProviderName, pool.ID)
}
- labels := []string{}
- for _, tag := range pool.Tags {
- labels = append(labels, tag.Name)
- }
- labels = append(labels, r.controllerLabel())
- labels = append(labels, r.poolLabel(pool.ID))
-
jwtValidity := pool.RunnerTimeout()
- entity := r.helper.String()
- jwtToken, err := auth.NewInstanceJWTToken(instance, r.helper.JwtToken(), entity, pool.PoolType(), jwtValidity)
+ jwtToken, err := r.instanceTokenGetter.NewInstanceJWTToken(instance, r.entity, pool.PoolType(), jwtValidity)
if err != nil {
- return errors.Wrap(err, "fetching instance jwt token")
+ return fmt.Errorf("error fetching instance jwt token: %w", err)
}
+ hasJITConfig := len(instance.JitConfiguration) > 0
+
bootstrapArgs := commonParams.BootstrapInstance{
Name: instance.Name,
Tools: r.tools,
- RepoURL: r.helper.GithubURL(),
+ RepoURL: r.entity.ForgeURL(),
MetadataURL: instance.MetadataURL,
CallbackURL: instance.CallbackURL,
InstanceToken: jwtToken,
@@ -764,28 +901,47 @@ func (r *basePoolManager) addInstanceToProvider(instance params.Instance) error
Flavor: pool.Flavor,
Image: pool.Image,
ExtraSpecs: pool.ExtraSpecs,
- Labels: labels,
PoolID: instance.PoolID,
- CACertBundle: r.credsDetails.CABundle,
+ CACertBundle: r.entity.Credentials.CABundle,
GitHubRunnerGroup: instance.GitHubRunnerGroup,
+ JitConfigEnabled: hasJITConfig,
+ }
+
+ if !hasJITConfig {
+ // We still need the labels here for situations where we don't have a JIT config generated.
+ // This can happen if GARM is used against an instance of GHES older than version 3.10.
+ // The labels field should be ignored by providers if JIT config is enabled.
+ bootstrapArgs.Labels = r.getLabelsForInstance(pool)
}
var instanceIDToDelete string
defer func() {
if instanceIDToDelete != "" {
- if err := provider.DeleteInstance(r.ctx, instanceIDToDelete); err != nil {
+ deleteInstanceParams := common.DeleteInstanceParams{
+ DeleteInstanceV011: common.DeleteInstanceV011Params{
+ ProviderBaseParams: r.getProviderBaseParams(pool),
+ },
+ }
+ if err := provider.DeleteInstance(r.ctx, instanceIDToDelete, deleteInstanceParams); err != nil {
if !errors.Is(err, runnerErrors.ErrNotFound) {
- r.log("failed to cleanup instance: %s", instanceIDToDelete)
+ slog.With(slog.Any("error", err)).ErrorContext(
+ r.ctx, "failed to cleanup instance",
+ "provider_id", instanceIDToDelete)
}
}
}
}()
- providerInstance, err := provider.CreateInstance(r.ctx, bootstrapArgs)
+ createInstanceParams := common.CreateInstanceParams{
+ CreateInstanceV011: common.CreateInstanceV011Params{
+ ProviderBaseParams: r.getProviderBaseParams(pool),
+ },
+ }
+ providerInstance, err := provider.CreateInstance(r.ctx, bootstrapArgs, createInstanceParams)
if err != nil {
instanceIDToDelete = instance.Name
- return errors.Wrap(err, "creating instance")
+ return fmt.Errorf("error creating instance: %w", err)
}
if providerInstance.Status == commonParams.InstanceError {
@@ -796,46 +952,12 @@ func (r *basePoolManager) addInstanceToProvider(instance params.Instance) error
}
updateInstanceArgs := r.updateArgsFromProviderInstance(providerInstance)
- if _, err := r.store.UpdateInstance(r.ctx, instance.ID, updateInstanceArgs); err != nil {
- return errors.Wrap(err, "updating instance")
+ if _, err := r.store.UpdateInstance(r.ctx, instance.Name, updateInstanceArgs); err != nil {
+ return fmt.Errorf("error updating instance: %w", err)
}
return nil
}
-func (r *basePoolManager) getRunnerDetailsFromJob(job params.WorkflowJob) (params.RunnerInfo, error) {
- runnerInfo := params.RunnerInfo{
- Name: job.WorkflowJob.RunnerName,
- Labels: job.WorkflowJob.Labels,
- }
-
- var err error
- if job.WorkflowJob.RunnerName == "" {
- if job.WorkflowJob.Conclusion == "skipped" || job.WorkflowJob.Conclusion == "canceled" {
- // job was skipped or canceled before a runner was allocated. No point in continuing.
- return params.RunnerInfo{}, fmt.Errorf("job %d was skipped or canceled before a runner was allocated: %w", job.WorkflowJob.ID, runnerErrors.ErrNotFound)
- }
- // Runner name was not set in WorkflowJob by github. We can still attempt to
- // fetch the info we need, using the workflow run ID, from the API.
- r.log("runner name not found in workflow job, attempting to fetch from API")
- runnerInfo, err = r.helper.GetRunnerInfoFromWorkflow(job)
- if err != nil {
- return params.RunnerInfo{}, errors.Wrap(err, "fetching runner name from API")
- }
- }
-
- runnerDetails, err := r.store.GetInstanceByName(context.Background(), runnerInfo.Name)
- if err != nil {
- r.log("could not find runner details for %s", util.SanitizeLogEntry(runnerInfo.Name))
- return params.RunnerInfo{}, errors.Wrap(err, "fetching runner details")
- }
-
- if _, err := r.helper.GetPoolByID(runnerDetails.PoolID); err != nil {
- r.log("runner %s (pool ID: %s) does not belong to any pool we manage: %s", runnerDetails.Name, runnerDetails.PoolID, err)
- return params.RunnerInfo{}, errors.Wrap(err, "fetching pool for instance")
- }
- return runnerInfo, nil
-}
-
 // paramsWorkflowJobToParamsJob returns a params.Job from a params.WorkflowJob, and additionally determines
// if the runner belongs to this pool or not. It will always return a valid params.Job, even if it errs out.
// This allows us to still update the job in the database, even if we determined that it wasn't necessarily meant
@@ -852,11 +974,11 @@ func (r *basePoolManager) getRunnerDetailsFromJob(job params.WorkflowJob) (param
func (r *basePoolManager) paramsWorkflowJobToParamsJob(job params.WorkflowJob) (params.Job, error) {
asUUID, err := uuid.Parse(r.ID())
if err != nil {
- return params.Job{}, errors.Wrap(err, "parsing pool ID as UUID")
+ return params.Job{}, fmt.Errorf("error parsing pool ID as UUID: %w", err)
}
jobParams := params.Job{
- ID: job.WorkflowJob.ID,
+ WorkflowJobID: job.WorkflowJob.ID,
Action: job.Action,
RunID: job.WorkflowJob.RunID,
Status: job.WorkflowJob.Status,
@@ -865,6 +987,7 @@ func (r *basePoolManager) paramsWorkflowJobToParamsJob(job params.WorkflowJob) (
CompletedAt: job.WorkflowJob.CompletedAt,
Name: job.WorkflowJob.Name,
GithubRunnerID: job.WorkflowJob.RunnerID,
+ RunnerName: job.WorkflowJob.RunnerName,
RunnerGroupID: job.WorkflowJob.RunnerGroupID,
RunnerGroupName: job.WorkflowJob.RunnerGroupName,
RepositoryName: job.Repository.Name,
@@ -872,43 +995,26 @@ func (r *basePoolManager) paramsWorkflowJobToParamsJob(job params.WorkflowJob) (
Labels: job.WorkflowJob.Labels,
}
- runnerName := job.WorkflowJob.RunnerName
- if job.Action != "queued" && runnerName == "" {
- if job.WorkflowJob.Conclusion != "skipped" && job.WorkflowJob.Conclusion != "canceled" {
- // Runner name was not set in WorkflowJob by github. We can still attempt to fetch the info we need,
- // using the workflow run ID, from the API.
- // We may still get no runner name. In situations such as jobs being cancelled before a runner had the chance
- // to pick up the job, the runner name is not available from the API.
- runnerInfo, err := r.getRunnerDetailsFromJob(job)
- if err != nil && !errors.Is(err, runnerErrors.ErrNotFound) {
- return jobParams, errors.Wrap(err, "fetching runner details")
- }
- runnerName = runnerInfo.Name
- }
- }
-
- jobParams.RunnerName = runnerName
-
- switch r.helper.PoolType() {
- case params.EnterprisePool:
+ switch r.entity.EntityType {
+ case params.ForgeEntityTypeEnterprise:
jobParams.EnterpriseID = &asUUID
- case params.RepositoryPool:
+ case params.ForgeEntityTypeRepository:
jobParams.RepoID = &asUUID
- case params.OrganizationPool:
+ case params.ForgeEntityTypeOrganization:
jobParams.OrgID = &asUUID
default:
- return jobParams, errors.Errorf("unknown pool type: %s", r.helper.PoolType())
+ return jobParams, fmt.Errorf("unknown pool type: %s", r.entity.EntityType)
}
return jobParams, nil
}
func (r *basePoolManager) poolLabel(poolID string) string {
- return fmt.Sprintf("%s%s", poolIDLabelprefix, poolID)
+ return fmt.Sprintf("%s=%s", poolIDLabelprefix, poolID)
}
func (r *basePoolManager) controllerLabel() string {
- return fmt.Sprintf("%s%s", controllerLabelPrefix, r.controllerID)
+ return fmt.Sprintf("%s=%s", controllerLabelPrefix, r.controllerInfo.ControllerID.String())
}
func (r *basePoolManager) updateArgsFromProviderInstance(providerInstance commonParams.ProviderInstance) params.UpdateInstanceParams {
@@ -923,9 +1029,13 @@ func (r *basePoolManager) updateArgsFromProviderInstance(providerInstance common
}
func (r *basePoolManager) scaleDownOnePool(ctx context.Context, pool params.Pool) error {
- r.log("scaling down pool %s", pool.ID)
+ slog.DebugContext(
+ ctx, "scaling down pool",
+ "pool_id", pool.ID)
if !pool.Enabled {
- r.log("pool %s is disabled, skipping scale down", pool.ID)
+ slog.DebugContext(
+ ctx, "pool is disabled, skipping scale down",
+ "pool_id", pool.ID)
return nil
}
@@ -949,7 +1059,7 @@ func (r *basePoolManager) scaleDownOnePool(ctx context.Context, pool params.Pool
return nil
}
- surplus := float64(len(idleWorkers) - int(pool.MinIdleRunners))
+ surplus := float64(len(idleWorkers) - pool.MinIdleRunnersAsInt())
if surplus <= 0 {
return nil
@@ -967,22 +1077,52 @@ func (r *basePoolManager) scaleDownOnePool(ctx context.Context, pool params.Pool
for _, instanceToDelete := range idleWorkers[:numScaleDown] {
instanceToDelete := instanceToDelete
- lockAcquired := r.keyMux.TryLock(instanceToDelete.Name)
+ lockAcquired := locking.TryLock(instanceToDelete.Name, r.consumerID)
if !lockAcquired {
- r.log("failed to acquire lock for instance %s", instanceToDelete.Name)
+ slog.With(slog.Any("error", err)).ErrorContext(
+ ctx, "failed to acquire lock for instance",
+ "provider_id", instanceToDelete.Name)
continue
}
- defer r.keyMux.Unlock(instanceToDelete.Name, false)
+ defer locking.Unlock(instanceToDelete.Name, false)
g.Go(func() error {
- r.log("scaling down idle worker %s from pool %s\n", instanceToDelete.Name, pool.ID)
- if err := r.ForceDeleteRunner(instanceToDelete); err != nil {
+ slog.InfoContext(
+ ctx, "scaling down idle worker from pool",
+ "runner_name", instanceToDelete.Name,
+ "pool_id", pool.ID)
+ if err := r.DeleteRunner(instanceToDelete, false, false); err != nil {
return fmt.Errorf("failed to delete instance %s: %w", instanceToDelete.ID, err)
}
return nil
})
}
+ if numScaleDown > 0 {
+ // We just scaled down a runner for this pool. That means that if we have jobs that are
+ // still queued in our DB, and those jobs should match this pool but have not been picked
+ // up by a runner, they are most likely stale and can be removed. For now, we can simply
+ // remove jobs older than 10 minutes.
+ //
+ // nolint:golangci-lint,godox
+ // TODO: should probably allow aditional filters to list functions. Would help to filter by date
+ // instead of returning a bunch of results and filtering manually.
+ queued, err := r.store.ListEntityJobsByStatus(r.ctx, r.entity.EntityType, r.entity.ID, params.JobStatusQueued)
+ if err != nil && !errors.Is(err, runnerErrors.ErrNotFound) {
+ return fmt.Errorf("error listing queued jobs: %w", err)
+ }
+
+ for _, job := range queued {
+ if time.Since(job.CreatedAt).Minutes() > 10 && pool.HasRequiredLabels(job.Labels) {
+ if err := r.store.DeleteJob(ctx, job.WorkflowJobID); err != nil && !errors.Is(err, runnerErrors.ErrNotFound) {
+ slog.With(slog.Any("error", err)).ErrorContext(
+ ctx, "failed to delete job",
+ "job_id", job.WorkflowJobID)
+ }
+ }
+ }
+ }
+
if err := r.waitForErrorGroupOrContextCancelled(g); err != nil {
return fmt.Errorf("failed to scale down pool %s: %w", pool.ID, err)
}
@@ -999,7 +1139,7 @@ func (r *basePoolManager) addRunnerToPool(pool params.Pool, aditionalLabels []st
return fmt.Errorf("failed to list pool instances: %w", err)
}
- if poolInstanceCount >= int64(pool.MaxRunners) {
+ if poolInstanceCount >= int64(pool.MaxRunnersAsInt()) {
return fmt.Errorf("max workers (%d) reached for pool %s", pool.MaxRunners, pool.ID)
}
@@ -1017,11 +1157,13 @@ func (r *basePoolManager) ensureIdleRunnersForOnePool(pool params.Pool) error {
existingInstances, err := r.store.ListPoolInstances(r.ctx, pool.ID)
if err != nil {
return fmt.Errorf("failed to ensure minimum idle workers for pool %s: %w", pool.ID, err)
-
}
if uint(len(existingInstances)) >= pool.MaxRunners {
- r.log("max workers (%d) reached for pool %s, skipping idle worker creation", pool.MaxRunners, pool.ID)
+ slog.DebugContext(
+ r.ctx, "max workers reached for pool, skipping idle worker creation",
+ "max_runners", pool.MaxRunners,
+ "pool_id", pool.ID)
return nil
}
@@ -1033,20 +1175,27 @@ func (r *basePoolManager) ensureIdleRunnersForOnePool(pool params.Pool) error {
}
var required int
- if len(idleOrPendingWorkers) < int(pool.MinIdleRunners) {
+ if len(idleOrPendingWorkers) < pool.MinIdleRunnersAsInt() {
// get the needed delta.
- required = int(pool.MinIdleRunners) - len(idleOrPendingWorkers)
+ required = pool.MinIdleRunnersAsInt() - len(idleOrPendingWorkers)
projectedInstanceCount := len(existingInstances) + required
- if uint(projectedInstanceCount) > pool.MaxRunners {
+
+ var projected uint
+ if projectedInstanceCount > 0 {
+ projected = uint(projectedInstanceCount)
+ }
+ if projected > pool.MaxRunners {
// ensure we don't go above max workers
- delta := projectedInstanceCount - int(pool.MaxRunners)
- required = required - delta
+ delta := projectedInstanceCount - pool.MaxRunnersAsInt()
+ required -= delta
}
}
for i := 0; i < required; i++ {
- r.log("adding new idle worker to pool %s", pool.ID)
+ slog.InfoContext(
+ r.ctx, "adding new idle worker to pool",
+ "pool_id", pool.ID)
if err := r.AddRunner(r.ctx, pool.ID, nil); err != nil {
return fmt.Errorf("failed to add new instance for pool %s: %w", pool.ID, err)
}
@@ -1058,7 +1207,9 @@ func (r *basePoolManager) retryFailedInstancesForOnePool(ctx context.Context, po
if !pool.Enabled {
return nil
}
- r.log("running retry failed instances for pool %s", pool.ID)
+ slog.DebugContext(
+ ctx, "running retry failed instances for pool",
+ "pool_id", pool.ID)
existingInstances, err := r.store.ListPoolInstances(r.ctx, pool.ID)
if err != nil {
@@ -1067,6 +1218,8 @@ func (r *basePoolManager) retryFailedInstancesForOnePool(ctx context.Context, po
g, errCtx := errgroup.WithContext(ctx)
for _, instance := range existingInstances {
+ instance := instance
+
if instance.Status != commonParams.InstanceError {
continue
}
@@ -1074,21 +1227,30 @@ func (r *basePoolManager) retryFailedInstancesForOnePool(ctx context.Context, po
continue
}
- r.log("attempting to retry failed instance %s", instance.Name)
- lockAcquired := r.keyMux.TryLock(instance.Name)
+ slog.DebugContext(
+ ctx, "attempting to retry failed instance",
+ "runner_name", instance.Name)
+ lockAcquired := locking.TryLock(instance.Name, r.consumerID)
if !lockAcquired {
- r.log("failed to acquire lock for instance %s", instance.Name)
+ slog.DebugContext(
+ ctx, "failed to acquire lock for instance",
+ "runner_name", instance.Name)
continue
}
- instance := instance
g.Go(func() error {
- defer r.keyMux.Unlock(instance.Name, false)
+ defer locking.Unlock(instance.Name, false)
+ slog.DebugContext(
+ ctx, "attempting to clean up any previous instance",
+ "runner_name", instance.Name)
+ // nolint:golangci-lint,godox
// NOTE(gabriel-samfira): this is done in parallel. If there are many failed instances
// this has the potential to create many API requests to the target provider.
// TODO(gabriel-samfira): implement request throttling.
if err := r.deleteInstanceFromProvider(errCtx, instance); err != nil {
- r.log("failed to delete instance %s from provider: %s", instance.Name, err)
+ slog.With(slog.Any("error", err)).ErrorContext(
+ ctx, "failed to delete instance from provider",
+ "runner_name", instance.Name)
// Bail here, otherwise we risk creating multiple failing instances, and losing track
// of them. If Create instance failed to return a proper provider ID, we rely on the
// name to delete the instance. If we don't bail here, and end up with multiple
@@ -1098,20 +1260,28 @@ func (r *basePoolManager) retryFailedInstancesForOnePool(ctx context.Context, po
// which we would rather avoid.
return err
}
-
+ slog.DebugContext(
+ ctx, "cleanup of previously failed instance complete",
+ "runner_name", instance.Name)
+ // nolint:golangci-lint,godox
// TODO(gabriel-samfira): Incrementing CreateAttempt should be done within a transaction.
// It's fairly safe to do here (for now), as there should be no other code path that updates
// an instance in this state.
- var tokenFetched bool = false
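+ // Runners that use JIT configuration never fetch a registration token,
+ // so mark the token as already fetched for them.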
+ var tokenFetched bool = len(instance.JitConfiguration) > 0
updateParams := params.UpdateInstanceParams{
CreateAttempt: instance.CreateAttempt + 1,
TokenFetched: &tokenFetched,
Status: commonParams.InstancePendingCreate,
+ RunnerStatus: params.RunnerPending,
}
- r.log("queueing previously failed instance %s for retry", instance.Name)
+ slog.DebugContext(
+ ctx, "queueing previously failed instance for retry",
+ "runner_name", instance.Name)
// Set instance to pending create and wait for retry.
- if _, err := r.updateInstance(instance.Name, updateParams); err != nil {
- r.log("failed to update runner %s status: %s", instance.Name, err)
+ if _, err := r.store.UpdateInstance(r.ctx, instance.Name, updateParams); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(
+ ctx, "failed to update runner status",
+ "runner_name", instance.Name)
}
return nil
})
@@ -1123,10 +1293,7 @@ func (r *basePoolManager) retryFailedInstancesForOnePool(ctx context.Context, po
}
func (r *basePoolManager) retryFailedInstances() error {
- pools, err := r.helper.ListPools()
- if err != nil {
- return fmt.Errorf("error listing pools: %w", err)
- }
+ pools := cache.GetEntityPools(r.entity.ID)
g, ctx := errgroup.WithContext(r.ctx)
for _, pool := range pools {
pool := pool
@@ -1146,15 +1313,14 @@ func (r *basePoolManager) retryFailedInstances() error {
}
func (r *basePoolManager) scaleDown() error {
- pools, err := r.helper.ListPools()
- if err != nil {
- return fmt.Errorf("error listing pools: %w", err)
- }
+ pools := cache.GetEntityPools(r.entity.ID)
g, ctx := errgroup.WithContext(r.ctx)
for _, pool := range pools {
pool := pool
g.Go(func() error {
- r.log("running scale down for pool %s", pool.ID)
+ slog.DebugContext(
+ ctx, "running scale down for pool",
+ "pool_id", pool.ID)
return r.scaleDownOnePool(ctx, pool)
})
}
@@ -1165,11 +1331,7 @@ func (r *basePoolManager) scaleDown() error {
}
func (r *basePoolManager) ensureMinIdleRunners() error {
- pools, err := r.helper.ListPools()
- if err != nil {
- return fmt.Errorf("error listing pools: %w", err)
- }
-
+ pools := cache.GetEntityPools(r.entity.ID)
g, _ := errgroup.WithContext(r.ctx)
for _, pool := range pools {
pool := pool
@@ -1185,14 +1347,14 @@ func (r *basePoolManager) ensureMinIdleRunners() error {
}
func (r *basePoolManager) deleteInstanceFromProvider(ctx context.Context, instance params.Instance) error {
- pool, err := r.helper.GetPoolByID(instance.PoolID)
+ pool, err := r.store.GetEntityPool(r.ctx, r.entity, instance.PoolID)
if err != nil {
- return errors.Wrap(err, "fetching pool")
+ return fmt.Errorf("error fetching pool: %w", err)
}
- provider, ok := r.providers[pool.ProviderName]
+ provider, ok := r.providers[instance.ProviderName]
if !ok {
- return fmt.Errorf("unknown provider %s for pool %s", pool.ProviderName, pool.ID)
+ return fmt.Errorf("unknown provider %s for pool %s", instance.ProviderName, instance.PoolID)
}
identifier := instance.ProviderID
@@ -1202,68 +1364,147 @@ func (r *basePoolManager) deleteInstanceFromProvider(ctx context.Context, instan
identifier = instance.Name
}
- if err := provider.DeleteInstance(ctx, identifier); err != nil {
- return errors.Wrap(err, "removing instance")
+ slog.DebugContext(
+ ctx, "calling delete instance on provider",
+ "runner_name", instance.Name,
+ "provider_id", identifier)
+
+ deleteInstanceParams := common.DeleteInstanceParams{
+ DeleteInstanceV011: common.DeleteInstanceV011Params{
+ ProviderBaseParams: r.getProviderBaseParams(pool),
+ },
+ }
+ if err := provider.DeleteInstance(ctx, identifier, deleteInstanceParams); err != nil {
+ return fmt.Errorf("error removing instance: %w", err)
}
return nil
}
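+// sleepWithCancel sleeps for sleepTime and reports whether the wait was cut
+// short by the pool manager shutting down or its context being cancelled.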
+func (r *basePoolManager) sleepWithCancel(sleepTime time.Duration) (canceled bool) {
+ if sleepTime == 0 {
+ return false
+ }
+ timer := time.NewTimer(sleepTime)
+ defer timer.Stop()
+
+ select {
+ case <-timer.C:
+ return false
+ case <-r.quit:
+ case <-r.ctx.Done():
+ }
+ return true
+}
+
func (r *basePoolManager) deletePendingInstances() error {
- instances, err := r.helper.FetchDbInstances()
+ instances, err := r.store.ListEntityInstances(r.ctx, r.entity)
if err != nil {
return fmt.Errorf("failed to fetch instances from store: %w", err)
}
- r.log("removing instances in pending_delete")
+ slog.DebugContext(
+ r.ctx, "removing instances in pending_delete")
for _, instance := range instances {
- if instance.Status != commonParams.InstancePendingDelete {
+ if instance.ScaleSetID != 0 {
+ // instance is part of a scale set. Skip.
+ continue
+ }
+
+ if instance.Status != commonParams.InstancePendingDelete && instance.Status != commonParams.InstancePendingForceDelete {
// not in pending_delete status. Skip.
continue
}
- r.log("removing instance %s in pool %s", instance.Name, instance.PoolID)
- lockAcquired := r.keyMux.TryLock(instance.Name)
+ slog.InfoContext(
+ r.ctx, "removing instance from pool",
+ "runner_name", instance.Name,
+ "pool_id", instance.PoolID)
+ lockAcquired := locking.TryLock(instance.Name, r.consumerID)
if !lockAcquired {
- r.log("failed to acquire lock for instance %s", instance.Name)
+ slog.InfoContext(
+ r.ctx, "failed to acquire lock for instance",
+ "runner_name", instance.Name)
continue
}
- // Set the status to deleting before launching the goroutine that removes
- // the runner from the provider (which can take a long time).
- if _, err := r.setInstanceStatus(instance.Name, commonParams.InstanceDeleting, nil); err != nil {
- r.log("failed to update runner %s status: %q", instance.Name, err)
- r.keyMux.Unlock(instance.Name, false)
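+ // Respect the delete backoff for this instance; a runner that repeatedly
+ // fails to be removed is only retried once its backoff deadline passes.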
+ shouldProcess, deadline := r.backoff.ShouldProcess(instance.Name)
+ if !shouldProcess {
+ slog.DebugContext(
+ r.ctx, "backoff in effect for instance",
+ "runner_name", instance.Name, "deadline", deadline)
+ locking.Unlock(instance.Name, false)
continue
}
go func(instance params.Instance) (err error) {
+ // Prevent Thundering Herd. Should alleviate some of the database
+ // is locked errors in sqlite3.
+ num, err := rand.Int(rand.Reader, big.NewInt(2000))
+ if err != nil {
+ return fmt.Errorf("failed to generate random number: %w", err)
+ }
+ jitter := time.Duration(num.Int64()) * time.Millisecond
+ if canceled := r.sleepWithCancel(jitter); canceled {
+ return nil
+ }
+
+ currentStatus := instance.Status
deleteMux := false
defer func() {
- r.keyMux.Unlock(instance.Name, deleteMux)
+ locking.Unlock(instance.Name, deleteMux)
+ if deleteMux {
+ // deleteMux is set only when the instance was successfully removed.
+ // We can use it as a marker to signal that the backoff is no longer
+ // needed.
+ r.backoff.Delete(instance.Name)
+ }
}()
defer func(instance params.Instance) {
if err != nil {
- r.log("failed to remove instance %s: %s", instance.Name, err)
- // failed to remove from provider. Set the status back to pending_delete, which
- // will retry the operation.
- if _, err := r.setInstanceStatus(instance.Name, commonParams.InstancePendingDelete, nil); err != nil {
- r.log("failed to update runner %s status: %s", instance.Name, err)
+ slog.With(slog.Any("error", err)).ErrorContext(
+ r.ctx, "failed to remove instance",
+ "runner_name", instance.Name)
+ // failed to remove from provider. Set status to previous value, which will retry
+ // the operation.
+ if _, err := r.setInstanceStatus(instance.Name, currentStatus, []byte(err.Error())); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(
+ r.ctx, "failed to update runner status",
+ "runner_name", instance.Name)
}
+ r.backoff.RecordFailure(instance.Name)
}
}(instance)
- r.log("removing instance %s from provider", instance.Name)
+ if _, err := r.setInstanceStatus(instance.Name, commonParams.InstanceDeleting, nil); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(
+ r.ctx, "failed to update runner status",
+ "runner_name", instance.Name)
+ return err
+ }
+
+ slog.DebugContext(
+ r.ctx, "removing instance from provider",
+ "runner_name", instance.Name)
err = r.deleteInstanceFromProvider(r.ctx, instance)
if err != nil {
- return fmt.Errorf("failed to remove instance from provider: %w", err)
+ if currentStatus != commonParams.InstancePendingForceDelete {
+ return fmt.Errorf("failed to remove instance from provider: %w", err)
+ }
+ slog.With(slog.Any("error", err)).ErrorContext(
+ r.ctx, "failed to remove instance from provider (continuing anyway)",
+ "instance", instance.Name)
}
- r.log("removing instance %s from database", instance.Name)
+ slog.InfoContext(
+ r.ctx, "removing instance from database",
+ "runner_name", instance.Name)
if deleteErr := r.store.DeleteInstance(r.ctx, instance.PoolID, instance.Name); deleteErr != nil {
return fmt.Errorf("failed to delete instance from database: %w", deleteErr)
}
deleteMux = true
- r.log("instance %s was successfully removed", instance.Name)
+ slog.InfoContext(
+ r.ctx, "instance was successfully removed",
+ "runner_name", instance.Name)
return nil
}(instance) //nolint
}
@@ -1272,44 +1513,66 @@ func (r *basePoolManager) deletePendingInstances() error {
}
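
The r.backoff value used above (ShouldProcess, RecordFailure, Delete) is not defined in this diff. A minimal sketch of an exponential-backoff tracker with that shape, as an assumption about the interface rather than GARM's actual implementation:

```go
package pool

import (
	"sync"
	"time"
)

type backoffEntry struct {
	failures int
	lastTry  time.Time
}

// backoffTracker tracks per-key failures and computes a deadline before
// which the key should not be retried. Sketch only.
type backoffTracker struct {
	mu      sync.Mutex
	entries map[string]backoffEntry
	base    time.Duration
	max     time.Duration
}

func newBackoffTracker(base, max time.Duration) *backoffTracker {
	return &backoffTracker{entries: map[string]backoffEntry{}, base: base, max: max}
}

// ShouldProcess reports whether key may be retried now and, if not,
// the deadline after which it may be retried.
func (b *backoffTracker) ShouldProcess(key string) (bool, time.Time) {
	b.mu.Lock()
	defer b.mu.Unlock()
	e, ok := b.entries[key]
	if !ok || e.failures == 0 {
		return true, time.Time{}
	}
	shift := e.failures - 1
	if shift > 10 {
		shift = 10 // cap the exponent so the shift cannot overflow
	}
	wait := b.base << shift // exponential: base, 2*base, 4*base...
	if wait > b.max {
		wait = b.max
	}
	deadline := e.lastTry.Add(wait)
	return time.Now().After(deadline), deadline
}

// RecordFailure bumps the failure count for key.
func (b *backoffTracker) RecordFailure(key string) {
	b.mu.Lock()
	defer b.mu.Unlock()
	e := b.entries[key]
	e.failures++
	e.lastTry = time.Now()
	b.entries[key] = e
}

// Delete clears backoff state once key is successfully processed.
func (b *backoffTracker) Delete(key string) {
	b.mu.Lock()
	defer b.mu.Unlock()
	delete(b.entries, key)
}
```
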
func (r *basePoolManager) addPendingInstances() error {
+ // nolint:golangci-lint,godox
// TODO: filter instances by status.
- instances, err := r.helper.FetchDbInstances()
+ instances, err := r.store.ListEntityInstances(r.ctx, r.entity)
if err != nil {
return fmt.Errorf("failed to fetch instances from store: %w", err)
}
for _, instance := range instances {
+ if instance.ScaleSetID != 0 {
+ // instance is part of a scale set. Skip.
+ continue
+ }
+
if instance.Status != commonParams.InstancePendingCreate {
// not in pending_create status. Skip.
continue
}
- r.log("attempting to acquire lock for instance %s (create)", instance.Name)
- lockAcquired := r.keyMux.TryLock(instance.Name)
+ slog.DebugContext(
+ r.ctx, "attempting to acquire lock for instance",
+ "runner_name", instance.Name,
+ "action", "create_pending")
+ lockAcquired := locking.TryLock(instance.Name, r.consumerID)
if !lockAcquired {
- r.log("failed to acquire lock for instance %s", instance.Name)
+ slog.DebugContext(
+ r.ctx, "failed to acquire lock for instance",
+ "runner_name", instance.Name)
continue
}
// Set the instance to "creating" before launching the goroutine. This will ensure that addPendingInstances()
// won't attempt to create the runner a second time.
if _, err := r.setInstanceStatus(instance.Name, commonParams.InstanceCreating, nil); err != nil {
- r.log("failed to update runner %s status: %s", instance.Name, err)
- r.keyMux.Unlock(instance.Name, false)
+ slog.With(slog.Any("error", err)).ErrorContext(
+ r.ctx, "failed to update runner status",
+ "runner_name", instance.Name)
+ locking.Unlock(instance.Name, false)
// We failed to transition the instance to Creating. This means that garm will retry to create this instance
// when the loop runs again and we end up with multiple instances.
continue
}
go func(instance params.Instance) {
- defer r.keyMux.Unlock(instance.Name, false)
- r.log("creating instance %s in pool %s", instance.Name, instance.PoolID)
+ defer locking.Unlock(instance.Name, false)
+ slog.InfoContext(
+ r.ctx, "creating instance in pool",
+ "runner_name", instance.Name,
+ "pool_id", instance.PoolID)
if err := r.addInstanceToProvider(instance); err != nil {
- r.log("failed to add instance to provider: %s", err)
+ slog.With(slog.Any("error", err)).ErrorContext(
+ r.ctx, "failed to add instance to provider",
+ "runner_name", instance.Name)
errAsBytes := []byte(err.Error())
- if _, err := r.setInstanceStatus(instance.Name, commonParams.InstanceError, errAsBytes); err != nil {
- r.log("failed to update runner %s status: %s", instance.Name, err)
+ if _, statusErr := r.setInstanceStatus(instance.Name, commonParams.InstanceError, errAsBytes); statusErr != nil {
+ slog.With(slog.Any("error", statusErr)).ErrorContext(
+ r.ctx, "failed to update runner status",
+ "runner_name", instance.Name)
}
- r.log("failed to create instance in provider: %s", err)
+ slog.With(slog.Any("error", err)).ErrorContext(
+ r.ctx, "failed to create instance in provider",
+ "runner_name", instance.Name)
}
}(instance)
}
@@ -1319,21 +1582,24 @@ func (r *basePoolManager) addPendingInstances() error {
func (r *basePoolManager) Wait() error {
done := make(chan struct{})
+ timer := time.NewTimer(60 * time.Second)
go func() {
r.wg.Wait()
+ timer.Stop()
close(done)
}()
select {
case <-done:
- case <-time.After(60 * time.Second):
- return errors.Wrap(runnerErrors.ErrTimeout, "waiting for pool to stop")
+ case <-timer.C:
+ return runnerErrors.NewTimeoutError("waiting for pool to stop")
}
return nil
}
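
The switch from time.After to an explicit time.NewTimer lets Wait release the timer as soon as the WaitGroup drains; a timer created by time.After cannot be stopped and would hold its resources for the full 60 seconds even after a fast shutdown. The pattern in generic form, as a sketch:

```go
package pool

import (
	"sync"
	"time"
)

// waitTimeout waits for wg with a deadline. It returns true if the group
// drained before the timeout fired, false otherwise.
func waitTimeout(wg *sync.WaitGroup, timeout time.Duration) bool {
	done := make(chan struct{})
	timer := time.NewTimer(timeout)
	go func() {
		wg.Wait()
		timer.Stop() // release the timer early once the group drains
		close(done)
	}()
	select {
	case <-done:
		return true
	case <-timer.C:
		return false
	}
}
```
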
func (r *basePoolManager) runnerCleanup() (err error) {
- r.log("running runner cleanup")
- runners, err := r.helper.GetGithubRunners()
+ slog.DebugContext(
+ r.ctx, "running runner cleanup")
+ runners, err := r.GetGithubRunners()
if err != nil {
return fmt.Errorf("failed to fetch github runners: %w", err)
}
@@ -1342,40 +1608,65 @@ func (r *basePoolManager) runnerCleanup() (err error) {
return fmt.Errorf("failed to reap timed out runners: %w", err)
}
- if err := r.cleanupOrphanedRunners(); err != nil {
+ if err := r.cleanupOrphanedRunners(runners); err != nil {
return fmt.Errorf("failed to cleanup orphaned runners: %w", err)
}
return nil
}
-func (r *basePoolManager) cleanupOrphanedRunners() error {
- runners, err := r.helper.GetGithubRunners()
- if err != nil {
- return errors.Wrap(err, "fetching github runners")
- }
+func (r *basePoolManager) cleanupOrphanedRunners(runners []forgeRunner) error {
if err := r.cleanupOrphanedProviderRunners(runners); err != nil {
- return errors.Wrap(err, "cleaning orphaned instances")
+ return fmt.Errorf("error cleaning orphaned instances: %w", err)
}
if err := r.cleanupOrphanedGithubRunners(runners); err != nil {
- return errors.Wrap(err, "cleaning orphaned github runners")
+ return fmt.Errorf("error cleaning orphaned github runners: %w", err)
}
return nil
}
func (r *basePoolManager) Start() error {
- r.updateTools() //nolint
+ initialToolUpdate := make(chan struct{}, 1)
+ go func() {
+ slog.Info("running initial tool update")
+ for {
+ slog.DebugContext(r.ctx, "waiting for tools to be available")
+ hasTools, stopped := r.waitForToolsOrCancel()
+ if stopped {
+ return
+ }
+ if hasTools {
+ break
+ }
+ }
+ if err := r.updateTools(); err != nil {
+ slog.With(slog.Any("error", err)).Error("failed to update tools")
+ }
+ initialToolUpdate <- struct{}{}
+ }()
- go r.startLoopForFunction(r.runnerCleanup, common.PoolReapTimeoutInterval, "timeout_reaper", false)
- go r.startLoopForFunction(r.scaleDown, common.PoolScaleDownInterval, "scale_down", false)
- go r.startLoopForFunction(r.deletePendingInstances, common.PoolConsilitationInterval, "consolidate[delete_pending]", false)
- go r.startLoopForFunction(r.addPendingInstances, common.PoolConsilitationInterval, "consolidate[add_pending]", false)
- go r.startLoopForFunction(r.ensureMinIdleRunners, common.PoolConsilitationInterval, "consolidate[ensure_min_idle]", false)
- go r.startLoopForFunction(r.retryFailedInstances, common.PoolConsilitationInterval, "consolidate[retry_failed]", false)
- go r.startLoopForFunction(r.updateTools, common.PoolToolUpdateInterval, "update_tools", true)
- go r.startLoopForFunction(r.consumeQueuedJobs, common.PoolConsilitationInterval, "job_queue_consumer", false)
+ go r.runWatcher()
+ go func() {
+ select {
+ case <-r.quit:
+ return
+ case <-r.ctx.Done():
+ return
+ case <-initialToolUpdate:
+ }
+ defer close(initialToolUpdate)
+ go r.startLoopForFunction(r.runnerCleanup, common.PoolReapTimeoutInterval, "timeout_reaper", false)
+ go r.startLoopForFunction(r.scaleDown, common.PoolScaleDownInterval, "scale_down", false)
+ // always run the delete pending instances routine. This way we can still remove existing runners, even if the pool is not running.
+ go r.startLoopForFunction(r.deletePendingInstances, common.PoolConsilitationInterval, "consolidate[delete_pending]", true)
+ go r.startLoopForFunction(r.addPendingInstances, common.PoolConsilitationInterval, "consolidate[add_pending]", false)
+ go r.startLoopForFunction(r.ensureMinIdleRunners, common.PoolConsilitationInterval, "consolidate[ensure_min_idle]", false)
+ go r.startLoopForFunction(r.retryFailedInstances, common.PoolConsilitationInterval, "consolidate[retry_failed]", false)
+ go r.startLoopForFunction(r.updateTools, common.PoolToolUpdateInterval, "update_tools", true)
+ go r.startLoopForFunction(r.consumeQueuedJobs, common.PoolConsilitationInterval, "job_queue_consumer", false)
+ }()
return nil
}
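
startLoopForFunction itself is not part of this diff; judging by the call sites, it runs a function at a fixed interval until the pool stops, with the final boolean controlling whether the loop also runs while the pool is marked as not running. A plausible free-standing sketch (an assumption, not the actual GARM code):

```go
package pool

import (
	"context"
	"log/slog"
	"time"
)

// runLoop sketches the shape implied by startLoopForFunction's call sites:
// invoke f every interval until quit or ctx fire. Handling of the
// "always run" flag is elided; names here are illustrative.
func runLoop(ctx context.Context, quit <-chan struct{}, f func() error, interval time.Duration, name string) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-quit:
			return
		case <-ctx.Done():
			return
		case <-ticker.C:
			if err := f(); err != nil {
				slog.ErrorContext(ctx, "loop run failed", "loop_name", name, "error", err)
			}
		}
	}
}
```
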
@@ -1384,57 +1675,54 @@ func (r *basePoolManager) Stop() error {
return nil
}
-func (r *basePoolManager) RefreshState(param params.UpdatePoolStateParams) error {
- return r.helper.UpdateState(param)
-}
-
func (r *basePoolManager) WebhookSecret() string {
- return r.helper.WebhookSecret()
-}
-
-func (r *basePoolManager) GithubRunnerRegistrationToken() (string, error) {
- return r.helper.GetGithubRegistrationToken()
+ return r.entity.WebhookSecret
}
func (r *basePoolManager) ID() string {
- return r.helper.ID()
+ return r.entity.ID
}
-func (r *basePoolManager) ForceDeleteRunner(runner params.Instance) error {
- if !r.managerIsRunning {
- return runnerErrors.NewConflictError("pool manager is not running for %s", r.helper.String())
+// DeleteRunner deletes a runner from a pool. If forceRemove is set to true, any error received from
+// the IaaS provider will be ignored and deletion will continue.
+func (r *basePoolManager) DeleteRunner(runner params.Instance, forceRemove, bypassGHUnauthorizedError bool) error {
+ if !r.managerIsRunning && !bypassGHUnauthorizedError {
+ return runnerErrors.NewConflictError("pool manager is not running for %s", r.entity.String())
}
+
if runner.AgentID != 0 {
- resp, err := r.helper.RemoveGithubRunner(runner.AgentID)
- if err != nil {
- if resp != nil {
- switch resp.StatusCode {
- case http.StatusUnprocessableEntity:
- return errors.Wrapf(runnerErrors.ErrBadRequest, "removing runner: %q", err)
- case http.StatusNotFound:
- // Runner may have been deleted by a finished job, or manually by the user.
- r.log("runner with agent id %d was not found in github", runner.AgentID)
- case http.StatusUnauthorized:
- // Mark the pool as offline from this point forward
- failureReason := fmt.Sprintf("failed to remove runner: %q", err)
- r.setPoolRunningState(false, failureReason)
- log.Print(failureReason)
- // evaluate the next switch case.
- fallthrough
- default:
- return errors.Wrap(err, "removing runner")
+ if err := r.ghcli.RemoveEntityRunner(r.ctx, runner.AgentID); err != nil {
+ if errors.Is(err, runnerErrors.ErrUnauthorized) {
+ slog.With(slog.Any("error", err)).ErrorContext(r.ctx, "failed to remove runner from github")
+ // Mark the pool as offline from this point forward
+ r.SetPoolRunningState(false, fmt.Sprintf("failed to remove runner: %q", err))
+ slog.With(slog.Any("error", err)).ErrorContext(
+ r.ctx, "failed to remove runner")
+ if bypassGHUnauthorizedError {
+ slog.Info("bypass github unauthorized error is set, marking runner for deletion")
+ } else {
+ return fmt.Errorf("error removing runner: %w", err)
}
} else {
- // We got a nil response. Assume we are in error.
- return errors.Wrap(err, "removing runner")
+ return fmt.Errorf("error removing runner: %w", err)
}
}
}
- r.log("setting instance status for: %v", runner.Name)
- if _, err := r.setInstanceStatus(runner.Name, commonParams.InstancePendingDelete, nil); err != nil {
- r.log("failed to update runner %s status: %s", runner.Name, err)
- return errors.Wrap(err, "updating runner")
+ instanceStatus := commonParams.InstancePendingDelete
+ if forceRemove {
+ instanceStatus = commonParams.InstancePendingForceDelete
+ }
+
+ slog.InfoContext(
+ r.ctx, "setting instance status",
+ "runner_name", runner.Name,
+ "status", instanceStatus)
+ if _, err := r.setInstanceStatus(runner.Name, instanceStatus, nil); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(
+ r.ctx, "failed to update runner",
+ "runner_name", runner.Name)
+ return fmt.Errorf("error updating runner: %w", err)
}
return nil
}
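
A hedged usage sketch of the two flags: forceRemove marks the instance pending_force_delete so provider errors are logged and skipped, while bypassGHUnauthorizedError lets deletion proceed when the stored credentials can no longer reach GitHub. The helper name below is illustrative:

```go
// Illustrative only: poolMgr and runnerInstance stand in for values the
// caller already holds; this helper is not part of the diff.
func forceDeleteStuckRunner(poolMgr *basePoolManager, runnerInstance params.Instance) error {
	// forceRemove=true: ignore IaaS provider errors and keep deleting.
	// bypassGHUnauthorizedError=true: proceed even if GitHub rejects our
	// credentials (useful when credentials were rotated or revoked).
	return poolMgr.DeleteRunner(runnerInstance, true, true)
}
```
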
@@ -1463,138 +1751,314 @@ func (r *basePoolManager) ForceDeleteRunner(runner params.Instance) error {
// so those will trigger the creation of a runner. The jobs we don't know about will be dealt with by the idle runners.
// Once jobs are consumed, you can set min-idle-runners to 0 again.
func (r *basePoolManager) consumeQueuedJobs() error {
- queued, err := r.store.ListEntityJobsByStatus(r.ctx, r.helper.PoolType(), r.helper.ID(), params.JobStatusQueued)
+ queued, err := r.store.ListEntityJobsByStatus(r.ctx, r.entity.EntityType, r.entity.ID, params.JobStatusQueued)
if err != nil {
- return errors.Wrap(err, "listing queued jobs")
+ return fmt.Errorf("error listing queued jobs: %w", err)
}
- poolsCache := poolsForTags{}
+ poolsCache := poolsForTags{
+ poolCacheType: r.entity.GetPoolBalancerType(),
+ }
- r.log("found %d queued jobs for %s", len(queued), r.helper.String())
+ slog.DebugContext(
+ r.ctx, "found queued jobs",
+ "job_count", len(queued))
for _, job := range queued {
if job.LockedBy != uuid.Nil && job.LockedBy.String() != r.ID() {
// Job was handled by us or another entity.
- r.log("job %d is locked by %s", job.ID, job.LockedBy.String())
+ slog.DebugContext(
+ r.ctx, "job is locked",
+ "job_id", job.WorkflowJobID,
+ "locking_entity", job.LockedBy.String())
continue
}
- if time.Since(job.UpdatedAt) < time.Second*30 {
+ if time.Since(job.UpdatedAt) < time.Second*r.controllerInfo.JobBackoff() {
// give the idle runners a chance to pick up the job.
- r.log("job %d was updated less than 30 seconds ago. Skipping", job.ID)
+ slog.DebugContext(
+ r.ctx, "job backoff not reached", "backoff_interval", r.controllerInfo.MinimumJobAgeBackoff,
+ "job_id", job.WorkflowJobID)
continue
}
if time.Since(job.UpdatedAt) >= time.Minute*10 {
- // Job has been in queued state for 10 minutes or more. Check if it was consumed by another runner.
- workflow, ghResp, err := r.helper.GithubCLI().GetWorkflowJobByID(r.ctx, job.RepositoryOwner, job.RepositoryName, job.ID)
- if err != nil {
- if ghResp != nil {
- switch ghResp.StatusCode {
- case http.StatusNotFound:
- // Job does not exist in github. Remove it from the database.
- if err := r.store.DeleteJob(r.ctx, job.ID); err != nil {
- return errors.Wrap(err, "deleting job")
- }
- default:
- r.log("failed to fetch job information from github: %q (status code: %d)", err, ghResp.StatusCode)
- }
- }
- r.log("error fetching workflow info: %q", err)
- continue
- }
-
- if workflow.GetStatus() != "queued" {
- r.log("job is no longer in queued state on github. New status is: %s", workflow.GetStatus())
- job.Action = workflow.GetStatus()
- job.Status = workflow.GetStatus()
- job.Conclusion = workflow.GetConclusion()
- if workflow.RunnerName != nil {
- job.RunnerName = *workflow.RunnerName
- }
- if workflow.RunnerID != nil {
- job.GithubRunnerID = *workflow.RunnerID
- }
- if workflow.RunnerGroupName != nil {
- job.RunnerGroupName = *workflow.RunnerGroupName
- }
- if workflow.RunnerGroupID != nil {
- job.RunnerGroupID = *workflow.RunnerGroupID
- }
- if _, err := r.store.CreateOrUpdateJob(r.ctx, job); err != nil {
- r.log("failed to update job status: %q", err)
- }
- continue
- }
-
- // Job is still queued in our db and in github. Unlock it and try again.
- if err := r.store.UnlockJob(r.ctx, job.ID, r.ID()); err != nil {
+ // Job is still queued in our db, 10 minutes after a matching runner
+ // was spawned. Unlock it and try again. A different job may have picked up
+ // the runner.
+ if err := r.store.UnlockJob(r.ctx, job.WorkflowJobID, r.ID()); err != nil {
+ // nolint:golangci-lint,godox
				// TODO: Implement a cache? Should we return here?
- r.log("failed to unlock job %d: %q", job.ID, err)
+ slog.With(slog.Any("error", err)).ErrorContext(
+ r.ctx, "failed to unlock job",
+ "job_id", job.WorkflowJobID)
continue
}
}
if job.LockedBy.String() == r.ID() {
+ // nolint:golangci-lint,godox
			// Job is locked by us. We must have already attempted to create a runner for it. Skip.
// TODO(gabriel-samfira): create an in-memory state of existing runners that we can easily
// check for existing pending or idle runners. If we can't find any, attempt to allocate another
// runner.
- r.log("job %d is locked by us", job.ID)
+ slog.DebugContext(
+ r.ctx, "job is locked by us",
+ "job_id", job.WorkflowJobID)
continue
}
poolRR, ok := poolsCache.Get(job.Labels)
if !ok {
- potentialPools, err := r.store.FindPoolsMatchingAllTags(r.ctx, r.helper.PoolType(), r.helper.ID(), job.Labels)
+ potentialPools, err := r.store.FindPoolsMatchingAllTags(r.ctx, r.entity.EntityType, r.entity.ID, job.Labels)
if err != nil {
- r.log("error finding pools matching labels: %s", err)
+ slog.With(slog.Any("error", err)).ErrorContext(
+ r.ctx, "error finding pools matching labels")
continue
}
poolRR = poolsCache.Add(job.Labels, potentialPools)
}
if poolRR.Len() == 0 {
- r.log("could not find pools with labels %s", strings.Join(job.Labels, ","))
+ slog.DebugContext(r.ctx, "could not find pools with labels", "requested_labels", strings.Join(job.Labels, ","))
continue
}
runnerCreated := false
- if err := r.store.LockJob(r.ctx, job.ID, r.ID()); err != nil {
- r.log("could not lock job %d: %s", job.ID, err)
+ if err := r.store.LockJob(r.ctx, job.WorkflowJobID, r.ID()); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(
+ r.ctx, "could not lock job",
+ "job_id", job.WorkflowJobID)
continue
}
jobLabels := []string{
- fmt.Sprintf("%s%d", jobLabelPrefix, job.ID),
+ fmt.Sprintf("%s=%d", jobLabelPrefix, job.WorkflowJobID),
}
for i := 0; i < poolRR.Len(); i++ {
pool, err := poolRR.Next()
if err != nil {
- r.log("could not find a pool to create a runner for job %d: %s", job.ID, err)
+ slog.With(slog.Any("error", err)).ErrorContext(
+ r.ctx, "could not find a pool to create a runner for job",
+ "job_id", job.WorkflowJobID)
break
}
- r.log("attempting to create a runner in pool %s for job %d", pool.ID, job.ID)
+ slog.InfoContext(
+ r.ctx, "attempting to create a runner in pool",
+ "pool_id", pool.ID,
+ "job_id", job.WorkflowJobID)
if err := r.addRunnerToPool(pool, jobLabels); err != nil {
- r.log("[PoolRR] could not add runner to pool %s: %s", pool.ID, err)
+ slog.With(slog.Any("error", err)).ErrorContext(
+ r.ctx, "could not add runner to pool",
+ "pool_id", pool.ID)
continue
}
- r.log("a new runner was added to pool %s as a response to queued job %d", pool.ID, job.ID)
+ slog.DebugContext(r.ctx, "a new runner was added as a response to queued job",
+ "pool_id", pool.ID,
+ "job_id", job.WorkflowJobID)
runnerCreated = true
break
}
if !runnerCreated {
- r.log("could not create a runner for job %d; unlocking", job.ID)
- if err := r.store.UnlockJob(r.ctx, job.ID, r.ID()); err != nil {
- r.log("failed to unlock job: %d", job.ID)
- return errors.Wrap(err, "unlocking job")
+ slog.WarnContext(
+ r.ctx, "could not create a runner for job; unlocking",
+ "job_id", job.WorkflowJobID)
+ if err := r.store.UnlockJob(r.ctx, job.WorkflowJobID, r.ID()); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(
+ r.ctx, "failed to unlock job",
+ "job_id", job.WorkflowJobID)
+ return fmt.Errorf("error unlocking job: %w", err)
}
}
}
if err := r.store.DeleteCompletedJobs(r.ctx); err != nil {
- r.log("failed to delete completed jobs: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(
+ r.ctx, "failed to delete completed jobs")
}
return nil
}
+
+func (r *basePoolManager) UninstallWebhook(ctx context.Context) error {
+ if r.controllerInfo.ControllerWebhookURL == "" {
+ return runnerErrors.NewBadRequestError("controller webhook url is empty")
+ }
+
+ allHooks, err := r.listHooks(ctx)
+ if err != nil {
+ return fmt.Errorf("error listing hooks: %w", err)
+ }
+
+ var controllerHookID int64
+ var baseHook string
+ trimmedBase := strings.TrimRight(r.controllerInfo.WebhookURL, "/")
+ trimmedController := strings.TrimRight(r.controllerInfo.ControllerWebhookURL, "/")
+
+ for _, hook := range allHooks {
+ hookInfo := hookToParamsHookInfo(hook)
+ info := strings.TrimRight(hookInfo.URL, "/")
+ if strings.EqualFold(info, trimmedController) {
+ controllerHookID = hook.GetID()
+ }
+
+ if strings.EqualFold(info, trimmedBase) {
+ baseHook = hookInfo.URL
+ }
+ }
+
+ if controllerHookID != 0 {
+ _, err = r.ghcli.DeleteEntityHook(ctx, controllerHookID)
+ if err != nil {
+ return fmt.Errorf("deleting hook: %w", err)
+ }
+ return nil
+ }
+
+ if baseHook != "" {
+ return runnerErrors.NewBadRequestError("base hook found (%s) and must be deleted manually", baseHook)
+ }
+
+ return nil
+}
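
Hook matching above normalizes URLs by trimming trailing slashes and comparing case-insensitively, so cosmetic differences in the stored webhook URL do not prevent uninstalling. A tiny standalone illustration (the URLs are made up):

```go
package main

import (
	"fmt"
	"strings"
)

// sameHookURL mirrors the normalization used above: trailing slashes and
// letter case are ignored when comparing hook URLs.
func sameHookURL(a, b string) bool {
	return strings.EqualFold(strings.TrimRight(a, "/"), strings.TrimRight(b, "/"))
}

func main() {
	fmt.Println(sameHookURL(
		"https://garm.example.com/webhooks/CONTROLLER-ID/",
		"https://garm.example.com/webhooks/controller-id",
	)) // true: trailing slash and case differences are ignored
}
```
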
+
+func (r *basePoolManager) InstallHook(ctx context.Context, req *github.Hook) (params.HookInfo, error) {
+ allHooks, err := r.listHooks(ctx)
+ if err != nil {
+ return params.HookInfo{}, fmt.Errorf("error listing hooks: %w", err)
+ }
+
+ if err := validateHookRequest(r.controllerInfo.ControllerID.String(), r.controllerInfo.WebhookURL, allHooks, req); err != nil {
+ return params.HookInfo{}, fmt.Errorf("error validating hook request: %w", err)
+ }
+
+ hook, err := r.ghcli.CreateEntityHook(ctx, req)
+ if err != nil {
+ return params.HookInfo{}, fmt.Errorf("error creating entity hook: %w", err)
+ }
+
+ if _, err := r.ghcli.PingEntityHook(ctx, hook.GetID()); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(
+ ctx, "failed to ping hook",
+ "hook_id", hook.GetID(),
+ "entity", r.entity)
+ }
+
+ return hookToParamsHookInfo(hook), nil
+}
+
+func (r *basePoolManager) InstallWebhook(ctx context.Context, param params.InstallWebhookParams) (params.HookInfo, error) {
+ if r.controllerInfo.ControllerWebhookURL == "" {
+ return params.HookInfo{}, runnerErrors.NewBadRequestError("controller webhook url is empty")
+ }
+
+ insecureSSL := "0"
+ if param.InsecureSSL {
+ insecureSSL = "1"
+ }
+ req := &github.Hook{
+ Active: github.Ptr(true),
+ Config: &github.HookConfig{
+ ContentType: github.Ptr("json"),
+ InsecureSSL: github.Ptr(insecureSSL),
+ URL: github.Ptr(r.controllerInfo.ControllerWebhookURL),
+ Secret: github.Ptr(r.WebhookSecret()),
+ },
+ Events: []string{
+ "workflow_job",
+ },
+ }
+
+ return r.InstallHook(ctx, req)
+}
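
Callers of InstallWebhook only choose whether TLS verification is skipped; the hook URL, secret, and the workflow_job event subscription are filled in from controller state. A hedged usage sketch (InstallWebhookParams may carry more fields than shown):

```go
// Illustrative only: a caller holding a *basePoolManager installs the
// controller webhook with TLS certificate verification kept on.
func installControllerHook(ctx context.Context, poolMgr *basePoolManager) (params.HookInfo, error) {
	return poolMgr.InstallWebhook(ctx, params.InstallWebhookParams{
		InsecureSSL: false, // keep TLS certificate verification on
	})
}
```
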
+
+func (r *basePoolManager) ValidateOwner(job params.WorkflowJob) error {
+ switch r.entity.EntityType {
+ case params.ForgeEntityTypeRepository:
+ if !strings.EqualFold(job.Repository.Name, r.entity.Name) || !strings.EqualFold(job.Repository.Owner.Login, r.entity.Owner) {
+ return runnerErrors.NewBadRequestError("job not meant for this pool manager")
+ }
+ case params.ForgeEntityTypeOrganization:
+ if !strings.EqualFold(job.GetOrgName(r.entity.Credentials.ForgeType), r.entity.Owner) {
+ return runnerErrors.NewBadRequestError("job not meant for this pool manager")
+ }
+ case params.ForgeEntityTypeEnterprise:
+ if !strings.EqualFold(job.Enterprise.Slug, r.entity.Owner) {
+ return runnerErrors.NewBadRequestError("job not meant for this pool manager")
+ }
+ default:
+ return runnerErrors.NewBadRequestError("unknown entity type")
+ }
+
+ return nil
+}
+
+func (r *basePoolManager) GithubRunnerRegistrationToken() (string, error) {
+ tk, ghResp, err := r.ghcli.CreateEntityRegistrationToken(r.ctx)
+ if err != nil {
+ if ghResp != nil && ghResp.StatusCode == http.StatusUnauthorized {
+ return "", runnerErrors.NewUnauthorizedError("error fetching token")
+ }
+ return "", fmt.Errorf("error creating runner token: %w", err)
+ }
+ return *tk.Token, nil
+}
+
+func (r *basePoolManager) FetchTools() ([]commonParams.RunnerApplicationDownload, error) {
+ tools, ghResp, err := r.ghcli.ListEntityRunnerApplicationDownloads(r.ctx)
+ if err != nil {
+ if ghResp != nil && ghResp.StatusCode == http.StatusUnauthorized {
+ return nil, runnerErrors.NewUnauthorizedError("error fetching tools")
+ }
+ return nil, fmt.Errorf("error fetching runner tools: %w", err)
+ }
+
+ ret := []commonParams.RunnerApplicationDownload{}
+ for _, tool := range tools {
+ if tool == nil {
+ continue
+ }
+ ret = append(ret, commonParams.RunnerApplicationDownload(*tool))
+ }
+ return ret, nil
+}
+
+func (r *basePoolManager) GetWebhookInfo(ctx context.Context) (params.HookInfo, error) {
+ allHooks, err := r.listHooks(ctx)
+ if err != nil {
+ return params.HookInfo{}, fmt.Errorf("error listing hooks: %w", err)
+ }
+ trimmedBase := strings.TrimRight(r.controllerInfo.WebhookURL, "/")
+ trimmedController := strings.TrimRight(r.controllerInfo.ControllerWebhookURL, "/")
+
+ var controllerHookInfo *params.HookInfo
+ var baseHookInfo *params.HookInfo
+
+ for _, hook := range allHooks {
+ hookInfo := hookToParamsHookInfo(hook)
+ info := strings.TrimRight(hookInfo.URL, "/")
+ if strings.EqualFold(info, trimmedController) {
+ controllerHookInfo = &hookInfo
+ break
+ }
+ if strings.EqualFold(info, trimmedBase) {
+ baseHookInfo = &hookInfo
+ }
+ }
+
+ // Return the controller hook info if available.
+ if controllerHookInfo != nil {
+ return *controllerHookInfo, nil
+ }
+
+ // Fall back to base hook info if defined.
+ if baseHookInfo != nil {
+ return *baseHookInfo, nil
+ }
+
+ return params.HookInfo{}, runnerErrors.NewNotFoundError("hook not found")
+}
+
+func (r *basePoolManager) RootCABundle() (params.CertificateBundle, error) {
+ return r.entity.Credentials.RootCertificateBundle()
+}
diff --git a/runner/pool/repository.go b/runner/pool/repository.go
deleted file mode 100644
index 86dc5cec..00000000
--- a/runner/pool/repository.go
+++ /dev/null
@@ -1,247 +0,0 @@
-// Copyright 2022 Cloudbase Solutions SRL
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package pool
-
-import (
- "context"
- "fmt"
- "net/http"
- "strings"
- "sync"
-
- runnerErrors "github.com/cloudbase/garm-provider-common/errors"
- dbCommon "github.com/cloudbase/garm/database/common"
- "github.com/cloudbase/garm/params"
- "github.com/cloudbase/garm/runner/common"
- "github.com/cloudbase/garm/util"
-
- "github.com/google/go-github/v53/github"
- "github.com/pkg/errors"
-)
-
-// test that we implement PoolManager
-var _ poolHelper = &repository{}
-
-func NewRepositoryPoolManager(ctx context.Context, cfg params.Repository, cfgInternal params.Internal, providers map[string]common.Provider, store dbCommon.Store) (common.PoolManager, error) {
- ghc, _, err := util.GithubClient(ctx, cfgInternal.OAuth2Token, cfgInternal.GithubCredentialsDetails)
- if err != nil {
- return nil, errors.Wrap(err, "getting github client")
- }
-
- wg := &sync.WaitGroup{}
- keyMuxes := &keyMutex{}
-
- helper := &repository{
- cfg: cfg,
- cfgInternal: cfgInternal,
- ctx: ctx,
- ghcli: ghc,
- id: cfg.ID,
- store: store,
- }
-
- repo := &basePoolManager{
- ctx: ctx,
- store: store,
- providers: providers,
- controllerID: cfgInternal.ControllerID,
- quit: make(chan struct{}),
- helper: helper,
- credsDetails: cfgInternal.GithubCredentialsDetails,
- wg: wg,
- keyMux: keyMuxes,
- }
- return repo, nil
-}
-
-var _ poolHelper = &repository{}
-
-type repository struct {
- cfg params.Repository
- cfgInternal params.Internal
- ctx context.Context
- ghcli common.GithubClient
- id string
- store dbCommon.Store
-
- mux sync.Mutex
-}
-
-func (r *repository) GithubCLI() common.GithubClient {
- return r.ghcli
-}
-
-func (r *repository) PoolType() params.PoolType {
- return params.RepositoryPool
-}
-
-func (r *repository) GetRunnerInfoFromWorkflow(job params.WorkflowJob) (params.RunnerInfo, error) {
- if err := r.ValidateOwner(job); err != nil {
- return params.RunnerInfo{}, errors.Wrap(err, "validating owner")
- }
- workflow, ghResp, err := r.ghcli.GetWorkflowJobByID(r.ctx, job.Repository.Owner.Login, job.Repository.Name, job.WorkflowJob.ID)
- if err != nil {
- if ghResp != nil && ghResp.StatusCode == http.StatusUnauthorized {
- return params.RunnerInfo{}, errors.Wrap(runnerErrors.ErrUnauthorized, "fetching workflow info")
- }
- return params.RunnerInfo{}, errors.Wrap(err, "fetching workflow info")
- }
-
- if workflow.RunnerName != nil {
- return params.RunnerInfo{
- Name: *workflow.RunnerName,
- Labels: workflow.Labels,
- }, nil
- }
- return params.RunnerInfo{}, fmt.Errorf("failed to find runner name from workflow")
-}
-
-func (r *repository) UpdateState(param params.UpdatePoolStateParams) error {
- r.mux.Lock()
- defer r.mux.Unlock()
-
- r.cfg.WebhookSecret = param.WebhookSecret
- if param.InternalConfig != nil {
- r.cfgInternal = *param.InternalConfig
- }
-
- ghc, _, err := util.GithubClient(r.ctx, r.GetGithubToken(), r.cfgInternal.GithubCredentialsDetails)
- if err != nil {
- return errors.Wrap(err, "getting github client")
- }
- r.ghcli = ghc
- return nil
-}
-
-func (r *repository) GetGithubToken() string {
- return r.cfgInternal.OAuth2Token
-}
-
-func (r *repository) GetGithubRunners() ([]*github.Runner, error) {
- opts := github.ListOptions{
- PerPage: 100,
- }
-
- var allRunners []*github.Runner
- for {
- runners, ghResp, err := r.ghcli.ListRunners(r.ctx, r.cfg.Owner, r.cfg.Name, &opts)
- if err != nil {
- if ghResp != nil && ghResp.StatusCode == http.StatusUnauthorized {
- return nil, errors.Wrap(runnerErrors.ErrUnauthorized, "fetching runners")
- }
- return nil, errors.Wrap(err, "fetching runners")
- }
- allRunners = append(allRunners, runners.Runners...)
- if ghResp.NextPage == 0 {
- break
- }
- opts.Page = ghResp.NextPage
- }
-
- return allRunners, nil
-}
-
-func (r *repository) FetchTools() ([]*github.RunnerApplicationDownload, error) {
- r.mux.Lock()
- defer r.mux.Unlock()
- tools, ghResp, err := r.ghcli.ListRunnerApplicationDownloads(r.ctx, r.cfg.Owner, r.cfg.Name)
- if err != nil {
- if ghResp != nil && ghResp.StatusCode == http.StatusUnauthorized {
- return nil, errors.Wrap(runnerErrors.ErrUnauthorized, "fetching tools")
- }
- return nil, errors.Wrap(err, "fetching runner tools")
- }
-
- return tools, nil
-}
-
-func (r *repository) FetchDbInstances() ([]params.Instance, error) {
- return r.store.ListRepoInstances(r.ctx, r.id)
-}
-
-func (r *repository) RemoveGithubRunner(runnerID int64) (*github.Response, error) {
- return r.ghcli.RemoveRunner(r.ctx, r.cfg.Owner, r.cfg.Name, runnerID)
-}
-
-func (r *repository) ListPools() ([]params.Pool, error) {
- pools, err := r.store.ListRepoPools(r.ctx, r.id)
- if err != nil {
- return nil, errors.Wrap(err, "fetching pools")
- }
- return pools, nil
-}
-
-func (r *repository) GithubURL() string {
- return fmt.Sprintf("%s/%s/%s", r.cfgInternal.GithubCredentialsDetails.BaseURL, r.cfg.Owner, r.cfg.Name)
-}
-
-func (r *repository) JwtToken() string {
- return r.cfgInternal.JWTSecret
-}
-
-func (r *repository) GetGithubRegistrationToken() (string, error) {
- tk, ghResp, err := r.ghcli.CreateRegistrationToken(r.ctx, r.cfg.Owner, r.cfg.Name)
-
- if err != nil {
- if ghResp != nil && ghResp.StatusCode == http.StatusUnauthorized {
- return "", errors.Wrap(runnerErrors.ErrUnauthorized, "fetching token")
- }
- return "", errors.Wrap(err, "creating runner token")
- }
- return *tk.Token, nil
-}
-
-func (r *repository) String() string {
- return fmt.Sprintf("%s/%s", r.cfg.Owner, r.cfg.Name)
-}
-
-func (r *repository) WebhookSecret() string {
- return r.cfg.WebhookSecret
-}
-
-func (r *repository) GetCallbackURL() string {
- return r.cfgInternal.InstanceCallbackURL
-}
-
-func (r *repository) GetMetadataURL() string {
- return r.cfgInternal.InstanceMetadataURL
-}
-
-func (r *repository) FindPoolByTags(labels []string) (params.Pool, error) {
- pool, err := r.store.FindRepositoryPoolByTags(r.ctx, r.id, labels)
- if err != nil {
- return params.Pool{}, errors.Wrap(err, "fetching suitable pool")
- }
- return pool, nil
-}
-
-func (r *repository) GetPoolByID(poolID string) (params.Pool, error) {
- pool, err := r.store.GetRepositoryPool(r.ctx, r.id, poolID)
- if err != nil {
- return params.Pool{}, errors.Wrap(err, "fetching pool")
- }
- return pool, nil
-}
-
-func (r *repository) ValidateOwner(job params.WorkflowJob) error {
- if !strings.EqualFold(job.Repository.Name, r.cfg.Name) || !strings.EqualFold(job.Repository.Owner.Login, r.cfg.Owner) {
- return runnerErrors.NewBadRequestError("job not meant for this pool manager")
- }
- return nil
-}
-
-func (r *repository) ID() string {
- return r.id
-}
diff --git a/runner/pool/stub_client.go b/runner/pool/stub_client.go
new file mode 100644
index 00000000..0afd6a52
--- /dev/null
+++ b/runner/pool/stub_client.go
@@ -0,0 +1,88 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package pool
+
+import (
+ "context"
+ "net/url"
+
+ "github.com/google/go-github/v72/github"
+
+ "github.com/cloudbase/garm/params"
+)
+
+type stubGithubClient struct {
+ err error
+}
+
+func (s *stubGithubClient) ListEntityHooks(_ context.Context, _ *github.ListOptions) ([]*github.Hook, *github.Response, error) {
+ return nil, nil, s.err
+}
+
+func (s *stubGithubClient) GetEntityHook(_ context.Context, _ int64) (*github.Hook, error) {
+ return nil, s.err
+}
+
+func (s *stubGithubClient) CreateEntityHook(_ context.Context, _ *github.Hook) (*github.Hook, error) {
+ return nil, s.err
+}
+
+func (s *stubGithubClient) DeleteEntityHook(_ context.Context, _ int64) (*github.Response, error) {
+ return nil, s.err
+}
+
+func (s *stubGithubClient) PingEntityHook(_ context.Context, _ int64) (*github.Response, error) {
+ return nil, s.err
+}
+
+func (s *stubGithubClient) ListEntityRunners(_ context.Context, _ *github.ListRunnersOptions) (*github.Runners, *github.Response, error) {
+ return nil, nil, s.err
+}
+
+func (s *stubGithubClient) ListEntityRunnerApplicationDownloads(_ context.Context) ([]*github.RunnerApplicationDownload, *github.Response, error) {
+ return nil, nil, s.err
+}
+
+func (s *stubGithubClient) RemoveEntityRunner(_ context.Context, _ int64) error {
+ return s.err
+}
+
+func (s *stubGithubClient) CreateEntityRegistrationToken(_ context.Context) (*github.RegistrationToken, *github.Response, error) {
+ return nil, nil, s.err
+}
+
+func (s *stubGithubClient) GetEntityJITConfig(_ context.Context, _ string, _ params.Pool, _ []string) (map[string]string, *github.Runner, error) {
+ return nil, nil, s.err
+}
+
+func (s *stubGithubClient) GetWorkflowJobByID(_ context.Context, _, _ string, _ int64) (*github.WorkflowJob, *github.Response, error) {
+ return nil, nil, s.err
+}
+
+func (s *stubGithubClient) GetEntity() params.ForgeEntity {
+ return params.ForgeEntity{}
+}
+
+func (s *stubGithubClient) GithubBaseURL() *url.URL {
+ return nil
+}
+
+func (s *stubGithubClient) RateLimit(_ context.Context) (*github.RateLimits, error) {
+ return nil, s.err
+}
+
+func (s *stubGithubClient) GetEntityRunnerGroupIDByName(_ context.Context, _ string) (int64, error) {
+ return 0, s.err
+}
diff --git a/runner/pool/util.go b/runner/pool/util.go
index 4a8c09e3..d58f90a3 100644
--- a/runner/pool/util.go
+++ b/runner/pool/util.go
@@ -1,67 +1,273 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
package pool
import (
- "log"
- "sort"
+ "context"
+ "fmt"
+ "log/slog"
+ "net/http"
+ "net/url"
"strings"
- "sync"
- "sync/atomic"
+ "time"
+
+ "github.com/google/go-github/v72/github"
runnerErrors "github.com/cloudbase/garm-provider-common/errors"
+ commonParams "github.com/cloudbase/garm-provider-common/params"
+ "github.com/cloudbase/garm/cache"
+ dbCommon "github.com/cloudbase/garm/database/common"
+ "github.com/cloudbase/garm/database/watcher"
"github.com/cloudbase/garm/params"
)
-type poolRoundRobin struct {
- pools []params.Pool
- next uint32
+func instanceInList(instanceName string, instances []commonParams.ProviderInstance) (commonParams.ProviderInstance, bool) {
+ for _, val := range instances {
+ if val.Name == instanceName {
+ return val, true
+ }
+ }
+ return commonParams.ProviderInstance{}, false
}
-func (p *poolRoundRobin) Next() (params.Pool, error) {
- if len(p.pools) == 0 {
- return params.Pool{}, runnerErrors.ErrNoPoolsAvailable
+func controllerIDFromLabels(labels []string) string {
+ for _, lbl := range labels {
+ if strings.HasPrefix(lbl, controllerLabelPrefix) {
+ trimLength := min(len(controllerLabelPrefix)+1, len(lbl))
+ return lbl[trimLength:]
+ }
+ }
+ return ""
+}
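
controllerIDFromLabels strips the matching prefix plus one separator character from a runner label. The prefix constant is defined elsewhere; the sketch below assumes a "prefix=value" label shape purely for illustration:

```go
package main

import (
	"fmt"
	"strings"
)

// Illustrative only: the real controllerLabelPrefix constant is not part
// of this hunk. Assume labels look like "<prefix>=<controller-id>".
const controllerLabelPrefix = "runner-controller-id"

func controllerIDFromLabels(labels []string) string {
	for _, lbl := range labels {
		if strings.HasPrefix(lbl, controllerLabelPrefix) && len(lbl) > len(controllerLabelPrefix)+1 {
			return lbl[len(controllerLabelPrefix)+1:] // drop the prefix and one separator byte
		}
	}
	return ""
}

func main() {
	labels := []string{"self-hosted", "runner-controller-id=8f2a0b1c-0000-4000-8000-000000000000"}
	fmt.Println(controllerIDFromLabels(labels)) // 8f2a0b1c-0000-4000-8000-000000000000
}
```
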
+
+func labelsFromRunner(runner forgeRunner) []string {
+ if runner.Labels == nil {
+ return []string{}
}
- n := atomic.AddUint32(&p.next, 1)
- return p.pools[(int(n)-1)%len(p.pools)], nil
+ var labels []string
+ for _, val := range runner.Labels {
+ labels = append(labels, val.Name)
+ }
+ return labels
}
-func (p *poolRoundRobin) Len() int {
- return len(p.pools)
+// isManagedRunner returns true if labels indicate the runner belongs to a pool
+// this manager is responsible for.
+func isManagedRunner(labels []string, controllerID string) bool {
+ runnerControllerID := controllerIDFromLabels(labels)
+ return runnerControllerID == controllerID
}
-func (p *poolRoundRobin) Reset() {
- atomic.StoreUint32(&p.next, 0)
+func composeWatcherFilters(entity params.ForgeEntity) dbCommon.PayloadFilterFunc {
+ // We want to watch for changes in either the controller or the
+ // entity itself.
+ return watcher.WithAny(
+ watcher.WithAll(
+ // Updates to the controller
+ watcher.WithEntityTypeFilter(dbCommon.ControllerEntityType),
+ watcher.WithOperationTypeFilter(dbCommon.UpdateOperation),
+ ),
+ // Any operation on the entity we're managing the pool for.
+ watcher.WithEntityFilter(entity),
+ // Watch for changes to the github credentials
+ watcher.WithForgeCredentialsFilter(entity.Credentials),
+ )
}
-type poolsForTags struct {
- pools sync.Map
+func (r *basePoolManager) waitForToolsOrCancel() (hasTools, stopped bool) {
+ ticker := time.NewTicker(1 * time.Second)
+ defer ticker.Stop()
+ select {
+ case <-ticker.C:
+ if _, err := cache.GetGithubToolsCache(r.entity.ID); err != nil {
+ return false, false
+ }
+ return true, false
+ case <-r.quit:
+ return false, true
+ case <-r.ctx.Done():
+ return false, true
+ }
}
-func (p *poolsForTags) Get(tags []string) (*poolRoundRobin, bool) {
- sort.Strings(tags)
- key := strings.Join(tags, "^")
-
- v, ok := p.pools.Load(key)
- if !ok {
- return nil, false
+func validateHookRequest(controllerID, baseURL string, allHooks []*github.Hook, req *github.Hook) error {
+ parsed, err := url.Parse(baseURL)
+ if err != nil {
+ return fmt.Errorf("error parsing webhook url: %w", err)
}
- return v.(*poolRoundRobin), true
-}
+ partialMatches := []string{}
+ for _, hook := range allHooks {
+ hookURL := strings.ToLower(hook.Config.GetURL())
+ if hookURL == "" {
+ continue
+ }
-func (p *poolsForTags) Add(tags []string, pools []params.Pool) *poolRoundRobin {
- sort.Strings(tags)
- key := strings.Join(tags, "^")
-
- poolRR := &poolRoundRobin{pools: pools}
- v, _ := p.pools.LoadOrStore(key, poolRR)
- return v.(*poolRoundRobin)
-}
-
-func (r *basePoolManager) log(msg string, args ...interface{}) {
- msgArgs := []interface{}{
- r.helper.String(),
+ if hook.Config.GetURL() == req.Config.GetURL() {
+ return runnerErrors.NewConflictError("hook already installed")
+ } else if strings.Contains(hookURL, controllerID) || strings.Contains(hookURL, parsed.Hostname()) {
+ partialMatches = append(partialMatches, hook.Config.GetURL())
+ }
}
- msgArgs = append(msgArgs, args...)
- log.Printf("[Pool mgr %s] "+msg, msgArgs...)
+
+ if len(partialMatches) > 0 {
+		return runnerErrors.NewConflictError("a webhook containing the controller ID or hostname of this controller is already installed on this repository")
+ }
+
+ return nil
+}
+
+func hookToParamsHookInfo(hook *github.Hook) params.HookInfo {
+ hookURL := hook.Config.GetURL()
+
+ insecureSSLConfig := hook.Config.GetInsecureSSL()
+ insecureSSL := insecureSSLConfig == "1"
+
+ return params.HookInfo{
+ ID: *hook.ID,
+ URL: hookURL,
+ Events: hook.Events,
+ Active: *hook.Active,
+ InsecureSSL: insecureSSL,
+ }
+}
+
+func (r *basePoolManager) listHooks(ctx context.Context) ([]*github.Hook, error) {
+ opts := github.ListOptions{
+ PerPage: 100,
+ }
+ var allHooks []*github.Hook
+ for {
+ hooks, ghResp, err := r.ghcli.ListEntityHooks(ctx, &opts)
+ if err != nil {
+ if ghResp != nil && ghResp.StatusCode == http.StatusNotFound {
+ return nil, runnerErrors.NewBadRequestError("repository not found or your PAT does not have access to manage webhooks")
+ }
+ return nil, fmt.Errorf("error fetching hooks: %w", err)
+ }
+ allHooks = append(allHooks, hooks...)
+ if ghResp.NextPage == 0 {
+ break
+ }
+ opts.Page = ghResp.NextPage
+ }
+ return allHooks, nil
+}
+
+func (r *basePoolManager) listRunnersWithPagination() ([]forgeRunner, error) {
+ opts := github.ListRunnersOptions{
+ ListOptions: github.ListOptions{
+ PerPage: 100,
+ },
+ }
+ var allRunners []*github.Runner
+
+ // Paginating like this can lead to a situation where if we have many pages of runners,
+ // while we paginate, a particular runner can move from page n to page n-1 while we move
+ // from page n-1 to page n. In situations such as that, we end up with a list of runners
+ // that does not contain the runner that swapped pages while we were paginating.
+ // Sadly, the GitHub API does not allow listing more than 100 runners per page.
+ for {
+ runners, ghResp, err := r.ghcli.ListEntityRunners(r.ctx, &opts)
+ if err != nil {
+ if ghResp != nil && ghResp.StatusCode == http.StatusUnauthorized {
+ return nil, runnerErrors.NewUnauthorizedError("error fetching runners")
+ }
+ return nil, fmt.Errorf("error fetching runners: %w", err)
+ }
+ allRunners = append(allRunners, runners.Runners...)
+ if ghResp.NextPage == 0 {
+ break
+ }
+ opts.Page = ghResp.NextPage
+ }
+
+ ret := make([]forgeRunner, len(allRunners))
+ for idx, val := range allRunners {
+ ret[idx] = forgeRunner{
+ ID: val.GetID(),
+ Name: val.GetName(),
+ Status: val.GetStatus(),
+ Labels: make([]RunnerLabels, len(val.Labels)),
+ }
+ for labelIdx, label := range val.Labels {
+ ret[idx].Labels[labelIdx] = RunnerLabels{
+ Name: label.GetName(),
+ Type: label.GetType(),
+ ID: label.GetID(),
+ }
+ }
+ }
+
+ return ret, nil
+}
+
+func (r *basePoolManager) listRunnersWithScaleSetAPI() ([]forgeRunner, error) {
+ if r.scaleSetClient == nil {
+ return nil, fmt.Errorf("scaleset client not initialized")
+ }
+
+ runners, err := r.scaleSetClient.ListAllRunners(r.ctx)
+ if err != nil {
+ return nil, fmt.Errorf("failed to list runners through scaleset API: %w", err)
+ }
+
+ ret := []forgeRunner{}
+ for _, runner := range runners.RunnerReferences {
+ if runner.RunnerScaleSetID != 0 {
+ // skip scale set runners.
+ continue
+ }
+ run := forgeRunner{
+ Name: runner.Name,
+ ID: runner.ID,
+ Status: string(runner.GetStatus()),
+ Labels: make([]RunnerLabels, len(runner.Labels)),
+ }
+ for labelIDX, label := range runner.Labels {
+ run.Labels[labelIDX] = RunnerLabels{
+ Name: label.Name,
+ Type: label.Type,
+ }
+ }
+ ret = append(ret, run)
+ }
+ return ret, nil
+}
+
+func (r *basePoolManager) GetGithubRunners() ([]forgeRunner, error) {
+ // Gitea has no scale sets API
+ if r.scaleSetClient == nil {
+ return r.listRunnersWithPagination()
+ }
+
+ // try the scale sets API for github
+ runners, err := r.listRunnersWithScaleSetAPI()
+ if err != nil {
+ slog.WarnContext(r.ctx, "failed to list runners via scaleset API; falling back to pagination", "error", err)
+ return r.listRunnersWithPagination()
+ }
+
+ entityInstances := cache.GetEntityInstances(r.entity.ID)
+ if len(entityInstances) > 0 && len(runners) == 0 {
+ // I have trust issues in the undocumented API. We seem to have runners for this
+ // entity, but the scaleset API returned nothing and no error. Fall back to pagination.
+ slog.DebugContext(r.ctx, "the scaleset api returned nothing, but we seem to have runners in the db; falling back to paginated API runner list")
+ return r.listRunnersWithPagination()
+ }
+ slog.DebugContext(r.ctx, "Scaleset API runner list succeeded", "runners", runners)
+	slog.DebugContext(r.ctx, "scaleset API runner list succeeded", "runners", runners)
}
diff --git a/runner/pool/util_test.go b/runner/pool/util_test.go
new file mode 100644
index 00000000..67d31f76
--- /dev/null
+++ b/runner/pool/util_test.go
@@ -0,0 +1,242 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package pool
+
+import (
+ "sync"
+ "testing"
+
+ runnerErrors "github.com/cloudbase/garm-provider-common/errors"
+ "github.com/cloudbase/garm/params"
+)
+
+func TestPoolRoundRobinRollsOver(t *testing.T) {
+ p := &poolRoundRobin{
+ pools: []params.Pool{
+ {
+ ID: "1",
+ },
+ {
+ ID: "2",
+ },
+ },
+ }
+
+ pool, err := p.Next()
+ if err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+ if pool.ID != "1" {
+ t.Fatalf("expected pool 1, got %s", pool.ID)
+ }
+
+ pool, err = p.Next()
+ if err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+ if pool.ID != "2" {
+ t.Fatalf("expected pool 2, got %s", pool.ID)
+ }
+
+ pool, err = p.Next()
+ if err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+ if pool.ID != "1" {
+ t.Fatalf("expected pool 1, got %s", pool.ID)
+ }
+}
+
+func TestPoolRoundRobinEmptyPoolErrorsOut(t *testing.T) {
+ p := &poolRoundRobin{}
+
+ _, err := p.Next()
+ if err == nil {
+ t.Fatalf("expected error, got nil")
+ }
+ if err != runnerErrors.ErrNoPoolsAvailable {
+ t.Fatalf("expected ErrNoPoolsAvailable, got %s", err)
+ }
+}
+
+func TestPoolRoundRobinLen(t *testing.T) {
+ p := &poolRoundRobin{
+ pools: []params.Pool{
+ {
+ ID: "1",
+ },
+ {
+ ID: "2",
+ },
+ },
+ }
+
+ if p.Len() != 2 {
+ t.Fatalf("expected 2, got %d", p.Len())
+ }
+}
+
+func TestPoolRoundRobinReset(t *testing.T) {
+ p := &poolRoundRobin{
+ pools: []params.Pool{
+ {
+ ID: "1",
+ },
+ {
+ ID: "2",
+ },
+ },
+ }
+
+ p.Next()
+ p.Reset()
+ if p.next != 0 {
+ t.Fatalf("expected 0, got %d", p.next)
+ }
+}
+
+func TestPoolsForTagsPackGet(t *testing.T) {
+ p := &poolsForTags{
+ poolCacheType: params.PoolBalancerTypePack,
+ }
+
+ pools := []params.Pool{
+ {
+ ID: "1",
+ Priority: 0,
+ },
+ {
+ ID: "2",
+ Priority: 100,
+ },
+ }
+ _ = p.Add([]string{"key"}, pools)
+ cache, ok := p.Get([]string{"key"})
+ if !ok {
+ t.Fatalf("expected true, got false")
+ }
+ if cache.Len() != 2 {
+ t.Fatalf("expected 2, got %d", cache.Len())
+ }
+
+ poolRR, ok := cache.(*poolRoundRobin)
+ if !ok {
+ t.Fatalf("expected poolRoundRobin, got %v", cache)
+ }
+ if poolRR.next != 0 {
+ t.Fatalf("expected 0, got %d", poolRR.next)
+ }
+ pool, err := poolRR.Next()
+ if err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+ if pool.ID != "2" {
+ t.Fatalf("expected pool 2, got %s", pool.ID)
+ }
+
+ if poolRR.next != 1 {
+ t.Fatalf("expected 1, got %d", poolRR.next)
+ }
+ // Getting the pool cache again should reset next
+ cache, ok = p.Get([]string{"key"})
+ if !ok {
+ t.Fatalf("expected true, got false")
+ }
+ poolRR, ok = cache.(*poolRoundRobin)
+ if !ok {
+ t.Fatalf("expected poolRoundRobin, got %v", cache)
+ }
+ if poolRR.next != 0 {
+ t.Fatalf("expected 0, got %d", poolRR.next)
+ }
+}
+
+func TestPoolsForTagsRoundRobinGet(t *testing.T) {
+ p := &poolsForTags{
+ poolCacheType: params.PoolBalancerTypeRoundRobin,
+ }
+
+ pools := []params.Pool{
+ {
+ ID: "1",
+ Priority: 0,
+ },
+ {
+ ID: "2",
+ Priority: 100,
+ },
+ }
+ _ = p.Add([]string{"key"}, pools)
+ cache, ok := p.Get([]string{"key"})
+ if !ok {
+ t.Fatalf("expected true, got false")
+ }
+ if cache.Len() != 2 {
+ t.Fatalf("expected 2, got %d", cache.Len())
+ }
+
+ pool, err := cache.Next()
+ if err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+ if pool.ID != "2" {
+ t.Fatalf("expected pool 2, got %s", pool.ID)
+ }
+ // Getting the pool cache again should not reset next, and
+ // should return the next pool.
+ cache, ok = p.Get([]string{"key"})
+ if !ok {
+ t.Fatalf("expected true, got false")
+ }
+ pool, err = cache.Next()
+ if err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+ if pool.ID != "1" {
+ t.Fatalf("expected pool 1, got %s", pool.ID)
+ }
+}
+
+func TestPoolsForTagsNoPoolsForTag(t *testing.T) {
+ p := &poolsForTags{
+ pools: sync.Map{},
+ }
+
+ _, ok := p.Get([]string{"key"})
+ if ok {
+ t.Fatalf("expected false, got true")
+ }
+}
+
+func TestPoolsForTagsBalancerTypePack(t *testing.T) {
+ p := &poolsForTags{
+ pools: sync.Map{},
+ poolCacheType: params.PoolBalancerTypePack,
+ }
+
+ poolCache := &poolRoundRobin{}
+ p.pools.Store("key", poolCache)
+
+ cache, ok := p.Get([]string{"key"})
+ if !ok {
+ t.Fatalf("expected true, got false")
+ }
+ if cache != poolCache {
+ t.Fatalf("expected poolCache, got %v", cache)
+ }
+ if poolCache.next != 0 {
+ t.Fatalf("expected 0, got %d", poolCache.next)
+ }
+}
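
Taken together, these tests pin down the balancer semantics: Add appears to order pools by descending priority (pool 2, priority 100, comes out first), the pack balancer resets the round-robin cursor on every Get so iteration always restarts from the highest-priority pool, and the round-robin balancer keeps its cursor across lookups. A sketch of a Get honoring that contract (the real implementation and its return interface are not in this diff):

```go
// Sketch only, in package pool: assumes imports "sort", "strings" and the
// params package. The tests type-assert Get's result to *poolRoundRobin,
// so the real return type is some interface it satisfies; poolCache below
// is a hypothetical stand-in for that interface.
type poolCache interface {
	Next() (params.Pool, error)
	Len() int
	Reset()
}

func (p *poolsForTags) getSketch(tags []string) (poolCache, bool) {
	sort.Strings(tags)
	key := strings.Join(tags, "^")
	v, ok := p.pools.Load(key)
	if !ok {
		return nil, false
	}
	cached := v.(*poolRoundRobin)
	// Pack always restarts from the highest-priority pool; round robin
	// keeps its position across lookups.
	if p.poolCacheType == params.PoolBalancerTypePack {
		cached.Reset()
	}
	return cached, true
}
```
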
diff --git a/runner/pool/watcher.go b/runner/pool/watcher.go
new file mode 100644
index 00000000..999b52c6
--- /dev/null
+++ b/runner/pool/watcher.go
@@ -0,0 +1,183 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package pool
+
+import (
+ "log/slog"
+
+ runnerErrors "github.com/cloudbase/garm-provider-common/errors"
+ "github.com/cloudbase/garm/database/common"
+ "github.com/cloudbase/garm/params"
+ runnerCommon "github.com/cloudbase/garm/runner/common"
+ ghClient "github.com/cloudbase/garm/util/github"
+)
+
+// entityGetter is implemented by all github entities (repositories, organizations and enterprises)
+type entityGetter interface {
+ GetEntity() (params.ForgeEntity, error)
+}
+
+func (r *basePoolManager) handleControllerUpdateEvent(controllerInfo params.ControllerInfo) {
+ r.mux.Lock()
+ defer r.mux.Unlock()
+
+ slog.DebugContext(r.ctx, "updating controller info", "controller_info", controllerInfo)
+ r.controllerInfo = controllerInfo
+}
+
+func (r *basePoolManager) getClientOrStub() runnerCommon.GithubClient {
+ var err error
+ var ghc runnerCommon.GithubClient
+ ghc, err = ghClient.Client(r.ctx, r.entity)
+ if err != nil {
+ slog.WarnContext(r.ctx, "failed to create github client", "error", err)
+ ghc = &stubGithubClient{
+ err: runnerErrors.NewUnauthorizedError("failed to create github client; please update credentials"),
+ }
+ }
+ return ghc
+}
+
+func (r *basePoolManager) handleEntityUpdate(entity params.ForgeEntity, operation common.OperationType) {
+ slog.DebugContext(r.ctx, "received entity operation", "entity", entity.ID, "operation", operation)
+ if r.entity.ID != entity.ID {
+ slog.WarnContext(r.ctx, "entity ID mismatch; stale event? refusing to update", "entity", entity.ID)
+ return
+ }
+
+ if operation == common.DeleteOperation {
+ slog.InfoContext(r.ctx, "entity deleted; closing db consumer", "entity", entity.ID)
+ r.consumer.Close()
+ return
+ }
+
+ if operation != common.UpdateOperation {
+ slog.DebugContext(r.ctx, "operation not update; ignoring", "entity", entity.ID, "operation", operation)
+ return
+ }
+
+ credentialsUpdate := r.entity.Credentials.GetID() != entity.Credentials.GetID()
+ defer func() {
+ slog.DebugContext(r.ctx, "deferred tools update", "credentials_update", credentialsUpdate)
+ if !credentialsUpdate {
+ return
+ }
+ slog.DebugContext(r.ctx, "updating tools", "entity", entity.ID)
+ if err := r.updateTools(); err != nil {
+ slog.ErrorContext(r.ctx, "failed to update tools", "error", err)
+ }
+ }()
+
+ slog.DebugContext(r.ctx, "updating entity", "entity", entity.ID)
+ r.mux.Lock()
+ slog.DebugContext(r.ctx, "lock acquired", "entity", entity.ID)
+
+ r.entity = entity
+ if credentialsUpdate {
+ if r.consumer != nil {
+ filters := composeWatcherFilters(r.entity)
+ r.consumer.SetFilters(filters)
+ }
+ slog.DebugContext(r.ctx, "credentials update", "entity", entity.ID)
+ r.ghcli = r.getClientOrStub()
+ }
+ r.mux.Unlock()
+ slog.DebugContext(r.ctx, "lock released", "entity", entity.ID)
+}
+
+func (r *basePoolManager) handleCredentialsUpdate(credentials params.ForgeCredentials) {
+ // when we switch credentials on an entity (like from one app to another or from an app
+ // to a PAT), we may still get events for the previous credentials as the channel is buffered.
+ // The watcher will watch for changes to the entity itself, which includes events that
+ // change the credentials name on the entity, but we also watch for changes to the credentials
+ // themselves, like an updated PAT token set on existing credentials entity.
+ // The handleCredentialsUpdate function handles situations where we have changes on the
+ // credentials entity itself, not on the entity that the credentials are set on.
+ // For example, we may have a credentials entity called org_pat set on a repo called
+ // test-repo. This function would handle situations where "org_pat" is updated.
+ // If "test-repo" is updated with new credentials, that event is handled above in
+ // handleEntityUpdate.
+ shouldUpdateTools := r.entity.Credentials.GetID() == credentials.GetID()
+ defer func() {
+ if !shouldUpdateTools {
+ return
+ }
+ slog.DebugContext(r.ctx, "deferred tools update", "credentials_id", credentials.GetID())
+ if err := r.updateTools(); err != nil {
+ slog.ErrorContext(r.ctx, "failed to update tools", "error", err)
+ }
+ }()
+
+ r.mux.Lock()
+ if !shouldUpdateTools {
+ slog.InfoContext(r.ctx, "credential ID mismatch; stale event?", "credentials_id", credentials.GetID())
+ r.mux.Unlock()
+ return
+ }
+
+ slog.DebugContext(r.ctx, "updating credentials", "credentials_id", credentials.GetID())
+ r.entity.Credentials = credentials
+ r.ghcli = r.getClientOrStub()
+ r.mux.Unlock()
+}
+
+func (r *basePoolManager) handleWatcherEvent(event common.ChangePayload) {
+ dbEntityType := common.DatabaseEntityType(r.entity.EntityType)
+ switch event.EntityType {
+ case common.GithubCredentialsEntityType, common.GiteaCredentialsEntityType:
+ credentials, ok := event.Payload.(params.ForgeCredentials)
+ if !ok {
+ slog.ErrorContext(r.ctx, "failed to cast payload to github credentials")
+ return
+ }
+ r.handleCredentialsUpdate(credentials)
+ case common.ControllerEntityType:
+ controllerInfo, ok := event.Payload.(params.ControllerInfo)
+ if !ok {
+ slog.ErrorContext(r.ctx, "failed to cast payload to controller info")
+ return
+ }
+ r.handleControllerUpdateEvent(controllerInfo)
+ case dbEntityType:
+ entity, ok := event.Payload.(entityGetter)
+ if !ok {
+ slog.ErrorContext(r.ctx, "failed to cast payload to entity")
+ return
+ }
+ entityInfo, err := entity.GetEntity()
+ if err != nil {
+ slog.ErrorContext(r.ctx, "failed to get entity", "error", err)
+ return
+ }
+ r.handleEntityUpdate(entityInfo, event.Operation)
+ }
+}
+
+func (r *basePoolManager) runWatcher() {
+ defer r.consumer.Close()
+ for {
+ select {
+ case <-r.quit:
+ return
+ case <-r.ctx.Done():
+ return
+ case event, ok := <-r.consumer.Watch():
+ if !ok {
+ return
+ }
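+ // Handle each event in its own goroutine so a slow handler does not block the consumer channel.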
+ go r.handleWatcherEvent(event)
+ }
+ }
+}
diff --git a/runner/pools.go b/runner/pools.go
index 8fbe2b0e..ffd3b9c8 100644
--- a/runner/pools.go
+++ b/runner/pools.go
@@ -16,13 +16,12 @@ package runner
import (
"context"
+ "errors"
"fmt"
runnerErrors "github.com/cloudbase/garm-provider-common/errors"
"github.com/cloudbase/garm/auth"
"github.com/cloudbase/garm/params"
-
- "github.com/pkg/errors"
)
func (r *Runner) ListAllPools(ctx context.Context) ([]params.Pool, error) {
@@ -32,7 +31,7 @@ func (r *Runner) ListAllPools(ctx context.Context) ([]params.Pool, error) {
pools, err := r.store.ListAllPools(ctx)
if err != nil {
- return nil, errors.Wrap(err, "fetching pools")
+ return nil, fmt.Errorf("error fetching pools: %w", err)
}
return pools, nil
}
@@ -44,7 +43,7 @@ func (r *Runner) GetPoolByID(ctx context.Context, poolID string) (params.Pool, e
pool, err := r.store.GetPoolByID(ctx, poolID)
if err != nil {
- return params.Pool{}, errors.Wrap(err, "fetching pool")
+ return params.Pool{}, fmt.Errorf("error fetching pool: %w", err)
}
return pool, nil
}
@@ -57,7 +56,7 @@ func (r *Runner) DeletePoolByID(ctx context.Context, poolID string) error {
pool, err := r.store.GetPoolByID(ctx, poolID)
if err != nil {
if !errors.Is(err, runnerErrors.ErrNotFound) {
- return errors.Wrap(err, "fetching pool")
+ return fmt.Errorf("error fetching pool: %w", err)
}
return nil
}
@@ -67,7 +66,7 @@ func (r *Runner) DeletePoolByID(ctx context.Context, poolID string) error {
}
if err := r.store.DeletePoolByID(ctx, poolID); err != nil {
- return errors.Wrap(err, "deleting pool")
+ return fmt.Errorf("error deleting pool: %w", err)
}
return nil
}
@@ -79,7 +78,7 @@ func (r *Runner) UpdatePoolByID(ctx context.Context, poolID string, param params
pool, err := r.store.GetPoolByID(ctx, poolID)
if err != nil {
- return params.Pool{}, errors.Wrap(err, "fetching pool")
+ return params.Pool{}, fmt.Errorf("error fetching pool: %w", err)
}
maxRunners := pool.MaxRunners
@@ -100,28 +99,14 @@ func (r *Runner) UpdatePoolByID(ctx context.Context, poolID string, param params
return params.Pool{}, runnerErrors.NewBadRequestError("min_idle_runners cannot be larger than max_runners")
}
- if param.Tags != nil && len(param.Tags) > 0 {
- newTags, err := r.processTags(string(pool.OSArch), pool.OSType, param.Tags)
- if err != nil {
- return params.Pool{}, errors.Wrap(err, "processing tags")
- }
- param.Tags = newTags
- }
-
- var newPool params.Pool
-
- if pool.RepoID != "" {
- newPool, err = r.store.UpdateRepositoryPool(ctx, pool.RepoID, poolID, param)
- } else if pool.OrgID != "" {
- newPool, err = r.store.UpdateOrganizationPool(ctx, pool.OrgID, poolID, param)
- } else if pool.EnterpriseID != "" {
- newPool, err = r.store.UpdateEnterprisePool(ctx, pool.EnterpriseID, poolID, param)
- } else {
- return params.Pool{}, fmt.Errorf("pool not found to a repo, org or enterprise")
- }
-
+ entity, err := pool.GetEntity()
if err != nil {
- return params.Pool{}, errors.Wrap(err, "updating pool")
+ return params.Pool{}, fmt.Errorf("error getting entity: %w", err)
+ }
+
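+ // UpdateEntityPool replaces the previous per-entity-type branches (repo, org and enterprise) with a single entity-aware store call.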
+ newPool, err := r.store.UpdateEntityPool(ctx, entity, poolID, param)
+ if err != nil {
+ return params.Pool{}, fmt.Errorf("error updating pool: %w", err)
}
return newPool, nil
}
@@ -133,7 +118,7 @@ func (r *Runner) ListAllJobs(ctx context.Context) ([]params.Job, error) {
jobs, err := r.store.ListAllJobs(ctx)
if err != nil {
- return nil, errors.Wrap(err, "fetching jobs")
+ return nil, fmt.Errorf("error fetching jobs: %w", err)
}
return jobs, nil
}
diff --git a/runner/pools_test.go b/runner/pools_test.go
index db112b69..2a2aea5d 100644
--- a/runner/pools_test.go
+++ b/runner/pools_test.go
@@ -19,6 +19,8 @@ import (
"fmt"
"testing"
+ "github.com/stretchr/testify/suite"
+
runnerErrors "github.com/cloudbase/garm-provider-common/errors"
"github.com/cloudbase/garm/auth"
"github.com/cloudbase/garm/config"
@@ -27,7 +29,6 @@ import (
garmTesting "github.com/cloudbase/garm/internal/testing"
"github.com/cloudbase/garm/params"
"github.com/cloudbase/garm/runner/common"
- "github.com/stretchr/testify/suite"
)
type PoolTestFixtures struct {
@@ -44,10 +45,15 @@ type PoolTestSuite struct {
suite.Suite
Fixtures *PoolTestFixtures
Runner *Runner
+
+ adminCtx context.Context
+ testCreds params.ForgeCredentials
+ secondaryTestCreds params.ForgeCredentials
+ githubEndpoint params.ForgeEndpoint
}
func (s *PoolTestSuite) SetupTest() {
- adminCtx := auth.GetAdminContext()
+ adminCtx := auth.GetAdminContext(context.Background())
// create testing sqlite database
dbCfg := garmTesting.GetTestSqliteDBConfig(s.T())
@@ -56,18 +62,28 @@ func (s *PoolTestSuite) SetupTest() {
s.FailNow(fmt.Sprintf("failed to create db connection: %s", err))
}
+ s.adminCtx = garmTesting.ImpersonateAdminContext(adminCtx, db, s.T())
+
+ s.githubEndpoint = garmTesting.CreateDefaultGithubEndpoint(s.adminCtx, db, s.T())
+ s.testCreds = garmTesting.CreateTestGithubCredentials(s.adminCtx, "new-creds", db, s.T(), s.githubEndpoint)
+ s.secondaryTestCreds = garmTesting.CreateTestGithubCredentials(s.adminCtx, "secondary-creds", db, s.T(), s.githubEndpoint)
+
// create an organization for testing purposes
- org, err := db.CreateOrganization(context.Background(), "test-org", "test-creds", "test-webhookSecret")
+ org, err := db.CreateOrganization(s.adminCtx, "test-org", s.testCreds, "test-webhookSecret", params.PoolBalancerTypeRoundRobin)
if err != nil {
s.FailNow(fmt.Sprintf("failed to create org: %s", err))
}
// create some pool objects in the database, for testing purposes
+ entity := params.ForgeEntity{
+ ID: org.ID,
+ EntityType: params.ForgeEntityTypeOrganization,
+ }
orgPools := []params.Pool{}
for i := 1; i <= 3; i++ {
- pool, err := db.CreateOrganizationPool(
- context.Background(),
- org.ID,
+ pool, err := db.CreateEntityPool(
+ adminCtx,
+ entity,
params.CreatePoolParams{
ProviderName: "test-provider",
MaxRunners: 4,
@@ -75,7 +91,7 @@ func (s *PoolTestSuite) SetupTest() {
Image: fmt.Sprintf("test-image-%d", i),
Flavor: "test-flavor",
OSType: "linux",
- Tags: []string{"self-hosted", "amd64", "linux"},
+ Tags: []string{"amd64-linux-runner"},
RunnerBootstrapTimeout: 0,
},
)
@@ -97,6 +113,9 @@ func (s *PoolTestSuite) SetupTest() {
MinIdleRunners: &minIdleRunners,
Image: "test-images-updated",
Flavor: "test-flavor-updated",
+ Tags: []string{
+ "amd64-linux-runner",
+ },
},
CreateInstanceParams: params.CreateInstanceParams{
Name: "test-instance-name",
@@ -107,10 +126,9 @@ func (s *PoolTestSuite) SetupTest() {
// setup test runner
runner := &Runner{
- providers: fixtures.Providers,
- credentials: fixtures.Credentials,
- store: fixtures.Store,
- ctx: fixtures.AdminContext,
+ providers: fixtures.Providers,
+ store: fixtures.Store,
+ ctx: fixtures.AdminContext,
}
s.Runner = runner
}
@@ -151,7 +169,7 @@ func (s *PoolTestSuite) TestGetPoolByIDNotFound() {
s.Require().Nil(err)
_, err = s.Runner.GetPoolByID(s.Fixtures.AdminContext, s.Fixtures.Pools[0].ID)
s.Require().NotNil(err)
- s.Require().Equal("fetching pool: fetching pool by ID: not found", err.Error())
+ s.Require().Equal("error fetching pool: error fetching pool by ID: not found", err.Error())
}
func (s *PoolTestSuite) TestDeletePoolByID() {
@@ -160,7 +178,7 @@ func (s *PoolTestSuite) TestDeletePoolByID() {
s.Require().Nil(err)
_, err = s.Fixtures.Store.GetPoolByID(s.Fixtures.AdminContext, s.Fixtures.Pools[0].ID)
s.Require().NotNil(err)
- s.Require().Equal("fetching pool by ID: not found", err.Error())
+ s.Require().Equal("error fetching pool by ID: not found", err.Error())
}
func (s *PoolTestSuite) TestDeletePoolByIDErrUnauthorized() {
@@ -202,12 +220,12 @@ func (s *PoolTestSuite) TestTestUpdatePoolByIDInvalidPoolID() {
_, err := s.Runner.UpdatePoolByID(s.Fixtures.AdminContext, "dummy-pool-id", s.Fixtures.UpdatePoolParams)
s.Require().NotNil(err)
- s.Require().Equal("fetching pool: fetching pool by ID: parsing id: invalid request", err.Error())
+ s.Require().Equal("error fetching pool: error fetching pool by ID: error parsing id: invalid request", err.Error())
}
func (s *PoolTestSuite) TestTestUpdatePoolByIDRunnerBootstrapTimeoutFailed() {
// this is already created in `SetupTest()`
- var RunnerBootstrapTimeout uint = 0
+ var RunnerBootstrapTimeout uint // default is 0
s.Fixtures.UpdatePoolParams.RunnerBootstrapTimeout = &RunnerBootstrapTimeout
_, err := s.Runner.UpdatePoolByID(s.Fixtures.AdminContext, s.Fixtures.Pools[0].ID, s.Fixtures.UpdatePoolParams)
diff --git a/runner/providers/lxd/specs.go b/runner/providers/common/common.go
similarity index 54%
rename from runner/providers/lxd/specs.go
rename to runner/providers/common/common.go
index 0471a536..f1a5a66d 100644
--- a/runner/providers/lxd/specs.go
+++ b/runner/providers/common/common.go
@@ -1,4 +1,4 @@
-// Copyright 2023 Cloudbase Solutions SRL
+// Copyright 2025 Cloudbase Solutions SRL
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License. You may obtain
@@ -12,28 +12,26 @@
// License for the specific language governing permissions and limitations
// under the License.
-package lxd
+package common
import (
- "encoding/json"
-
+ garmErrors "github.com/cloudbase/garm-provider-common/errors"
commonParams "github.com/cloudbase/garm-provider-common/params"
- "github.com/pkg/errors"
+ "github.com/cloudbase/garm/runner/providers/util"
)
-type extraSpecs struct {
- DisableUpdates bool `json:"disable_updates"`
- ExtraPackages []string `json:"extra_packages"`
-}
-
-func parseExtraSpecsFromBootstrapParams(bootstrapParams commonParams.BootstrapInstance) (extraSpecs, error) {
- specs := extraSpecs{}
- if bootstrapParams.ExtraSpecs == nil {
- return specs, nil
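+// ValidateResult checks that an instance returned by a provider has a provider ID, an instance name and a valid provider status.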
+func ValidateResult(inst commonParams.ProviderInstance) error {
+ if inst.ProviderID == "" {
+ return garmErrors.NewProviderError("missing provider ID")
}
- if err := json.Unmarshal(bootstrapParams.ExtraSpecs, &specs); err != nil {
- return specs, errors.Wrap(err, "unmarshaling extra specs")
+ if inst.Name == "" {
+ return garmErrors.NewProviderError("missing instance name")
}
- return specs, nil
+
+ if !util.IsValidProviderStatus(inst.Status) {
+ return garmErrors.NewProviderError("invalid status returned (%s)", inst.Status)
+ }
+
+ return nil
}
diff --git a/runner/providers/external/external.go b/runner/providers/external/external.go
index c1bd7141..46e3dd47 100644
--- a/runner/providers/external/external.go
+++ b/runner/providers/external/external.go
@@ -1,229 +1,37 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
package external
import (
"context"
- "encoding/json"
"fmt"
- "log"
- "os/exec"
- "github.com/cloudbase/garm-provider-common/execution"
-
- commonParams "github.com/cloudbase/garm-provider-common/params"
-
- garmErrors "github.com/cloudbase/garm-provider-common/errors"
- garmExec "github.com/cloudbase/garm-provider-common/util/exec"
"github.com/cloudbase/garm/config"
- "github.com/cloudbase/garm/params"
"github.com/cloudbase/garm/runner/common"
-
- "github.com/pkg/errors"
+ v010 "github.com/cloudbase/garm/runner/providers/v0.1.0"
+ v011 "github.com/cloudbase/garm/runner/providers/v0.1.1"
)
-var _ common.Provider = (*external)(nil)
-
+// NewProvider selects the external provider implementation based on the configured interface version.
func NewProvider(ctx context.Context, cfg *config.Provider, controllerID string) (common.Provider, error) {
- if cfg.ProviderType != params.ExternalProvider {
- return nil, garmErrors.NewBadRequestError("invalid provider config")
- }
-
- execPath, err := cfg.External.ExecutablePath()
- if err != nil {
- return nil, errors.Wrap(err, "fetching executable path")
- }
- return &external{
- ctx: ctx,
- controllerID: controllerID,
- cfg: cfg,
- execPath: execPath,
- }, nil
-}
-
-type external struct {
- ctx context.Context
- controllerID string
- cfg *config.Provider
- execPath string
-}
-
-func (e *external) validateResult(inst commonParams.ProviderInstance) error {
- if inst.ProviderID == "" {
- return garmErrors.NewProviderError("missing provider ID")
- }
-
- if inst.Name == "" {
- return garmErrors.NewProviderError("missing instance name")
- }
-
- if inst.OSName == "" || inst.OSArch == "" || inst.OSType == "" {
- // we can still function without this info (I think)
- log.Printf("WARNING: missing OS information")
- }
- if !IsValidProviderStatus(inst.Status) {
- return garmErrors.NewProviderError("invalid status returned (%s)", inst.Status)
- }
-
- return nil
-}
-
-// CreateInstance creates a new compute instance in the provider.
-func (e *external) CreateInstance(ctx context.Context, bootstrapParams commonParams.BootstrapInstance) (commonParams.ProviderInstance, error) {
- asEnv := []string{
- fmt.Sprintf("GARM_COMMAND=%s", execution.CreateInstanceCommand),
- fmt.Sprintf("GARM_CONTROLLER_ID=%s", e.controllerID),
- fmt.Sprintf("GARM_POOL_ID=%s", bootstrapParams.PoolID),
- fmt.Sprintf("GARM_PROVIDER_CONFIG_FILE=%s", e.cfg.External.ConfigFile),
- }
-
- asJs, err := json.Marshal(bootstrapParams)
- if err != nil {
- return commonParams.ProviderInstance{}, errors.Wrap(err, "serializing bootstrap params")
- }
-
- out, err := garmExec.Exec(ctx, e.execPath, asJs, asEnv)
- if err != nil {
- return commonParams.ProviderInstance{}, garmErrors.NewProviderError("provider binary %s returned error: %s", e.execPath, err)
- }
-
- var param commonParams.ProviderInstance
- if err := json.Unmarshal(out, ¶m); err != nil {
- return commonParams.ProviderInstance{}, garmErrors.NewProviderError("failed to decode response from binary: %s", err)
- }
-
- if err := e.validateResult(param); err != nil {
- return commonParams.ProviderInstance{}, garmErrors.NewProviderError("failed to validate result: %s", err)
- }
-
- retAsJs, _ := json.MarshalIndent(param, "", " ")
- log.Printf("provider returned: %s", string(retAsJs))
- return param, nil
-}
-
-// Delete instance will delete the instance in a provider.
-func (e *external) DeleteInstance(ctx context.Context, instance string) error {
- asEnv := []string{
- fmt.Sprintf("GARM_COMMAND=%s", execution.DeleteInstanceCommand),
- fmt.Sprintf("GARM_CONTROLLER_ID=%s", e.controllerID),
- fmt.Sprintf("GARM_INSTANCE_ID=%s", instance),
- fmt.Sprintf("GARM_PROVIDER_CONFIG_FILE=%s", e.cfg.External.ConfigFile),
- }
-
- _, err := garmExec.Exec(ctx, e.execPath, nil, asEnv)
- if err != nil {
- var exitErr *exec.ExitError
- if !errors.As(err, &exitErr) || exitErr.ExitCode() != execution.ExitCodeNotFound {
- return garmErrors.NewProviderError("provider binary %s returned error: %s", e.execPath, err)
- }
-
- }
- return nil
-}
-
-// GetInstance will return details about one instance.
-func (e *external) GetInstance(ctx context.Context, instance string) (commonParams.ProviderInstance, error) {
- asEnv := []string{
- fmt.Sprintf("GARM_COMMAND=%s", execution.GetInstanceCommand),
- fmt.Sprintf("GARM_CONTROLLER_ID=%s", e.controllerID),
- fmt.Sprintf("GARM_INSTANCE_ID=%s", instance),
- fmt.Sprintf("GARM_PROVIDER_CONFIG_FILE=%s", e.cfg.External.ConfigFile),
- }
-
- // TODO(gabriel-samfira): handle error types. Of particular insterest is to
- // know when the error is ErrNotFound.
- out, err := garmExec.Exec(ctx, e.execPath, nil, asEnv)
- if err != nil {
- return commonParams.ProviderInstance{}, garmErrors.NewProviderError("provider binary %s returned error: %s", e.execPath, err)
- }
-
- var param commonParams.ProviderInstance
- if err := json.Unmarshal(out, ¶m); err != nil {
- return commonParams.ProviderInstance{}, garmErrors.NewProviderError("failed to decode response from binary: %s", err)
- }
-
- if err := e.validateResult(param); err != nil {
- return commonParams.ProviderInstance{}, garmErrors.NewProviderError("failed to validate result: %s", err)
- }
-
- return param, nil
-}
-
-// ListInstances will list all instances for a provider.
-func (e *external) ListInstances(ctx context.Context, poolID string) ([]commonParams.ProviderInstance, error) {
- asEnv := []string{
- fmt.Sprintf("GARM_COMMAND=%s", execution.ListInstancesCommand),
- fmt.Sprintf("GARM_CONTROLLER_ID=%s", e.controllerID),
- fmt.Sprintf("GARM_POOL_ID=%s", poolID),
- fmt.Sprintf("GARM_PROVIDER_CONFIG_FILE=%s", e.cfg.External.ConfigFile),
- }
-
- out, err := garmExec.Exec(ctx, e.execPath, nil, asEnv)
- if err != nil {
- return []commonParams.ProviderInstance{}, garmErrors.NewProviderError("provider binary %s returned error: %s", e.execPath, err)
- }
-
- var param []commonParams.ProviderInstance
- if err := json.Unmarshal(out, ¶m); err != nil {
- return []commonParams.ProviderInstance{}, garmErrors.NewProviderError("failed to decode response from binary: %s", err)
- }
-
- ret := make([]commonParams.ProviderInstance, len(param))
- for idx, inst := range param {
- if err := e.validateResult(inst); err != nil {
- return []commonParams.ProviderInstance{}, garmErrors.NewProviderError("failed to validate result: %s", err)
- }
- ret[idx] = inst
- }
- return ret, nil
-}
-
-// RemoveAllInstances will remove all instances created by this provider.
-func (e *external) RemoveAllInstances(ctx context.Context) error {
- asEnv := []string{
- fmt.Sprintf("GARM_COMMAND=%s", execution.RemoveAllInstancesCommand),
- fmt.Sprintf("GARM_CONTROLLER_ID=%s", e.controllerID),
- fmt.Sprintf("GARM_PROVIDER_CONFIG_FILE=%s", e.cfg.External.ConfigFile),
- }
- _, err := garmExec.Exec(ctx, e.execPath, nil, asEnv)
- if err != nil {
- return garmErrors.NewProviderError("provider binary %s returned error: %s", e.execPath, err)
- }
- return nil
-}
-
-// Stop shuts down the instance.
-func (e *external) Stop(ctx context.Context, instance string, force bool) error {
- asEnv := []string{
- fmt.Sprintf("GARM_COMMAND=%s", execution.StopInstanceCommand),
- fmt.Sprintf("GARM_CONTROLLER_ID=%s", e.controllerID),
- fmt.Sprintf("GARM_INSTANCE_ID=%s", instance),
- fmt.Sprintf("GARM_PROVIDER_CONFIG_FILE=%s", e.cfg.External.ConfigFile),
- }
- _, err := garmExec.Exec(ctx, e.execPath, nil, asEnv)
- if err != nil {
- return garmErrors.NewProviderError("provider binary %s returned error: %s", e.execPath, err)
- }
- return nil
-}
-
-// Start boots up an instance.
-func (e *external) Start(ctx context.Context, instance string) error {
- asEnv := []string{
- fmt.Sprintf("GARM_COMMAND=%s", execution.StartInstanceCommand),
- fmt.Sprintf("GARM_CONTROLLER_ID=%s", e.controllerID),
- fmt.Sprintf("GARM_INSTANCE_ID=%s", instance),
- fmt.Sprintf("GARM_PROVIDER_CONFIG_FILE=%s", e.cfg.External.ConfigFile),
- }
- _, err := garmExec.Exec(ctx, e.execPath, nil, asEnv)
- if err != nil {
- return garmErrors.NewProviderError("provider binary %s returned error: %s", e.execPath, err)
- }
- return nil
-}
-
-func (e *external) AsParams() params.Provider {
- return params.Provider{
- Name: e.cfg.Name,
- Description: e.cfg.Description,
- ProviderType: e.cfg.ProviderType,
+ switch cfg.External.InterfaceVersion {
+ case common.Version010, "":
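+ // An empty interface version defaults to v0.1.0, which keeps existing provider configs working.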
+ return v010.NewProvider(ctx, cfg, controllerID)
+ case common.Version011:
+ return v011.NewProvider(ctx, cfg, controllerID)
+ default:
+ return nil, fmt.Errorf("unsupported interface version: %s", cfg.External.InterfaceVersion)
}
}
diff --git a/runner/providers/lxd/images.go b/runner/providers/lxd/images.go
deleted file mode 100644
index faf88b98..00000000
--- a/runner/providers/lxd/images.go
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright 2022 Cloudbase Solutions SRL
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package lxd
-
-import (
- "fmt"
- "strings"
-
- runnerErrors "github.com/cloudbase/garm-provider-common/errors"
- "github.com/cloudbase/garm/config"
-
- lxd "github.com/lxc/lxd/client"
- "github.com/lxc/lxd/shared/api"
- "github.com/pkg/errors"
-)
-
-type image struct {
- remotes map[string]config.LXDImageRemote
-}
-
-// parseImageName parses the image name that comes in from the config and returns a
-// remote. If no remote is configured with the given name, an error is returned.
-func (i *image) parseImageName(imageName string) (config.LXDImageRemote, string, error) {
- if !strings.Contains(imageName, ":") {
- return config.LXDImageRemote{}, "", fmt.Errorf("image does not include a remote")
- }
-
- details := strings.SplitN(imageName, ":", 2)
- for remoteName, val := range i.remotes {
- if remoteName == details[0] {
- return val, details[1], nil
- }
- }
- return config.LXDImageRemote{}, "", runnerErrors.ErrNotFound
-}
-
-func (i *image) getLocalImageByAlias(imageName string, imageType config.LXDImageType, arch string, cli lxd.InstanceServer) (*api.Image, error) {
- aliases, err := cli.GetImageAliasArchitectures(imageType.String(), imageName)
- if err != nil {
- return nil, errors.Wrapf(err, "resolving alias: %s", imageName)
- }
-
- alias, ok := aliases[arch]
- if !ok {
- return nil, fmt.Errorf("no image found for arch %s and image type %s with name %s", arch, imageType, imageName)
- }
-
- image, _, err := cli.GetImage(alias.Target)
- if err != nil {
- return nil, errors.Wrap(err, "fetching image details")
- }
- return image, nil
-}
-
-func (i *image) getInstanceSource(imageName string, imageType config.LXDImageType, arch string, cli lxd.InstanceServer) (api.InstanceSource, error) {
- instanceSource := api.InstanceSource{
- Type: "image",
- }
- if !strings.Contains(imageName, ":") {
- // A remote was not specified, try to find an image using the imageName as
- // an alias.
- imageDetails, err := i.getLocalImageByAlias(imageName, imageType, arch, cli)
- if err != nil {
- return api.InstanceSource{}, errors.Wrap(err, "fetching image")
- }
- instanceSource.Fingerprint = imageDetails.Fingerprint
- } else {
- remote, parsedName, err := i.parseImageName(imageName)
- if err != nil {
- return api.InstanceSource{}, errors.Wrap(err, "parsing image name")
- }
- instanceSource.Alias = parsedName
- instanceSource.Server = remote.Address
- instanceSource.Protocol = string(remote.Protocol)
- }
- return instanceSource, nil
-}
diff --git a/runner/providers/lxd/lxd.go b/runner/providers/lxd/lxd.go
deleted file mode 100644
index 1e02f6eb..00000000
--- a/runner/providers/lxd/lxd.go
+++ /dev/null
@@ -1,523 +0,0 @@
-// Copyright 2022 Cloudbase Solutions SRL
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package lxd
-
-import (
- "context"
- "fmt"
- "log"
- "sync"
- "time"
-
- runnerErrors "github.com/cloudbase/garm-provider-common/errors"
- "github.com/cloudbase/garm/config"
- "github.com/cloudbase/garm/params"
- "github.com/cloudbase/garm/runner/common"
-
- "github.com/google/go-github/v53/github"
- lxd "github.com/lxc/lxd/client"
- "github.com/lxc/lxd/shared/api"
- "github.com/pkg/errors"
-
- "github.com/cloudbase/garm-provider-common/cloudconfig"
- commonParams "github.com/cloudbase/garm-provider-common/params"
-)
-
-var _ common.Provider = &LXD{}
-
-const (
- // We look for this key in the config of the instances to determine if they are
- // created by us or not.
- controllerIDKeyName = "user.runner-controller-id"
- poolIDKey = "user.runner-pool-id"
-
- // osTypeKeyName is the key we use in the instance config to indicate the OS
- // platform a runner is supposed to have. This value is defined in the pool and
- // passed into the provider as bootstrap params.
- osTypeKeyName = "user.os-type"
-
- // osArchKeyNAme is the key we use in the instance config to indicate the OS
- // architecture a runner is supposed to have. This value is defined in the pool and
- // passed into the provider as bootstrap params.
- osArchKeyNAme = "user.os-arch"
-)
-
-var (
- // lxdToGithubArchMap translates LXD architectures to Github tools architectures.
- // TODO: move this in a separate package. This will most likely be used
- // by any other provider.
- lxdToGithubArchMap map[string]string = map[string]string{
- "x86_64": "x64",
- "amd64": "x64",
- "armv7l": "arm",
- "aarch64": "arm64",
- "x64": "x64",
- "arm": "arm",
- "arm64": "arm64",
- }
-
- configToLXDArchMap map[commonParams.OSArch]string = map[commonParams.OSArch]string{
- commonParams.Amd64: "x86_64",
- commonParams.Arm64: "aarch64",
- commonParams.Arm: "armv7l",
- }
-
- lxdToConfigArch map[string]commonParams.OSArch = map[string]commonParams.OSArch{
- "x86_64": commonParams.Amd64,
- "aarch64": commonParams.Arm64,
- "armv7l": commonParams.Arm,
- }
-)
-
-const (
- DefaultProjectDescription = "This project was created automatically by garm to be used for github ephemeral action runners."
- DefaultProjectName = "garm-project"
-)
-
-func NewProvider(ctx context.Context, cfg *config.Provider, controllerID string) (common.Provider, error) {
- if err := cfg.Validate(); err != nil {
- return nil, errors.Wrap(err, "validating provider config")
- }
-
- if cfg.ProviderType != params.LXDProvider {
- return nil, fmt.Errorf("invalid provider type %s, expected %s", cfg.ProviderType, params.LXDProvider)
- }
-
- provider := &LXD{
- ctx: ctx,
- cfg: cfg,
- controllerID: controllerID,
- imageManager: &image{
- remotes: cfg.LXD.ImageRemotes,
- },
- }
-
- return provider, nil
-}
-
-type LXD struct {
- // cfg is the provider config for this provider.
- cfg *config.Provider
- // ctx is the context.
- ctx context.Context
- // cli is the LXD client.
- cli lxd.InstanceServer
- // imageManager downloads images from remotes
- imageManager *image
- // controllerID is the ID of this controller
- controllerID string
-
- mux sync.Mutex
-}
-
-func (l *LXD) getCLI() (lxd.InstanceServer, error) {
- l.mux.Lock()
- defer l.mux.Unlock()
-
- if l.cli != nil {
- return l.cli, nil
- }
- cli, err := getClientFromConfig(l.ctx, &l.cfg.LXD)
- if err != nil {
- return nil, errors.Wrap(err, "creating LXD client")
- }
-
- _, _, err = cli.GetProject(projectName(l.cfg.LXD))
- if err != nil {
- return nil, errors.Wrapf(err, "fetching project name: %s", projectName(l.cfg.LXD))
- }
- cli = cli.UseProject(projectName(l.cfg.LXD))
- l.cli = cli
-
- return cli, nil
-}
-
-func (l *LXD) getProfiles(flavor string) ([]string, error) {
- ret := []string{}
- if l.cfg.LXD.IncludeDefaultProfile {
- ret = append(ret, "default")
- }
-
- set := map[string]struct{}{}
-
- cli, err := l.getCLI()
- if err != nil {
- return nil, errors.Wrap(err, "fetching client")
- }
-
- profiles, err := cli.GetProfileNames()
- if err != nil {
- return nil, errors.Wrap(err, "fetching profile names")
- }
- for _, profile := range profiles {
- set[profile] = struct{}{}
- }
-
- if _, ok := set[flavor]; !ok {
- return nil, errors.Wrapf(runnerErrors.ErrNotFound, "looking for profile %s", flavor)
- }
-
- ret = append(ret, flavor)
- return ret, nil
-}
-
-func (l *LXD) getTools(tools []*github.RunnerApplicationDownload, osType commonParams.OSType, architecture string) (github.RunnerApplicationDownload, error) {
- // Validate image OS. Linux only for now.
- switch osType {
- case commonParams.Linux:
- default:
- return github.RunnerApplicationDownload{}, fmt.Errorf("this provider does not support OS type: %s", osType)
- }
-
- // Find tools for OS/Arch.
- for _, tool := range tools {
- if tool == nil {
- continue
- }
- if tool.OS == nil || tool.Architecture == nil {
- continue
- }
-
- // fmt.Println(*tool.Architecture, *tool.OS)
- // fmt.Printf("image arch: %s --> osType: %s\n", image.Architecture, string(osType))
- if *tool.Architecture == architecture && *tool.OS == string(osType) {
- return *tool, nil
- }
-
- arch, ok := lxdToGithubArchMap[architecture]
- if ok && arch == *tool.Architecture && *tool.OS == string(osType) {
- return *tool, nil
- }
- }
- return github.RunnerApplicationDownload{}, fmt.Errorf("failed to find tools for OS %s and arch %s", osType, architecture)
-}
-
-// sadly, the security.secureboot flag is a string encoded boolean.
-func (l *LXD) secureBootEnabled() string {
- if l.cfg.LXD.SecureBoot {
- return "true"
- }
- return "false"
-}
-
-func (l *LXD) getCreateInstanceArgs(bootstrapParams commonParams.BootstrapInstance, specs extraSpecs) (api.InstancesPost, error) {
- if bootstrapParams.Name == "" {
- return api.InstancesPost{}, runnerErrors.NewBadRequestError("missing name")
- }
- profiles, err := l.getProfiles(bootstrapParams.Flavor)
- if err != nil {
- return api.InstancesPost{}, errors.Wrap(err, "fetching profiles")
- }
-
- arch, err := resolveArchitecture(bootstrapParams.OSArch)
- if err != nil {
- return api.InstancesPost{}, errors.Wrap(err, "fetching archictecture")
- }
-
- instanceType := l.cfg.LXD.GetInstanceType()
- instanceSource, err := l.imageManager.getInstanceSource(bootstrapParams.Image, instanceType, arch, l.cli)
- if err != nil {
- return api.InstancesPost{}, errors.Wrap(err, "getting instance source")
- }
-
- tools, err := l.getTools(bootstrapParams.Tools, bootstrapParams.OSType, arch)
- if err != nil {
- return api.InstancesPost{}, errors.Wrap(err, "getting tools")
- }
-
- bootstrapParams.UserDataOptions.DisableUpdatesOnBoot = specs.DisableUpdates
- bootstrapParams.UserDataOptions.ExtraPackages = specs.ExtraPackages
- cloudCfg, err := cloudconfig.GetCloudConfig(bootstrapParams, tools, bootstrapParams.Name)
- if err != nil {
- return api.InstancesPost{}, errors.Wrap(err, "generating cloud-config")
- }
-
- configMap := map[string]string{
- "user.user-data": cloudCfg,
- osTypeKeyName: string(bootstrapParams.OSType),
- osArchKeyNAme: string(bootstrapParams.OSArch),
- controllerIDKeyName: l.controllerID,
- poolIDKey: bootstrapParams.PoolID,
- }
-
- if instanceType == config.LXDImageVirtualMachine {
- configMap["security.secureboot"] = l.secureBootEnabled()
- }
-
- args := api.InstancesPost{
- InstancePut: api.InstancePut{
- Architecture: arch,
- Profiles: profiles,
- Description: "Github runner provisioned by garm",
- Config: configMap,
- },
- Source: instanceSource,
- Name: bootstrapParams.Name,
- Type: api.InstanceType(instanceType),
- }
- return args, nil
-}
-
-func (l *LXD) AsParams() params.Provider {
- return params.Provider{
- Name: l.cfg.Name,
- ProviderType: l.cfg.ProviderType,
- Description: l.cfg.Description,
- }
-}
-
-func (l *LXD) launchInstance(createArgs api.InstancesPost) error {
- cli, err := l.getCLI()
- if err != nil {
- return errors.Wrap(err, "fetching client")
- }
- // Get LXD to create the instance (background operation)
- op, err := cli.CreateInstance(createArgs)
- if err != nil {
- return errors.Wrap(err, "creating instance")
- }
-
- // Wait for the operation to complete
- err = op.Wait()
- if err != nil {
- return errors.Wrap(err, "waiting for instance creation")
- }
-
- // Get LXD to start the instance (background operation)
- reqState := api.InstanceStatePut{
- Action: "start",
- Timeout: -1,
- }
-
- op, err = cli.UpdateInstanceState(createArgs.Name, reqState, "")
- if err != nil {
- return errors.Wrap(err, "starting instance")
- }
-
- // Wait for the operation to complete
- err = op.Wait()
- if err != nil {
- return errors.Wrap(err, "waiting for instance to start")
- }
- return nil
-}
-
-// CreateInstance creates a new compute instance in the provider.
-func (l *LXD) CreateInstance(ctx context.Context, bootstrapParams commonParams.BootstrapInstance) (commonParams.ProviderInstance, error) {
- extraSpecs, err := parseExtraSpecsFromBootstrapParams(bootstrapParams)
- if err != nil {
- return commonParams.ProviderInstance{}, errors.Wrap(err, "parsing extra specs")
- }
- args, err := l.getCreateInstanceArgs(bootstrapParams, extraSpecs)
- if err != nil {
- return commonParams.ProviderInstance{}, errors.Wrap(err, "fetching create args")
- }
-
- if err := l.launchInstance(args); err != nil {
- return commonParams.ProviderInstance{}, errors.Wrap(err, "creating instance")
- }
-
- ret, err := l.waitInstanceHasIP(ctx, args.Name)
- if err != nil {
- return commonParams.ProviderInstance{}, errors.Wrap(err, "fetching instance")
- }
-
- return ret, nil
-}
-
-// GetInstance will return details about one instance.
-func (l *LXD) GetInstance(ctx context.Context, instanceName string) (commonParams.ProviderInstance, error) {
- cli, err := l.getCLI()
- if err != nil {
- return commonParams.ProviderInstance{}, errors.Wrap(err, "fetching client")
- }
- instance, _, err := cli.GetInstanceFull(instanceName)
- if err != nil {
- if isNotFoundError(err) {
- return commonParams.ProviderInstance{}, errors.Wrapf(runnerErrors.ErrNotFound, "fetching instance: %q", err)
- }
- return commonParams.ProviderInstance{}, errors.Wrap(err, "fetching instance")
- }
-
- return lxdInstanceToAPIInstance(instance), nil
-}
-
-// Delete instance will delete the instance in a provider.
-func (l *LXD) DeleteInstance(ctx context.Context, instance string) error {
- cli, err := l.getCLI()
- if err != nil {
- return errors.Wrap(err, "fetching client")
- }
-
- if err := l.setState(instance, "stop", true); err != nil {
- if isNotFoundError(err) {
- log.Printf("received not found error when stopping instance %s", instance)
- return nil
- }
- // I am not proud of this, but the drivers.ErrInstanceIsStopped from LXD pulls in
- // a ton of CGO, linux specific dependencies, that don't make sense having
- // in garm.
- if !(errors.Cause(err).Error() == errInstanceIsStopped.Error()) {
- return errors.Wrap(err, "stopping instance")
- }
- }
-
- opResponse := make(chan struct {
- op lxd.Operation
- err error
- })
- var op lxd.Operation
- go func() {
- op, err := cli.DeleteInstance(instance)
- opResponse <- struct {
- op lxd.Operation
- err error
- }{op: op, err: err}
- }()
-
- select {
- case resp := <-opResponse:
- if resp.err != nil {
- if isNotFoundError(resp.err) {
- log.Printf("received not found error when deleting instance %s", instance)
- return nil
- }
- return errors.Wrap(resp.err, "removing instance")
- }
- op = resp.op
- case <-time.After(time.Second * 60):
- return errors.Wrapf(runnerErrors.ErrTimeout, "removing instance %s", instance)
- }
-
- opTimeout, cancel := context.WithTimeout(context.Background(), time.Second*60)
- defer cancel()
- err = op.WaitContext(opTimeout)
- if err != nil {
- if isNotFoundError(err) {
- log.Printf("received not found error when waiting for instance deletion %s", instance)
- return nil
- }
- return errors.Wrap(err, "waiting for instance deletion")
- }
- return nil
-}
-
-type listResponse struct {
- instances []api.InstanceFull
- err error
-}
-
-// ListInstances will list all instances for a provider.
-func (l *LXD) ListInstances(ctx context.Context, poolID string) ([]commonParams.ProviderInstance, error) {
- cli, err := l.getCLI()
- if err != nil {
- return []commonParams.ProviderInstance{}, errors.Wrap(err, "fetching client")
- }
-
- result := make(chan listResponse, 1)
-
- go func() {
- // TODO(gabriel-samfira): if this blocks indefinitely, we will leak a goroutine.
- // Convert the internal provider to an external one. Running the provider as an
- // external process will allow us to not care if a goroutine leaks. Once a timeout
- // is reached, the provider can just exit with an error. Something we can't do with
- // internal providers.
- instances, err := cli.GetInstancesFull(api.InstanceTypeAny)
- result <- listResponse{
- instances: instances,
- err: err,
- }
- }()
-
- var instances []api.InstanceFull
- select {
- case res := <-result:
- if res.err != nil {
- return []commonParams.ProviderInstance{}, errors.Wrap(res.err, "fetching instances")
- }
- instances = res.instances
- case <-time.After(time.Second * 60):
- return []commonParams.ProviderInstance{}, errors.Wrap(runnerErrors.ErrTimeout, "fetching instances from provider")
- }
-
- ret := []commonParams.ProviderInstance{}
-
- for _, instance := range instances {
- if id, ok := instance.ExpandedConfig[controllerIDKeyName]; ok && id == l.controllerID {
- if poolID != "" {
- id := instance.ExpandedConfig[poolIDKey]
- if id != poolID {
- // Pool ID was specified. Filter out instances belonging to other pools.
- continue
- }
- }
- ret = append(ret, lxdInstanceToAPIInstance(&instance))
- }
- }
-
- return ret, nil
-}
-
-// RemoveAllInstances will remove all instances created by this provider.
-func (l *LXD) RemoveAllInstances(ctx context.Context) error {
- instances, err := l.ListInstances(ctx, "")
- if err != nil {
- return errors.Wrap(err, "fetching instance list")
- }
-
- for _, instance := range instances {
- // TODO: remove in parallel
- if err := l.DeleteInstance(ctx, instance.Name); err != nil {
- return errors.Wrapf(err, "removing instance %s", instance.Name)
- }
- }
-
- return nil
-}
-
-func (l *LXD) setState(instance, state string, force bool) error {
- reqState := api.InstanceStatePut{
- Action: state,
- Timeout: -1,
- Force: force,
- }
-
- cli, err := l.getCLI()
- if err != nil {
- return errors.Wrap(err, "fetching client")
- }
-
- op, err := cli.UpdateInstanceState(instance, reqState, "")
- if err != nil {
- return errors.Wrapf(err, "setting state to %s", state)
- }
- ctxTimeout, cancel := context.WithTimeout(context.Background(), time.Second*60)
- defer cancel()
- err = op.WaitContext(ctxTimeout)
- if err != nil {
- return errors.Wrapf(err, "waiting for instance to transition to state %s", state)
- }
- return nil
-}
-
-// Stop shuts down the instance.
-func (l *LXD) Stop(ctx context.Context, instance string, force bool) error {
- return l.setState(instance, "stop", force)
-}
-
-// Start boots up an instance.
-func (l *LXD) Start(ctx context.Context, instance string) error {
- return l.setState(instance, "start", false)
-}
diff --git a/runner/providers/lxd/util.go b/runner/providers/lxd/util.go
deleted file mode 100644
index 2168bcec..00000000
--- a/runner/providers/lxd/util.go
+++ /dev/null
@@ -1,234 +0,0 @@
-// Copyright 2022 Cloudbase Solutions SRL
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package lxd
-
-import (
- "context"
- "database/sql"
- "fmt"
- "log"
- "net"
- "net/http"
- "os"
- "strings"
- "time"
-
- commonParams "github.com/cloudbase/garm-provider-common/params"
-
- "github.com/cloudbase/garm-provider-common/util"
- "github.com/cloudbase/garm/config"
-
- "github.com/juju/clock"
- "github.com/juju/retry"
- lxd "github.com/lxc/lxd/client"
- "github.com/lxc/lxd/shared/api"
- "github.com/pkg/errors"
-)
-
-var (
- //lint:ignore ST1005 imported error from lxd
- errInstanceIsStopped error = fmt.Errorf("The instance is already stopped")
-)
-
-var httpResponseErrors = map[int][]error{
- http.StatusNotFound: {os.ErrNotExist, sql.ErrNoRows},
-}
-
-// isNotFoundError returns true if the error is considered a Not Found error.
-func isNotFoundError(err error) bool {
- if api.StatusErrorCheck(err, http.StatusNotFound) {
- return true
- }
-
- for _, checkErr := range httpResponseErrors[http.StatusNotFound] {
- if errors.Is(err, checkErr) {
- return true
- }
- }
-
- return false
-}
-
-func lxdInstanceToAPIInstance(instance *api.InstanceFull) commonParams.ProviderInstance {
- lxdOS, ok := instance.ExpandedConfig["image.os"]
- if !ok {
- log.Printf("failed to find OS in instance config")
- }
-
- osType, err := util.OSToOSType(lxdOS)
- if err != nil {
- log.Printf("failed to find OS type for OS %s", lxdOS)
- }
-
- if osType == "" {
- osTypeFromTag, ok := instance.ExpandedConfig[osTypeKeyName]
- if !ok {
- log.Printf("failed to find OS type in fallback location")
- }
- osType = commonParams.OSType(osTypeFromTag)
- }
-
- osRelease, ok := instance.ExpandedConfig["image.release"]
- if !ok {
- log.Printf("failed to find OS release instance config")
- }
-
- state := instance.State
- addresses := []commonParams.Address{}
- if state.Network != nil {
- for _, details := range state.Network {
- for _, addr := range details.Addresses {
- if addr.Scope != "global" {
- continue
- }
- addresses = append(addresses, commonParams.Address{
- Address: addr.Address,
- Type: commonParams.PublicAddress,
- })
- }
- }
- }
-
- instanceArch, ok := lxdToConfigArch[instance.Architecture]
- if !ok {
- log.Printf("failed to find OS architecture")
- }
-
- return commonParams.ProviderInstance{
- OSArch: instanceArch,
- ProviderID: instance.Name,
- Name: instance.Name,
- OSType: osType,
- OSName: strings.ToLower(lxdOS),
- OSVersion: osRelease,
- Addresses: addresses,
- Status: lxdStatusToProviderStatus(state.Status),
- }
-}
-
-func lxdStatusToProviderStatus(status string) commonParams.InstanceStatus {
- switch status {
- case "Running":
- return commonParams.InstanceRunning
- case "Stopped":
- return commonParams.InstanceStopped
- default:
- return commonParams.InstanceStatusUnknown
- }
-}
-
-func getClientFromConfig(ctx context.Context, cfg *config.LXD) (cli lxd.InstanceServer, err error) {
- if cfg.UnixSocket != "" {
- return lxd.ConnectLXDUnixWithContext(ctx, cfg.UnixSocket, nil)
- }
-
- var srvCrtContents, tlsCAContents, clientCertContents, clientKeyContents []byte
-
- if cfg.TLSServerCert != "" {
- srvCrtContents, err = os.ReadFile(cfg.TLSServerCert)
- if err != nil {
- return nil, errors.Wrap(err, "reading TLSServerCert")
- }
- }
-
- if cfg.TLSCA != "" {
- tlsCAContents, err = os.ReadFile(cfg.TLSCA)
- if err != nil {
- return nil, errors.Wrap(err, "reading TLSCA")
- }
- }
-
- if cfg.ClientCertificate != "" {
- clientCertContents, err = os.ReadFile(cfg.ClientCertificate)
- if err != nil {
- return nil, errors.Wrap(err, "reading ClientCertificate")
- }
- }
-
- if cfg.ClientKey != "" {
- clientKeyContents, err = os.ReadFile(cfg.ClientKey)
- if err != nil {
- return nil, errors.Wrap(err, "reading ClientKey")
- }
- }
-
- connectArgs := lxd.ConnectionArgs{
- TLSServerCert: string(srvCrtContents),
- TLSCA: string(tlsCAContents),
- TLSClientCert: string(clientCertContents),
- TLSClientKey: string(clientKeyContents),
- }
-
- lxdCLI, err := lxd.ConnectLXD(cfg.URL, &connectArgs)
- if err != nil {
- return nil, errors.Wrap(err, "connecting to LXD")
- }
-
- return lxdCLI, nil
-}
-
-func projectName(cfg config.LXD) string {
- if cfg.ProjectName != "" {
- return cfg.ProjectName
- }
- return DefaultProjectName
-}
-
-func resolveArchitecture(osArch commonParams.OSArch) (string, error) {
- if string(osArch) == "" {
- return configToLXDArchMap[commonParams.Amd64], nil
- }
- arch, ok := configToLXDArchMap[osArch]
- if !ok {
- return "", fmt.Errorf("architecture %s is not supported", osArch)
- }
- return arch, nil
-}
-
-// waitDeviceActive is a function capable of figuring out when a Equinix Metal
-// device is active
-func (l *LXD) waitInstanceHasIP(ctx context.Context, instanceName string) (commonParams.ProviderInstance, error) {
- var p commonParams.ProviderInstance
- var errIPNotFound error = fmt.Errorf("ip not found")
- err := retry.Call(retry.CallArgs{
- Func: func() error {
- var err error
- p, err = l.GetInstance(ctx, instanceName)
- if err != nil {
- return errors.Wrap(err, "fetching instance")
- }
- for _, addr := range p.Addresses {
- ip := net.ParseIP(addr.Address)
- if ip == nil {
- continue
- }
- if ip.To4() == nil {
- continue
- }
- return nil
- }
- return errIPNotFound
- },
- Attempts: 20,
- Delay: 5 * time.Second,
- Clock: clock.WallClock,
- })
-
- if err != nil && err != errIPNotFound {
- return commonParams.ProviderInstance{}, err
- }
-
- return p, nil
-}
diff --git a/runner/providers/providers.go b/runner/providers/providers.go
index 2e1f0d1b..ada11729 100644
--- a/runner/providers/providers.go
+++ b/runner/providers/providers.go
@@ -16,15 +16,13 @@ package providers
import (
"context"
- "log"
+ "fmt"
+ "log/slog"
"github.com/cloudbase/garm/config"
"github.com/cloudbase/garm/params"
"github.com/cloudbase/garm/runner/common"
"github.com/cloudbase/garm/runner/providers/external"
- "github.com/cloudbase/garm/runner/providers/lxd"
-
- "github.com/pkg/errors"
)
// LoadProvidersFromConfig loads all providers from the config and populates
@@ -32,22 +30,19 @@ import (
func LoadProvidersFromConfig(ctx context.Context, cfg config.Config, controllerID string) (map[string]common.Provider, error) {
providers := make(map[string]common.Provider, len(cfg.Providers))
for _, providerCfg := range cfg.Providers {
- log.Printf("Loading provider %s", providerCfg.Name)
+ slog.InfoContext(
+ ctx, "Loading provider",
+ "provider", providerCfg.Name)
switch providerCfg.ProviderType {
- case params.LXDProvider:
- conf := providerCfg
- provider, err := lxd.NewProvider(ctx, &conf, controllerID)
- if err != nil {
- return nil, errors.Wrap(err, "creating provider")
- }
- providers[providerCfg.Name] = provider
case params.ExternalProvider:
conf := providerCfg
provider, err := external.NewProvider(ctx, &conf, controllerID)
if err != nil {
- return nil, errors.Wrap(err, "creating provider")
+ return nil, fmt.Errorf("error creating provider: %w", err)
}
providers[providerCfg.Name] = provider
+ default:
+ return nil, fmt.Errorf("unknown provider type %s", providerCfg.ProviderType)
}
}
return providers, nil
diff --git a/runner/providers/external/util.go b/runner/providers/util/util.go
similarity index 52%
rename from runner/providers/external/util.go
rename to runner/providers/util/util.go
index 460714e9..fb3c12bd 100644
--- a/runner/providers/external/util.go
+++ b/runner/providers/util/util.go
@@ -1,4 +1,18 @@
-package external
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package util
import (
commonParams "github.com/cloudbase/garm-provider-common/params"
diff --git a/runner/providers/v0.1.0/external.go b/runner/providers/v0.1.0/external.go
new file mode 100644
index 00000000..bb96f4d7
--- /dev/null
+++ b/runner/providers/v0.1.0/external.go
@@ -0,0 +1,339 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package v010
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "log/slog"
+ "os/exec"
+
+ garmErrors "github.com/cloudbase/garm-provider-common/errors"
+ commonExecution "github.com/cloudbase/garm-provider-common/execution/common"
+ commonParams "github.com/cloudbase/garm-provider-common/params"
+ garmExec "github.com/cloudbase/garm-provider-common/util/exec"
+ "github.com/cloudbase/garm/config"
+ "github.com/cloudbase/garm/metrics"
+ "github.com/cloudbase/garm/params"
+ "github.com/cloudbase/garm/runner/common"
+ commonExternal "github.com/cloudbase/garm/runner/providers/common"
+)
+
+var _ common.Provider = (*external)(nil)
+
+// NewProvider creates a legacy external provider.
+func NewProvider(ctx context.Context, cfg *config.Provider, controllerID string) (common.Provider, error) {
+ if cfg.ProviderType != params.ExternalProvider {
+ return nil, garmErrors.NewBadRequestError("invalid provider config")
+ }
+
+ execPath, err := cfg.External.ExecutablePath()
+ if err != nil {
+ return nil, fmt.Errorf("error fetching executable path: %w", err)
+ }
+
+ // Set GARM_INTERFACE_VERSION to the version of the interface that the external
+ // provider implements. This is used to ensure compatibility between the external
+ // provider and garm.
+
+ envVars := cfg.External.GetEnvironmentVariables()
+ envVars = append(envVars, fmt.Sprintf("GARM_INTERFACE_VERSION=%s", common.Version010))
+
+ return &external{
+ ctx: ctx,
+ controllerID: controllerID,
+ cfg: cfg,
+ execPath: execPath,
+ environmentVariables: envVars,
+ }, nil
+}
+
+type external struct {
+ ctx context.Context
+ controllerID string
+ cfg *config.Provider
+ execPath string
+ environmentVariables []string
+}
+
+// CreateInstance creates a new compute instance in the provider.
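+// The trailing common.CreateInstanceParams argument exists to satisfy the common.Provider interface and is ignored by this v0.1.0 implementation.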
+func (e *external) CreateInstance(ctx context.Context, bootstrapParams commonParams.BootstrapInstance, _ common.CreateInstanceParams) (commonParams.ProviderInstance, error) {
+ asEnv := []string{
+ fmt.Sprintf("GARM_COMMAND=%s", commonExecution.CreateInstanceCommand),
+ fmt.Sprintf("GARM_CONTROLLER_ID=%s", e.controllerID),
+ fmt.Sprintf("GARM_POOL_ID=%s", bootstrapParams.PoolID),
+ fmt.Sprintf("GARM_PROVIDER_CONFIG_FILE=%s", e.cfg.External.ConfigFile),
+ }
+ asEnv = append(asEnv, e.environmentVariables...)
+
+ asJs, err := json.Marshal(bootstrapParams)
+ if err != nil {
+ return commonParams.ProviderInstance{}, fmt.Errorf("error serializing bootstrap params: %w", err)
+ }
+
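+ // Count the attempted operation up front; each failure path below also increments the failed-operation counter.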
+ metrics.InstanceOperationCount.WithLabelValues(
+ "CreateInstance", // label: operation
+ e.cfg.Name, // label: provider
+ ).Inc()
+
+ out, err := garmExec.Exec(ctx, e.execPath, asJs, asEnv)
+ if err != nil {
+ metrics.InstanceOperationFailedCount.WithLabelValues(
+ "CreateInstance", // label: operation
+ e.cfg.Name, // label: provider
+ ).Inc()
+ return commonParams.ProviderInstance{}, garmErrors.NewProviderError("provider binary %s returned error: %s", e.execPath, err)
+ }
+
+ var param commonParams.ProviderInstance
+ if err := json.Unmarshal(out, ¶m); err != nil {
+ metrics.InstanceOperationFailedCount.WithLabelValues(
+ "CreateInstance", // label: operation
+ e.cfg.Name, // label: provider
+ ).Inc()
+ return commonParams.ProviderInstance{}, garmErrors.NewProviderError("failed to decode response from binary: %s", err)
+ }
+
+ if err := commonExternal.ValidateResult(param); err != nil {
+ metrics.InstanceOperationFailedCount.WithLabelValues(
+ "CreateInstance", // label: operation
+ e.cfg.Name, // label: provider
+ ).Inc()
+ return commonParams.ProviderInstance{}, garmErrors.NewProviderError("failed to validate result: %s", err)
+ }
+
+ retAsJs, _ := json.MarshalIndent(param, "", " ")
+ slog.DebugContext(
+ ctx, "provider returned",
+ "output", string(retAsJs))
+ return param, nil
+}
+
+// Delete instance will delete the instance in a provider.
+func (e *external) DeleteInstance(ctx context.Context, instance string, _ common.DeleteInstanceParams) error {
+ asEnv := []string{
+ fmt.Sprintf("GARM_COMMAND=%s", commonExecution.DeleteInstanceCommand),
+ fmt.Sprintf("GARM_CONTROLLER_ID=%s", e.controllerID),
+ fmt.Sprintf("GARM_INSTANCE_ID=%s", instance),
+ fmt.Sprintf("GARM_PROVIDER_CONFIG_FILE=%s", e.cfg.External.ConfigFile),
+ }
+ asEnv = append(asEnv, e.environmentVariables...)
+
+ metrics.InstanceOperationCount.WithLabelValues(
+ "DeleteInstance", // label: operation
+ e.cfg.Name, // label: provider
+ ).Inc()
+ _, err := garmExec.Exec(ctx, e.execPath, nil, asEnv)
+ if err != nil {
+ var exitErr *exec.ExitError
+ if !errors.As(err, &exitErr) || exitErr.ExitCode() != commonExecution.ExitCodeNotFound {
+ metrics.InstanceOperationFailedCount.WithLabelValues(
+ "DeleteInstance", // label: operation
+ e.cfg.Name, // label: provider
+ ).Inc()
+ return garmErrors.NewProviderError("provider binary %s returned error: %s", e.execPath, err)
+ }
+ }
+ return nil
+}
+
+// GetInstance will return details about one instance.
+func (e *external) GetInstance(ctx context.Context, instance string, _ common.GetInstanceParams) (commonParams.ProviderInstance, error) {
+ asEnv := []string{
+ fmt.Sprintf("GARM_COMMAND=%s", commonExecution.GetInstanceCommand),
+ fmt.Sprintf("GARM_CONTROLLER_ID=%s", e.controllerID),
+ fmt.Sprintf("GARM_INSTANCE_ID=%s", instance),
+ fmt.Sprintf("GARM_PROVIDER_CONFIG_FILE=%s", e.cfg.External.ConfigFile),
+ }
+ asEnv = append(asEnv, e.environmentVariables...)
+
+ // nolint:golangci-lint,godox
+ // TODO(gabriel-samfira): handle error types. Of particular interest is to
+ // know when the error is ErrNotFound.
+ metrics.InstanceOperationCount.WithLabelValues(
+ "GetInstance", // label: operation
+ e.cfg.Name, // label: provider
+ ).Inc()
+ out, err := garmExec.Exec(ctx, e.execPath, nil, asEnv)
+ if err != nil {
+ metrics.InstanceOperationFailedCount.WithLabelValues(
+ "GetInstance", // label: operation
+ e.cfg.Name, // label: provider
+ ).Inc()
+ return commonParams.ProviderInstance{}, garmErrors.NewProviderError("provider binary %s returned error: %s", e.execPath, err)
+ }
+
+ var param commonParams.ProviderInstance
+ if err := json.Unmarshal(out, ¶m); err != nil {
+ metrics.InstanceOperationFailedCount.WithLabelValues(
+ "GetInstance", // label: operation
+ e.cfg.Name, // label: provider
+ ).Inc()
+ return commonParams.ProviderInstance{}, garmErrors.NewProviderError("failed to decode response from binary: %s", err)
+ }
+
+ if err := commonExternal.ValidateResult(param); err != nil {
+ metrics.InstanceOperationFailedCount.WithLabelValues(
+ "GetInstance", // label: operation
+ e.cfg.Name, // label: provider
+ ).Inc()
+ return commonParams.ProviderInstance{}, garmErrors.NewProviderError("failed to validate result: %s", err)
+ }
+
+ return param, nil
+}
+
+// ListInstances will list all instances for a provider.
+func (e *external) ListInstances(ctx context.Context, poolID string, _ common.ListInstancesParams) ([]commonParams.ProviderInstance, error) {
+ asEnv := []string{
+ fmt.Sprintf("GARM_COMMAND=%s", commonExecution.ListInstancesCommand),
+ fmt.Sprintf("GARM_CONTROLLER_ID=%s", e.controllerID),
+ fmt.Sprintf("GARM_POOL_ID=%s", poolID),
+ fmt.Sprintf("GARM_PROVIDER_CONFIG_FILE=%s", e.cfg.External.ConfigFile),
+ }
+ asEnv = append(asEnv, e.environmentVariables...)
+
+ metrics.InstanceOperationCount.WithLabelValues(
+ "ListInstances", // label: operation
+ e.cfg.Name, // label: provider
+ ).Inc()
+
+ out, err := garmExec.Exec(ctx, e.execPath, nil, asEnv)
+ if err != nil {
+ metrics.InstanceOperationFailedCount.WithLabelValues(
+ "ListInstances", // label: operation
+ e.cfg.Name, // label: provider
+ ).Inc()
+ return []commonParams.ProviderInstance{}, garmErrors.NewProviderError("provider binary %s returned error: %s", e.execPath, err)
+ }
+
+ var param []commonParams.ProviderInstance
+ if err := json.Unmarshal(out, ¶m); err != nil {
+ metrics.InstanceOperationFailedCount.WithLabelValues(
+ "ListInstances", // label: operation
+ e.cfg.Name, // label: provider
+ ).Inc()
+ return []commonParams.ProviderInstance{}, garmErrors.NewProviderError("failed to decode response from binary: %s", err)
+ }
+
+ ret := make([]commonParams.ProviderInstance, len(param))
+ for idx, inst := range param {
+ if err := commonExternal.ValidateResult(inst); err != nil {
+ metrics.InstanceOperationFailedCount.WithLabelValues(
+ "ListInstances", // label: operation
+ e.cfg.Name, // label: provider
+ ).Inc()
+ return []commonParams.ProviderInstance{}, garmErrors.NewProviderError("failed to validate result: %s", err)
+ }
+ ret[idx] = inst
+ }
+ return ret, nil
+}
+
+// RemoveAllInstances will remove all instances created by this provider.
+func (e *external) RemoveAllInstances(ctx context.Context, _ common.RemoveAllInstancesParams) error {
+ asEnv := []string{
+ fmt.Sprintf("GARM_COMMAND=%s", commonExecution.RemoveAllInstancesCommand),
+ fmt.Sprintf("GARM_CONTROLLER_ID=%s", e.controllerID),
+ fmt.Sprintf("GARM_PROVIDER_CONFIG_FILE=%s", e.cfg.External.ConfigFile),
+ }
+ asEnv = append(asEnv, e.environmentVariables...)
+
+ metrics.InstanceOperationCount.WithLabelValues(
+ "RemoveAllInstances", // label: operation
+ e.cfg.Name, // label: provider
+ ).Inc()
+
+ _, err := garmExec.Exec(ctx, e.execPath, nil, asEnv)
+ if err != nil {
+ metrics.InstanceOperationFailedCount.WithLabelValues(
+ "RemoveAllInstances", // label: operation
+ e.cfg.Name, // label: provider
+ ).Inc()
+ return garmErrors.NewProviderError("provider binary %s returned error: %s", e.execPath, err)
+ }
+ return nil
+}
+
+// Stop shuts down the instance.
+func (e *external) Stop(ctx context.Context, instance string, _ common.StopParams) error {
+ asEnv := []string{
+ fmt.Sprintf("GARM_COMMAND=%s", commonExecution.StopInstanceCommand),
+ fmt.Sprintf("GARM_CONTROLLER_ID=%s", e.controllerID),
+ fmt.Sprintf("GARM_INSTANCE_ID=%s", instance),
+ fmt.Sprintf("GARM_PROVIDER_CONFIG_FILE=%s", e.cfg.External.ConfigFile),
+ }
+ asEnv = append(asEnv, e.environmentVariables...)
+
+ metrics.InstanceOperationCount.WithLabelValues(
+ "Stop", // label: operation
+ e.cfg.Name, // label: provider
+ ).Inc()
+ _, err := garmExec.Exec(ctx, e.execPath, nil, asEnv)
+ if err != nil {
+ metrics.InstanceOperationFailedCount.WithLabelValues(
+ "Stop", // label: operation
+ e.cfg.Name, // label: provider
+ ).Inc()
+ return garmErrors.NewProviderError("provider binary %s returned error: %s", e.execPath, err)
+ }
+ return nil
+}
+
+// Start boots up an instance.
+func (e *external) Start(ctx context.Context, instance string, _ common.StartParams) error {
+ asEnv := []string{
+ fmt.Sprintf("GARM_COMMAND=%s", commonExecution.StartInstanceCommand),
+ fmt.Sprintf("GARM_CONTROLLER_ID=%s", e.controllerID),
+ fmt.Sprintf("GARM_INSTANCE_ID=%s", instance),
+ fmt.Sprintf("GARM_PROVIDER_CONFIG_FILE=%s", e.cfg.External.ConfigFile),
+ }
+ asEnv = append(asEnv, e.environmentVariables...)
+
+ metrics.InstanceOperationCount.WithLabelValues(
+ "Start", // label: operation
+ e.cfg.Name, // label: provider
+ ).Inc()
+
+ _, err := garmExec.Exec(ctx, e.execPath, nil, asEnv)
+ if err != nil {
+ metrics.InstanceOperationFailedCount.WithLabelValues(
+ "Start", // label: operation
+ e.cfg.Name, // label: provider
+ ).Inc()
+ return garmErrors.NewProviderError("provider binary %s returned error: %s", e.execPath, err)
+ }
+ return nil
+}
+
+func (e *external) AsParams() params.Provider {
+ return params.Provider{
+ Name: e.cfg.Name,
+ Description: e.cfg.Description,
+ ProviderType: e.cfg.ProviderType,
+ }
+}
+
+// DisableJITConfig tells us if the provider explicitly disables JIT configuration and
+// forces runner registration tokens to be used. This may happen if a provider has not yet
+// been updated to support JIT configuration.
+func (e *external) DisableJITConfig() bool {
+ if e.cfg == nil {
+ return false
+ }
+ return e.cfg.DisableJITConfig
+}
diff --git a/runner/providers/v0.1.1/external.go b/runner/providers/v0.1.1/external.go
new file mode 100644
index 00000000..6e43dce7
--- /dev/null
+++ b/runner/providers/v0.1.1/external.go
@@ -0,0 +1,399 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
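+// Package v011 implements the external provider for interface version v0.1.1.
+// Compared to v0.1.0, every command is also given the pool ID and the
+// base64-encoded pool extra specs via the GARM_POOL_ID and
+// GARM_POOL_EXTRASPECS environment variables.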
+package v011
+
+import (
+ "context"
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "log/slog"
+ "os/exec"
+
+ garmErrors "github.com/cloudbase/garm-provider-common/errors"
+ commonExecution "github.com/cloudbase/garm-provider-common/execution/common"
+ commonParams "github.com/cloudbase/garm-provider-common/params"
+ garmExec "github.com/cloudbase/garm-provider-common/util/exec"
+ "github.com/cloudbase/garm/config"
+ "github.com/cloudbase/garm/metrics"
+ "github.com/cloudbase/garm/params"
+ "github.com/cloudbase/garm/runner/common"
+ commonExternal "github.com/cloudbase/garm/runner/providers/common"
+)
+
+var _ common.Provider = (*external)(nil)
+
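+// NewProvider returns a common.Provider that delegates all operations to the
+// external executable configured for this provider, using interface v0.1.1.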
+func NewProvider(ctx context.Context, cfg *config.Provider, controllerID string) (common.Provider, error) {
+ if cfg.ProviderType != params.ExternalProvider {
+ return nil, garmErrors.NewBadRequestError("invalid provider config")
+ }
+
+ execPath, err := cfg.External.ExecutablePath()
+ if err != nil {
+ return nil, fmt.Errorf("error fetching executable path: %w", err)
+ }
+
+ // Set GARM_INTERFACE_VERSION to the version of the interface that the external
+ // provider implements. This is used to ensure compatibility between the external
+// provider and GARM.
+ envVars := cfg.External.GetEnvironmentVariables()
+ envVars = append(envVars, fmt.Sprintf("GARM_INTERFACE_VERSION=%s", cfg.External.InterfaceVersion))
+
+ return &external{
+ ctx: ctx,
+ controllerID: controllerID,
+ cfg: cfg,
+ execPath: execPath,
+ environmentVariables: envVars,
+ }, nil
+}
+
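+// external invokes the provider executable for each operation. Input is
+// passed through environment variables (plus the serialized bootstrap params
+// for create), and results are read back as JSON from the command's output.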
+type external struct {
+ ctx context.Context
+ cfg *config.Provider
+ controllerID string
+ execPath string
+ environmentVariables []string
+}
+
+// CreateInstance creates a new compute instance in the provider.
+func (e *external) CreateInstance(ctx context.Context, bootstrapParams commonParams.BootstrapInstance, _ common.CreateInstanceParams) (commonParams.ProviderInstance, error) {
+ extraspecs := bootstrapParams.ExtraSpecs
+ extraspecsValue, err := json.Marshal(extraspecs)
+ if err != nil {
+ return commonParams.ProviderInstance{}, fmt.Errorf("error serializing extraspecs: %w", err)
+ }
+ // Encode the extraspecs as base64 to avoid issues with special characters.
+ base64EncodedExtraSpecs := base64.StdEncoding.EncodeToString(extraspecsValue)
+ asEnv := []string{
+ fmt.Sprintf("GARM_COMMAND=%s", commonExecution.CreateInstanceCommand),
+ fmt.Sprintf("GARM_CONTROLLER_ID=%s", e.controllerID),
+ fmt.Sprintf("GARM_POOL_ID=%s", bootstrapParams.PoolID),
+ fmt.Sprintf("GARM_PROVIDER_CONFIG_FILE=%s", e.cfg.External.ConfigFile),
+ fmt.Sprintf("GARM_POOL_EXTRASPECS=%s", base64EncodedExtraSpecs),
+ }
+ asEnv = append(asEnv, e.environmentVariables...)
+
+ asJs, err := json.Marshal(bootstrapParams)
+ if err != nil {
+ return commonParams.ProviderInstance{}, fmt.Errorf("error serializing bootstrap params: %w", err)
+ }
+
+ metrics.InstanceOperationCount.WithLabelValues(
+ "CreateInstance", // label: operation
+ e.cfg.Name, // label: provider
+ ).Inc()
+
+ out, err := garmExec.Exec(ctx, e.execPath, asJs, asEnv)
+ if err != nil {
+ metrics.InstanceOperationFailedCount.WithLabelValues(
+ "CreateInstance", // label: operation
+ e.cfg.Name, // label: provider
+ ).Inc()
+ return commonParams.ProviderInstance{}, garmErrors.NewProviderError("provider binary %s returned error: %s", e.execPath, err)
+ }
+
+ var param commonParams.ProviderInstance
+ if err := json.Unmarshal(out, &param); err != nil {
+ metrics.InstanceOperationFailedCount.WithLabelValues(
+ "CreateInstance", // label: operation
+ e.cfg.Name, // label: provider
+ ).Inc()
+ return commonParams.ProviderInstance{}, garmErrors.NewProviderError("failed to decode response from binary: %s", err)
+ }
+
+ if err := commonExternal.ValidateResult(param); err != nil {
+ metrics.InstanceOperationFailedCount.WithLabelValues(
+ "CreateInstance", // label: operation
+ e.cfg.Name, // label: provider
+ ).Inc()
+ return commonParams.ProviderInstance{}, garmErrors.NewProviderError("failed to validate result: %s", err)
+ }
+
+ retAsJs, _ := json.MarshalIndent(param, "", " ")
+ slog.DebugContext(
+ ctx, "provider returned",
+ "output", string(retAsJs))
+ return param, nil
+}
+
+// DeleteInstance will delete the instance in a provider.
+func (e *external) DeleteInstance(ctx context.Context, instance string, deleteInstanceParams common.DeleteInstanceParams) error {
+ extraspecs := deleteInstanceParams.DeleteInstanceV011.PoolInfo.ExtraSpecs
+ extraspecsValue, err := json.Marshal(extraspecs)
+ if err != nil {
+ return fmt.Errorf("error serializing extraspecs: %w", err)
+ }
+ // Encode the extraspecs as base64 to avoid issues with special characters.
+ base64EncodedExtraSpecs := base64.StdEncoding.EncodeToString(extraspecsValue)
+ asEnv := []string{
+ fmt.Sprintf("GARM_COMMAND=%s", commonExecution.DeleteInstanceCommand),
+ fmt.Sprintf("GARM_CONTROLLER_ID=%s", e.controllerID),
+ fmt.Sprintf("GARM_INSTANCE_ID=%s", instance),
+ fmt.Sprintf("GARM_PROVIDER_CONFIG_FILE=%s", e.cfg.External.ConfigFile),
+ fmt.Sprintf("GARM_POOL_ID=%s", deleteInstanceParams.DeleteInstanceV011.PoolInfo.ID),
+ fmt.Sprintf("GARM_POOL_EXTRASPECS=%s", base64EncodedExtraSpecs),
+ }
+ asEnv = append(asEnv, e.environmentVariables...)
+
+ metrics.InstanceOperationCount.WithLabelValues(
+ "DeleteInstance", // label: operation
+ e.cfg.Name, // label: provider
+ ).Inc()
+ _, err = garmExec.Exec(ctx, e.execPath, nil, asEnv)
+ if err != nil {
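+ // A "not found" exit code from the provider binary is treated as success,
+ // keeping DeleteInstance idempotent. Any other error is counted and returned.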
+ var exitErr *exec.ExitError
+ if !errors.As(err, &exitErr) || exitErr.ExitCode() != commonExecution.ExitCodeNotFound {
+ metrics.InstanceOperationFailedCount.WithLabelValues(
+ "DeleteInstance", // label: operation
+ e.cfg.Name, // label: provider
+ ).Inc()
+ return garmErrors.NewProviderError("provider binary %s returned error: %s", e.execPath, err)
+ }
+ }
+ return nil
+}
+
+// GetInstance will return details about one instance.
+func (e *external) GetInstance(ctx context.Context, instance string, getInstanceParams common.GetInstanceParams) (commonParams.ProviderInstance, error) {
+ extraspecs := getInstanceParams.GetInstanceV011.PoolInfo.ExtraSpecs
+ extraspecsValue, err := json.Marshal(extraspecs)
+ if err != nil {
+ return commonParams.ProviderInstance{}, fmt.Errorf("error serializing extraspecs: %w", err)
+ }
+ // Encode the extraspecs as base64 to avoid issues with special characters.
+ base64EncodedExtraSpecs := base64.StdEncoding.EncodeToString(extraspecsValue)
+ asEnv := []string{
+ fmt.Sprintf("GARM_COMMAND=%s", commonExecution.GetInstanceCommand),
+ fmt.Sprintf("GARM_CONTROLLER_ID=%s", e.controllerID),
+ fmt.Sprintf("GARM_INSTANCE_ID=%s", instance),
+ fmt.Sprintf("GARM_PROVIDER_CONFIG_FILE=%s", e.cfg.External.ConfigFile),
+ fmt.Sprintf("GARM_POOL_ID=%s", getInstanceParams.GetInstanceV011.PoolInfo.ID),
+ fmt.Sprintf("GARM_POOL_EXTRASPECS=%s", base64EncodedExtraSpecs),
+ }
+ asEnv = append(asEnv, e.environmentVariables...)
+
+ // nolint:golangci-lint,godox
+ // TODO(gabriel-samfira): handle error types. Of particular interest is to
+ // know when the error is ErrNotFound.
+ metrics.InstanceOperationCount.WithLabelValues(
+ "GetInstance", // label: operation
+ e.cfg.Name, // label: provider
+ ).Inc()
+ out, err := garmExec.Exec(ctx, e.execPath, nil, asEnv)
+ if err != nil {
+ metrics.InstanceOperationFailedCount.WithLabelValues(
+ "GetInstance", // label: operation
+ e.cfg.Name, // label: provider
+ ).Inc()
+ return commonParams.ProviderInstance{}, garmErrors.NewProviderError("provider binary %s returned error: %s", e.execPath, err)
+ }
+
+ var param commonParams.ProviderInstance
+ if err := json.Unmarshal(out, &param); err != nil {
+ metrics.InstanceOperationFailedCount.WithLabelValues(
+ "GetInstance", // label: operation
+ e.cfg.Name, // label: provider
+ ).Inc()
+ return commonParams.ProviderInstance{}, garmErrors.NewProviderError("failed to decode response from binary: %s", err)
+ }
+
+ if err := commonExternal.ValidateResult(param); err != nil {
+ metrics.InstanceOperationFailedCount.WithLabelValues(
+ "GetInstance", // label: operation
+ e.cfg.Name, // label: provider
+ ).Inc()
+ return commonParams.ProviderInstance{}, garmErrors.NewProviderError("failed to validate result: %s", err)
+ }
+
+ return param, nil
+}
+
+// ListInstances will list all instances for a provider.
+func (e *external) ListInstances(ctx context.Context, poolID string, listInstancesParams common.ListInstancesParams) ([]commonParams.ProviderInstance, error) {
+ extraspecs := listInstancesParams.ListInstancesV011.PoolInfo.ExtraSpecs
+ extraspecsValue, err := json.Marshal(extraspecs)
+ if err != nil {
+ return []commonParams.ProviderInstance{}, fmt.Errorf("error serializing extraspecs: %w", err)
+ }
+ // Encode the extraspecs as base64 to avoid issues with special characters.
+ base64EncodedExtraSpecs := base64.StdEncoding.EncodeToString(extraspecsValue)
+ asEnv := []string{
+ fmt.Sprintf("GARM_COMMAND=%s", commonExecution.ListInstancesCommand),
+ fmt.Sprintf("GARM_CONTROLLER_ID=%s", e.controllerID),
+ fmt.Sprintf("GARM_POOL_ID=%s", poolID),
+ fmt.Sprintf("GARM_PROVIDER_CONFIG_FILE=%s", e.cfg.External.ConfigFile),
+ fmt.Sprintf("GARM_POOL_EXTRASPECS=%s", base64EncodedExtraSpecs),
+ }
+ asEnv = append(asEnv, e.environmentVariables...)
+
+ metrics.InstanceOperationCount.WithLabelValues(
+ "ListInstances", // label: operation
+ e.cfg.Name, // label: provider
+ ).Inc()
+
+ out, err := garmExec.Exec(ctx, e.execPath, nil, asEnv)
+ if err != nil {
+ metrics.InstanceOperationFailedCount.WithLabelValues(
+ "ListInstances", // label: operation
+ e.cfg.Name, // label: provider
+ ).Inc()
+ return []commonParams.ProviderInstance{}, garmErrors.NewProviderError("provider binary %s returned error: %s", e.execPath, err)
+ }
+
+ var param []commonParams.ProviderInstance
+ if err := json.Unmarshal(out, &param); err != nil {
+ metrics.InstanceOperationFailedCount.WithLabelValues(
+ "ListInstances", // label: operation
+ e.cfg.Name, // label: provider
+ ).Inc()
+ return []commonParams.ProviderInstance{}, garmErrors.NewProviderError("failed to decode response from binary: %s", err)
+ }
+
+ ret := make([]commonParams.ProviderInstance, len(param))
+ for idx, inst := range param {
+ if err := commonExternal.ValidateResult(inst); err != nil {
+ metrics.InstanceOperationFailedCount.WithLabelValues(
+ "ListInstances", // label: operation
+ e.cfg.Name, // label: provider
+ ).Inc()
+ return []commonParams.ProviderInstance{}, garmErrors.NewProviderError("failed to validate result: %s", err)
+ }
+ ret[idx] = inst
+ }
+ return ret, nil
+}
+
+// RemoveAllInstances will remove all instances created by this provider.
+func (e *external) RemoveAllInstances(ctx context.Context, removeAllInstances common.RemoveAllInstancesParams) error {
+ extraspecs := removeAllInstances.RemoveAllInstancesV011.PoolInfo.ExtraSpecs
+ extraspecsValue, err := json.Marshal(extraspecs)
+ if err != nil {
+ return fmt.Errorf("error serializing extraspecs: %w", err)
+ }
+ // Encode the extraspecs as base64 to avoid issues with special characters.
+ base64EncodedExtraSpecs := base64.StdEncoding.EncodeToString(extraspecsValue)
+ asEnv := []string{
+ fmt.Sprintf("GARM_COMMAND=%s", commonExecution.RemoveAllInstancesCommand),
+ fmt.Sprintf("GARM_CONTROLLER_ID=%s", e.controllerID),
+ fmt.Sprintf("GARM_PROVIDER_CONFIG_FILE=%s", e.cfg.External.ConfigFile),
+ fmt.Sprintf("GARM_POOL_ID=%s", removeAllInstances.RemoveAllInstancesV011.PoolInfo.ID),
+ fmt.Sprintf("GARM_POOL_EXTRASPECS=%s", base64EncodedExtraSpecs),
+ }
+ asEnv = append(asEnv, e.environmentVariables...)
+
+ metrics.InstanceOperationCount.WithLabelValues(
+ "RemoveAllInstances", // label: operation
+ e.cfg.Name, // label: provider
+ ).Inc()
+
+ _, err = garmExec.Exec(ctx, e.execPath, nil, asEnv)
+ if err != nil {
+ metrics.InstanceOperationFailedCount.WithLabelValues(
+ "RemoveAllInstances", // label: operation
+ e.cfg.Name, // label: provider
+ ).Inc()
+ return garmErrors.NewProviderError("provider binary %s returned error: %s", e.execPath, err)
+ }
+ return nil
+}
+
+// Stop shuts down the instance.
+func (e *external) Stop(ctx context.Context, instance string, stopParams common.StopParams) error {
+ extraspecs := stopParams.StopV011.PoolInfo.ExtraSpecs
+ extraspecsValue, err := json.Marshal(extraspecs)
+ if err != nil {
+ return fmt.Errorf("error serializing extraspecs: %w", err)
+ }
+ // Encode the extraspecs as base64 to avoid issues with special characters.
+ base64EncodedExtraSpecs := base64.StdEncoding.EncodeToString(extraspecsValue)
+ asEnv := []string{
+ fmt.Sprintf("GARM_COMMAND=%s", commonExecution.StopInstanceCommand),
+ fmt.Sprintf("GARM_CONTROLLER_ID=%s", e.controllerID),
+ fmt.Sprintf("GARM_INSTANCE_ID=%s", instance),
+ fmt.Sprintf("GARM_PROVIDER_CONFIG_FILE=%s", e.cfg.External.ConfigFile),
+ fmt.Sprintf("GARM_POOL_ID=%s", stopParams.StopV011.PoolInfo.ID),
+ fmt.Sprintf("GARM_POOL_EXTRASPECS=%s", base64EncodedExtraSpecs),
+ }
+ asEnv = append(asEnv, e.environmentVariables...)
+
+ metrics.InstanceOperationCount.WithLabelValues(
+ "Stop", // label: operation
+ e.cfg.Name, // label: provider
+ ).Inc()
+ _, err = garmExec.Exec(ctx, e.execPath, nil, asEnv)
+ if err != nil {
+ metrics.InstanceOperationFailedCount.WithLabelValues(
+ "Stop", // label: operation
+ e.cfg.Name, // label: provider
+ ).Inc()
+ return garmErrors.NewProviderError("provider binary %s returned error: %s", e.execPath, err)
+ }
+ return nil
+}
+
+// Start boots up an instance.
+func (e *external) Start(ctx context.Context, instance string, startParams common.StartParams) error {
+ extraspecs := startParams.StartV011.PoolInfo.ExtraSpecs
+ extraspecsValue, err := json.Marshal(extraspecs)
+ if err != nil {
+ return fmt.Errorf("error serializing extraspecs: %w", err)
+ }
+ // Encode the extraspecs as base64 to avoid issues with special characters.
+ base64EncodedExtraSpecs := base64.StdEncoding.EncodeToString(extraspecsValue)
+ asEnv := []string{
+ fmt.Sprintf("GARM_COMMAND=%s", commonExecution.StartInstanceCommand),
+ fmt.Sprintf("GARM_CONTROLLER_ID=%s", e.controllerID),
+ fmt.Sprintf("GARM_INSTANCE_ID=%s", instance),
+ fmt.Sprintf("GARM_PROVIDER_CONFIG_FILE=%s", e.cfg.External.ConfigFile),
+ fmt.Sprintf("GARM_POOL_ID=%s", startParams.StartV011.PoolInfo.ID),
+ fmt.Sprintf("GARM_POOL_EXTRASPECS=%s", base64EncodedExtraSpecs),
+ }
+ asEnv = append(asEnv, e.environmentVariables...)
+
+ metrics.InstanceOperationCount.WithLabelValues(
+ "Start", // label: operation
+ e.cfg.Name, // label: provider
+ ).Inc()
+
+ _, err = garmExec.Exec(ctx, e.execPath, nil, asEnv)
+ if err != nil {
+ metrics.InstanceOperationFailedCount.WithLabelValues(
+ "Start", // label: operation
+ e.cfg.Name, // label: provider
+ ).Inc()
+ return garmErrors.NewProviderError("provider binary %s returned error: %s", e.execPath, err)
+ }
+ return nil
+}
+
+func (e *external) AsParams() params.Provider {
+ return params.Provider{
+ Name: e.cfg.Name,
+ Description: e.cfg.Description,
+ ProviderType: e.cfg.ProviderType,
+ }
+}
+
+// DisableJITConfig tells us if the provider explicitly disables JIT configuration and
+// forces runner registration tokens to be used. This may happen if a provider has not yet
+// been updated to support JIT configuration.
+func (e *external) DisableJITConfig() bool {
+ if e.cfg == nil {
+ return false
+ }
+ return e.cfg.DisableJITConfig
+}
diff --git a/runner/repositories.go b/runner/repositories.go
index cf5191dd..0f21d882 100644
--- a/runner/repositories.go
+++ b/runner/repositories.go
@@ -16,8 +16,9 @@ package runner
import (
"context"
+ "errors"
"fmt"
- "log"
+ "log/slog"
"strings"
runnerErrors "github.com/cloudbase/garm-provider-common/errors"
@@ -25,8 +26,6 @@ import (
"github.com/cloudbase/garm/params"
"github.com/cloudbase/garm/runner/common"
"github.com/cloudbase/garm/util/appdefaults"
-
- "github.com/pkg/errors"
)
func (r *Runner) CreateRepository(ctx context.Context, param params.CreateRepoParams) (repo params.Repository, err error) {
@@ -35,57 +34,72 @@ func (r *Runner) CreateRepository(ctx context.Context, param params.CreateRepoPa
}
if err := param.Validate(); err != nil {
- return params.Repository{}, errors.Wrap(err, "validating params")
+ return params.Repository{}, fmt.Errorf("error validating params: %w", err)
}
- creds, ok := r.credentials[param.CredentialsName]
- if !ok {
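+ // Resolve the credentials based on the requested forge type. When no forge
+ // type is specified, fall back to resolving the credentials name across all
+ // known forges.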
+ var creds params.ForgeCredentials
+ switch param.ForgeType {
+ case params.GithubEndpointType:
+ creds, err = r.store.GetGithubCredentialsByName(ctx, param.CredentialsName, true)
+ case params.GiteaEndpointType:
+ creds, err = r.store.GetGiteaCredentialsByName(ctx, param.CredentialsName, true)
+ default:
+ creds, err = r.ResolveForgeCredentialByName(ctx, param.CredentialsName)
+ }
+
+ if err != nil {
return params.Repository{}, runnerErrors.NewBadRequestError("credentials %s not defined", param.CredentialsName)
}
- _, err = r.store.GetRepository(ctx, param.Owner, param.Name)
+ _, err = r.store.GetRepository(ctx, param.Owner, param.Name, creds.Endpoint.Name)
if err != nil {
if !errors.Is(err, runnerErrors.ErrNotFound) {
- return params.Repository{}, errors.Wrap(err, "fetching repo")
+ return params.Repository{}, fmt.Errorf("error fetching repo: %w", err)
}
} else {
return params.Repository{}, runnerErrors.NewConflictError("repository %s/%s already exists", param.Owner, param.Name)
}
- repo, err = r.store.CreateRepository(ctx, param.Owner, param.Name, creds.Name, param.WebhookSecret)
+ repo, err = r.store.CreateRepository(ctx, param.Owner, param.Name, creds, param.WebhookSecret, param.PoolBalancerType)
if err != nil {
- return params.Repository{}, errors.Wrap(err, "creating repository")
+ return params.Repository{}, fmt.Errorf("error creating repository: %w", err)
}
defer func() {
if err != nil {
if deleteErr := r.store.DeleteRepository(ctx, repo.ID); deleteErr != nil {
- log.Printf("failed to delete repository: %s", deleteErr)
+ slog.With(slog.Any("error", deleteErr)).ErrorContext(
+ ctx, "failed to delete repository",
+ "repository_id", repo.ID)
}
}
}()
+ // Use the admin context in the pool manager. Any access control is already done above when
+ // updating the store.
poolMgr, err := r.poolManagerCtrl.CreateRepoPoolManager(r.ctx, repo, r.providers, r.store)
if err != nil {
- return params.Repository{}, errors.Wrap(err, "creating repo pool manager")
+ return params.Repository{}, fmt.Errorf("error creating repo pool manager: %w", err)
}
if err := poolMgr.Start(); err != nil {
if deleteErr := r.poolManagerCtrl.DeleteRepoPoolManager(repo); deleteErr != nil {
- log.Printf("failed to cleanup pool manager for repo %s", repo.ID)
+ slog.With(slog.Any("error", deleteErr)).ErrorContext(
+ ctx, "failed to cleanup pool manager for repo",
+ "repository_id", repo.ID)
}
- return params.Repository{}, errors.Wrap(err, "starting repo pool manager")
+ return params.Repository{}, fmt.Errorf("error starting repo pool manager: %w", err)
}
return repo, nil
}
-func (r *Runner) ListRepositories(ctx context.Context) ([]params.Repository, error) {
+func (r *Runner) ListRepositories(ctx context.Context, filter params.RepositoryFilter) ([]params.Repository, error) {
if !auth.IsAdmin(ctx) {
return nil, runnerErrors.ErrUnauthorized
}
- repos, err := r.store.ListRepositories(ctx)
+ repos, err := r.store.ListRepositories(ctx, filter)
if err != nil {
- return nil, errors.Wrap(err, "listing repositories")
+ return nil, fmt.Errorf("error listing repositories: %w", err)
}
var allRepos []params.Repository
@@ -111,7 +125,7 @@ func (r *Runner) GetRepositoryByID(ctx context.Context, repoID string) (params.R
repo, err := r.store.GetRepositoryByID(ctx, repoID)
if err != nil {
- return params.Repository{}, errors.Wrap(err, "fetching repository")
+ return params.Repository{}, fmt.Errorf("error fetching repository: %w", err)
}
poolMgr, err := r.poolManagerCtrl.GetRepoPoolManager(repo)
@@ -123,36 +137,65 @@ func (r *Runner) GetRepositoryByID(ctx context.Context, repoID string) (params.R
return repo, nil
}
-func (r *Runner) DeleteRepository(ctx context.Context, repoID string) error {
+func (r *Runner) DeleteRepository(ctx context.Context, repoID string, keepWebhook bool) error {
if !auth.IsAdmin(ctx) {
return runnerErrors.ErrUnauthorized
}
repo, err := r.store.GetRepositoryByID(ctx, repoID)
if err != nil {
- return errors.Wrap(err, "fetching repo")
+ return fmt.Errorf("error fetching repo: %w", err)
}
- pools, err := r.store.ListRepoPools(ctx, repoID)
+ entity, err := repo.GetEntity()
if err != nil {
- return errors.Wrap(err, "fetching repo pools")
+ return fmt.Errorf("error getting entity: %w", err)
+ }
+
+ pools, err := r.store.ListEntityPools(ctx, entity)
+ if err != nil {
+ return fmt.Errorf("error fetching repo pools: %w", err)
}
if len(pools) > 0 {
- poolIds := []string{}
+ poolIDs := []string{}
for _, pool := range pools {
- poolIds = append(poolIds, pool.ID)
+ poolIDs = append(poolIDs, pool.ID)
}
- return runnerErrors.NewBadRequestError("repo has pools defined (%s)", strings.Join(poolIds, ", "))
+ return runnerErrors.NewBadRequestError("repo has pools defined (%s)", strings.Join(poolIDs, ", "))
+ }
+
+ scaleSets, err := r.store.ListEntityScaleSets(ctx, entity)
+ if err != nil {
+ return fmt.Errorf("error fetching repo scale sets: %w", err)
+ }
+
+ if len(scaleSets) > 0 {
+ return runnerErrors.NewBadRequestError("repo has scale sets defined; delete them first")
+ }
+
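+ // Uninstall the webhook only when the caller did not ask to keep it and
+ // webhook management is enabled in the config.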
+ if !keepWebhook && r.config.Default.EnableWebhookManagement {
+ poolMgr, err := r.poolManagerCtrl.GetRepoPoolManager(repo)
+ if err != nil {
+ return fmt.Errorf("error fetching pool manager: %w", err)
+ }
+
+ if err := poolMgr.UninstallWebhook(ctx); err != nil {
+ // nolint:golangci-lint,godox
+ // TODO(gabriel-samfira): Should we error out here?
+ slog.With(slog.Any("error", err)).ErrorContext(
+ ctx, "failed to uninstall webhook",
+ "pool_manager_id", poolMgr.ID())
+ }
}
if err := r.poolManagerCtrl.DeleteRepoPoolManager(repo); err != nil {
- return errors.Wrap(err, "deleting repo pool manager")
+ return fmt.Errorf("error deleting repo pool manager: %w", err)
}
if err := r.store.DeleteRepository(ctx, repoID); err != nil {
- return errors.Wrap(err, "removing repository")
+ return fmt.Errorf("error removing repository: %w", err)
}
return nil
}
@@ -165,26 +208,21 @@ func (r *Runner) UpdateRepository(ctx context.Context, repoID string, param para
r.mux.Lock()
defer r.mux.Unlock()
- repo, err := r.store.GetRepositoryByID(ctx, repoID)
- if err != nil {
- return params.Repository{}, errors.Wrap(err, "fetching repo")
+ switch param.PoolBalancerType {
+ case params.PoolBalancerTypeRoundRobin, params.PoolBalancerTypePack, params.PoolBalancerTypeNone:
+ default:
+ return params.Repository{}, runnerErrors.NewBadRequestError("invalid pool balancer type: %s", param.PoolBalancerType)
}
- if param.CredentialsName != "" {
- // Check that credentials are set before saving to db
- if _, ok := r.credentials[param.CredentialsName]; !ok {
- return params.Repository{}, runnerErrors.NewBadRequestError("invalid credentials (%s) for repo %s/%s", param.CredentialsName, repo.Owner, repo.Name)
- }
+ slog.InfoContext(ctx, "updating repository", "repo_id", repoID, "param", param)
+ repo, err := r.store.UpdateRepository(ctx, repoID, param)
+ if err != nil {
+ return params.Repository{}, fmt.Errorf("error updating repo: %w", err)
}
- repo, err = r.store.UpdateRepository(ctx, repoID, param)
+ poolMgr, err := r.poolManagerCtrl.GetRepoPoolManager(repo)
if err != nil {
- return params.Repository{}, errors.Wrap(err, "updating repo")
- }
-
- poolMgr, err := r.poolManagerCtrl.UpdateRepoPoolManager(r.ctx, repo)
- if err != nil {
- return params.Repository{}, fmt.Errorf("failed to update pool manager: %w", err)
+ return params.Repository{}, fmt.Errorf("error getting pool manager: %w", err)
}
repo.PoolManagerStatus = poolMgr.Status()
@@ -196,30 +234,23 @@ func (r *Runner) CreateRepoPool(ctx context.Context, repoID string, param params
return params.Pool{}, runnerErrors.ErrUnauthorized
}
- r.mux.Lock()
- defer r.mux.Unlock()
-
- repo, err := r.store.GetRepositoryByID(ctx, repoID)
- if err != nil {
- return params.Pool{}, errors.Wrap(err, "fetching repo")
- }
-
- if _, err := r.poolManagerCtrl.GetRepoPoolManager(repo); err != nil {
- return params.Pool{}, runnerErrors.ErrNotFound
- }
-
createPoolParams, err := r.appendTagsToCreatePoolParams(param)
if err != nil {
- return params.Pool{}, errors.Wrap(err, "fetching pool params")
+ return params.Pool{}, fmt.Errorf("error appending tags to create pool params: %w", err)
}
if createPoolParams.RunnerBootstrapTimeout == 0 {
createPoolParams.RunnerBootstrapTimeout = appdefaults.DefaultRunnerBootstrapTimeout
}
- pool, err := r.store.CreateRepositoryPool(ctx, repoID, createPoolParams)
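+ // Repository pools are created through the generic forge entity store API.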
+ entity := params.ForgeEntity{
+ ID: repoID,
+ EntityType: params.ForgeEntityTypeRepository,
+ }
+
+ pool, err := r.store.CreateEntityPool(ctx, entity, createPoolParams)
if err != nil {
- return params.Pool{}, errors.Wrap(err, "creating pool")
+ return params.Pool{}, fmt.Errorf("error creating pool: %w", err)
}
return pool, nil
@@ -230,10 +261,16 @@ func (r *Runner) GetRepoPoolByID(ctx context.Context, repoID, poolID string) (pa
return params.Pool{}, runnerErrors.ErrUnauthorized
}
- pool, err := r.store.GetRepositoryPool(ctx, repoID, poolID)
- if err != nil {
- return params.Pool{}, errors.Wrap(err, "fetching pool")
+ entity := params.ForgeEntity{
+ ID: repoID,
+ EntityType: params.ForgeEntityTypeRepository,
}
+
+ pool, err := r.store.GetEntityPool(ctx, entity, poolID)
+ if err != nil {
+ return params.Pool{}, fmt.Errorf("error fetching pool: %w", err)
+ }
+
return pool, nil
}
@@ -242,27 +279,27 @@ func (r *Runner) DeleteRepoPool(ctx context.Context, repoID, poolID string) erro
return runnerErrors.ErrUnauthorized
}
- pool, err := r.store.GetRepositoryPool(ctx, repoID, poolID)
+ entity := params.ForgeEntity{
+ ID: repoID,
+ EntityType: params.ForgeEntityTypeRepository,
+ }
+ pool, err := r.store.GetEntityPool(ctx, entity, poolID)
if err != nil {
- return errors.Wrap(err, "fetching pool")
- }
-
- instances, err := r.store.ListPoolInstances(ctx, pool.ID)
- if err != nil {
- return errors.Wrap(err, "fetching instances")
+ return fmt.Errorf("error fetching pool: %w", err)
}
+ // nolint:golangci-lint,godox
// TODO: implement a count function
- if len(instances) > 0 {
+ if len(pool.Instances) > 0 {
runnerIDs := []string{}
- for _, run := range instances {
+ for _, run := range pool.Instances {
runnerIDs = append(runnerIDs, run.ID)
}
return runnerErrors.NewBadRequestError("pool has runners: %s", strings.Join(runnerIDs, ", "))
}
- if err := r.store.DeleteRepositoryPool(ctx, repoID, poolID); err != nil {
- return errors.Wrap(err, "deleting pool")
+ if err := r.store.DeleteEntityPool(ctx, entity, poolID); err != nil {
+ return fmt.Errorf("error deleting pool: %w", err)
}
return nil
}
@@ -271,10 +308,13 @@ func (r *Runner) ListRepoPools(ctx context.Context, repoID string) ([]params.Poo
if !auth.IsAdmin(ctx) {
return []params.Pool{}, runnerErrors.ErrUnauthorized
}
-
- pools, err := r.store.ListRepoPools(ctx, repoID)
+ entity := params.ForgeEntity{
+ ID: repoID,
+ EntityType: params.ForgeEntityTypeRepository,
+ }
+ pools, err := r.store.ListEntityPools(ctx, entity)
if err != nil {
- return nil, errors.Wrap(err, "fetching pools")
+ return nil, fmt.Errorf("error fetching pools: %w", err)
}
return pools, nil
}
@@ -286,7 +326,7 @@ func (r *Runner) ListPoolInstances(ctx context.Context, poolID string) ([]params
instances, err := r.store.ListPoolInstances(ctx, poolID)
if err != nil {
- return []params.Instance{}, errors.Wrap(err, "fetching instances")
+ return []params.Instance{}, fmt.Errorf("error fetching instances: %w", err)
}
return instances, nil
}
@@ -296,9 +336,13 @@ func (r *Runner) UpdateRepoPool(ctx context.Context, repoID, poolID string, para
return params.Pool{}, runnerErrors.ErrUnauthorized
}
- pool, err := r.store.GetRepositoryPool(ctx, repoID, poolID)
+ entity := params.ForgeEntity{
+ ID: repoID,
+ EntityType: params.ForgeEntityTypeRepository,
+ }
+ pool, err := r.store.GetEntityPool(ctx, entity, poolID)
if err != nil {
- return params.Pool{}, errors.Wrap(err, "fetching pool")
+ return params.Pool{}, fmt.Errorf("error fetching pool: %w", err)
}
maxRunners := pool.MaxRunners
@@ -315,9 +359,9 @@ func (r *Runner) UpdateRepoPool(ctx context.Context, repoID, poolID string, para
return params.Pool{}, runnerErrors.NewBadRequestError("min_idle_runners cannot be larger than max_runners")
}
- newPool, err := r.store.UpdateRepositoryPool(ctx, repoID, poolID, param)
+ newPool, err := r.store.UpdateEntityPool(ctx, entity, poolID, param)
if err != nil {
- return params.Pool{}, errors.Wrap(err, "updating pool")
+ return params.Pool{}, fmt.Errorf("error updating pool: %w", err)
}
return newPool, nil
}
@@ -326,26 +370,94 @@ func (r *Runner) ListRepoInstances(ctx context.Context, repoID string) ([]params
if !auth.IsAdmin(ctx) {
return nil, runnerErrors.ErrUnauthorized
}
-
- instances, err := r.store.ListRepoInstances(ctx, repoID)
+ entity := params.ForgeEntity{
+ ID: repoID,
+ EntityType: params.ForgeEntityTypeRepository,
+ }
+ instances, err := r.store.ListEntityInstances(ctx, entity)
if err != nil {
- return []params.Instance{}, errors.Wrap(err, "fetching instances")
+ return []params.Instance{}, fmt.Errorf("error , errfetching instances: %w", err)
}
return instances, nil
}
-func (r *Runner) findRepoPoolManager(owner, name string) (common.PoolManager, error) {
+func (r *Runner) findRepoPoolManager(owner, name, endpointName string) (common.PoolManager, error) {
r.mux.Lock()
defer r.mux.Unlock()
- repo, err := r.store.GetRepository(r.ctx, owner, name)
+ repo, err := r.store.GetRepository(r.ctx, owner, name, endpointName)
if err != nil {
- return nil, errors.Wrap(err, "fetching repo")
+ return nil, fmt.Errorf("error fetching repo: %w", err)
}
poolManager, err := r.poolManagerCtrl.GetRepoPoolManager(repo)
if err != nil {
- return nil, errors.Wrap(err, "fetching pool manager for repo")
+ return nil, fmt.Errorf("error fetching pool manager for repo: %w", err)
}
return poolManager, nil
}
+
+func (r *Runner) InstallRepoWebhook(ctx context.Context, repoID string, param params.InstallWebhookParams) (params.HookInfo, error) {
+ if !auth.IsAdmin(ctx) {
+ return params.HookInfo{}, runnerErrors.ErrUnauthorized
+ }
+
+ repo, err := r.store.GetRepositoryByID(ctx, repoID)
+ if err != nil {
+ return params.HookInfo{}, fmt.Errorf("error fetching repo: %w", err)
+ }
+
+ poolManager, err := r.poolManagerCtrl.GetRepoPoolManager(repo)
+ if err != nil {
+ return params.HookInfo{}, fmt.Errorf("error fetching pool manager for repo: %w", err)
+ }
+
+ info, err := poolManager.InstallWebhook(ctx, param)
+ if err != nil {
+ return params.HookInfo{}, fmt.Errorf("error installing webhook: %w", err)
+ }
+ return info, nil
+}
+
+func (r *Runner) UninstallRepoWebhook(ctx context.Context, repoID string) error {
+ if !auth.IsAdmin(ctx) {
+ return runnerErrors.ErrUnauthorized
+ }
+
+ repo, err := r.store.GetRepositoryByID(ctx, repoID)
+ if err != nil {
+ return fmt.Errorf("error fetching repo: %w", err)
+ }
+
+ poolManager, err := r.poolManagerCtrl.GetRepoPoolManager(repo)
+ if err != nil {
+ return fmt.Errorf("error fetching pool manager for repo: %w", err)
+ }
+
+ if err := poolManager.UninstallWebhook(ctx); err != nil {
+ return fmt.Errorf("error uninstalling webhook: %w", err)
+ }
+ return nil
+}
+
+func (r *Runner) GetRepoWebhookInfo(ctx context.Context, repoID string) (params.HookInfo, error) {
+ if !auth.IsAdmin(ctx) {
+ return params.HookInfo{}, runnerErrors.ErrUnauthorized
+ }
+
+ repo, err := r.store.GetRepositoryByID(ctx, repoID)
+ if err != nil {
+ return params.HookInfo{}, fmt.Errorf("error fetching repo: %w", err)
+ }
+
+ poolManager, err := r.poolManagerCtrl.GetRepoPoolManager(repo)
+ if err != nil {
+ return params.HookInfo{}, fmt.Errorf("error fetching pool manager for repo: %w", err)
+ }
+
+ info, err := poolManager.GetWebhookInfo(ctx)
+ if err != nil {
+ return params.HookInfo{}, fmt.Errorf("error getting webhook info: %w", err)
+ }
+ return info, nil
+}
diff --git a/runner/repositories_test.go b/runner/repositories_test.go
index 0b2c527a..8f195ae3 100644
--- a/runner/repositories_test.go
+++ b/runner/repositories_test.go
@@ -16,58 +16,72 @@ package runner
import (
"context"
+ "errors"
"fmt"
"testing"
+ "github.com/stretchr/testify/mock"
+ "github.com/stretchr/testify/suite"
+
runnerErrors "github.com/cloudbase/garm-provider-common/errors"
- "github.com/cloudbase/garm/auth"
- "github.com/cloudbase/garm/config"
"github.com/cloudbase/garm/database"
dbCommon "github.com/cloudbase/garm/database/common"
+ "github.com/cloudbase/garm/database/watcher"
garmTesting "github.com/cloudbase/garm/internal/testing"
"github.com/cloudbase/garm/params"
"github.com/cloudbase/garm/runner/common"
runnerCommonMocks "github.com/cloudbase/garm/runner/common/mocks"
runnerMocks "github.com/cloudbase/garm/runner/mocks"
-
- "github.com/stretchr/testify/mock"
- "github.com/stretchr/testify/suite"
)
type RepoTestFixtures struct {
- AdminContext context.Context
- Store dbCommon.Store
- StoreRepos map[string]params.Repository
- Providers map[string]common.Provider
- Credentials map[string]config.Github
- CreateRepoParams params.CreateRepoParams
- CreatePoolParams params.CreatePoolParams
- CreateInstanceParams params.CreateInstanceParams
- UpdateRepoParams params.UpdateEntityParams
- UpdatePoolParams params.UpdatePoolParams
- UpdatePoolStateParams params.UpdatePoolStateParams
- ErrMock error
- ProviderMock *runnerCommonMocks.Provider
- PoolMgrMock *runnerCommonMocks.PoolManager
- PoolMgrCtrlMock *runnerMocks.PoolManagerController
+ AdminContext context.Context
+ Store dbCommon.Store
+ StoreRepos map[string]params.Repository
+ Providers map[string]common.Provider
+ Credentials map[string]params.ForgeCredentials
+ CreateRepoParams params.CreateRepoParams
+ CreatePoolParams params.CreatePoolParams
+ CreateInstanceParams params.CreateInstanceParams
+ UpdateRepoParams params.UpdateEntityParams
+ UpdatePoolParams params.UpdatePoolParams
+ ErrMock error
+ ProviderMock *runnerCommonMocks.Provider
+ PoolMgrMock *runnerCommonMocks.PoolManager
+ PoolMgrCtrlMock *runnerMocks.PoolManagerController
+}
+
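+// Install a mock watcher so the test suite does not depend on a real one.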
+func init() {
+ watcher.SetWatcher(&garmTesting.MockWatcher{})
}
type RepoTestSuite struct {
suite.Suite
Fixtures *RepoTestFixtures
Runner *Runner
+
+ testCreds params.ForgeCredentials
+ secondaryTestCreds params.ForgeCredentials
+ giteaTestCreds params.ForgeCredentials
+ githubEndpoint params.ForgeEndpoint
+ giteaEndpoint params.ForgeEndpoint
}
func (s *RepoTestSuite) SetupTest() {
- adminCtx := auth.GetAdminContext()
-
// create testing sqlite database
dbCfg := garmTesting.GetTestSqliteDBConfig(s.T())
- db, err := database.NewDatabase(adminCtx, dbCfg)
+ db, err := database.NewDatabase(context.Background(), dbCfg)
if err != nil {
s.FailNow(fmt.Sprintf("failed to create db connection: %s", err))
}
+ adminCtx := garmTesting.ImpersonateAdminContext(context.Background(), db, s.T())
+ s.githubEndpoint = garmTesting.CreateDefaultGithubEndpoint(adminCtx, db, s.T())
+ s.giteaEndpoint = garmTesting.CreateDefaultGiteaEndpoint(adminCtx, db, s.T())
+ s.testCreds = garmTesting.CreateTestGithubCredentials(adminCtx, "new-creds", db, s.T(), s.githubEndpoint)
+ s.secondaryTestCreds = garmTesting.CreateTestGithubCredentials(adminCtx, "secondary-creds", db, s.T(), s.githubEndpoint)
+ s.giteaTestCreds = garmTesting.CreateTestGiteaCredentials(adminCtx, "gitea-creds", db, s.T(), s.giteaEndpoint)
+
// create some repository objects in the database, for testing purposes
repos := map[string]params.Repository{}
for i := 1; i <= 3; i++ {
@@ -76,11 +90,12 @@ func (s *RepoTestSuite) SetupTest() {
adminCtx,
fmt.Sprintf("test-owner-%v", i),
name,
- fmt.Sprintf("test-creds-%v", i),
+ s.testCreds,
fmt.Sprintf("test-webhook-secret-%v", i),
+ params.PoolBalancerTypeRoundRobin,
)
if err != nil {
- s.FailNow(fmt.Sprintf("failed to create database object (test-repo-%v)", i))
+ s.FailNow(fmt.Sprintf("failed to create database object (test-repo-%v): %q", i, err))
}
repos[name] = repo
}
@@ -90,24 +105,22 @@ func (s *RepoTestSuite) SetupTest() {
var minIdleRunners uint = 20
providerMock := runnerCommonMocks.NewProvider(s.T())
fixtures := &RepoTestFixtures{
- AdminContext: auth.GetAdminContext(),
+ AdminContext: adminCtx,
Store: db,
StoreRepos: repos,
Providers: map[string]common.Provider{
"test-provider": providerMock,
},
- Credentials: map[string]config.Github{
- "test-creds": {
- Name: "test-creds-name",
- Description: "test-creds-description",
- OAuth2Token: "test-creds-oauth2-token",
- },
+ Credentials: map[string]params.ForgeCredentials{
+ s.testCreds.Name: s.testCreds,
+ s.secondaryTestCreds.Name: s.secondaryTestCreds,
},
CreateRepoParams: params.CreateRepoParams{
Owner: "test-owner-create",
Name: "test-repo-create",
- CredentialsName: "test-creds",
+ CredentialsName: s.testCreds.Name,
WebhookSecret: "test-create-repo-webhook-secret",
+ ForgeType: params.GithubEndpointType,
},
CreatePoolParams: params.CreatePoolParams{
ProviderName: "test-provider",
@@ -117,7 +130,7 @@ func (s *RepoTestSuite) SetupTest() {
Flavor: "test",
OSType: "linux",
OSArch: "arm64",
- Tags: []string{"self-hosted", "arm64", "linux"},
+ Tags: []string{"arm64-linux-runner"},
RunnerBootstrapTimeout: 0,
},
CreateInstanceParams: params.CreateInstanceParams{
@@ -125,7 +138,7 @@ func (s *RepoTestSuite) SetupTest() {
OSType: "linux",
},
UpdateRepoParams: params.UpdateEntityParams{
- CredentialsName: "test-creds",
+ CredentialsName: s.testCreds.Name,
WebhookSecret: "test-update-repo-webhook-secret",
},
UpdatePoolParams: params.UpdatePoolParams{
@@ -134,9 +147,6 @@ func (s *RepoTestSuite) SetupTest() {
Image: "test-images-updated",
Flavor: "test-flavor-updated",
},
- UpdatePoolStateParams: params.UpdatePoolStateParams{
- WebhookSecret: "test-update-repo-webhook-secret",
- },
ErrMock: fmt.Errorf("mock error"),
ProviderMock: providerMock,
PoolMgrMock: runnerCommonMocks.NewPoolManager(s.T()),
@@ -147,7 +157,6 @@ func (s *RepoTestSuite) SetupTest() {
// setup test runner
runner := &Runner{
providers: fixtures.Providers,
- credentials: fixtures.Credentials,
ctx: fixtures.AdminContext,
store: fixtures.Store,
poolManagerCtrl: fixtures.PoolMgrCtrlMock,
@@ -166,10 +175,32 @@ func (s *RepoTestSuite) TestCreateRepository() {
// assertions
s.Fixtures.PoolMgrMock.AssertExpectations(s.T())
s.Fixtures.PoolMgrCtrlMock.AssertExpectations(s.T())
+
s.Require().Nil(err)
s.Require().Equal(s.Fixtures.CreateRepoParams.Owner, repo.Owner)
s.Require().Equal(s.Fixtures.CreateRepoParams.Name, repo.Name)
- s.Require().Equal(s.Fixtures.Credentials[s.Fixtures.CreateRepoParams.CredentialsName].Name, repo.CredentialsName)
+ s.Require().Equal(s.Fixtures.Credentials[s.Fixtures.CreateRepoParams.CredentialsName].Name, repo.Credentials.Name)
+ s.Require().Equal(params.PoolBalancerTypeRoundRobin, repo.PoolBalancerType)
+}
+
+func (s *RepoTestSuite) TestCreateRepositoryPoolBalancerTypePack() {
+ // setup mocks expectations
+ s.Fixtures.PoolMgrMock.On("Start").Return(nil)
+ s.Fixtures.PoolMgrCtrlMock.On("CreateRepoPoolManager", s.Fixtures.AdminContext, mock.AnythingOfType("params.Repository"), s.Fixtures.Providers, s.Fixtures.Store).Return(s.Fixtures.PoolMgrMock, nil)
+
+ // call tested function
+ param := s.Fixtures.CreateRepoParams
+ param.PoolBalancerType = params.PoolBalancerTypePack
+ repo, err := s.Runner.CreateRepository(s.Fixtures.AdminContext, param)
+
+ // assertions
+ s.Fixtures.PoolMgrMock.AssertExpectations(s.T())
+ s.Fixtures.PoolMgrCtrlMock.AssertExpectations(s.T())
+ s.Require().Nil(err)
+ s.Require().Equal(param.Owner, repo.Owner)
+ s.Require().Equal(param.Name, repo.Name)
+ s.Require().Equal(s.Fixtures.Credentials[s.Fixtures.CreateRepoParams.CredentialsName].Name, repo.Credentials.Name)
+ s.Require().Equal(params.PoolBalancerTypePack, repo.PoolBalancerType)
}
func (s *RepoTestSuite) TestCreateRepositoryErrUnauthorized() {
@@ -185,7 +216,7 @@ func (s *RepoTestSuite) TestCreateRepositoryEmptyParams() {
}
func (s *RepoTestSuite) TestCreateRepositoryMissingCredentials() {
- s.Fixtures.CreateRepoParams.CredentialsName = "not-existent-creds-name"
+ s.Fixtures.CreateRepoParams.CredentialsName = notExistingCredentialsName
_, err := s.Runner.CreateRepository(s.Fixtures.AdminContext, s.Fixtures.CreateRepoParams)
@@ -209,7 +240,7 @@ func (s *RepoTestSuite) TestCreateRepositoryPoolMgrFailed() {
s.Fixtures.PoolMgrMock.AssertExpectations(s.T())
s.Fixtures.PoolMgrCtrlMock.AssertExpectations(s.T())
- s.Require().Equal(fmt.Sprintf("creating repo pool manager: %s", s.Fixtures.ErrMock.Error()), err.Error())
+ s.Require().Equal(fmt.Sprintf("error creating repo pool manager: %s", s.Fixtures.ErrMock.Error()), err.Error())
}
func (s *RepoTestSuite) TestCreateRepositoryStartPoolMgrFailed() {
@@ -221,20 +252,87 @@ func (s *RepoTestSuite) TestCreateRepositoryStartPoolMgrFailed() {
s.Fixtures.PoolMgrMock.AssertExpectations(s.T())
s.Fixtures.PoolMgrCtrlMock.AssertExpectations(s.T())
- s.Require().Equal(fmt.Sprintf("starting repo pool manager: %s", s.Fixtures.ErrMock.Error()), err.Error())
+ s.Require().Equal(fmt.Sprintf("error starting repo pool manager: %s", s.Fixtures.ErrMock.Error()), err.Error())
}
func (s *RepoTestSuite) TestListRepositories() {
s.Fixtures.PoolMgrCtrlMock.On("GetRepoPoolManager", mock.AnythingOfType("params.Repository")).Return(s.Fixtures.PoolMgrMock, nil)
s.Fixtures.PoolMgrMock.On("Status").Return(params.PoolManagerStatus{IsRunning: true}, nil)
- repos, err := s.Runner.ListRepositories(s.Fixtures.AdminContext)
+ repos, err := s.Runner.ListRepositories(s.Fixtures.AdminContext, params.RepositoryFilter{})
s.Require().Nil(err)
garmTesting.EqualDBEntityByName(s.T(), garmTesting.DBEntityMapToSlice(s.Fixtures.StoreRepos), repos)
}
+func (s *RepoTestSuite) TestListRepositoriesWithFilters() {
+ s.Fixtures.PoolMgrCtrlMock.On("GetRepoPoolManager", mock.AnythingOfType("params.Repository")).Return(s.Fixtures.PoolMgrMock, nil)
+ s.Fixtures.PoolMgrMock.On("Status").Return(params.PoolManagerStatus{IsRunning: true}, nil)
+
+ repo, err := s.Fixtures.Store.CreateRepository(
+ s.Fixtures.AdminContext,
+ "example-owner",
+ "example-repo",
+ s.testCreds,
+ "test-webhook-secret",
+ params.PoolBalancerTypeRoundRobin,
+ )
+ if err != nil {
+ s.FailNow(fmt.Sprintf("failed to create database object (example-repo): %q", err))
+ }
+
+ repo2, err := s.Fixtures.Store.CreateRepository(
+ s.Fixtures.AdminContext,
+ "another-example-owner",
+ "example-repo",
+ s.testCreds,
+ "test-webhook-secret",
+ params.PoolBalancerTypeRoundRobin,
+ )
+ if err != nil {
+ s.FailNow(fmt.Sprintf("failed to create database object (example-repo): %q", err))
+ }
+
+ repo3, err := s.Fixtures.Store.CreateRepository(
+ s.Fixtures.AdminContext,
+ "example-owner",
+ "example-repo",
+ s.giteaTestCreds,
+ "test-webhook-secret",
+ params.PoolBalancerTypeRoundRobin,
+ )
+ if err != nil {
+ s.FailNow(fmt.Sprintf("failed to create database object (example-repo): %q", err))
+ }
+
+ repos, err := s.Runner.ListRepositories(s.Fixtures.AdminContext, params.RepositoryFilter{Name: "example-repo"})
+
+ s.Require().Nil(err)
+ garmTesting.EqualDBEntityByName(s.T(), []params.Repository{repo, repo2, repo3}, repos)
+
+ repos, err = s.Runner.ListRepositories(
+ s.Fixtures.AdminContext,
+ params.RepositoryFilter{
+ Name: "example-repo",
+ Owner: "example-owner",
+ },
+ )
+ s.Require().Nil(err)
+ garmTesting.EqualDBEntityByName(s.T(), []params.Repository{repo, repo3}, repos)
+
+ repos, err = s.Runner.ListRepositories(
+ s.Fixtures.AdminContext,
+ params.RepositoryFilter{
+ Name: "example-repo",
+ Owner: "example-owner",
+ Endpoint: s.giteaEndpoint.Name,
+ },
+ )
+ s.Require().Nil(err)
+ garmTesting.EqualDBEntityByName(s.T(), []params.Repository{repo3}, repos)
+}
+
func (s *RepoTestSuite) TestListRepositoriesErrUnauthorized() {
- _, err := s.Runner.ListRepositories(context.Background())
+ _, err := s.Runner.ListRepositories(context.Background(), params.RepositoryFilter{})
s.Require().Equal(runnerErrors.ErrUnauthorized, err)
}
@@ -257,28 +355,32 @@ func (s *RepoTestSuite) TestGetRepositoryByIDErrUnauthorized() {
func (s *RepoTestSuite) TestDeleteRepository() {
s.Fixtures.PoolMgrCtrlMock.On("DeleteRepoPoolManager", mock.AnythingOfType("params.Repository")).Return(nil)
- err := s.Runner.DeleteRepository(s.Fixtures.AdminContext, s.Fixtures.StoreRepos["test-repo-1"].ID)
+ err := s.Runner.DeleteRepository(s.Fixtures.AdminContext, s.Fixtures.StoreRepos["test-repo-1"].ID, true)
s.Fixtures.PoolMgrCtrlMock.AssertExpectations(s.T())
s.Require().Nil(err)
_, err = s.Fixtures.Store.GetRepositoryByID(s.Fixtures.AdminContext, s.Fixtures.StoreRepos["test-repo-1"].ID)
- s.Require().Equal("fetching repo: not found", err.Error())
+ s.Require().Equal("error fetching repo: not found", err.Error())
}
func (s *RepoTestSuite) TestDeleteRepositoryErrUnauthorized() {
- err := s.Runner.DeleteRepository(context.Background(), "dummy-repo-id")
+ err := s.Runner.DeleteRepository(context.Background(), "dummy-repo-id", true)
s.Require().Equal(runnerErrors.ErrUnauthorized, err)
}
func (s *RepoTestSuite) TestDeleteRepositoryPoolDefinedFailed() {
- pool, err := s.Fixtures.Store.CreateRepositoryPool(s.Fixtures.AdminContext, s.Fixtures.StoreRepos["test-repo-1"].ID, s.Fixtures.CreatePoolParams)
+ entity := params.ForgeEntity{
+ ID: s.Fixtures.StoreRepos["test-repo-1"].ID,
+ EntityType: params.ForgeEntityTypeRepository,
+ }
+ pool, err := s.Fixtures.Store.CreateEntityPool(s.Fixtures.AdminContext, entity, s.Fixtures.CreatePoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create store repositories pool: %v", err))
}
- err = s.Runner.DeleteRepository(s.Fixtures.AdminContext, s.Fixtures.StoreRepos["test-repo-1"].ID)
+ err = s.Runner.DeleteRepository(s.Fixtures.AdminContext, s.Fixtures.StoreRepos["test-repo-1"].ID, true)
s.Require().Equal(runnerErrors.NewBadRequestError("repo has pools defined (%s)", pool.ID), err)
}
@@ -286,14 +388,14 @@ func (s *RepoTestSuite) TestDeleteRepositoryPoolDefinedFailed() {
func (s *RepoTestSuite) TestDeleteRepositoryPoolMgrFailed() {
s.Fixtures.PoolMgrCtrlMock.On("DeleteRepoPoolManager", mock.AnythingOfType("params.Repository")).Return(s.Fixtures.ErrMock)
- err := s.Runner.DeleteRepository(s.Fixtures.AdminContext, s.Fixtures.StoreRepos["test-repo-1"].ID)
+ err := s.Runner.DeleteRepository(s.Fixtures.AdminContext, s.Fixtures.StoreRepos["test-repo-1"].ID, true)
s.Fixtures.PoolMgrCtrlMock.AssertExpectations(s.T())
- s.Require().Equal(fmt.Sprintf("deleting repo pool manager: %s", s.Fixtures.ErrMock.Error()), err.Error())
+ s.Require().Equal(fmt.Sprintf("error deleting repo pool manager: %s", s.Fixtures.ErrMock.Error()), err.Error())
}
func (s *RepoTestSuite) TestUpdateRepository() {
- s.Fixtures.PoolMgrCtrlMock.On("UpdateRepoPoolManager", s.Fixtures.AdminContext, mock.AnythingOfType("params.Repository")).Return(s.Fixtures.PoolMgrMock, nil)
+ s.Fixtures.PoolMgrCtrlMock.On("GetRepoPoolManager", mock.AnythingOfType("params.Repository")).Return(s.Fixtures.PoolMgrMock, nil)
s.Fixtures.PoolMgrMock.On("Status").Return(params.PoolManagerStatus{IsRunning: true}, nil)
repo, err := s.Runner.UpdateRepository(s.Fixtures.AdminContext, s.Fixtures.StoreRepos["test-repo-1"].ID, s.Fixtures.UpdateRepoParams)
@@ -301,45 +403,61 @@ func (s *RepoTestSuite) TestUpdateRepository() {
s.Fixtures.PoolMgrCtrlMock.AssertExpectations(s.T())
s.Fixtures.PoolMgrMock.AssertExpectations(s.T())
s.Require().Nil(err)
- s.Require().Equal(s.Fixtures.UpdateRepoParams.CredentialsName, repo.CredentialsName)
+ s.Require().Equal(s.Fixtures.UpdateRepoParams.CredentialsName, repo.Credentials.Name)
s.Require().Equal(s.Fixtures.UpdateRepoParams.WebhookSecret, repo.WebhookSecret)
+ s.Require().Equal(params.PoolBalancerTypeRoundRobin, repo.PoolBalancerType)
+}
+
+func (s *RepoTestSuite) TestUpdateRepositoryBalancingType() {
+ s.Fixtures.PoolMgrCtrlMock.On("GetRepoPoolManager", mock.AnythingOfType("params.Repository")).Return(s.Fixtures.PoolMgrMock, nil)
+ s.Fixtures.PoolMgrMock.On("Status").Return(params.PoolManagerStatus{IsRunning: true}, nil)
+
+ updateRepoParams := s.Fixtures.UpdateRepoParams
+ updateRepoParams.PoolBalancerType = params.PoolBalancerTypePack
+ repo, err := s.Runner.UpdateRepository(s.Fixtures.AdminContext, s.Fixtures.StoreRepos["test-repo-1"].ID, updateRepoParams)
+
+ s.Fixtures.PoolMgrCtrlMock.AssertExpectations(s.T())
+ s.Fixtures.PoolMgrMock.AssertExpectations(s.T())
+ s.Require().Nil(err)
+ s.Require().Equal(updateRepoParams.CredentialsName, repo.Credentials.Name)
+ s.Require().Equal(updateRepoParams.WebhookSecret, repo.WebhookSecret)
+ s.Require().Equal(params.PoolBalancerTypePack, repo.PoolBalancerType)
}
func (s *RepoTestSuite) TestUpdateRepositoryErrUnauthorized() {
_, err := s.Runner.UpdateRepository(context.Background(), "dummy-repo-id", s.Fixtures.UpdateRepoParams)
-
s.Require().Equal(runnerErrors.ErrUnauthorized, err)
}
func (s *RepoTestSuite) TestUpdateRepositoryInvalidCreds() {
- s.Fixtures.UpdateRepoParams.CredentialsName = "invalid-creds-name"
+ s.Fixtures.UpdateRepoParams.CredentialsName = invalidCredentialsName
_, err := s.Runner.UpdateRepository(s.Fixtures.AdminContext, s.Fixtures.StoreRepos["test-repo-1"].ID, s.Fixtures.UpdateRepoParams)
- s.Require().Equal(runnerErrors.NewBadRequestError("invalid credentials (%s) for repo %s/%s", s.Fixtures.UpdateRepoParams.CredentialsName, s.Fixtures.StoreRepos["test-repo-1"].Owner, s.Fixtures.StoreRepos["test-repo-1"].Name), err)
+ if !errors.Is(err, runnerErrors.ErrNotFound) {
+ s.FailNow(fmt.Sprintf("expected error: %v", runnerErrors.ErrNotFound))
+ }
}
func (s *RepoTestSuite) TestUpdateRepositoryPoolMgrFailed() {
- s.Fixtures.PoolMgrCtrlMock.On("UpdateRepoPoolManager", s.Fixtures.AdminContext, mock.AnythingOfType("params.Repository")).Return(s.Fixtures.PoolMgrMock, s.Fixtures.ErrMock)
+ s.Fixtures.PoolMgrCtrlMock.On("GetRepoPoolManager", mock.AnythingOfType("params.Repository")).Return(s.Fixtures.PoolMgrMock, s.Fixtures.ErrMock)
_, err := s.Runner.UpdateRepository(s.Fixtures.AdminContext, s.Fixtures.StoreRepos["test-repo-1"].ID, s.Fixtures.UpdateRepoParams)
s.Fixtures.PoolMgrCtrlMock.AssertExpectations(s.T())
- s.Require().Equal(fmt.Sprintf("failed to update pool manager: %s", s.Fixtures.ErrMock.Error()), err.Error())
+ s.Require().Equal(fmt.Sprintf("error getting pool manager: %s", s.Fixtures.ErrMock.Error()), err.Error())
}
func (s *RepoTestSuite) TestUpdateRepositoryCreateRepoPoolMgrFailed() {
- s.Fixtures.PoolMgrCtrlMock.On("UpdateRepoPoolManager", s.Fixtures.AdminContext, mock.AnythingOfType("params.Repository")).Return(s.Fixtures.PoolMgrMock, s.Fixtures.ErrMock)
+ s.Fixtures.PoolMgrCtrlMock.On("GetRepoPoolManager", mock.AnythingOfType("params.Repository")).Return(s.Fixtures.PoolMgrMock, s.Fixtures.ErrMock)
_, err := s.Runner.UpdateRepository(s.Fixtures.AdminContext, s.Fixtures.StoreRepos["test-repo-1"].ID, s.Fixtures.UpdateRepoParams)
s.Fixtures.PoolMgrCtrlMock.AssertExpectations(s.T())
- s.Require().Equal(fmt.Sprintf("failed to update pool manager: %s", s.Fixtures.ErrMock.Error()), err.Error())
+ s.Require().Equal(fmt.Sprintf("error getting pool manager: %s", s.Fixtures.ErrMock.Error()), err.Error())
}
func (s *RepoTestSuite) TestCreateRepoPool() {
- s.Fixtures.PoolMgrCtrlMock.On("GetRepoPoolManager", mock.AnythingOfType("params.Repository")).Return(s.Fixtures.PoolMgrMock, nil)
-
pool, err := s.Runner.CreateRepoPool(s.Fixtures.AdminContext, s.Fixtures.StoreRepos["test-repo-1"].ID, s.Fixtures.CreatePoolParams)
s.Fixtures.PoolMgrMock.AssertExpectations(s.T())
@@ -363,30 +481,21 @@ func (s *RepoTestSuite) TestCreateRepoPoolErrUnauthorized() {
s.Require().Equal(runnerErrors.ErrUnauthorized, err)
}
-func (s *RepoTestSuite) TestCreateRepoPoolErrNotFound() {
- s.Fixtures.PoolMgrCtrlMock.On("GetRepoPoolManager", mock.AnythingOfType("params.Repository")).Return(s.Fixtures.PoolMgrMock, s.Fixtures.ErrMock)
-
- _, err := s.Runner.CreateRepoPool(s.Fixtures.AdminContext, s.Fixtures.StoreRepos["test-repo-1"].ID, s.Fixtures.CreatePoolParams)
-
- s.Fixtures.PoolMgrMock.AssertExpectations(s.T())
- s.Fixtures.PoolMgrCtrlMock.AssertExpectations(s.T())
- s.Require().Equal(runnerErrors.ErrNotFound, err)
-}
-
func (s *RepoTestSuite) TestCreateRepoPoolFetchPoolParamsFailed() {
- s.Fixtures.CreatePoolParams.ProviderName = "not-existent-provider-name"
-
- s.Fixtures.PoolMgrCtrlMock.On("GetRepoPoolManager", mock.AnythingOfType("params.Repository")).Return(s.Fixtures.PoolMgrMock, nil)
-
+ s.Fixtures.CreatePoolParams.ProviderName = notExistingProviderName
_, err := s.Runner.CreateRepoPool(s.Fixtures.AdminContext, s.Fixtures.StoreRepos["test-repo-1"].ID, s.Fixtures.CreatePoolParams)
s.Fixtures.PoolMgrMock.AssertExpectations(s.T())
s.Fixtures.PoolMgrCtrlMock.AssertExpectations(s.T())
- s.Require().Regexp("fetching pool params: no such provider", err.Error())
+ s.Require().Regexp("appending tags to create pool params: no such provider not-existent-provider-name", err.Error())
}
func (s *RepoTestSuite) TestGetRepoPoolByID() {
- repoPool, err := s.Fixtures.Store.CreateRepositoryPool(s.Fixtures.AdminContext, s.Fixtures.StoreRepos["test-repo-1"].ID, s.Fixtures.CreatePoolParams)
+ entity := params.ForgeEntity{
+ ID: s.Fixtures.StoreRepos["test-repo-1"].ID,
+ EntityType: params.ForgeEntityTypeRepository,
+ }
+ repoPool, err := s.Fixtures.Store.CreateEntityPool(s.Fixtures.AdminContext, entity, s.Fixtures.CreatePoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create repo pool: %s", err))
}
@@ -404,7 +513,11 @@ func (s *RepoTestSuite) TestGetRepoPoolByIDErrUnauthorized() {
}
func (s *RepoTestSuite) TestDeleteRepoPool() {
- pool, err := s.Fixtures.Store.CreateRepositoryPool(s.Fixtures.AdminContext, s.Fixtures.StoreRepos["test-repo-1"].ID, s.Fixtures.CreatePoolParams)
+ entity := params.ForgeEntity{
+ ID: s.Fixtures.StoreRepos["test-repo-1"].ID,
+ EntityType: params.ForgeEntityTypeRepository,
+ }
+ pool, err := s.Fixtures.Store.CreateEntityPool(s.Fixtures.AdminContext, entity, s.Fixtures.CreatePoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create repo pool: %s", err))
}
@@ -413,8 +526,8 @@ func (s *RepoTestSuite) TestDeleteRepoPool() {
s.Require().Nil(err)
- _, err = s.Fixtures.Store.GetRepositoryPool(s.Fixtures.AdminContext, s.Fixtures.StoreRepos["test-repo-1"].ID, pool.ID)
- s.Require().Equal("fetching pool: finding pool: not found", err.Error())
+ _, err = s.Fixtures.Store.GetEntityPool(s.Fixtures.AdminContext, entity, pool.ID)
+ s.Require().Equal("fetching pool: error finding pool: not found", err.Error())
}
func (s *RepoTestSuite) TestDeleteRepoPoolErrUnauthorized() {
@@ -424,7 +537,11 @@ func (s *RepoTestSuite) TestDeleteRepoPoolErrUnauthorized() {
}
func (s *RepoTestSuite) TestDeleteRepoPoolRunnersFailed() {
- pool, err := s.Fixtures.Store.CreateRepositoryPool(s.Fixtures.AdminContext, s.Fixtures.StoreRepos["test-repo-1"].ID, s.Fixtures.CreatePoolParams)
+ entity := params.ForgeEntity{
+ ID: s.Fixtures.StoreRepos["test-repo-1"].ID,
+ EntityType: params.ForgeEntityTypeRepository,
+ }
+ pool, err := s.Fixtures.Store.CreateEntityPool(s.Fixtures.AdminContext, entity, s.Fixtures.CreatePoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create repo pool: %s", err))
}
@@ -439,10 +556,14 @@ func (s *RepoTestSuite) TestDeleteRepoPoolRunnersFailed() {
}
func (s *RepoTestSuite) TestListRepoPools() {
+ entity := params.ForgeEntity{
+ ID: s.Fixtures.StoreRepos["test-repo-1"].ID,
+ EntityType: params.ForgeEntityTypeRepository,
+ }
repoPools := []params.Pool{}
for i := 1; i <= 2; i++ {
s.Fixtures.CreatePoolParams.Image = fmt.Sprintf("test-repo-%v", i)
- pool, err := s.Fixtures.Store.CreateRepositoryPool(s.Fixtures.AdminContext, s.Fixtures.StoreRepos["test-repo-1"].ID, s.Fixtures.CreatePoolParams)
+ pool, err := s.Fixtures.Store.CreateEntityPool(s.Fixtures.AdminContext, entity, s.Fixtures.CreatePoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create repo pool: %v", err))
}
@@ -462,7 +583,11 @@ func (s *RepoTestSuite) TestListRepoPoolsErrUnauthorized() {
}
func (s *RepoTestSuite) TestListPoolInstances() {
- pool, err := s.Fixtures.Store.CreateRepositoryPool(s.Fixtures.AdminContext, s.Fixtures.StoreRepos["test-repo-1"].ID, s.Fixtures.CreatePoolParams)
+ entity := params.ForgeEntity{
+ ID: s.Fixtures.StoreRepos["test-repo-1"].ID,
+ EntityType: params.ForgeEntityTypeRepository,
+ }
+ pool, err := s.Fixtures.Store.CreateEntityPool(s.Fixtures.AdminContext, entity, s.Fixtures.CreatePoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create repo pool: %v", err))
}
@@ -489,7 +614,11 @@ func (s *RepoTestSuite) TestListPoolInstancesErrUnauthorized() {
}
func (s *RepoTestSuite) TestUpdateRepoPool() {
- repoPool, err := s.Fixtures.Store.CreateRepositoryPool(s.Fixtures.AdminContext, s.Fixtures.StoreRepos["test-repo-1"].ID, s.Fixtures.CreatePoolParams)
+ entity := params.ForgeEntity{
+ ID: s.Fixtures.StoreRepos["test-repo-1"].ID,
+ EntityType: params.ForgeEntityTypeRepository,
+ }
+ repoPool, err := s.Fixtures.Store.CreateEntityPool(s.Fixtures.AdminContext, entity, s.Fixtures.CreatePoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create store repositories pool: %v", err))
}
@@ -508,7 +637,11 @@ func (s *RepoTestSuite) TestUpdateRepoPoolErrUnauthorized() {
}
func (s *RepoTestSuite) TestUpdateRepoPoolMinIdleGreaterThanMax() {
- pool, err := s.Fixtures.Store.CreateRepositoryPool(s.Fixtures.AdminContext, s.Fixtures.StoreRepos["test-repo-1"].ID, s.Fixtures.CreatePoolParams)
+ entity := params.ForgeEntity{
+ ID: s.Fixtures.StoreRepos["test-repo-1"].ID,
+ EntityType: params.ForgeEntityTypeRepository,
+ }
+ pool, err := s.Fixtures.Store.CreateEntityPool(s.Fixtures.AdminContext, entity, s.Fixtures.CreatePoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create repo pool: %s", err))
}
@@ -523,7 +656,11 @@ func (s *RepoTestSuite) TestUpdateRepoPoolMinIdleGreaterThanMax() {
}
func (s *RepoTestSuite) TestListRepoInstances() {
- pool, err := s.Fixtures.Store.CreateRepositoryPool(s.Fixtures.AdminContext, s.Fixtures.StoreRepos["test-repo-1"].ID, s.Fixtures.CreatePoolParams)
+ entity := params.ForgeEntity{
+ ID: s.Fixtures.StoreRepos["test-repo-1"].ID,
+ EntityType: params.ForgeEntityTypeRepository,
+ }
+ pool, err := s.Fixtures.Store.CreateEntityPool(s.Fixtures.AdminContext, entity, s.Fixtures.CreatePoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create repo pool: %v", err))
}
@@ -552,7 +689,7 @@ func (s *RepoTestSuite) TestListRepoInstancesErrUnauthorized() {
func (s *RepoTestSuite) TestFindRepoPoolManager() {
s.Fixtures.PoolMgrCtrlMock.On("GetRepoPoolManager", mock.AnythingOfType("params.Repository")).Return(s.Fixtures.PoolMgrMock, nil)
- poolManager, err := s.Runner.findRepoPoolManager(s.Fixtures.StoreRepos["test-repo-1"].Owner, s.Fixtures.StoreRepos["test-repo-1"].Name)
+ poolManager, err := s.Runner.findRepoPoolManager(s.Fixtures.StoreRepos["test-repo-1"].Owner, s.Fixtures.StoreRepos["test-repo-1"].Name, s.Fixtures.StoreRepos["test-repo-1"].Endpoint.Name)
s.Fixtures.PoolMgrMock.AssertExpectations(s.T())
s.Fixtures.PoolMgrCtrlMock.AssertExpectations(s.T())
@@ -563,7 +700,7 @@ func (s *RepoTestSuite) TestFindRepoPoolManager() {
func (s *RepoTestSuite) TestFindRepoPoolManagerFetchPoolMgrFailed() {
s.Fixtures.PoolMgrCtrlMock.On("GetRepoPoolManager", mock.AnythingOfType("params.Repository")).Return(s.Fixtures.PoolMgrMock, s.Fixtures.ErrMock)
- _, err := s.Runner.findRepoPoolManager(s.Fixtures.StoreRepos["test-repo-1"].Owner, s.Fixtures.StoreRepos["test-repo-1"].Name)
+ _, err := s.Runner.findRepoPoolManager(s.Fixtures.StoreRepos["test-repo-1"].Owner, s.Fixtures.StoreRepos["test-repo-1"].Name, s.Fixtures.StoreRepos["test-repo-1"].Endpoint.Name)
s.Fixtures.PoolMgrMock.AssertExpectations(s.T())
s.Fixtures.PoolMgrCtrlMock.AssertExpectations(s.T())
diff --git a/runner/runner.go b/runner/runner.go
index b8120adc..bf081522 100644
--- a/runner/runner.go
+++ b/runner/runner.go
@@ -17,21 +17,24 @@ package runner
import (
"context"
"crypto/hmac"
- "crypto/sha1"
+ "crypto/sha1" //nolint:golangci-lint,gosec // sha1 is used for github webhooks
"crypto/sha256"
"encoding/hex"
"encoding/json"
+ "errors"
"fmt"
"hash"
- "log"
+ "log/slog"
+ "net/url"
"os"
"strings"
"sync"
"time"
- commonParams "github.com/cloudbase/garm-provider-common/params"
+ "golang.org/x/sync/errgroup"
runnerErrors "github.com/cloudbase/garm-provider-common/errors"
+ commonParams "github.com/cloudbase/garm-provider-common/params"
"github.com/cloudbase/garm-provider-common/util"
"github.com/cloudbase/garm/auth"
"github.com/cloudbase/garm/config"
@@ -40,23 +43,19 @@ import (
"github.com/cloudbase/garm/runner/common"
"github.com/cloudbase/garm/runner/pool"
"github.com/cloudbase/garm/runner/providers"
- "golang.org/x/sync/errgroup"
-
- "github.com/google/uuid"
- "github.com/juju/clock"
- "github.com/juju/retry"
- "github.com/pkg/errors"
+ "github.com/cloudbase/garm/util/github"
+ "github.com/cloudbase/garm/util/github/scalesets"
)
func NewRunner(ctx context.Context, cfg config.Config, db dbCommon.Store) (*Runner, error) {
- ctrlId, err := db.ControllerInfo()
+ ctrlID, err := db.ControllerInfo()
if err != nil {
- return nil, errors.Wrap(err, "fetching controller info")
+ return nil, fmt.Errorf("error fetching controller info: %w", err)
}
- providers, err := providers.LoadProvidersFromConfig(ctx, cfg, ctrlId.ControllerID.String())
+ providers, err := providers.LoadProvidersFromConfig(ctx, cfg, ctrlID.ControllerID.String())
if err != nil {
- return nil, errors.Wrap(err, "loading providers")
+ return nil, fmt.Errorf("error loading providers: %w", err)
}
creds := map[string]config.Github{}
@@ -66,9 +65,8 @@ func NewRunner(ctx context.Context, cfg config.Config, db dbCommon.Store) (*Runn
}
poolManagerCtrl := &poolManagerCtrl{
- controllerID: ctrlId.ControllerID.String(),
config: cfg,
- credentials: creds,
+ store: db,
repositories: map[string]common.PoolManager{},
organizations: map[string]common.PoolManager{},
enterprises: map[string]common.PoolManager{},
@@ -79,12 +77,10 @@ func NewRunner(ctx context.Context, cfg config.Config, db dbCommon.Store) (*Runn
store: db,
poolManagerCtrl: poolManagerCtrl,
providers: providers,
- credentials: creds,
- controllerID: ctrlId.ControllerID,
}
if err := runner.loadReposOrgsAndEnterprises(); err != nil {
- return nil, errors.Wrap(err, "loading pool managers")
+ return nil, fmt.Errorf("error loading pool managers: %w", err)
}
return runner, nil
@@ -93,9 +89,8 @@ func NewRunner(ctx context.Context, cfg config.Config, db dbCommon.Store) (*Runn
type poolManagerCtrl struct {
mux sync.Mutex
- controllerID string
- config config.Config
- credentials map[string]config.Github
+ config config.Config
+ store dbCommon.Store
repositories map[string]common.PoolManager
organizations map[string]common.PoolManager
@@ -106,48 +101,28 @@ func (p *poolManagerCtrl) CreateRepoPoolManager(ctx context.Context, repo params
p.mux.Lock()
defer p.mux.Unlock()
- cfgInternal, err := p.getInternalConfig(repo.CredentialsName)
+ entity, err := repo.GetEntity()
if err != nil {
- return nil, errors.Wrap(err, "fetching internal config")
+ return nil, fmt.Errorf("error getting entity: %w", err)
}
- poolManager, err := pool.NewRepositoryPoolManager(ctx, repo, cfgInternal, providers, store)
+
+ instanceTokenGetter, err := auth.NewInstanceTokenGetter(p.config.JWTAuth.Secret)
if err != nil {
- return nil, errors.Wrap(err, "creating repo pool manager")
+ return nil, fmt.Errorf("error creating instance token getter: %w", err)
+ }
+ poolManager, err := pool.NewEntityPoolManager(ctx, entity, instanceTokenGetter, providers, store)
+ if err != nil {
+ return nil, fmt.Errorf("error creating repo pool manager: %w", err)
}
p.repositories[repo.ID] = poolManager
return poolManager, nil
}
-func (p *poolManagerCtrl) UpdateRepoPoolManager(ctx context.Context, repo params.Repository) (common.PoolManager, error) {
- p.mux.Lock()
- defer p.mux.Unlock()
-
- poolMgr, ok := p.repositories[repo.ID]
- if !ok {
- return nil, errors.Wrapf(runnerErrors.ErrNotFound, "repository %s/%s pool manager not loaded", repo.Owner, repo.Name)
- }
-
- internalCfg, err := p.getInternalConfig(repo.CredentialsName)
- if err != nil {
- return nil, errors.Wrap(err, "fetching internal config")
- }
-
- newState := params.UpdatePoolStateParams{
- WebhookSecret: repo.WebhookSecret,
- InternalConfig: &internalCfg,
- }
-
- if err := poolMgr.RefreshState(newState); err != nil {
- return nil, errors.Wrap(err, "updating repo pool manager")
- }
- return poolMgr, nil
-}
-
func (p *poolManagerCtrl) GetRepoPoolManager(repo params.Repository) (common.PoolManager, error) {
if repoPoolMgr, ok := p.repositories[repo.ID]; ok {
return repoPoolMgr, nil
}
- return nil, errors.Wrapf(runnerErrors.ErrNotFound, "repository %s/%s pool manager not loaded", repo.Owner, repo.Name)
+ return nil, fmt.Errorf("repository %s/%s pool manager not loaded: %w", repo.Owner, repo.Name, runnerErrors.ErrNotFound)
}
func (p *poolManagerCtrl) DeleteRepoPoolManager(repo params.Repository) error {
@@ -157,7 +132,7 @@ func (p *poolManagerCtrl) DeleteRepoPoolManager(repo params.Repository) error {
poolMgr, ok := p.repositories[repo.ID]
if ok {
if err := poolMgr.Stop(); err != nil {
- return errors.Wrap(err, "stopping repo pool manager")
+ return fmt.Errorf("error stopping repo pool manager: %w", err)
}
delete(p.repositories, repo.ID)
}
@@ -172,48 +147,28 @@ func (p *poolManagerCtrl) CreateOrgPoolManager(ctx context.Context, org params.O
p.mux.Lock()
defer p.mux.Unlock()
- cfgInternal, err := p.getInternalConfig(org.CredentialsName)
+ entity, err := org.GetEntity()
if err != nil {
- return nil, errors.Wrap(err, "fetching internal config")
+ return nil, fmt.Errorf("error getting entity: %w", err)
}
- poolManager, err := pool.NewOrganizationPoolManager(ctx, org, cfgInternal, providers, store)
+
+ instanceTokenGetter, err := auth.NewInstanceTokenGetter(p.config.JWTAuth.Secret)
if err != nil {
- return nil, errors.Wrap(err, "creating org pool manager")
+ return nil, fmt.Errorf("error creating instance token getter: %w", err)
+ }
+ poolManager, err := pool.NewEntityPoolManager(ctx, entity, instanceTokenGetter, providers, store)
+ if err != nil {
+ return nil, fmt.Errorf("error creating org pool manager: %w", err)
}
p.organizations[org.ID] = poolManager
return poolManager, nil
}
-func (p *poolManagerCtrl) UpdateOrgPoolManager(ctx context.Context, org params.Organization) (common.PoolManager, error) {
- p.mux.Lock()
- defer p.mux.Unlock()
-
- poolMgr, ok := p.organizations[org.ID]
- if !ok {
- return nil, errors.Wrapf(runnerErrors.ErrNotFound, "org %s pool manager not loaded", org.Name)
- }
-
- internalCfg, err := p.getInternalConfig(org.CredentialsName)
- if err != nil {
- return nil, errors.Wrap(err, "fetching internal config")
- }
-
- newState := params.UpdatePoolStateParams{
- WebhookSecret: org.WebhookSecret,
- InternalConfig: &internalCfg,
- }
-
- if err := poolMgr.RefreshState(newState); err != nil {
- return nil, errors.Wrap(err, "updating repo pool manager")
- }
- return poolMgr, nil
-}
-
func (p *poolManagerCtrl) GetOrgPoolManager(org params.Organization) (common.PoolManager, error) {
if orgPoolMgr, ok := p.organizations[org.ID]; ok {
return orgPoolMgr, nil
}
- return nil, errors.Wrapf(runnerErrors.ErrNotFound, "organization %s pool manager not loaded", org.Name)
+ return nil, fmt.Errorf("organization %s pool manager not loaded: %w", org.Name, runnerErrors.ErrNotFound)
}
func (p *poolManagerCtrl) DeleteOrgPoolManager(org params.Organization) error {
@@ -223,7 +178,7 @@ func (p *poolManagerCtrl) DeleteOrgPoolManager(org params.Organization) error {
poolMgr, ok := p.organizations[org.ID]
if ok {
if err := poolMgr.Stop(); err != nil {
- return errors.Wrap(err, "stopping org pool manager")
+ return fmt.Errorf("error stopping org pool manager: %w", err)
}
delete(p.organizations, org.ID)
}
@@ -238,48 +193,28 @@ func (p *poolManagerCtrl) CreateEnterprisePoolManager(ctx context.Context, enter
p.mux.Lock()
defer p.mux.Unlock()
- cfgInternal, err := p.getInternalConfig(enterprise.CredentialsName)
+ entity, err := enterprise.GetEntity()
if err != nil {
- return nil, errors.Wrap(err, "fetching internal config")
+ return nil, fmt.Errorf("error getting entity: %w", err)
}
- poolManager, err := pool.NewEnterprisePoolManager(ctx, enterprise, cfgInternal, providers, store)
+
+ instanceTokenGetter, err := auth.NewInstanceTokenGetter(p.config.JWTAuth.Secret)
if err != nil {
- return nil, errors.Wrap(err, "creating enterprise pool manager")
+ return nil, fmt.Errorf("error creating instance token getter: %w", err)
+ }
+ poolManager, err := pool.NewEntityPoolManager(ctx, entity, instanceTokenGetter, providers, store)
+ if err != nil {
+ return nil, fmt.Errorf("error creating enterprise pool manager: %w", err)
}
p.enterprises[enterprise.ID] = poolManager
return poolManager, nil
}
-func (p *poolManagerCtrl) UpdateEnterprisePoolManager(ctx context.Context, enterprise params.Enterprise) (common.PoolManager, error) {
- p.mux.Lock()
- defer p.mux.Unlock()
-
- poolMgr, ok := p.enterprises[enterprise.ID]
- if !ok {
- return nil, errors.Wrapf(runnerErrors.ErrNotFound, "enterprise %s pool manager not loaded", enterprise.Name)
- }
-
- internalCfg, err := p.getInternalConfig(enterprise.CredentialsName)
- if err != nil {
- return nil, errors.Wrap(err, "fetching internal config")
- }
-
- newState := params.UpdatePoolStateParams{
- WebhookSecret: enterprise.WebhookSecret,
- InternalConfig: &internalCfg,
- }
-
- if err := poolMgr.RefreshState(newState); err != nil {
- return nil, errors.Wrap(err, "updating repo pool manager")
- }
- return poolMgr, nil
-}
-
func (p *poolManagerCtrl) GetEnterprisePoolManager(enterprise params.Enterprise) (common.PoolManager, error) {
if enterprisePoolMgr, ok := p.enterprises[enterprise.ID]; ok {
return enterprisePoolMgr, nil
}
- return nil, errors.Wrapf(runnerErrors.ErrNotFound, "enterprise %s pool manager not loaded", enterprise.Name)
+ return nil, fmt.Errorf("enterprise %s pool manager not loaded: %w", enterprise.Name, runnerErrors.ErrNotFound)
}
func (p *poolManagerCtrl) DeleteEnterprisePoolManager(enterprise params.Enterprise) error {
@@ -289,7 +224,7 @@ func (p *poolManagerCtrl) DeleteEnterprisePoolManager(enterprise params.Enterpri
poolMgr, ok := p.enterprises[enterprise.ID]
if ok {
if err := poolMgr.Stop(); err != nil {
- return errors.Wrap(err, "stopping enterprise pool manager")
+ return fmt.Errorf("error stopping enterprise pool manager: %w", err)
}
delete(p.enterprises, enterprise.ID)
}
@@ -300,34 +235,6 @@ func (p *poolManagerCtrl) GetEnterprisePoolManagers() (map[string]common.PoolMan
return p.enterprises, nil
}
-func (p *poolManagerCtrl) getInternalConfig(credsName string) (params.Internal, error) {
- creds, ok := p.credentials[credsName]
- if !ok {
- return params.Internal{}, runnerErrors.NewBadRequestError("invalid credential name (%s)", credsName)
- }
-
- caBundle, err := creds.CACertBundle()
- if err != nil {
- return params.Internal{}, fmt.Errorf("fetching CA bundle for creds: %w", err)
- }
-
- return params.Internal{
- OAuth2Token: creds.OAuth2Token,
- ControllerID: p.controllerID,
- InstanceCallbackURL: p.config.Default.CallbackURL,
- InstanceMetadataURL: p.config.Default.MetadataURL,
- JWTSecret: p.config.JWTAuth.Secret,
- GithubCredentialsDetails: params.GithubCredentials{
- Name: creds.Name,
- Description: creds.Description,
- BaseURL: creds.BaseEndpoint(),
- APIBaseURL: creds.APIEndpoint(),
- UploadBaseURL: creds.UploadEndpoint(),
- CABundle: caBundle,
- },
- }, nil
-}
-
type Runner struct {
mux sync.Mutex
@@ -337,11 +244,24 @@ type Runner struct {
poolManagerCtrl PoolManagerController
- providers map[string]common.Provider
- credentials map[string]config.Github
+ providers map[string]common.Provider
+}
- controllerInfo params.ControllerInfo
- controllerID uuid.UUID
+// UpdateController will update the controller settings.
+func (r *Runner) UpdateController(ctx context.Context, param params.UpdateControllerParams) (params.ControllerInfo, error) {
+ if !auth.IsAdmin(ctx) {
+ return params.ControllerInfo{}, runnerErrors.ErrUnauthorized
+ }
+
+ if err := param.Validate(); err != nil {
+ return params.ControllerInfo{}, fmt.Errorf("error validating controller update params: %w", err)
+ }
+
+ info, err := r.store.UpdateController(param)
+ if err != nil {
+ return params.ControllerInfo{}, fmt.Errorf("error updating controller info: %w", err)
+ }
+ return info, nil
}
// GetControllerInfo returns the controller id and the hostname.
@@ -359,45 +279,34 @@ func (r *Runner) GetControllerInfo(ctx context.Context) (params.ControllerInfo,
// As a side note, Windows requires a reboot for the hostname change to take effect,
// so if we'll ever support Windows as a target system, the hostname can be cached.
var hostname string
- err := retry.Call(retry.CallArgs{
- Func: func() error {
- var err error
- hostname, err = os.Hostname()
- if err != nil {
- return errors.Wrap(err, "fetching hostname")
+ var err error
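+	// Transient failures to read the hostname are retried a few times, with a
+	// short delay in between. If the request context is canceled while waiting,
+	// the last error is returned immediately.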
+ for range 10 {
+ hostname, err = os.Hostname()
+ if err != nil {
+ select {
+ case <-time.After(10 * time.Millisecond):
+ continue
+ case <-ctx.Done():
}
- return nil
- },
- Attempts: 10,
- Delay: 100 * time.Millisecond,
- Clock: clock.WallClock,
- })
+ return params.ControllerInfo{}, fmt.Errorf("error fetching hostname: %w", err)
+ }
+ break
+ }
if err != nil {
- return params.ControllerInfo{}, errors.Wrap(err, "fetching hostname")
+ return params.ControllerInfo{}, fmt.Errorf("error fetching hostname: %w", err)
}
- r.controllerInfo.Hostname = hostname
- return params.ControllerInfo{
- ControllerID: r.controllerID,
- Hostname: hostname,
- }, nil
-}
-func (r *Runner) ListCredentials(ctx context.Context) ([]params.GithubCredentials, error) {
- if !auth.IsAdmin(ctx) {
- return nil, runnerErrors.ErrUnauthorized
+ info, err := r.store.ControllerInfo()
+ if err != nil {
+ return params.ControllerInfo{}, fmt.Errorf("error fetching controller info: %w", err)
}
- ret := []params.GithubCredentials{}
- for _, val := range r.config.Github {
- ret = append(ret, params.GithubCredentials{
- Name: val.Name,
- Description: val.Description,
- BaseURL: val.BaseEndpoint(),
- APIBaseURL: val.APIEndpoint(),
- UploadBaseURL: val.UploadEndpoint(),
- })
- }
- return ret, nil
+ // This is temporary. Right now, GARM is a single-instance deployment. When we add the
+	// ability to scale out, the hostname field will be moved from here to a dedicated node
+ // object. As a single controller will be made up of multiple nodes, we will need to model
+ // that aspect of GARM.
+ info.Hostname = hostname
+ return info, nil
}
func (r *Runner) ListProviders(ctx context.Context) ([]params.Provider, error) {
@@ -416,26 +325,28 @@ func (r *Runner) loadReposOrgsAndEnterprises() error {
r.mux.Lock()
defer r.mux.Unlock()
- repos, err := r.store.ListRepositories(r.ctx)
+ repos, err := r.store.ListRepositories(r.ctx, params.RepositoryFilter{})
if err != nil {
- return errors.Wrap(err, "fetching repositories")
+ return fmt.Errorf("error fetching repositories: %w", err)
}
- orgs, err := r.store.ListOrganizations(r.ctx)
+ orgs, err := r.store.ListOrganizations(r.ctx, params.OrganizationFilter{})
if err != nil {
- return errors.Wrap(err, "fetching organizations")
+ return fmt.Errorf("error fetching organizations: %w", err)
}
- enterprises, err := r.store.ListEnterprises(r.ctx)
+ enterprises, err := r.store.ListEnterprises(r.ctx, params.EnterpriseFilter{})
if err != nil {
- return errors.Wrap(err, "fetching enterprises")
+ return fmt.Errorf("error fetching enterprises: %w", err)
}
g, _ := errgroup.WithContext(r.ctx)
for _, repo := range repos {
repo := repo
g.Go(func() error {
- log.Printf("creating pool manager for repo %s/%s", repo.Owner, repo.Name)
+ slog.InfoContext(
+ r.ctx, "creating pool manager for repo",
+ "repo_owner", repo.Owner, "repo_name", repo.Name)
_, err := r.poolManagerCtrl.CreateRepoPoolManager(r.ctx, repo, r.providers, r.store)
return err
})
@@ -444,7 +355,7 @@ func (r *Runner) loadReposOrgsAndEnterprises() error {
for _, org := range orgs {
org := org
g.Go(func() error {
- log.Printf("creating pool manager for organization %s", org.Name)
+ slog.InfoContext(r.ctx, "creating pool manager for organization", "org_name", org.Name)
_, err := r.poolManagerCtrl.CreateOrgPoolManager(r.ctx, org, r.providers, r.store)
return err
})
@@ -453,7 +364,7 @@ func (r *Runner) loadReposOrgsAndEnterprises() error {
for _, enterprise := range enterprises {
enterprise := enterprise
g.Go(func() error {
- log.Printf("creating pool manager for enterprise %s", enterprise.Name)
+ slog.InfoContext(r.ctx, "creating pool manager for enterprise", "enterprise_name", enterprise.Name)
_, err := r.poolManagerCtrl.CreateEnterprisePoolManager(r.ctx, enterprise, r.providers, r.store)
return err
})
@@ -471,17 +382,17 @@ func (r *Runner) Start() error {
repositories, err := r.poolManagerCtrl.GetRepoPoolManagers()
if err != nil {
- return errors.Wrap(err, "fetch repo pool managers")
+ return fmt.Errorf("error fetch repo pool managers: %w", err)
}
organizations, err := r.poolManagerCtrl.GetOrgPoolManagers()
if err != nil {
- return errors.Wrap(err, "fetch org pool managers")
+ return fmt.Errorf("error fetch org pool managers: %w", err)
}
enterprises, err := r.poolManagerCtrl.GetEnterprisePoolManagers()
if err != nil {
- return errors.Wrap(err, "fetch enterprise pool managers")
+ return fmt.Errorf("error fetch enterprise pool managers: %w", err)
}
g, _ := errgroup.WithContext(r.ctx)
@@ -521,11 +432,12 @@ func (r *Runner) waitForErrorGroupOrTimeout(g *errgroup.Group) error {
go func() {
done <- g.Wait()
}()
-
+ timer := time.NewTimer(60 * time.Second)
+ defer timer.Stop()
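+	// An explicit timer is used (instead of time.After) so it can be released as
+	// soon as this function returns, rather than lingering for the full 60 seconds
+	// when the error group finishes early.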
select {
case err := <-done:
return err
- case <-time.After(60 * time.Second):
+ case <-timer.C:
return fmt.Errorf("timed out waiting for pool manager start")
}
}
@@ -536,17 +448,17 @@ func (r *Runner) Stop() error {
repos, err := r.poolManagerCtrl.GetRepoPoolManagers()
if err != nil {
- return errors.Wrap(err, "fetch repo pool managers")
+ return fmt.Errorf("error fetching repo pool managers: %w", err)
}
orgs, err := r.poolManagerCtrl.GetOrgPoolManagers()
if err != nil {
- return errors.Wrap(err, "fetch org pool managers")
+ return fmt.Errorf("error fetching org pool managers: %w", err)
}
enterprises, err := r.poolManagerCtrl.GetEnterprisePoolManagers()
if err != nil {
- return errors.Wrap(err, "fetch enterprise pool managers")
+ return fmt.Errorf("error fetching enterprise pool managers: %w", err)
}
g, _ := errgroup.WithContext(r.ctx)
@@ -598,47 +510,47 @@ func (r *Runner) Wait() error {
repos, err := r.poolManagerCtrl.GetRepoPoolManagers()
if err != nil {
- return errors.Wrap(err, "fetch repo pool managers")
+ return fmt.Errorf("error fetching repo pool managers: %w", err)
}
orgs, err := r.poolManagerCtrl.GetOrgPoolManagers()
if err != nil {
- return errors.Wrap(err, "fetch org pool managers")
+ return fmt.Errorf("error fetching org pool managers: %w", err)
}
enterprises, err := r.poolManagerCtrl.GetEnterprisePoolManagers()
if err != nil {
- return errors.Wrap(err, "fetch enterprise pool managers")
+ return fmt.Errorf("error fetching enterprise pool managers: %w", err)
}
- for poolId, repo := range repos {
+ for poolID, repo := range repos {
wg.Add(1)
go func(id string, poolMgr common.PoolManager) {
defer wg.Done()
if err := poolMgr.Wait(); err != nil {
- log.Printf("timed out waiting for pool manager %s to exit", id)
+ slog.With(slog.Any("error", err)).ErrorContext(r.ctx, "timed out waiting for pool manager to exit", "pool_id", id, "pool_mgr_id", poolMgr.ID())
}
- }(poolId, repo)
+ }(poolID, repo)
}
- for poolId, org := range orgs {
+ for poolID, org := range orgs {
wg.Add(1)
go func(id string, poolMgr common.PoolManager) {
defer wg.Done()
if err := poolMgr.Wait(); err != nil {
- log.Printf("timed out waiting for pool manager %s to exit", id)
+ slog.With(slog.Any("error", err)).ErrorContext(r.ctx, "timed out waiting for pool manager to exit", "pool_id", id)
}
- }(poolId, org)
+ }(poolID, org)
}
- for poolId, enterprise := range enterprises {
+ for poolID, enterprise := range enterprises {
wg.Add(1)
go func(id string, poolMgr common.PoolManager) {
defer wg.Done()
if err := poolMgr.Wait(); err != nil {
- log.Printf("timed out waiting for pool manager %s to exit", id)
+ slog.With(slog.Any("error", err)).ErrorContext(r.ctx, "timed out waiting for pool manager to exit", "pool_id", id)
}
- }(poolId, enterprise)
+ }(poolID, enterprise)
}
wg.Wait()
@@ -677,7 +589,7 @@ func (r *Runner) validateHookBody(signature, secret string, body []byte) error {
mac := hmac.New(hashFunc, []byte(secret))
_, err := mac.Write(body)
if err != nil {
- return errors.Wrap(err, "failed to compute sha256")
+ return fmt.Errorf("failed to compute sha256: %w", err)
}
expectedMAC := hex.EncodeToString(mac.Sum(nil))
@@ -688,47 +600,105 @@ func (r *Runner) validateHookBody(signature, secret string, body []byte) error {
return nil
}
-func (r *Runner) DispatchWorkflowJob(hookTargetType, signature string, jobData []byte) error {
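+// findEndpointForJob maps the HTML URL of a workflow job back to the forge
+// endpoint (github.com, a GHES deployment or a Gitea instance) it originated from.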
+func (r *Runner) findEndpointForJob(job params.WorkflowJob, forgeType params.EndpointType) (params.ForgeEndpoint, error) {
+ uri, err := url.ParseRequestURI(job.WorkflowJob.HTMLURL)
+ if err != nil {
+ return params.ForgeEndpoint{}, fmt.Errorf("error parsing job URL: %w", err)
+ }
+ baseURI := fmt.Sprintf("%s://%s", uri.Scheme, uri.Host)
+
+	// Note(gabriel-samfira): Endpoints should be cached. We don't expect a large number of
+	// endpoints; in most cases there will be just one (github.com). Users with a GHES
+	// deployment will typically have one or two extra endpoints (for example, a test
+	// environment). The total should stay small regardless, so the performance of this
+	// lookup is not a concern.
+ var endpoints []params.ForgeEndpoint
+ switch forgeType {
+ case params.GithubEndpointType:
+ endpoints, err = r.store.ListGithubEndpoints(r.ctx)
+ case params.GiteaEndpointType:
+ endpoints, err = r.store.ListGiteaEndpoints(r.ctx)
+ default:
+ return params.ForgeEndpoint{}, runnerErrors.NewBadRequestError("unknown forge type %s", forgeType)
+ }
+
+ if err != nil {
+ return params.ForgeEndpoint{}, fmt.Errorf("error fetching github endpoints: %w", err)
+ }
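+	// Match the job's scheme://host prefix against each configured endpoint.
+	// Base URLs may be stored with a trailing slash, so trim it before comparing.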
+ for _, ep := range endpoints {
+ slog.DebugContext(r.ctx, "checking endpoint", "base_uri", baseURI, "endpoint", ep.BaseURL)
+ epBaseURI := strings.TrimSuffix(ep.BaseURL, "/")
+ if epBaseURI == baseURI {
+ return ep, nil
+ }
+ }
+
+ return params.ForgeEndpoint{}, runnerErrors.NewNotFoundError("no endpoint found for job")
+}
+
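+// DispatchWorkflowJob routes an incoming webhook to the pool manager responsible
+// for the target entity, after validating the payload signature against the
+// configured webhook secret.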
+func (r *Runner) DispatchWorkflowJob(hookTargetType, signature string, forgeType params.EndpointType, jobData []byte) error {
if len(jobData) == 0 {
+ slog.ErrorContext(r.ctx, "missing job data")
return runnerErrors.NewBadRequestError("missing job data")
}
var job params.WorkflowJob
if err := json.Unmarshal(jobData, &job); err != nil {
- return errors.Wrapf(runnerErrors.ErrBadRequest, "invalid job data: %s", err)
+ slog.ErrorContext(r.ctx, "failed to unmarshal job data", "error", err)
+ return fmt.Errorf("invalid job data %s: %w", err, runnerErrors.ErrBadRequest)
+ }
+
+ endpoint, err := r.findEndpointForJob(job, forgeType)
+ if err != nil {
+ slog.ErrorContext(r.ctx, "failed to find endpoint for job", "error", err)
+ return fmt.Errorf("error finding endpoint for job: %w", err)
}
var poolManager common.PoolManager
- var err error
switch HookTargetType(hookTargetType) {
case RepoHook:
- log.Printf("got hook for repo %s/%s", util.SanitizeLogEntry(job.Repository.Owner.Login), util.SanitizeLogEntry(job.Repository.Name))
- poolManager, err = r.findRepoPoolManager(job.Repository.Owner.Login, job.Repository.Name)
+ slog.DebugContext(
+ r.ctx, "got hook for repo",
+ "repo_owner", util.SanitizeLogEntry(job.Repository.Owner.Login),
+ "repo_name", util.SanitizeLogEntry(job.Repository.Name),
+ "endpoint", endpoint.Name)
+ poolManager, err = r.findRepoPoolManager(job.Repository.Owner.Login, job.Repository.Name, endpoint.Name)
case OrganizationHook:
- log.Printf("got hook for org %s", util.SanitizeLogEntry(job.Organization.Login))
- poolManager, err = r.findOrgPoolManager(job.Organization.Login)
+ slog.DebugContext(
+ r.ctx, "got hook for organization",
+ "organization", util.SanitizeLogEntry(job.GetOrgName(forgeType)),
+ "endpoint", endpoint.Name)
+ poolManager, err = r.findOrgPoolManager(job.GetOrgName(forgeType), endpoint.Name)
case EnterpriseHook:
- poolManager, err = r.findEnterprisePoolManager(job.Enterprise.Slug)
+ slog.DebugContext(
+ r.ctx, "got hook for enterprise",
+ "enterprise", util.SanitizeLogEntry(job.Enterprise.Slug),
+ "endpoint", endpoint.Name)
+ poolManager, err = r.findEnterprisePoolManager(job.Enterprise.Slug, endpoint.Name)
default:
return runnerErrors.NewBadRequestError("cannot handle hook target type %s", hookTargetType)
}
+ slog.DebugContext(r.ctx, "found pool manager", "pool_manager", poolManager.ID())
if err != nil {
+ slog.ErrorContext(r.ctx, "failed to find pool manager", "error", err, "hook_target_type", hookTargetType)
// We don't have a repository or organization configured that
// can handle this workflow job.
- return errors.Wrap(err, "fetching poolManager")
+ return fmt.Errorf("error fetching poolManager: %w", err)
}
// We found a pool. Validate the webhook job. If a secret is configured,
// we make sure that the source of this workflow job is valid.
secret := poolManager.WebhookSecret()
if err := r.validateHookBody(signature, secret, jobData); err != nil {
- return errors.Wrap(err, "validating webhook data")
+ slog.ErrorContext(r.ctx, "failed to validate webhook data", "error", err)
+ return fmt.Errorf("error validating webhook data: %w", err)
}
if err := poolManager.HandleWorkflowJob(job); err != nil {
- return errors.Wrap(err, "handling workflow job")
+ slog.ErrorContext(r.ctx, "failed to handle workflow job", "error", err)
+ return fmt.Errorf("error handling workflow job: %w", err)
}
return nil
@@ -736,7 +706,8 @@ func (r *Runner) DispatchWorkflowJob(hookTargetType, signature string, jobData [
func (r *Runner) appendTagsToCreatePoolParams(param params.CreatePoolParams) (params.CreatePoolParams, error) {
if err := param.Validate(); err != nil {
- return params.CreatePoolParams{}, errors.Wrapf(runnerErrors.ErrBadRequest, "validating params: %s", err)
+ return params.CreatePoolParams{}, fmt.Errorf("failed to validate params (%q): %w", err, runnerErrors.ErrBadRequest)
}
if !IsSupportedOSType(param.OSType) {
@@ -752,57 +723,17 @@ func (r *Runner) appendTagsToCreatePoolParams(param params.CreatePoolParams) (pa
return params.CreatePoolParams{}, runnerErrors.NewBadRequestError("no such provider %s", param.ProviderName)
}
- newTags, err := r.processTags(string(param.OSArch), param.OSType, param.Tags)
- if err != nil {
- return params.CreatePoolParams{}, errors.Wrap(err, "processing tags")
- }
-
- param.Tags = newTags
-
return param, nil
}
-func (r *Runner) processTags(osArch string, osType commonParams.OSType, tags []string) ([]string, error) {
- // github automatically adds the "self-hosted" tag as well as the OS type (linux, windows, etc)
- // and architecture (arm, x64, etc) to all self hosted runners. When a workflow job comes in, we try
- // to find a pool based on the labels that are set in the workflow. If we don't explicitly define these
- // default tags for each pool, and the user targets these labels, we won't be able to match any pools.
- // The downside is that all pools with the same OS and arch will have these default labels. Users should
- // set distinct and unique labels on each pool, and explicitly target those labels, or risk assigning
- // the job to the wrong worker type.
- ghArch, err := util.ResolveToGithubArch(osArch)
- if err != nil {
- return nil, errors.Wrap(err, "invalid arch")
- }
-
- ghOSType, err := util.ResolveToGithubTag(osType)
- if err != nil {
- return nil, errors.Wrap(err, "invalid os type")
- }
-
- labels := []string{
- "self-hosted",
- ghArch,
- ghOSType,
- }
-
- for _, val := range tags {
- if val != "self-hosted" && val != ghArch && val != ghOSType {
- labels = append(labels, val)
- }
- }
-
- return labels, nil
-}
-
func (r *Runner) GetInstance(ctx context.Context, instanceName string) (params.Instance, error) {
if !auth.IsAdmin(ctx) {
return params.Instance{}, runnerErrors.ErrUnauthorized
}
- instance, err := r.store.GetInstanceByName(ctx, instanceName)
+ instance, err := r.store.GetInstance(ctx, instanceName)
if err != nil {
- return params.Instance{}, errors.Wrap(err, "fetching instance")
+ return params.Instance{}, fmt.Errorf("error fetching instance: %w", err)
}
return instance, nil
}
@@ -814,19 +745,19 @@ func (r *Runner) ListAllInstances(ctx context.Context) ([]params.Instance, error
instances, err := r.store.ListAllInstances(ctx)
if err != nil {
- return nil, errors.Wrap(err, "fetching instances")
+ return nil, fmt.Errorf("error fetching instances: %w", err)
}
return instances, nil
}
func (r *Runner) AddInstanceStatusMessage(ctx context.Context, param params.InstanceUpdateMessage) error {
- instanceID := auth.InstanceID(ctx)
- if instanceID == "" {
+ instanceName := auth.InstanceName(ctx)
+ if instanceName == "" {
return runnerErrors.ErrUnauthorized
}
- if err := r.store.AddInstanceEvent(ctx, instanceID, params.StatusEvent, params.EventInfo, param.Message); err != nil {
- return errors.Wrap(err, "adding status update")
+ if err := r.store.AddInstanceEvent(ctx, instanceName, params.StatusEvent, params.EventInfo, param.Message); err != nil {
+ return fmt.Errorf("error adding status update: %w", err)
}
updateParams := params.UpdateInstanceParams{
@@ -837,125 +768,200 @@ func (r *Runner) AddInstanceStatusMessage(ctx context.Context, param params.Inst
updateParams.AgentID = *param.AgentID
}
- if _, err := r.store.UpdateInstance(r.ctx, instanceID, updateParams); err != nil {
- return errors.Wrap(err, "updating runner state")
+ if _, err := r.store.UpdateInstance(r.ctx, instanceName, updateParams); err != nil {
+ return fmt.Errorf("error updating runner agent ID: %w", err)
}
return nil
}
-func (r *Runner) GetInstanceGithubRegistrationToken(ctx context.Context) (string, error) {
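+// UpdateSystemInfo records the OS name and version and the forge agent ID that a
+// runner instance reports about itself once it comes online.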
+func (r *Runner) UpdateSystemInfo(ctx context.Context, param params.UpdateSystemInfoParams) error {
instanceName := auth.InstanceName(ctx)
if instanceName == "" {
- return "", runnerErrors.ErrUnauthorized
+ slog.ErrorContext(ctx, "missing instance name")
+ return runnerErrors.ErrUnauthorized
}
- // Check if this instance already fetched a registration token. We only allow an instance to
- // fetch one token. If the instance fails to bootstrap after a token is fetched, we reset the
- // token fetched field when re-queueing the instance.
- if auth.InstanceTokenFetched(ctx) {
- return "", runnerErrors.ErrUnauthorized
+ if param.OSName == "" && param.OSVersion == "" && param.AgentID == nil {
+ // Nothing to update
+ return nil
}
- status := auth.InstanceRunnerStatus(ctx)
- if status != params.RunnerPending && status != params.RunnerInstalling {
- return "", runnerErrors.ErrUnauthorized
- }
-
- instance, err := r.store.GetInstanceByName(ctx, instanceName)
- if err != nil {
- return "", errors.Wrap(err, "fetching instance")
- }
-
- poolMgr, err := r.getPoolManagerFromInstance(ctx, instance)
- if err != nil {
- return "", errors.Wrap(err, "fetching pool manager for instance")
- }
-
- token, err := poolMgr.GithubRunnerRegistrationToken()
- if err != nil {
- return "", errors.Wrap(err, "fetching runner token")
- }
-
- tokenFetched := true
updateParams := params.UpdateInstanceParams{
- TokenFetched: &tokenFetched,
+ OSName: param.OSName,
+ OSVersion: param.OSVersion,
}
- if _, err := r.store.UpdateInstance(r.ctx, instance.ID, updateParams); err != nil {
- return "", errors.Wrap(err, "setting token_fetched for instance")
+ if param.AgentID != nil {
+ updateParams.AgentID = *param.AgentID
}
- if err := r.store.AddInstanceEvent(ctx, instance.ID, params.FetchTokenEvent, params.EventInfo, "runner registration token was retrieved"); err != nil {
- return "", errors.Wrap(err, "recording event")
+ if _, err := r.store.UpdateInstance(r.ctx, instanceName, updateParams); err != nil {
+ return fmt.Errorf("error updating runner system info: %w", err)
}
- return token, nil
+ return nil
}
func (r *Runner) getPoolManagerFromInstance(ctx context.Context, instance params.Instance) (common.PoolManager, error) {
pool, err := r.store.GetPoolByID(ctx, instance.PoolID)
if err != nil {
- return nil, errors.Wrap(err, "fetching pool")
+ return nil, fmt.Errorf("error fetching pool: %w", err)
}
var poolMgr common.PoolManager
- if pool.RepoID != "" {
+ switch {
+ case pool.RepoID != "":
repo, err := r.store.GetRepositoryByID(ctx, pool.RepoID)
if err != nil {
- return nil, errors.Wrap(err, "fetching repo")
+ return nil, fmt.Errorf("error fetching repo: %w", err)
}
- poolMgr, err = r.findRepoPoolManager(repo.Owner, repo.Name)
+ poolMgr, err = r.findRepoPoolManager(repo.Owner, repo.Name, repo.Endpoint.Name)
if err != nil {
- return nil, errors.Wrapf(err, "fetching pool manager for repo %s", pool.RepoName)
+ return nil, fmt.Errorf("error fetching pool manager for repo %s: %w", pool.RepoName, err)
}
- } else if pool.OrgID != "" {
+ case pool.OrgID != "":
org, err := r.store.GetOrganizationByID(ctx, pool.OrgID)
if err != nil {
- return nil, errors.Wrap(err, "fetching org")
+ return nil, fmt.Errorf("error fetching org: %w", err)
}
- poolMgr, err = r.findOrgPoolManager(org.Name)
+ poolMgr, err = r.findOrgPoolManager(org.Name, org.Endpoint.Name)
if err != nil {
- return nil, errors.Wrapf(err, "fetching pool manager for org %s", pool.OrgName)
+ return nil, fmt.Errorf("error fetching pool manager for org %s: %w", pool.OrgName, err)
}
- } else if pool.EnterpriseID != "" {
+ case pool.EnterpriseID != "":
enterprise, err := r.store.GetEnterpriseByID(ctx, pool.EnterpriseID)
if err != nil {
- return nil, errors.Wrap(err, "fetching enterprise")
+ return nil, fmt.Errorf("error fetching enterprise: %w", err)
}
- poolMgr, err = r.findEnterprisePoolManager(enterprise.Name)
+ poolMgr, err = r.findEnterprisePoolManager(enterprise.Name, enterprise.Endpoint.Name)
if err != nil {
- return nil, errors.Wrapf(err, "fetching pool manager for enterprise %s", pool.EnterpriseName)
+ return nil, fmt.Errorf("error fetching pool manager for enterprise %s: %w", pool.EnterpriseName, err)
}
}
return poolMgr, nil
}
-func (r *Runner) ForceDeleteRunner(ctx context.Context, instanceName string) error {
+// DeleteRunner removes a runner from a pool. If forceDelete is true, GARM will ignore any provider errors
+// that may occur, and attempt to remove the runner from GitHub and then the database, regardless of provider
+// errors.
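+// If bypassGithubUnauthorized is true, errors returned by the forge while removing
+// the runner will not abort the deletion.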
+func (r *Runner) DeleteRunner(ctx context.Context, instanceName string, forceDelete, bypassGithubUnauthorized bool) error {
if !auth.IsAdmin(ctx) {
return runnerErrors.ErrUnauthorized
}
- instance, err := r.store.GetInstanceByName(ctx, instanceName)
+ instance, err := r.store.GetInstance(ctx, instanceName)
if err != nil {
- return errors.Wrap(err, "fetching instance")
+ return fmt.Errorf("error fetching instance: %w", err)
}
switch instance.Status {
- case commonParams.InstanceRunning, commonParams.InstanceError:
+ case commonParams.InstanceRunning, commonParams.InstanceError,
+ commonParams.InstancePendingForceDelete, commonParams.InstancePendingDelete:
default:
- return runnerErrors.NewBadRequestError("runner must be in %q or %q state", commonParams.InstanceRunning, commonParams.InstanceError)
+ validStates := []string{
+ string(commonParams.InstanceRunning),
+ string(commonParams.InstanceError),
+ string(commonParams.InstancePendingForceDelete),
+ string(commonParams.InstancePendingDelete),
+ }
+ return runnerErrors.NewBadRequestError("runner must be in one of the following states: %q", strings.Join(validStates, ", "))
}
- poolMgr, err := r.getPoolManagerFromInstance(ctx, instance)
+ ghCli, ssCli, err := r.getGHCliFromInstance(ctx, instance)
if err != nil {
- return errors.Wrap(err, "fetching pool manager for instance")
+ return fmt.Errorf("error fetching github client: %w", err)
}
- if err := poolMgr.ForceDeleteRunner(instance); err != nil {
- return errors.Wrap(err, "removing runner")
+ if instance.AgentID != 0 {
+ switch {
+ case instance.ScaleSetID != 0:
+ err = ssCli.RemoveRunner(ctx, instance.AgentID)
+ case instance.PoolID != "":
+ err = ghCli.RemoveEntityRunner(ctx, instance.AgentID)
+ default:
+ return errors.New("instance does not have a pool or scale set")
+ }
+
+ if err != nil {
+ if !errors.Is(err, runnerErrors.ErrNotFound) {
+ if errors.Is(err, runnerErrors.ErrUnauthorized) && instance.PoolID != "" {
+ poolMgr, err := r.getPoolManagerFromInstance(ctx, instance)
+ if err != nil {
+ return fmt.Errorf("error fetching pool manager for instance: %w", err)
+ }
+ poolMgr.SetPoolRunningState(false, fmt.Sprintf("failed to remove runner: %q", err))
+ }
+ if !bypassGithubUnauthorized {
+ return fmt.Errorf("error removing runner from github: %w", err)
+ }
+ }
+ }
}
+
+ instanceStatus := commonParams.InstancePendingDelete
+ if forceDelete {
+ instanceStatus = commonParams.InstancePendingForceDelete
+ }
+
+ slog.InfoContext(
+ r.ctx, "setting instance status",
+ "runner_name", instance.Name,
+ "status", instanceStatus)
+
+ updateParams := params.UpdateInstanceParams{
+ Status: instanceStatus,
+ }
+ _, err = r.store.UpdateInstance(r.ctx, instance.Name, updateParams)
+ if err != nil {
+ return fmt.Errorf("error updating runner state: %w", err)
+ }
+
return nil
}
+
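+// getGHCliFromInstance resolves the forge entity that owns the given instance (via
+// its pool or scale set) and returns a forge client along with a scale set client
+// for that entity.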
+func (r *Runner) getGHCliFromInstance(ctx context.Context, instance params.Instance) (common.GithubClient, *scalesets.ScaleSetClient, error) {
+ // nolint:golangci-lint,godox
+ // TODO(gabriel-samfira): We can probably cache the entity.
+ var entityGetter params.EntityGetter
+ var err error
+
+ switch {
+ case instance.PoolID != "":
+ entityGetter, err = r.store.GetPoolByID(ctx, instance.PoolID)
+ if err != nil {
+ return nil, nil, fmt.Errorf("error fetching pool: %w", err)
+ }
+ case instance.ScaleSetID != 0:
+ entityGetter, err = r.store.GetScaleSetByID(ctx, instance.ScaleSetID)
+ if err != nil {
+ return nil, nil, fmt.Errorf("error fetching scale set: %w", err)
+ }
+ default:
+ return nil, nil, errors.New("instance does not have a pool or scale set")
+ }
+
+ entity, err := entityGetter.GetEntity()
+ if err != nil {
+ return nil, nil, fmt.Errorf("error fetching entity: %w", err)
+ }
+
+ // Fetching the entity from the database will populate all fields, including credentials.
+ entity, err = r.store.GetForgeEntity(ctx, entity.EntityType, entity.ID)
+ if err != nil {
+ return nil, nil, fmt.Errorf("error fetching entity: %w", err)
+ }
+
+ ghCli, err := github.Client(ctx, entity)
+ if err != nil {
+ return nil, nil, fmt.Errorf("error creating github client: %w", err)
+ }
+
+ scaleSetCli, err := scalesets.NewClient(ghCli)
+ if err != nil {
+ return nil, nil, fmt.Errorf("error creating scaleset client: %w", err)
+ }
+ return ghCli, scaleSetCli, nil
+}
diff --git a/runner/scalesets.go b/runner/scalesets.go
new file mode 100644
index 00000000..136ddec2
--- /dev/null
+++ b/runner/scalesets.go
@@ -0,0 +1,297 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package runner
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "log/slog"
+
+ runnerErrors "github.com/cloudbase/garm-provider-common/errors"
+ "github.com/cloudbase/garm/auth"
+ "github.com/cloudbase/garm/params"
+ "github.com/cloudbase/garm/util/appdefaults"
+ "github.com/cloudbase/garm/util/github"
+ "github.com/cloudbase/garm/util/github/scalesets"
+)
+
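+// ListAllScaleSets returns all scale sets in the database, across all entities.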
+func (r *Runner) ListAllScaleSets(ctx context.Context) ([]params.ScaleSet, error) {
+ if !auth.IsAdmin(ctx) {
+ return []params.ScaleSet{}, runnerErrors.ErrUnauthorized
+ }
+
+ scalesets, err := r.store.ListAllScaleSets(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("error fetching pools: %w", err)
+ }
+ return scalesets, nil
+}
+
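+// GetScaleSetByID returns the scale set with the given database ID.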
+func (r *Runner) GetScaleSetByID(ctx context.Context, scaleSet uint) (params.ScaleSet, error) {
+ if !auth.IsAdmin(ctx) {
+ return params.ScaleSet{}, runnerErrors.ErrUnauthorized
+ }
+
+ set, err := r.store.GetScaleSetByID(ctx, scaleSet)
+ if err != nil {
+ return params.ScaleSet{}, fmt.Errorf("error fetching scale set: %w", err)
+ }
+ return set, nil
+}
+
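+// DeleteScaleSetByID removes a scale set from the forge and then from the database.
+// The scale set must be disabled and must not have any runners.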
+func (r *Runner) DeleteScaleSetByID(ctx context.Context, scaleSetID uint) error {
+ if !auth.IsAdmin(ctx) {
+ return runnerErrors.ErrUnauthorized
+ }
+
+ scaleSet, err := r.store.GetScaleSetByID(ctx, scaleSetID)
+ if err != nil {
+ if !errors.Is(err, runnerErrors.ErrNotFound) {
+ return fmt.Errorf("error fetching scale set: %w", err)
+ }
+ return nil
+ }
+
+ if len(scaleSet.Instances) > 0 {
+ return runnerErrors.NewBadRequestError("scale set has runners")
+ }
+
+ if scaleSet.Enabled {
+ return runnerErrors.NewBadRequestError("scale set is enabled; disable it first")
+ }
+
+ paramEntity, err := scaleSet.GetEntity()
+ if err != nil {
+ return fmt.Errorf("error getting entity: %w", err)
+ }
+
+ entity, err := r.store.GetForgeEntity(ctx, paramEntity.EntityType, paramEntity.ID)
+ if err != nil {
+ return fmt.Errorf("error getting entity: %w", err)
+ }
+
+ ghCli, err := github.Client(ctx, entity)
+ if err != nil {
+ return fmt.Errorf("error creating github client: %w", err)
+ }
+
+ scalesetCli, err := scalesets.NewClient(ghCli)
+ if err != nil {
+ return fmt.Errorf("error getting scaleset client: %w", err)
+ }
+
+ slog.DebugContext(ctx, "deleting scale set", "scale_set_id", scaleSet.ScaleSetID)
+	if err := scalesetCli.DeleteRunnerScaleSet(ctx, scaleSet.ScaleSetID); err != nil {
+		if !errors.Is(err, runnerErrors.ErrNotFound) {
+			slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to delete scale set from github")
+			return fmt.Errorf("error deleting scale set from github: %w", err)
+		}
+		// The scale set is already gone on the forge side; continue and remove it
+		// from the database.
+		slog.InfoContext(ctx, "scale set not found", "scale_set_id", scaleSet.ScaleSetID)
+	}
+ if err := r.store.DeleteScaleSetByID(ctx, scaleSetID); err != nil {
+ return fmt.Errorf("error deleting scale set: %w", err)
+ }
+ return nil
+}
+
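+// UpdateScaleSetByID updates a scale set in the database and mirrors any changes
+// the forge also needs to know about (name, runner group, auto-update setting).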
+func (r *Runner) UpdateScaleSetByID(ctx context.Context, scaleSetID uint, param params.UpdateScaleSetParams) (params.ScaleSet, error) {
+ if !auth.IsAdmin(ctx) {
+ return params.ScaleSet{}, runnerErrors.ErrUnauthorized
+ }
+
+ scaleSet, err := r.store.GetScaleSetByID(ctx, scaleSetID)
+ if err != nil {
+ return params.ScaleSet{}, fmt.Errorf("error fetching scale set: %w", err)
+ }
+
+ maxRunners := scaleSet.MaxRunners
+ minIdleRunners := scaleSet.MinIdleRunners
+
+ if param.MaxRunners != nil {
+ maxRunners = *param.MaxRunners
+ }
+ if param.MinIdleRunners != nil {
+ minIdleRunners = *param.MinIdleRunners
+ }
+
+ if param.RunnerBootstrapTimeout != nil && *param.RunnerBootstrapTimeout == 0 {
+ return params.ScaleSet{}, runnerErrors.NewBadRequestError("runner_bootstrap_timeout cannot be 0")
+ }
+
+ if minIdleRunners > maxRunners {
+ return params.ScaleSet{}, runnerErrors.NewBadRequestError("min_idle_runners cannot be larger than max_runners")
+ }
+
+ paramEntity, err := scaleSet.GetEntity()
+ if err != nil {
+ return params.ScaleSet{}, fmt.Errorf("error getting entity: %w", err)
+ }
+
+ entity, err := r.store.GetForgeEntity(ctx, paramEntity.EntityType, paramEntity.ID)
+ if err != nil {
+ return params.ScaleSet{}, fmt.Errorf("error getting entity: %w", err)
+ }
+
+ ghCli, err := github.Client(ctx, entity)
+ if err != nil {
+ return params.ScaleSet{}, fmt.Errorf("error creating github client: %w", err)
+ }
+
+ scalesetCli, err := scalesets.NewClient(ghCli)
+ if err != nil {
+ return params.ScaleSet{}, fmt.Errorf("error getting scaleset client: %w", err)
+ }
+
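+	// The callback receives the old and the updated scale set, and mirrors the
+	// changes GitHub also needs to know about (name, runner group, auto-update
+	// setting) as part of the store update.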
+ callback := func(old, newSet params.ScaleSet) error {
+ updateParams := params.RunnerScaleSet{}
+ hasUpdates := false
+ if old.Name != newSet.Name {
+ updateParams.Name = newSet.Name
+ hasUpdates = true
+ }
+
+ if old.GitHubRunnerGroup != newSet.GitHubRunnerGroup {
+ runnerGroup, err := scalesetCli.GetRunnerGroupByName(ctx, newSet.GitHubRunnerGroup)
+ if err != nil {
+ return fmt.Errorf("error fetching runner group from github: %w", err)
+ }
+ updateParams.RunnerGroupID = runnerGroup.ID
+ hasUpdates = true
+ }
+
+ if old.DisableUpdate != newSet.DisableUpdate {
+ updateParams.RunnerSetting.DisableUpdate = newSet.DisableUpdate
+ hasUpdates = true
+ }
+
+ if hasUpdates {
+ _, err := scalesetCli.UpdateRunnerScaleSet(ctx, newSet.ScaleSetID, updateParams)
+ if err != nil {
+ return fmt.Errorf("failed to update scaleset in github: %w", err)
+ }
+ }
+ return nil
+ }
+
+ newScaleSet, err := r.store.UpdateEntityScaleSet(ctx, entity, scaleSetID, param, callback)
+ if err != nil {
+ return params.ScaleSet{}, fmt.Errorf("error updating pool: %w", err)
+ }
+ return newScaleSet, nil
+}
+
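+// CreateEntityScaleSet creates a runner scale set on the forge for the given
+// entity and records it in the database. Scale sets are currently only supported
+// for GitHub entities.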
+func (r *Runner) CreateEntityScaleSet(ctx context.Context, entityType params.ForgeEntityType, entityID string, param params.CreateScaleSetParams) (scaleSetRet params.ScaleSet, err error) {
+ if !auth.IsAdmin(ctx) {
+ return params.ScaleSet{}, runnerErrors.ErrUnauthorized
+ }
+
+ if param.RunnerBootstrapTimeout == 0 {
+ param.RunnerBootstrapTimeout = appdefaults.DefaultRunnerBootstrapTimeout
+ }
+
+ if param.GitHubRunnerGroup == "" {
+ param.GitHubRunnerGroup = "Default"
+ }
+
+ entity, err := r.store.GetForgeEntity(ctx, entityType, entityID)
+ if err != nil {
+ return params.ScaleSet{}, fmt.Errorf("error getting entity: %w", err)
+ }
+
+ if entity.Credentials.ForgeType != params.GithubEndpointType {
+ return params.ScaleSet{}, runnerErrors.NewBadRequestError("scale sets are only supported for github entities")
+ }
+
+ ghCli, err := github.Client(ctx, entity)
+ if err != nil {
+ return params.ScaleSet{}, fmt.Errorf("error creating github client: %w", err)
+ }
+
+ scalesetCli, err := scalesets.NewClient(ghCli)
+ if err != nil {
+ return params.ScaleSet{}, fmt.Errorf("error getting scaleset client: %w", err)
+ }
+
+ runnerGroupID, err := ghCli.GetEntityRunnerGroupIDByName(ctx, param.GitHubRunnerGroup)
+ if err != nil {
+ return params.ScaleSet{}, fmt.Errorf("failed to get github runner group for entity %s: %w", entity.ID, err)
+ }
+
+	createParam := &params.RunnerScaleSet{
+ Name: param.Name,
+ RunnerGroupID: runnerGroupID,
+ Labels: []params.Label{
+ {
+ Name: param.Name,
+ Type: "System",
+ },
+ },
+ RunnerSetting: params.RunnerSetting{
+ Ephemeral: true,
+ DisableUpdate: param.DisableUpdate,
+ },
+		Enabled: &param.Enabled,
+ }
+
+ runnerScaleSet, err := scalesetCli.CreateRunnerScaleSet(ctx, createParam)
+ if err != nil {
+ return params.ScaleSet{}, fmt.Errorf("error creating runner scale set: %w", err)
+ }
+
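+	// The scale set already exists on GitHub at this point. If persisting it to
+	// the database fails below, delete it from GitHub again so we don't leak an
+	// orphaned scale set. This relies on the named return value err.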
+ defer func() {
+ if err != nil {
+ if innerErr := scalesetCli.DeleteRunnerScaleSet(ctx, runnerScaleSet.ID); innerErr != nil {
+ slog.With(slog.Any("error", innerErr)).ErrorContext(ctx, "failed to cleanup scale set")
+ }
+ }
+ }()
+ param.ScaleSetID = runnerScaleSet.ID
+
+ scaleSet, err := r.store.CreateEntityScaleSet(ctx, entity, param)
+ if err != nil {
+ return params.ScaleSet{}, fmt.Errorf("error creating scale set: %w", err)
+ }
+
+ return scaleSet, nil
+}
+
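+// ListScaleSetInstances returns the runner instances that belong to the given
+// scale set.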
+func (r *Runner) ListScaleSetInstances(ctx context.Context, scalesetID uint) ([]params.Instance, error) {
+ if !auth.IsAdmin(ctx) {
+ return nil, runnerErrors.ErrUnauthorized
+ }
+
+ instances, err := r.store.ListScaleSetInstances(ctx, scalesetID)
+ if err != nil {
+ return []params.Instance{}, fmt.Errorf("error fetching instances: %w", err)
+ }
+ return instances, nil
+}
+
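+// ListEntityScaleSets returns all scale sets defined for the given entity.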
+func (r *Runner) ListEntityScaleSets(ctx context.Context, entityType params.ForgeEntityType, entityID string) ([]params.ScaleSet, error) {
+ if !auth.IsAdmin(ctx) {
+ return []params.ScaleSet{}, runnerErrors.ErrUnauthorized
+ }
+ entity := params.ForgeEntity{
+ ID: entityID,
+ EntityType: entityType,
+ }
+ scaleSets, err := r.store.ListEntityScaleSets(ctx, entity)
+ if err != nil {
+ return nil, fmt.Errorf("error fetching scale sets: %w", err)
+ }
+ return scaleSets, nil
+}
diff --git a/scripts/build-static.sh b/scripts/build-static.sh
index cafa6a55..1f81752e 100755
--- a/scripts/build-static.sh
+++ b/scripts/build-static.sh
@@ -1,43 +1,67 @@
#!/bin/sh
GARM_SOURCE="/build/garm"
-BIN_DIR="$GARM_SOURCE/bin"
-git config --global --add safe.directory "$GARM_SOURCE"
+git config --global --add safe.directory /build/garm
+cd $GARM_SOURCE
-[ ! -d "$BIN_DIR" ] && mkdir -p "$BIN_DIR"
+CURRENT_BRANCH=$(git rev-parse --abbrev-ref HEAD)
+if [ ! -z "$GARM_REF" ] && [ "$GARM_REF" != "$CURRENT_BRANCH" ];then
+ git checkout $GARM_REF
+fi
+
+
+OUTPUT_DIR="/build/output"
+VERSION=$(git describe --tags --match='v[0-9]*' --dirty --always)
+BUILD_DIR="$OUTPUT_DIR/$VERSION"
+
+
+[ ! -d "$BUILD_DIR/linux" ] && mkdir -p "$BUILD_DIR/linux"
+[ ! -d "$BUILD_DIR/windows" ] && mkdir -p "$BUILD_DIR/windows"
export CGO_ENABLED=1
USER_ID=${USER_ID:-$UID}
USER_GROUP=${USER_GROUP:-$(id -g)}
-mkdir -p $BIN_DIR/amd64 $BIN_DIR/arm64
+# Garm
cd $GARM_SOURCE/cmd/garm
-go build -mod vendor \
- -o $BIN_DIR/amd64/garm \
+
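+# Note: the osusergo and netgo build tags select the pure-Go os/user and DNS
+# resolvers, and -extldflags '-static' asks the external linker for a fully
+# static binary, so the resulting executables have no runtime libc dependency.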
+# Linux
+GOOS=linux GOARCH=amd64 go build -mod vendor \
+ -o $BUILD_DIR/linux/amd64/garm \
-tags osusergo,netgo,sqlite_omit_load_extension \
- -ldflags "-linkmode external -extldflags '-static' -s -w -X main.Version=$(git describe --tags --match='v[0-9]*' --dirty --always)" .
-CC=aarch64-linux-musl-gcc GOARCH=arm64 go build \
+ -ldflags "-extldflags '-static' -s -w -X github.com/cloudbase/garm/util/appdefaults.Version=$VERSION" .
+GOOS=linux GOARCH=arm64 CC=aarch64-linux-musl-gcc go build \
-mod vendor \
- -o $BIN_DIR/arm64/garm \
+ -o $BUILD_DIR/linux/arm64/garm \
-tags osusergo,netgo,sqlite_omit_load_extension \
- -ldflags "-linkmode external -extldflags '-static' -s -w -X main.Version=$(git describe --tags --match='v[0-9]*' --dirty --always)" .
-# GOOS=windows CC=x86_64-w64-mingw32-cc go build -mod vendor \
-# -o $BIN_DIR/amd64/garm.exe \
-# -tags osusergo,netgo,sqlite_omit_load_extension \
-# -ldflags "-s -w -X main.Version=$(git describe --tags --match='v[0-9]*' --dirty --always)" .
+ -ldflags "-extldflags '-static' -s -w -X github.com/cloudbase/garm/util/appdefaults.Version=$VERSION" .
+# Windows
+GOOS=windows GOARCH=amd64 CC=x86_64-w64-mingw32-cc go build -mod vendor \
+ -o $BUILD_DIR/windows/amd64/garm.exe \
+ -tags osusergo,netgo,sqlite_omit_load_extension \
+ -ldflags "-s -w -X github.com/cloudbase/garm/util/appdefaults.Version=$VERSION" .
+
+# garm-cli
cd $GARM_SOURCE/cmd/garm-cli
-go build -mod vendor \
- -o $BIN_DIR/amd64/garm-cli \
- -tags osusergo,netgo,sqlite_omit_load_extension \
- -ldflags "-linkmode external -extldflags '-static' -s -w -X github.com/cloudbase/garm/cmd/garm-cli/cmd.Version=$(git describe --tags --match='v[0-9]*' --dirty --always)" .
-CC=aarch64-linux-musl-gcc GOARCH=arm64 go build -mod vendor \
- -o $BIN_DIR/arm64/garm-cli \
- -tags osusergo,netgo,sqlite_omit_load_extension \
- -ldflags "-linkmode external -extldflags '-static' -s -w -X github.com/cloudbase/garm/cmd/garm-cli/cmd.Version=$(git describe --tags --match='v[0-9]*' --dirty --always)" .
-# GOOS=windows CGO_ENABLED=0 go build -mod vendor \
-# -o $BIN_DIR/amd64/garm-cli.exe \
-# -tags osusergo,netgo,sqlite_omit_load_extension \
-# -ldflags "-s -w -X github.com/cloudbase/garm/cmd/garm-cli/cmd.Version=$(git describe --tags --match='v[0-9]*' --dirty --always)" .
-chown $USER_ID:$USER_GROUP -R "$BIN_DIR"
+# Linux
+GOOS=linux GOARCH=amd64 go build -mod vendor \
+ -o $BUILD_DIR/linux/amd64/garm-cli \
+ -tags osusergo,netgo,sqlite_omit_load_extension \
+ -ldflags "-extldflags '-static' -s -w -X github.com/cloudbase/garm/util/appdefaults.Version=$VERSION" .
+GOOS=linux GOARCH=arm64 CC=aarch64-linux-musl-gcc go build -mod vendor \
+ -o $BUILD_DIR/linux/arm64/garm-cli \
+ -tags osusergo,netgo,sqlite_omit_load_extension \
+ -ldflags "-extldflags '-static' -s -w -X github.com/cloudbase/garm/util/appdefaults.Version=$VERSION" .
+
+# Windows
+GOOS=windows GOARCH=amd64 go build -mod vendor \
+ -o $BUILD_DIR/windows/amd64/garm-cli.exe \
+ -tags osusergo,netgo,sqlite_omit_load_extension \
+ -ldflags "-s -w -X github.com/cloudbase/garm/util/appdefaults.Version=$VERSION" .
+
+
+git checkout $CURRENT_BRANCH || true
+chown $USER_ID:$USER_GROUP -R "$OUTPUT_DIR"
diff --git a/scripts/make-release.sh b/scripts/make-release.sh
new file mode 100755
index 00000000..fc9c6b04
--- /dev/null
+++ b/scripts/make-release.sh
@@ -0,0 +1,62 @@
+#!/bin/bash
+
+echo $GARM_REF
+
+VERSION=$(git describe --tags --match='v[0-9]*' --dirty --always)
+RELEASE="$PWD/release"
+
+[ ! -d "$RELEASE" ] && mkdir -p "$RELEASE"
+
+if [ -n "$GARM_REF" ]; then
+ VERSION=$(git describe --tags --match='v[0-9]*' --always $GARM_REF)
+fi
+
+echo $VERSION
+
+if [ ! -d "build/$VERSION" ]; then
+ echo "missing build/$VERSION"
+ exit 1
+fi
+
+# Windows
+
+if [ ! -d "build/$VERSION/windows/amd64" ];then
+ echo "missing build/$VERSION/windows/amd64"
+ exit 1
+fi
+
+WINDOWS_FILES=("garm.exe" "garm-cli.exe")
+
+for file in ${WINDOWS_FILES[@]};do
+ if [ ! -f "build/$VERSION/windows/amd64/$file" ];then
+ echo "missing build/$VERSION/windows/amd64/$file"
+ exit 1
+ fi
+
+ pushd build/$VERSION/windows/amd64
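+    # ${file%%.exe} strips the .exe suffix, so garm.exe is packaged as
+    # garm-windows-amd64.zip (and likewise for garm-cli.exe).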
+ zip ${file%%.exe}-windows-amd64.zip $file
+ sha256sum ${file%%.exe}-windows-amd64.zip > ${file%%.exe}-windows-amd64.zip.sha256
+ mv ${file%%.exe}-windows-amd64.zip $RELEASE
+ mv ${file%%.exe}-windows-amd64.zip.sha256 $RELEASE
+ popd
+done
+
+# Linux
+OS_ARCHES=("amd64" "arm64")
+FILES=("garm" "garm-cli")
+
+for arch in ${OS_ARCHES[@]};do
+ for file in ${FILES[@]};do
+ if [ ! -f "build/$VERSION/linux/$arch/$file" ];then
+ echo "missing build/$VERSION/linux/$arch/$file"
+ exit 1
+ fi
+
+ pushd build/$VERSION/linux/$arch
+ tar czf ${file}-linux-$arch.tgz $file
+ sha256sum ${file}-linux-$arch.tgz > ${file}-linux-$arch.tgz.sha256
+ mv ${file}-linux-$arch.tgz $RELEASE
+ mv ${file}-linux-$arch.tgz.sha256 $RELEASE
+ popd
+ done
+done
diff --git a/test/integration/client_utils.go b/test/integration/client_utils.go
new file mode 100644
index 00000000..e423c107
--- /dev/null
+++ b/test/integration/client_utils.go
@@ -0,0 +1,512 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package integration
+
+import (
+ "github.com/go-openapi/runtime"
+
+ "github.com/cloudbase/garm/client"
+ clientControllerInfo "github.com/cloudbase/garm/client/controller_info"
+ clientCredentials "github.com/cloudbase/garm/client/credentials"
+ clientEndpoints "github.com/cloudbase/garm/client/endpoints"
+ clientFirstRun "github.com/cloudbase/garm/client/first_run"
+ clientInstances "github.com/cloudbase/garm/client/instances"
+ clientJobs "github.com/cloudbase/garm/client/jobs"
+ clientLogin "github.com/cloudbase/garm/client/login"
+ clientMetricsToken "github.com/cloudbase/garm/client/metrics_token"
+ clientOrganizations "github.com/cloudbase/garm/client/organizations"
+ clientPools "github.com/cloudbase/garm/client/pools"
+ clientProviders "github.com/cloudbase/garm/client/providers"
+ clientRepositories "github.com/cloudbase/garm/client/repositories"
+ "github.com/cloudbase/garm/params"
+)
+
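+// The helpers in this file are thin wrappers around the generated go-openapi
+// client: each builds the request parameters, performs the call with the
+// supplied auth info writer, and unwraps the response payload.
+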
+// firstRun will initialize a new garm installation.
+func firstRun(apiCli *client.GarmAPI, newUser params.NewUserParams) (params.User, error) {
+ firstRunResponse, err := apiCli.FirstRun.FirstRun(
+ clientFirstRun.NewFirstRunParams().WithBody(newUser),
+ nil)
+ if err != nil {
+ return params.User{}, err
+ }
+ return firstRunResponse.Payload, nil
+}
+
+func login(apiCli *client.GarmAPI, loginParams params.PasswordLoginParams) (string, error) {
+	loginResponse, err := apiCli.Login.Login(
+		clientLogin.NewLoginParams().WithBody(loginParams),
+ nil)
+ if err != nil {
+ return "", err
+ }
+ return loginResponse.Payload.Token, nil
+}
+
+// listCredentials lists all the credentials configured in GARM.
+func listCredentials(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter) (params.Credentials, error) {
+ listCredentialsResponse, err := apiCli.Credentials.ListCredentials(
+ clientCredentials.NewListCredentialsParams(),
+ apiAuthToken)
+ if err != nil {
+ return nil, err
+ }
+ return listCredentialsResponse.Payload, nil
+}
+
+func createGithubCredentials(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter, credentialsParams params.CreateGithubCredentialsParams) (*params.ForgeCredentials, error) {
+ createCredentialsResponse, err := apiCli.Credentials.CreateCredentials(
+ clientCredentials.NewCreateCredentialsParams().WithBody(credentialsParams),
+ apiAuthToken)
+ if err != nil {
+ return nil, err
+ }
+ return &createCredentialsResponse.Payload, nil
+}
+
+func deleteGithubCredentials(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter, credentialsID int64) error {
+ return apiCli.Credentials.DeleteCredentials(
+ clientCredentials.NewDeleteCredentialsParams().WithID(credentialsID),
+ apiAuthToken)
+}
+
+func updateGithubCredentials(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter, credentialsID int64, credentialsParams params.UpdateGithubCredentialsParams) (*params.ForgeCredentials, error) {
+ updateCredentialsResponse, err := apiCli.Credentials.UpdateCredentials(
+ clientCredentials.NewUpdateCredentialsParams().WithID(credentialsID).WithBody(credentialsParams),
+ apiAuthToken)
+ if err != nil {
+ return nil, err
+ }
+ return &updateCredentialsResponse.Payload, nil
+}
+
+func createGithubEndpoint(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter, endpointParams params.CreateGithubEndpointParams) (*params.ForgeEndpoint, error) {
+ createEndpointResponse, err := apiCli.Endpoints.CreateGithubEndpoint(
+ clientEndpoints.NewCreateGithubEndpointParams().WithBody(endpointParams),
+ apiAuthToken)
+ if err != nil {
+ return nil, err
+ }
+ return &createEndpointResponse.Payload, nil
+}
+
+func listGithubEndpoints(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter) (params.ForgeEndpoints, error) {
+ listEndpointsResponse, err := apiCli.Endpoints.ListGithubEndpoints(
+ clientEndpoints.NewListGithubEndpointsParams(),
+ apiAuthToken)
+ if err != nil {
+ return nil, err
+ }
+ return listEndpointsResponse.Payload, nil
+}
+
+func getGithubEndpoint(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter, endpointName string) (*params.ForgeEndpoint, error) {
+ getEndpointResponse, err := apiCli.Endpoints.GetGithubEndpoint(
+ clientEndpoints.NewGetGithubEndpointParams().WithName(endpointName),
+ apiAuthToken)
+ if err != nil {
+ return nil, err
+ }
+ return &getEndpointResponse.Payload, nil
+}
+
+func deleteGithubEndpoint(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter, endpointName string) error {
+ return apiCli.Endpoints.DeleteGithubEndpoint(
+ clientEndpoints.NewDeleteGithubEndpointParams().WithName(endpointName),
+ apiAuthToken)
+}
+
+func updateGithubEndpoint(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter, endpointName string, endpointParams params.UpdateGithubEndpointParams) (*params.ForgeEndpoint, error) {
+ updateEndpointResponse, err := apiCli.Endpoints.UpdateGithubEndpoint(
+ clientEndpoints.NewUpdateGithubEndpointParams().WithName(endpointName).WithBody(endpointParams),
+ apiAuthToken)
+ if err != nil {
+ return nil, err
+ }
+ return &updateEndpointResponse.Payload, nil
+}
+
+// listProviders lists all the providers configured in GARM.
+func listProviders(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter) (params.Providers, error) {
+ listProvidersResponse, err := apiCli.Providers.ListProviders(
+ clientProviders.NewListProvidersParams(),
+ apiAuthToken)
+ if err != nil {
+ return nil, err
+ }
+ return listProvidersResponse.Payload, nil
+}
+
+// getControllerInfo returns information about the GARM controller.
+func getControllerInfo(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter) (params.ControllerInfo, error) {
+ controllerInfoResponse, err := apiCli.ControllerInfo.ControllerInfo(
+ clientControllerInfo.NewControllerInfoParams(),
+ apiAuthToken)
+ if err != nil {
+ return params.ControllerInfo{}, err
+ }
+ return controllerInfoResponse.Payload, nil
+}
+
+// listJobs lists all the jobs configured in GARM.
+func listJobs(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter) (params.Jobs, error) {
+ listJobsResponse, err := apiCli.Jobs.ListJobs(
+ clientJobs.NewListJobsParams(),
+ apiAuthToken)
+ if err != nil {
+ return nil, err
+ }
+ return listJobsResponse.Payload, nil
+}
+
+// getMetricsToken returns the metrics token.
+func getMetricsToken(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter) (string, error) {
+ getMetricsTokenResponse, err := apiCli.MetricsToken.GetMetricsToken(
+ clientMetricsToken.NewGetMetricsTokenParams(),
+ apiAuthToken)
+ if err != nil {
+ return "", err
+ }
+ return getMetricsTokenResponse.Payload.Token, nil
+}
+
+// ///////////////
+// Repositories //
+// ///////////////
+func createRepo(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter, repoParams params.CreateRepoParams) (*params.Repository, error) {
+ createRepoResponse, err := apiCli.Repositories.CreateRepo(
+ clientRepositories.NewCreateRepoParams().WithBody(repoParams),
+ apiAuthToken)
+ if err != nil {
+ return nil, err
+ }
+ return &createRepoResponse.Payload, nil
+}
+
+func listRepos(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter) (params.Repositories, error) {
+ listReposResponse, err := apiCli.Repositories.ListRepos(
+ clientRepositories.NewListReposParams(),
+ apiAuthToken)
+ if err != nil {
+ return nil, err
+ }
+ return listReposResponse.Payload, nil
+}
+
+func updateRepo(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter, repoID string, repoParams params.UpdateEntityParams) (*params.Repository, error) {
+ updateRepoResponse, err := apiCli.Repositories.UpdateRepo(
+ clientRepositories.NewUpdateRepoParams().WithRepoID(repoID).WithBody(repoParams),
+ apiAuthToken)
+ if err != nil {
+ return nil, err
+ }
+ return &updateRepoResponse.Payload, nil
+}
+
+func getRepo(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter, repoID string) (*params.Repository, error) {
+ getRepoResponse, err := apiCli.Repositories.GetRepo(
+ clientRepositories.NewGetRepoParams().WithRepoID(repoID),
+ apiAuthToken)
+ if err != nil {
+ return nil, err
+ }
+ return &getRepoResponse.Payload, nil
+}
+
+func installRepoWebhook(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter, repoID string, webhookParams params.InstallWebhookParams) (*params.HookInfo, error) {
+ installRepoWebhookResponse, err := apiCli.Repositories.InstallRepoWebhook(
+ clientRepositories.NewInstallRepoWebhookParams().WithRepoID(repoID).WithBody(webhookParams),
+ apiAuthToken)
+ if err != nil {
+ return nil, err
+ }
+ return &installRepoWebhookResponse.Payload, nil
+}
+
+func getRepoWebhook(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter, repoID string) (*params.HookInfo, error) {
+ getRepoWebhookResponse, err := apiCli.Repositories.GetRepoWebhookInfo(
+ clientRepositories.NewGetRepoWebhookInfoParams().WithRepoID(repoID),
+ apiAuthToken)
+ if err != nil {
+ return nil, err
+ }
+ return &getRepoWebhookResponse.Payload, nil
+}
+
+func uninstallRepoWebhook(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter, repoID string) error {
+ return apiCli.Repositories.UninstallRepoWebhook(
+ clientRepositories.NewUninstallRepoWebhookParams().WithRepoID(repoID),
+ apiAuthToken)
+}
+
+func createRepoPool(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter, repoID string, poolParams params.CreatePoolParams) (*params.Pool, error) {
+ createRepoPoolResponse, err := apiCli.Repositories.CreateRepoPool(
+ clientRepositories.NewCreateRepoPoolParams().WithRepoID(repoID).WithBody(poolParams),
+ apiAuthToken)
+ if err != nil {
+ return nil, err
+ }
+ return &createRepoPoolResponse.Payload, nil
+}
+
+func listRepoPools(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter, repoID string) (params.Pools, error) {
+ listRepoPoolsResponse, err := apiCli.Repositories.ListRepoPools(
+ clientRepositories.NewListRepoPoolsParams().WithRepoID(repoID),
+ apiAuthToken)
+ if err != nil {
+ return nil, err
+ }
+ return listRepoPoolsResponse.Payload, nil
+}
+
+func getRepoPool(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter, repoID, poolID string) (*params.Pool, error) {
+ getRepoPoolResponse, err := apiCli.Repositories.GetRepoPool(
+ clientRepositories.NewGetRepoPoolParams().WithRepoID(repoID).WithPoolID(poolID),
+ apiAuthToken)
+ if err != nil {
+ return nil, err
+ }
+ return &getRepoPoolResponse.Payload, nil
+}
+
+func updateRepoPool(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter, repoID, poolID string, poolParams params.UpdatePoolParams) (*params.Pool, error) {
+ updateRepoPoolResponse, err := apiCli.Repositories.UpdateRepoPool(
+ clientRepositories.NewUpdateRepoPoolParams().WithRepoID(repoID).WithPoolID(poolID).WithBody(poolParams),
+ apiAuthToken)
+ if err != nil {
+ return nil, err
+ }
+ return &updateRepoPoolResponse.Payload, nil
+}
+
+func listRepoInstances(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter, repoID string) (params.Instances, error) {
+ listRepoInstancesResponse, err := apiCli.Repositories.ListRepoInstances(
+ clientRepositories.NewListRepoInstancesParams().WithRepoID(repoID),
+ apiAuthToken)
+ if err != nil {
+ return nil, err
+ }
+ return listRepoInstancesResponse.Payload, nil
+}
+
+func deleteRepo(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter, repoID string) error {
+ return apiCli.Repositories.DeleteRepo(
+ clientRepositories.NewDeleteRepoParams().WithRepoID(repoID),
+ apiAuthToken)
+}
+
+func deleteRepoPool(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter, repoID, poolID string) error {
+ return apiCli.Repositories.DeleteRepoPool(
+ clientRepositories.NewDeleteRepoPoolParams().WithRepoID(repoID).WithPoolID(poolID),
+ apiAuthToken)
+}
+
+// ////////////////
+// Organizations //
+// ////////////////
+func createOrg(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter, orgParams params.CreateOrgParams) (*params.Organization, error) {
+ createOrgResponse, err := apiCli.Organizations.CreateOrg(
+ clientOrganizations.NewCreateOrgParams().WithBody(orgParams),
+ apiAuthToken)
+ if err != nil {
+ return nil, err
+ }
+ return &createOrgResponse.Payload, nil
+}
+
+func listOrgs(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter) (params.Organizations, error) {
+ listOrgsResponse, err := apiCli.Organizations.ListOrgs(
+ clientOrganizations.NewListOrgsParams(),
+ apiAuthToken)
+ if err != nil {
+ return nil, err
+ }
+ return listOrgsResponse.Payload, nil
+}
+
+func updateOrg(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter, orgID string, orgParams params.UpdateEntityParams) (*params.Organization, error) {
+ updateOrgResponse, err := apiCli.Organizations.UpdateOrg(
+ clientOrganizations.NewUpdateOrgParams().WithOrgID(orgID).WithBody(orgParams),
+ apiAuthToken)
+ if err != nil {
+ return nil, err
+ }
+ return &updateOrgResponse.Payload, nil
+}
+
+func getOrg(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter, orgID string) (*params.Organization, error) {
+ getOrgResponse, err := apiCli.Organizations.GetOrg(
+ clientOrganizations.NewGetOrgParams().WithOrgID(orgID),
+ apiAuthToken)
+ if err != nil {
+ return nil, err
+ }
+ return &getOrgResponse.Payload, nil
+}
+
+func installOrgWebhook(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter, orgID string, webhookParams params.InstallWebhookParams) (*params.HookInfo, error) {
+ installOrgWebhookResponse, err := apiCli.Organizations.InstallOrgWebhook(
+ clientOrganizations.NewInstallOrgWebhookParams().WithOrgID(orgID).WithBody(webhookParams),
+ apiAuthToken)
+ if err != nil {
+ return nil, err
+ }
+ return &installOrgWebhookResponse.Payload, nil
+}
+
+func getOrgWebhook(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter, orgID string) (*params.HookInfo, error) {
+ getOrgWebhookResponse, err := apiCli.Organizations.GetOrgWebhookInfo(
+ clientOrganizations.NewGetOrgWebhookInfoParams().WithOrgID(orgID),
+ apiAuthToken)
+ if err != nil {
+ return nil, err
+ }
+ return &getOrgWebhookResponse.Payload, nil
+}
+
+func uninstallOrgWebhook(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter, orgID string) error {
+ return apiCli.Organizations.UninstallOrgWebhook(
+ clientOrganizations.NewUninstallOrgWebhookParams().WithOrgID(orgID),
+ apiAuthToken)
+}
+
+func createOrgPool(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter, orgID string, poolParams params.CreatePoolParams) (*params.Pool, error) {
+ createOrgPoolResponse, err := apiCli.Organizations.CreateOrgPool(
+ clientOrganizations.NewCreateOrgPoolParams().WithOrgID(orgID).WithBody(poolParams),
+ apiAuthToken)
+ if err != nil {
+ return nil, err
+ }
+ return &createOrgPoolResponse.Payload, nil
+}
+
+func listOrgPools(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter, orgID string) (params.Pools, error) {
+ listOrgPoolsResponse, err := apiCli.Organizations.ListOrgPools(
+ clientOrganizations.NewListOrgPoolsParams().WithOrgID(orgID),
+ apiAuthToken)
+ if err != nil {
+ return nil, err
+ }
+ return listOrgPoolsResponse.Payload, nil
+}
+
+func getOrgPool(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter, orgID, poolID string) (*params.Pool, error) {
+ getOrgPoolResponse, err := apiCli.Organizations.GetOrgPool(
+ clientOrganizations.NewGetOrgPoolParams().WithOrgID(orgID).WithPoolID(poolID),
+ apiAuthToken)
+ if err != nil {
+ return nil, err
+ }
+ return &getOrgPoolResponse.Payload, nil
+}
+
+func updateOrgPool(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter, orgID, poolID string, poolParams params.UpdatePoolParams) (*params.Pool, error) {
+ updateOrgPoolResponse, err := apiCli.Organizations.UpdateOrgPool(
+ clientOrganizations.NewUpdateOrgPoolParams().WithOrgID(orgID).WithPoolID(poolID).WithBody(poolParams),
+ apiAuthToken)
+ if err != nil {
+ return nil, err
+ }
+ return &updateOrgPoolResponse.Payload, nil
+}
+
+func listOrgInstances(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter, orgID string) (params.Instances, error) {
+ listOrgInstancesResponse, err := apiCli.Organizations.ListOrgInstances(
+ clientOrganizations.NewListOrgInstancesParams().WithOrgID(orgID),
+ apiAuthToken)
+ if err != nil {
+ return nil, err
+ }
+ return listOrgInstancesResponse.Payload, nil
+}
+
+func deleteOrg(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter, orgID string) error {
+ return apiCli.Organizations.DeleteOrg(
+ clientOrganizations.NewDeleteOrgParams().WithOrgID(orgID),
+ apiAuthToken)
+}
+
+func deleteOrgPool(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter, orgID, poolID string) error {
+ return apiCli.Organizations.DeleteOrgPool(
+ clientOrganizations.NewDeleteOrgPoolParams().WithOrgID(orgID).WithPoolID(poolID),
+ apiAuthToken)
+}
+
+// ////////////
+// Instances //
+// ////////////
+func listInstances(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter) (params.Instances, error) {
+ listInstancesResponse, err := apiCli.Instances.ListInstances(
+ clientInstances.NewListInstancesParams(),
+ apiAuthToken)
+ if err != nil {
+ return nil, err
+ }
+ return listInstancesResponse.Payload, nil
+}
+
+func getInstance(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter, instanceID string) (*params.Instance, error) {
+ getInstancesResponse, err := apiCli.Instances.GetInstance(
+ clientInstances.NewGetInstanceParams().WithInstanceName(instanceID),
+ apiAuthToken)
+ if err != nil {
+ return nil, err
+ }
+ return &getInstancesResponse.Payload, nil
+}
+
+func deleteInstance(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter, instanceID string, forceRemove, bypassGHUnauthorized bool) error {
+ return apiCli.Instances.DeleteInstance(
+ clientInstances.NewDeleteInstanceParams().WithInstanceName(instanceID).WithForceRemove(&forceRemove).WithBypassGHUnauthorized(&bypassGHUnauthorized),
+ apiAuthToken)
+}
+
+// ////////
+// Pools //
+// ////////
+func listPools(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter) (params.Pools, error) {
+ listPoolsResponse, err := apiCli.Pools.ListPools(
+ clientPools.NewListPoolsParams(),
+ apiAuthToken)
+ if err != nil {
+ return nil, err
+ }
+ return listPoolsResponse.Payload, nil
+}
+
+func getPool(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter, poolID string) (*params.Pool, error) {
+ getPoolResponse, err := apiCli.Pools.GetPool(
+ clientPools.NewGetPoolParams().WithPoolID(poolID),
+ apiAuthToken)
+ if err != nil {
+ return nil, err
+ }
+ return &getPoolResponse.Payload, nil
+}
+
+func updatePool(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter, poolID string, poolParams params.UpdatePoolParams) (*params.Pool, error) {
+ updatePoolResponse, err := apiCli.Pools.UpdatePool(
+ clientPools.NewUpdatePoolParams().WithPoolID(poolID).WithBody(poolParams),
+ apiAuthToken)
+ if err != nil {
+ return nil, err
+ }
+ return &updatePoolResponse.Payload, nil
+}
+
+func deletePool(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter, poolID string) error {
+ return apiCli.Pools.DeletePool(
+ clientPools.NewDeletePoolParams().WithPoolID(poolID),
+ apiAuthToken)
+}
diff --git a/test/integration/config/config.toml b/test/integration/config/config.toml
new file mode 100644
index 00000000..62c2d9d7
--- /dev/null
+++ b/test/integration/config/config.toml
@@ -0,0 +1,40 @@
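+# NOTE: the ${VAR} placeholders in this file are not expanded by GARM itself;
+# the integration test setup is expected to substitute them before this
+# config is loaded.
+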
+[default]
+callback_url = "${GARM_BASE_URL}/api/v1/callbacks"
+metadata_url = "${GARM_BASE_URL}/api/v1/metadata"
+webhook_url = "${GARM_BASE_URL}/webhooks"
+enable_webhook_management = true
+
+[metrics]
+enable = true
+disable_auth = false
+
+[jwt_auth]
+secret = "${JWT_AUTH_SECRET}"
+time_to_live = "8760h"
+
+[apiserver]
+bind = "0.0.0.0"
+port = ${GARM_PORT}
+use_tls = false
+
+[database]
+backend = "sqlite3"
+passphrase = "${DB_PASSPHRASE}"
+[database.sqlite3]
+ db_file = "${GARM_CONFIG_DIR}/garm.db"
+
+[[provider]]
+name = "lxd_local"
+provider_type = "external"
+description = "Local LXD installation"
+ [provider.external]
+ provider_executable = "${LXD_PROVIDER_EXECUTABLE}"
+ config_file = "${LXD_PROVIDER_CONFIG}"
+
+[[provider]]
+name = "test_external"
+description = "external test provider"
+provider_type = "external"
+ [provider.external]
+ config_file = "${GARM_CONFIG_DIR}/test-provider/config"
+ provider_executable = "${GARM_CONFIG_DIR}/test-provider/garm-external-provider"
\ No newline at end of file
diff --git a/test/integration/config/garm-provider-lxd.toml b/test/integration/config/garm-provider-lxd.toml
new file mode 100644
index 00000000..0b2ba3f0
--- /dev/null
+++ b/test/integration/config/garm-provider-lxd.toml
@@ -0,0 +1,21 @@
+unix_socket_path = "/var/snap/lxd/common/lxd/unix.socket"
+include_default_profile = false
+instance_type = "container"
+secure_boot = false
+project_name = "default"
+[image_remotes]
+ [image_remotes.ubuntu]
+ addr = "${LXD_REMOTE_SERVER}"
+ public = true
+ protocol = "simplestreams"
+ skip_verify = false
+ [image_remotes.ubuntu_daily]
+ addr = "https://cloud-images.ubuntu.com/daily"
+ public = true
+ protocol = "simplestreams"
+ skip_verify = false
+ [image_remotes.images]
+ addr = "https://images.linuxcontainers.org"
+ public = true
+ protocol = "simplestreams"
+ skip_verify = false
\ No newline at end of file
diff --git a/test/integration/config/garm.service b/test/integration/config/garm.service
new file mode 100644
index 00000000..9015947e
--- /dev/null
+++ b/test/integration/config/garm.service
@@ -0,0 +1,13 @@
+[Unit]
+Description=GitHub Actions Runner Manager (garm)
+After=multi-user.target
+
+[Service]
+Type=simple
+ExecStart=/usr/local/bin/garm -config ${GARM_CONFIG_FILE}
+ExecReload=/bin/kill -HUP $MAINPID
+Restart=always
+RestartSec=5s
+
+[Install]
+WantedBy=default.target
diff --git a/test/integration/credentials_test.go b/test/integration/credentials_test.go
new file mode 100644
index 00000000..9b9387f6
--- /dev/null
+++ b/test/integration/credentials_test.go
@@ -0,0 +1,246 @@
+//go:build integration
+// +build integration
+
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+package integration
+
+import (
+ "github.com/cloudbase/garm/params"
+)
+
+const (
+ defaultEndpointName string = "github.com"
+ dummyCredentialsName string = "dummy"
+)
+
+func (suite *GarmSuite) TestGithubCredentialsErrorOnDuplicateCredentialsName() {
+ t := suite.T()
+ t.Log("Testing error on duplicate credentials name")
+ creds, err := suite.createDummyCredentials(dummyCredentialsName, defaultEndpointName)
+ suite.NoError(err)
+ t.Cleanup(func() {
+ suite.DeleteGithubCredential(int64(creds.ID)) //nolint:gosec
+ })
+
+ createCredsParams := params.CreateGithubCredentialsParams{
+ Name: dummyCredentialsName,
+ Endpoint: defaultEndpointName,
+ Description: "GARM test credentials",
+ AuthType: params.ForgeAuthTypePAT,
+ PAT: params.GithubPAT{
+ OAuth2Token: "dummy",
+ },
+ }
+ _, err = createGithubCredentials(suite.cli, suite.authToken, createCredsParams)
+ suite.Error(err, "expected error when creating credentials with duplicate name")
+}
+
+func (suite *GarmSuite) TestGithubCredentialsFailsToDeleteWhenInUse() {
+ t := suite.T()
+ t.Log("Testing error when deleting credentials in use")
+ creds, err := suite.createDummyCredentials(dummyCredentialsName, defaultEndpointName)
+ suite.NoError(err)
+
+ orgName := "dummy-owner"
+ repoName := "dummy-repo"
+ createParams := params.CreateRepoParams{
+ Owner: orgName,
+ Name: repoName,
+ CredentialsName: creds.Name,
+ WebhookSecret: "superSecret@123BlaBla",
+ }
+
+ t.Logf("Create repository with owner_name: %s, repo_name: %s", orgName, repoName)
+ repo, err := createRepo(suite.cli, suite.authToken, createParams)
+ suite.NoError(err)
+ t.Cleanup(func() {
+ deleteRepo(suite.cli, suite.authToken, repo.ID)
+ deleteGithubCredentials(suite.cli, suite.authToken, int64(creds.ID)) //nolint:gosec
+ })
+
+ err = deleteGithubCredentials(suite.cli, suite.authToken, int64(creds.ID)) //nolint:gosec
+ suite.Error(err, "expected error when deleting credentials in use")
+}
+
+func (suite *GarmSuite) TestGithubCredentialsFailsOnInvalidAuthType() {
+ t := suite.T()
+ t.Log("Testing error on invalid auth type")
+ createCredsParams := params.CreateGithubCredentialsParams{
+ Name: dummyCredentialsName,
+ Endpoint: defaultEndpointName,
+ Description: "GARM test credentials",
+ AuthType: params.ForgeAuthType("invalid"),
+ PAT: params.GithubPAT{
+ OAuth2Token: "dummy",
+ },
+ }
+ _, err := createGithubCredentials(suite.cli, suite.authToken, createCredsParams)
+ suite.Error(err, "expected error when creating credentials with invalid auth type")
+ expectAPIStatusCode(err, 400)
+}
+
+func (suite *GarmSuite) TestGithubCredentialsFailsWhenAuthTypeParamsAreIncorrect() {
+ t := suite.T()
+ t.Log("Testing error when auth type params are incorrect")
+ privateKeyBytes, err := getTestFileContents("certs/srv-key.pem")
+ suite.NoError(err)
+ createCredsParams := params.CreateGithubCredentialsParams{
+ Name: dummyCredentialsName,
+ Endpoint: defaultEndpointName,
+ Description: "GARM test credentials",
+ AuthType: params.ForgeAuthTypePAT,
+ App: params.GithubApp{
+ AppID: 123,
+ InstallationID: 456,
+ PrivateKeyBytes: privateKeyBytes,
+ },
+ }
+ _, err = createGithubCredentials(suite.cli, suite.authToken, createCredsParams)
+ suite.Error(err, "expected error when creating credentials with invalid auth type params")
+
+ expectAPIStatusCode(err, 400)
+}
+
+func (suite *GarmSuite) TestGithubCredentialsFailsWhenAuthTypeParamsAreMissing() {
+ t := suite.T()
+ t.Log("Testing error when auth type params are missing")
+ createCredsParams := params.CreateGithubCredentialsParams{
+ Name: dummyCredentialsName,
+ Endpoint: defaultEndpointName,
+ Description: "GARM test credentials",
+ AuthType: params.ForgeAuthTypeApp,
+ }
+ _, err := createGithubCredentials(suite.cli, suite.authToken, createCredsParams)
+ suite.Error(err, "expected error when creating credentials with missing auth type params")
+ expectAPIStatusCode(err, 400)
+}
+
+func (suite *GarmSuite) TestGithubCredentialsUpdateFailsWhenBothPATAndAppAreSupplied() {
+ t := suite.T()
+ t.Log("Testing error when both PAT and App are supplied")
+ creds, err := suite.createDummyCredentials(dummyCredentialsName, defaultEndpointName)
+ suite.NoError(err)
+ t.Cleanup(func() {
+ suite.DeleteGithubCredential(int64(creds.ID)) //nolint:gosec
+ })
+
+ privateKeyBytes, err := getTestFileContents("certs/srv-key.pem")
+ suite.NoError(err)
+ updateCredsParams := params.UpdateGithubCredentialsParams{
+		PAT: &params.GithubPAT{
+ OAuth2Token: "dummy",
+ },
+		App: &params.GithubApp{
+ AppID: 123,
+ InstallationID: 456,
+ PrivateKeyBytes: privateKeyBytes,
+ },
+ }
+ _, err = updateGithubCredentials(suite.cli, suite.authToken, int64(creds.ID), updateCredsParams) //nolint:gosec
+ suite.Error(err, "expected error when updating credentials with both PAT and App")
+ expectAPIStatusCode(err, 400)
+}
+
+func (suite *GarmSuite) TestGithubCredentialsFailWhenAppKeyIsInvalid() {
+ t := suite.T()
+ t.Log("Testing error when app key is invalid")
+ createCredsParams := params.CreateGithubCredentialsParams{
+ Name: dummyCredentialsName,
+ Endpoint: defaultEndpointName,
+ Description: "GARM test credentials",
+ AuthType: params.ForgeAuthTypeApp,
+ App: params.GithubApp{
+ AppID: 123,
+ InstallationID: 456,
+ PrivateKeyBytes: []byte("invalid"),
+ },
+ }
+ _, err := createGithubCredentials(suite.cli, suite.authToken, createCredsParams)
+ suite.Error(err, "expected error when creating credentials with invalid app key")
+ expectAPIStatusCode(err, 400)
+}
+
+func (suite *GarmSuite) TestGithubCredentialsFailWhenEndpointDoesntExist() {
+ t := suite.T()
+ t.Log("Testing error when endpoint doesn't exist")
+ createCredsParams := params.CreateGithubCredentialsParams{
+ Name: dummyCredentialsName,
+ Endpoint: "iDontExist.example.com",
+ Description: "GARM test credentials",
+ AuthType: params.ForgeAuthTypePAT,
+ PAT: params.GithubPAT{
+ OAuth2Token: "dummy",
+ },
+ }
+ _, err := createGithubCredentials(suite.cli, suite.authToken, createCredsParams)
+ suite.Error(err, "expected error when creating credentials with invalid endpoint")
+ expectAPIStatusCode(err, 404)
+}
+
+func (suite *GarmSuite) TestGithubCredentialsFailsOnDuplicateName() {
+ t := suite.T()
+ t.Log("Testing error on duplicate credentials name")
+ creds, err := suite.createDummyCredentials(dummyCredentialsName, defaultEndpointName)
+ suite.NoError(err)
+ t.Cleanup(func() {
+ suite.DeleteGithubCredential(int64(creds.ID)) //nolint:gosec
+ })
+
+ createCredsParams := params.CreateGithubCredentialsParams{
+ Name: dummyCredentialsName,
+ Endpoint: defaultEndpointName,
+ Description: "GARM test credentials",
+ AuthType: params.ForgeAuthTypePAT,
+ PAT: params.GithubPAT{
+ OAuth2Token: "dummy",
+ },
+ }
+ _, err = createGithubCredentials(suite.cli, suite.authToken, createCredsParams)
+ suite.Error(err, "expected error when creating credentials with duplicate name")
+ expectAPIStatusCode(err, 409)
+}
+
+func (suite *GarmSuite) createDummyCredentials(name, endpointName string) (*params.ForgeCredentials, error) {
+ createCredsParams := params.CreateGithubCredentialsParams{
+ Name: name,
+ Endpoint: endpointName,
+ Description: "GARM test credentials",
+ AuthType: params.ForgeAuthTypePAT,
+ PAT: params.GithubPAT{
+ OAuth2Token: "dummy",
+ },
+ }
+ return suite.CreateGithubCredentials(createCredsParams)
+}
+
+func (suite *GarmSuite) CreateGithubCredentials(credentialsParams params.CreateGithubCredentialsParams) (*params.ForgeCredentials, error) {
+	t := suite.T()
+	t.Log("Create GitHub credentials")
+	return createGithubCredentials(suite.cli, suite.authToken, credentialsParams)
+}
+
+func (suite *GarmSuite) DeleteGithubCredential(id int64) error {
+	t := suite.T()
+	t.Log("Delete GitHub credential")
+	return deleteGithubCredentials(suite.cli, suite.authToken, id)
+}
diff --git a/test/integration/endpoints.go b/test/integration/endpoints.go
new file mode 100644
index 00000000..720f43d2
--- /dev/null
+++ b/test/integration/endpoints.go
@@ -0,0 +1,62 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package integration
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "github.com/cloudbase/garm/params"
+)
+
+func checkEndpointParamsAreEqual(a, b params.ForgeEndpoint) error {
+ if a.Name != b.Name {
+ return fmt.Errorf("endpoint name mismatch")
+ }
+
+ if a.Description != b.Description {
+ return fmt.Errorf("endpoint description mismatch")
+ }
+
+ if a.BaseURL != b.BaseURL {
+ return fmt.Errorf("endpoint base URL mismatch")
+ }
+
+ if a.APIBaseURL != b.APIBaseURL {
+ return fmt.Errorf("endpoint API base URL mismatch")
+ }
+
+ if a.UploadBaseURL != b.UploadBaseURL {
+ return fmt.Errorf("endpoint upload base URL mismatch")
+ }
+
+ if string(a.CACertBundle) != string(b.CACertBundle) {
+ return fmt.Errorf("endpoint CA cert bundle mismatch")
+ }
+ return nil
+}
+
+func getTestFileContents(relPath string) ([]byte, error) {
+ baseDir := os.Getenv("GARM_CHECKOUT_DIR")
+ if baseDir == "" {
+		return nil, fmt.Errorf("environment variable GARM_CHECKOUT_DIR not set")
+ }
+ contents, err := os.ReadFile(filepath.Join(baseDir, "testdata", relPath))
+ if err != nil {
+ return nil, err
+ }
+ return contents, nil
+}
diff --git a/test/integration/endpoints_test.go b/test/integration/endpoints_test.go
new file mode 100644
index 00000000..fe0dd160
--- /dev/null
+++ b/test/integration/endpoints_test.go
@@ -0,0 +1,226 @@
+//go:build integration
+// +build integration
+
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package integration
+
+import (
+ "github.com/cloudbase/garm/params"
+)
+
+func (suite *GarmSuite) TestGithubEndpointOperations() {
+ t := suite.T()
+ t.Log("Testing endpoint operations")
+ suite.MustDefaultGithubEndpoint()
+
+ caBundle, err := getTestFileContents("certs/srv-pub.pem")
+ suite.NoError(err)
+
+ endpointParams := params.CreateGithubEndpointParams{
+ Name: "test-endpoint",
+ Description: "Test endpoint",
+ BaseURL: "https://ghes.example.com",
+ APIBaseURL: "https://api.ghes.example.com/",
+ UploadBaseURL: "https://uploads.ghes.example.com/",
+ CACertBundle: caBundle,
+ }
+
+ endpoint, err := suite.CreateGithubEndpoint(endpointParams)
+ suite.NoError(err)
+ suite.Equal(endpoint.Name, endpointParams.Name, "Endpoint name mismatch")
+ suite.Equal(endpoint.Description, endpointParams.Description, "Endpoint description mismatch")
+ suite.Equal(endpoint.BaseURL, endpointParams.BaseURL, "Endpoint base URL mismatch")
+ suite.Equal(endpoint.APIBaseURL, endpointParams.APIBaseURL, "Endpoint API base URL mismatch")
+ suite.Equal(endpoint.UploadBaseURL, endpointParams.UploadBaseURL, "Endpoint upload base URL mismatch")
+ suite.Equal(string(endpoint.CACertBundle), string(caBundle), "Endpoint CA cert bundle mismatch")
+
+ endpoint2 := suite.GetGithubEndpoint(endpointParams.Name)
+ suite.NotNil(endpoint, "endpoint is nil")
+ suite.NotNil(endpoint2, "endpoint2 is nil")
+
+ err = checkEndpointParamsAreEqual(*endpoint, *endpoint2)
+ suite.NoError(err, "endpoint params are not equal")
+	endpoints := suite.ListGithubEndpoints()
+ var found bool
+ for _, ep := range endpoints {
+ if ep.Name == endpointParams.Name {
+			suite.NoError(checkEndpointParamsAreEqual(*endpoint, ep), "endpoint params in list mismatch")
+ found = true
+ break
+ }
+ }
+	suite.True(found, "endpoint not found in list")
+
+ err = suite.DeleteGithubEndpoint(endpoint.Name)
+ suite.NoError(err, "error deleting github endpoint")
+}
+
+func (suite *GarmSuite) TestGithubEndpointMustFailToDeleteDefaultGithubEndpoint() {
+ t := suite.T()
+ t.Log("Testing error when deleting default github.com endpoint")
+ err := deleteGithubEndpoint(suite.cli, suite.authToken, "github.com")
+ suite.Error(err, "expected error when attempting to delete the default github.com endpoint")
+}
+
+func (suite *GarmSuite) TestGithubEndpointFailsOnInvalidCABundle() {
+ t := suite.T()
+ t.Log("Testing endpoint creation with invalid CA cert bundle")
+ badCABundle, err := getTestFileContents("certs/srv-key.pem")
+ suite.NoError(err, "error reading CA cert bundle")
+
+ endpointParams := params.CreateGithubEndpointParams{
+ Name: "dummy",
+ Description: "Dummy endpoint",
+ BaseURL: "https://ghes.example.com",
+ APIBaseURL: "https://api.ghes.example.com/",
+ UploadBaseURL: "https://uploads.ghes.example.com/",
+ CACertBundle: badCABundle,
+ }
+
+ _, err = createGithubEndpoint(suite.cli, suite.authToken, endpointParams)
+ suite.Error(err, "expected error when creating endpoint with invalid CA cert bundle")
+}
+
+func (suite *GarmSuite) TestGithubEndpointDeletionFailsWhenCredentialsExist() {
+ t := suite.T()
+ t.Log("Testing endpoint deletion when credentials exist")
+ endpointParams := params.CreateGithubEndpointParams{
+ Name: "dummy",
+ Description: "Dummy endpoint",
+ BaseURL: "https://ghes.example.com",
+ APIBaseURL: "https://api.ghes.example.com/",
+ UploadBaseURL: "https://uploads.ghes.example.com/",
+ }
+
+ endpoint, err := suite.CreateGithubEndpoint(endpointParams)
+ suite.NoError(err, "error creating github endpoint")
+ creds, err := suite.createDummyCredentials("test-creds", endpoint.Name)
+ suite.NoError(err, "error creating dummy credentials")
+
+ err = deleteGithubEndpoint(suite.cli, suite.authToken, endpoint.Name)
+ suite.Error(err, "expected error when deleting endpoint with credentials")
+
+ err = suite.DeleteGithubCredential(int64(creds.ID)) //nolint:gosec
+ suite.NoError(err, "error deleting credentials")
+ err = suite.DeleteGithubEndpoint(endpoint.Name)
+ suite.NoError(err, "error deleting endpoint")
+}
+
+func (suite *GarmSuite) TestGithubEndpointFailsOnDuplicateName() {
+ t := suite.T()
+ t.Log("Testing endpoint creation with duplicate name")
+ endpointParams := params.CreateGithubEndpointParams{
+ Name: "github.com",
+ Description: "Dummy endpoint",
+ BaseURL: "https://ghes.example.com",
+ APIBaseURL: "https://api.ghes.example.com/",
+ UploadBaseURL: "https://uploads.ghes.example.com/",
+ }
+
+ _, err := createGithubEndpoint(suite.cli, suite.authToken, endpointParams)
+ suite.Error(err, "expected error when creating endpoint with duplicate name")
+}
+
+func (suite *GarmSuite) TestGithubEndpointUpdateEndpoint() {
+ t := suite.T()
+ t.Log("Testing endpoint update")
+ endpoint, err := suite.createDummyEndpoint("dummy")
+ suite.NoError(err, "error creating dummy endpoint")
+ t.Cleanup(func() {
+ suite.DeleteGithubEndpoint(endpoint.Name)
+ })
+
+ newDescription := "Updated description"
+ newBaseURL := "https://ghes2.example.com"
+ newAPIBaseURL := "https://api.ghes2.example.com/"
+ newUploadBaseURL := "https://uploads.ghes2.example.com/"
+ newCABundle, err := getTestFileContents("certs/srv-pub.pem")
+ suite.NoError(err, "error reading CA cert bundle")
+
+ updateParams := params.UpdateGithubEndpointParams{
+ Description: &newDescription,
+ BaseURL: &newBaseURL,
+ APIBaseURL: &newAPIBaseURL,
+ UploadBaseURL: &newUploadBaseURL,
+ CACertBundle: newCABundle,
+ }
+
+ updated, err := updateGithubEndpoint(suite.cli, suite.authToken, endpoint.Name, updateParams)
+ suite.NoError(err, "error updating github endpoint")
+
+ suite.Equal(updated.Name, endpoint.Name, "Endpoint name mismatch")
+ suite.Equal(updated.Description, newDescription, "Endpoint description mismatch")
+ suite.Equal(updated.BaseURL, newBaseURL, "Endpoint base URL mismatch")
+ suite.Equal(updated.APIBaseURL, newAPIBaseURL, "Endpoint API base URL mismatch")
+ suite.Equal(updated.UploadBaseURL, newUploadBaseURL, "Endpoint upload base URL mismatch")
+ suite.Equal(string(updated.CACertBundle), string(newCABundle), "Endpoint CA cert bundle mismatch")
+}
+
+func (suite *GarmSuite) MustDefaultGithubEndpoint() {
+ ep := suite.GetGithubEndpoint("github.com")
+
+ suite.NotNil(ep, "default GitHub endpoint not found")
+ suite.Equal(ep.Name, "github.com", "default GitHub endpoint name mismatch")
+}
+
+func (suite *GarmSuite) GetGithubEndpoint(name string) *params.ForgeEndpoint {
+ t := suite.T()
+ t.Log("Get GitHub endpoint")
+ endpoint, err := getGithubEndpoint(suite.cli, suite.authToken, name)
+ suite.NoError(err, "error getting GitHub endpoint")
+
+ return endpoint
+}
+
+func (suite *GarmSuite) CreateGithubEndpoint(endpointParams params.CreateGithubEndpointParams) (*params.ForgeEndpoint, error) {
+	t := suite.T()
+	t.Log("Create GitHub endpoint")
+	endpoint, err := createGithubEndpoint(suite.cli, suite.authToken, endpointParams)
+ suite.NoError(err, "error creating GitHub endpoint")
+
+ return endpoint, nil
+}
+
+func (suite *GarmSuite) DeleteGithubEndpoint(name string) error {
+ t := suite.T()
+ t.Log("Delete GitHub endpoint")
+ err := deleteGithubEndpoint(suite.cli, suite.authToken, name)
+ suite.NoError(err, "error deleting GitHub endpoint")
+
+ return nil
+}
+
+func (suite *GarmSuite) ListGithubEndpoints() params.ForgeEndpoints {
+ t := suite.T()
+ t.Log("List GitHub endpoints")
+ endpoints, err := listGithubEndpoints(suite.cli, suite.authToken)
+ suite.NoError(err, "error listing GitHub endpoints")
+
+ return endpoints
+}
+
+func (suite *GarmSuite) createDummyEndpoint(name string) (*params.ForgeEndpoint, error) {
+ endpointParams := params.CreateGithubEndpointParams{
+ Name: name,
+ Description: "Dummy endpoint",
+ BaseURL: "https://ghes.example.com",
+ APIBaseURL: "https://api.ghes.example.com/",
+ UploadBaseURL: "https://uploads.ghes.example.com/",
+ }
+
+ return suite.CreateGithubEndpoint(endpointParams)
+}
diff --git a/test/integration/external_provider_test.go b/test/integration/external_provider_test.go
new file mode 100644
index 00000000..2c85eb35
--- /dev/null
+++ b/test/integration/external_provider_test.go
@@ -0,0 +1,184 @@
+//go:build integration
+// +build integration
+
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+package integration
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/go-openapi/runtime"
+
+ commonParams "github.com/cloudbase/garm-provider-common/params"
+ "github.com/cloudbase/garm/client"
+ clientInstances "github.com/cloudbase/garm/client/instances"
+ "github.com/cloudbase/garm/params"
+)
+
+func (suite *GarmSuite) TestExternalProvider() {
+ t := suite.T()
+ t.Log("Testing external provider")
+ repoPoolParams2 := params.CreatePoolParams{
+ MaxRunners: 2,
+ MinIdleRunners: 0,
+ Flavor: "default",
+ Image: "ubuntu:24.04",
+ OSType: commonParams.Linux,
+ OSArch: commonParams.Amd64,
+ ProviderName: "test_external",
+ Tags: []string{"repo-runner-2"},
+ Enabled: true,
+ }
+ repoPool2 := suite.CreateRepoPool(suite.repo.ID, repoPoolParams2)
+ newParams := suite.UpdateRepoPool(suite.repo.ID, repoPool2.ID, repoPoolParams2.MaxRunners, 1)
+ t.Logf("Updated repo pool with pool_id %s with new_params %+v", repoPool2.ID, newParams)
+
+ err := suite.WaitPoolInstances(repoPool2.ID, commonParams.InstanceRunning, params.RunnerPending, 1*time.Minute)
+ suite.NoError(err, "error waiting for pool instances to be running")
+ repoPool2 = suite.GetRepoPool(suite.repo.ID, repoPool2.ID)
+ suite.DisableRepoPool(suite.repo.ID, repoPool2.ID)
+ suite.DeleteInstance(repoPool2.Instances[0].Name, false, false)
+ err = suite.WaitPoolInstances(repoPool2.ID, commonParams.InstancePendingDelete, params.RunnerPending, 1*time.Minute)
+ suite.NoError(err, "error waiting for pool instances to be pending delete")
+ suite.DeleteInstance(repoPool2.Instances[0].Name, true, false) // delete instance with forceRemove
+ err = suite.WaitInstanceToBeRemoved(repoPool2.Instances[0].Name, 1*time.Minute)
+ suite.NoError(err, "error waiting for instance to be removed")
+ suite.DeleteRepoPool(suite.repo.ID, repoPool2.ID)
+}
+
+func (suite *GarmSuite) WaitPoolInstances(poolID string, status commonParams.InstanceStatus, runnerStatus params.RunnerStatus, timeout time.Duration) error {
+ t := suite.T()
+ var timeWaited time.Duration // default is 0
+
+ pool, err := getPool(suite.cli, suite.authToken, poolID)
+ if err != nil {
+ return err
+ }
+
+ t.Logf("Waiting for pool instances with pool_id %s to reach desired status %v and desired_runner_status %v", poolID, status, runnerStatus)
+ for timeWaited < timeout {
+ poolInstances, err := listPoolInstances(suite.cli, suite.authToken, poolID)
+ if err != nil {
+ return err
+ }
+
+ instancesCount := 0
+ for _, instance := range poolInstances {
+ if instance.Status == status && instance.RunnerStatus == runnerStatus {
+ instancesCount++
+ }
+ }
+
+		t.Logf(
+			"Pool with pool_id %s: %d of %d instances reached status %v and runner_status %v",
+			poolID, instancesCount, len(poolInstances), status, runnerStatus)
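+		// The pool counts as settled once the number of instances matching both
+		// the instance status and the runner status equals the pool's
+		// min-idle-runners target.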
+ if pool.MinIdleRunnersAsInt() == instancesCount {
+ return nil
+ }
+ time.Sleep(5 * time.Second)
+ timeWaited += 5 * time.Second
+ }
+
+ err = suite.dumpPoolInstancesDetails(pool.ID)
+ suite.NoError(err, "error dumping pool instances details")
+
+ return fmt.Errorf("timeout waiting for pool %s instances to reach status: %s and runner status: %s", poolID, status, runnerStatus)
+}
+
+func (suite *GarmSuite) dumpPoolInstancesDetails(poolID string) error {
+ t := suite.T()
+ pool, err := getPool(suite.cli, suite.authToken, poolID)
+ if err != nil {
+ return err
+ }
+ if err := printJSONResponse(pool); err != nil {
+ return err
+ }
+ for _, instance := range pool.Instances {
+ instanceDetails, err := getInstance(suite.cli, suite.authToken, instance.Name)
+ if err != nil {
+ return err
+ }
+ t.Logf("Instance details: instance_name %s", instance.Name)
+ if err := printJSONResponse(instanceDetails); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (suite *GarmSuite) DisableRepoPool(repoID, repoPoolID string) {
+ t := suite.T()
+ t.Logf("Disable repo pool with repo_id %s and pool_id %s", repoID, repoPoolID)
+ enabled := false
+ poolParams := params.UpdatePoolParams{Enabled: &enabled}
+ _, err := updateRepoPool(suite.cli, suite.authToken, repoID, repoPoolID, poolParams)
+ suite.NoError(err, "error disabling repository pool")
+}
+
+func (suite *GarmSuite) DeleteInstance(name string, forceRemove, bypassGHUnauthorized bool) {
+ t := suite.T()
+ t.Logf("Delete instance %s with force_remove %t", name, forceRemove)
+ err := deleteInstance(suite.cli, suite.authToken, name, forceRemove, bypassGHUnauthorized)
+ suite.NoError(err, "error deleting instance", name)
+ t.Logf("Instance deletion initiated for instance %s", name)
+}
+
+func (suite *GarmSuite) WaitInstanceToBeRemoved(name string, timeout time.Duration) error {
+ t := suite.T()
+ var timeWaited time.Duration // default is 0
+ var instance *params.Instance
+
+ t.Logf("Waiting for instance %s to be removed", name)
+ for timeWaited < timeout {
+ instances, err := listInstances(suite.cli, suite.authToken)
+ if err != nil {
+ return err
+ }
+
+ instance = nil
+ for k, v := range instances {
+ if v.Name == name {
+ instance = &instances[k]
+ break
+ }
+ }
+ if instance == nil {
+ // The instance is not found in the list. We can safely assume
+ // that it is removed
+ return nil
+ }
+
+ time.Sleep(5 * time.Second)
+ timeWaited += 5 * time.Second
+ }
+
+ if err := printJSONResponse(*instance); err != nil {
+ return err
+ }
+ return fmt.Errorf("instance %s was not removed within the timeout", name)
+}
+
+func listPoolInstances(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter, poolID string) (params.Instances, error) {
+ listPoolInstancesResponse, err := apiCli.Instances.ListPoolInstances(
+ clientInstances.NewListPoolInstancesParams().WithPoolID(poolID),
+ apiAuthToken)
+ if err != nil {
+ return nil, err
+ }
+ return listPoolInstancesResponse.Payload, nil
+}
diff --git a/test/integration/gh_cleanup/main.go b/test/integration/gh_cleanup/main.go
new file mode 100644
index 00000000..86d39ea7
--- /dev/null
+++ b/test/integration/gh_cleanup/main.go
@@ -0,0 +1,188 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package main
+
+import (
+ "context"
+ "fmt"
+ "log/slog"
+ "os"
+
+ "github.com/google/go-github/v72/github"
+ "golang.org/x/oauth2"
+)
+
+var (
+ orgName = os.Getenv("ORG_NAME")
+ repoName = os.Getenv("REPO_NAME")
+
+ ghToken = os.Getenv("GH_TOKEN")
+)
+
+func main() {
+ controllerID, ctrlIDFound := os.LookupEnv("GARM_CONTROLLER_ID")
+ if ctrlIDFound {
+ _ = GhOrgRunnersCleanup(ghToken, orgName, controllerID)
+ _ = GhRepoRunnersCleanup(ghToken, orgName, repoName, controllerID)
+ } else {
+ slog.Warn("Env variable GARM_CONTROLLER_ID is not set, skipping GitHub runners cleanup")
+ }
+
+ baseURL, baseURLFound := os.LookupEnv("GARM_BASE_URL")
+ if ctrlIDFound && baseURLFound {
+ webhookURL := fmt.Sprintf("%s/webhooks/%s", baseURL, controllerID)
+ _ = GhOrgWebhookCleanup(ghToken, webhookURL, orgName)
+ _ = GhRepoWebhookCleanup(ghToken, webhookURL, orgName, repoName)
+ } else {
+ slog.Warn("Env variables GARM_CONTROLLER_ID & GARM_BASE_URL are not set, skipping webhooks cleanup")
+ }
+}
+
+func GhOrgRunnersCleanup(ghToken, orgName, controllerID string) error {
+ slog.Info("Cleanup Github runners", "controller_id", controllerID, "org_name", orgName)
+
+ client := getGithubClient(ghToken)
+ ghOrgRunners, _, err := client.Actions.ListOrganizationRunners(context.Background(), orgName, nil)
+ if err != nil {
+ return err
+ }
+
+ // Remove organization runners
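+	// GARM labels each runner it creates with runner-controller-id:<id>, so
+	// matching on that label scopes the cleanup to this deployment's runners.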
+ controllerLabel := fmt.Sprintf("runner-controller-id:%s", controllerID)
+ for _, orgRunner := range ghOrgRunners.Runners {
+ for _, label := range orgRunner.Labels {
+ if label.GetName() == controllerLabel {
+ if _, err := client.Actions.RemoveOrganizationRunner(context.Background(), orgName, orgRunner.GetID()); err != nil {
+ // We don't fail if we can't remove a single runner. This
+ // is a best effort to try and remove all the orphan runners.
+					slog.With(slog.Any("error", err)).Error("Failed to remove organization runner", "org_runner", orgRunner.GetName())
+ break
+ }
+ slog.Info("Removed organization runner", "org_runner", orgRunner.GetName())
+ break
+ }
+ }
+ }
+
+ return nil
+}
+
+func GhRepoRunnersCleanup(ghToken, orgName, repoName, controllerID string) error {
+ slog.Info("Cleanup Github runners", "controller_id", controllerID, "org_name", orgName, "repo_name", repoName)
+
+ client := getGithubClient(ghToken)
+ ghRepoRunners, _, err := client.Actions.ListRunners(context.Background(), orgName, repoName, nil)
+ if err != nil {
+ return err
+ }
+
+ // Remove repository runners
+ controllerLabel := fmt.Sprintf("runner-controller-id:%s", controllerID)
+ for _, repoRunner := range ghRepoRunners.Runners {
+ for _, label := range repoRunner.Labels {
+ if label.GetName() == controllerLabel {
+ if _, err := client.Actions.RemoveRunner(context.Background(), orgName, repoName, repoRunner.GetID()); err != nil {
+ // We don't fail if we can't remove a single runner. This is a
+ // best-effort attempt to remove all the orphaned runners.
+ slog.With(slog.Any("error", err)).Error("Failed to remove repository runner", "runner_name", repoRunner.GetName())
+ break
+ }
+ slog.Info("Removed repository runner", "runner_name", repoRunner.GetName())
+ break
+ }
+ }
+ }
+
+ return nil
+}
+
+func GhOrgWebhookCleanup(ghToken, webhookURL, orgName string) error {
+ slog.Info("Cleanup Github webhook", "webhook_url", webhookURL, "org_name", orgName)
+ hook, err := getGhOrgWebhook(webhookURL, ghToken, orgName)
+ if err != nil {
+ return err
+ }
+
+ // Remove organization webhook
+ if hook != nil {
+ client := getGithubClient(ghToken)
+ if _, err := client.Organizations.DeleteHook(context.Background(), orgName, hook.GetID()); err != nil {
+ return err
+ }
+ slog.Info("Github webhook removed", "webhook_url", webhookURL, "org_name", orgName)
+ }
+
+ return nil
+}
+
+func GhRepoWebhookCleanup(ghToken, webhookURL, orgName, repoName string) error {
+ slog.Info("Cleanup Github webhook", "webhook_url", webhookURL, "org_name", orgName, "repo_name", repoName)
+
+ hook, err := getGhRepoWebhook(webhookURL, ghToken, orgName, repoName)
+ if err != nil {
+ return err
+ }
+
+ // Remove repository webhook
+ if hook != nil {
+ client := getGithubClient(ghToken)
+ if _, err := client.Repositories.DeleteHook(context.Background(), orgName, repoName, hook.GetID()); err != nil {
+ return err
+ }
+ slog.Info("Github webhook with", "webhook_url", webhookURL, "org_name", orgName, "repo_name", repoName)
+ }
+
+ return nil
+}
+
+func getGhOrgWebhook(url, ghToken, orgName string) (*github.Hook, error) {
+ client := getGithubClient(ghToken)
+ ghOrgHooks, _, err := client.Organizations.ListHooks(context.Background(), orgName, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, hook := range ghOrgHooks {
+ hookURL := hook.Config.GetURL()
+ if hookURL == url {
+ return hook, nil
+ }
+ }
+
+ return nil, nil
+}
+
+func getGhRepoWebhook(url, ghToken, orgName, repoName string) (*github.Hook, error) {
+ client := getGithubClient(ghToken)
+ ghRepoHooks, _, err := client.Repositories.ListHooks(context.Background(), orgName, repoName, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, hook := range ghRepoHooks {
+ hookURL := hook.Config.GetURL()
+ if hookURL == url {
+ return hook, nil
+ }
+ }
+
+ return nil, nil
+}
+
+func getGithubClient(oauthToken string) *github.Client {
+ ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: oauthToken})
+ tc := oauth2.NewClient(context.Background(), ts)
+ return github.NewClient(tc)
+}
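
Worth noting: GARM attaches a runner-controller-id:<controller-id> label to every runner it creates, and that label is what both cleanup functions above key on to find orphaned runners belonging to a specific controller. Deletion failures are logged and skipped, so a single stuck runner cannot abort the sweep.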
diff --git a/test/integration/jobs_test.go b/test/integration/jobs_test.go
new file mode 100644
index 00000000..4b2d9d5d
--- /dev/null
+++ b/test/integration/jobs_test.go
@@ -0,0 +1,181 @@
+//go:build integration
+// +build integration
+
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+package integration
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/google/go-github/v72/github"
+
+ commonParams "github.com/cloudbase/garm-provider-common/params"
+ "github.com/cloudbase/garm/params"
+)
+
+func (suite *GarmSuite) TestWorkflowJobs() {
+ suite.TriggerWorkflow(suite.ghToken, orgName, repoName, workflowFileName, "org-runner")
+ suite.ValidateJobLifecycle("org-runner")
+
+ suite.TriggerWorkflow(suite.ghToken, orgName, repoName, workflowFileName, "repo-runner")
+ suite.ValidateJobLifecycle("repo-runner")
+}
+
+func (suite *GarmSuite) TriggerWorkflow(ghToken, orgName, repoName, workflowFileName, labelName string) {
+ t := suite.T()
+ t.Logf("Trigger workflow with label %s", labelName)
+
+ client := getGithubClient(ghToken)
+ eventReq := github.CreateWorkflowDispatchEventRequest{
+ Ref: "main",
+ Inputs: map[string]interface{}{
+ "sleep_time": "50",
+ "runner_label": labelName,
+ },
+ }
+ _, err := client.Actions.CreateWorkflowDispatchEventByFileName(context.Background(), orgName, repoName, workflowFileName, eventReq)
+ suite.NoError(err, "error triggering workflow")
+}
+
+func (suite *GarmSuite) ValidateJobLifecycle(label string) {
+ t := suite.T()
+ t.Logf("Validate GARM job lifecycle with label %s", label)
+
+ // wait for job list to be updated
+ job, err := suite.waitLabelledJob(label, 4*time.Minute)
+ suite.NoError(err, "error waiting for job to be created")
+
+ // check expected job status
+ job, err = suite.waitJobStatus(job.ID, params.JobStatusQueued, 4*time.Minute)
+ suite.NoError(err, "error waiting for job to be queued")
+
+ job, err = suite.waitJobStatus(job.ID, params.JobStatusInProgress, 4*time.Minute)
+ suite.NoError(err, "error waiting for job to be in progress")
+
+ // check expected instance status
+ instance, err := suite.waitInstanceStatus(job.RunnerName, commonParams.InstanceRunning, params.RunnerActive, 5*time.Minute)
+ suite.NoError(err, "error waiting for instance to be running")
+
+ // wait for job to be completed
+ _, err = suite.waitJobStatus(job.ID, params.JobStatusCompleted, 4*time.Minute)
+ suite.NoError(err, "error waiting for job to be completed")
+
+ // wait for instance to be removed
+ err = suite.WaitInstanceToBeRemoved(instance.Name, 5*time.Minute)
+ suite.NoError(err, "error waiting for instance to be removed")
+
+ // wait for GARM to rebuild the pool running idle instances
+ err = suite.WaitPoolInstances(instance.PoolID, commonParams.InstanceRunning, params.RunnerIdle, 5*time.Minute)
+ suite.NoError(err, "error waiting for pool instances to be running idle")
+}
+
+func (suite *GarmSuite) waitLabelledJob(label string, timeout time.Duration) (*params.Job, error) {
+ t := suite.T()
+ var timeWaited time.Duration // default is 0
+ var jobs params.Jobs
+ var err error
+
+ t.Logf("Waiting for job with label %s", label)
+ for timeWaited < timeout {
+ jobs, err = listJobs(suite.cli, suite.authToken)
+ if err != nil {
+ return nil, err
+ }
+ for _, job := range jobs {
+ for _, jobLabel := range job.Labels {
+ if jobLabel == label {
+ return &job, err
+ }
+ }
+ }
+ time.Sleep(5 * time.Second)
+ timeWaited += 5 * time.Second
+ }
+
+ if err := printJSONResponse(jobs); err != nil {
+ return nil, err
+ }
+ return nil, fmt.Errorf("failed to wait job with label %s", label)
+}
+
+func (suite *GarmSuite) waitJobStatus(id int64, status params.JobStatus, timeout time.Duration) (*params.Job, error) {
+ t := suite.T()
+ var timeWaited time.Duration // default is 0
+ var job *params.Job
+
+ t.Logf("Waiting for job %d to reach status %v", id, status)
+ for timeWaited < timeout {
+ jobs, err := listJobs(suite.cli, suite.authToken)
+ if err != nil {
+ return nil, err
+ }
+
+ job = nil
+ for k, v := range jobs {
+ if v.ID == id {
+ job = &jobs[k]
+ break
+ }
+ }
+
+ if job == nil {
+ if status == params.JobStatusCompleted {
+ // The job is not found in the list. We can safely assume
+ // that it is completed
+ return nil, nil
+ }
+ // if the job is not found, and expected status is not "completed",
+ // we need to error out.
+ return nil, fmt.Errorf("job %d not found, expected to be found in status %s", id, status)
+ } else if job.Status == string(status) {
+ return job, nil
+ }
+ time.Sleep(5 * time.Second)
+ timeWaited += 5 * time.Second
+ }
+
+ if err := printJSONResponse(*job); err != nil {
+ return nil, err
+ }
+ return nil, fmt.Errorf("timeout waiting for job %d to reach status %s", id, status)
+}
+
+func (suite *GarmSuite) waitInstanceStatus(name string, status commonParams.InstanceStatus, runnerStatus params.RunnerStatus, timeout time.Duration) (*params.Instance, error) {
+ t := suite.T()
+ var timeWaited time.Duration // default is 0
+ var instance *params.Instance
+ var err error
+
+ t.Logf("Waiting for instance %s to reach desired status %v and desired runner status %v", name, status, runnerStatus)
+ for timeWaited < timeout {
+ instance, err = getInstance(suite.cli, suite.authToken, name)
+ if err != nil {
+ return nil, err
+ }
+ t.Logf("Instance %s has status %v and runner status %v", name, instance.Status, instance.RunnerStatus)
+ if instance.Status == status && instance.RunnerStatus == runnerStatus {
+ return instance, nil
+ }
+ time.Sleep(5 * time.Second)
+ timeWaited += 5 * time.Second
+ }
+
+ if err := printJSONResponse(*instance); err != nil {
+ return nil, err
+ }
+ return nil, fmt.Errorf("timeout waiting for instance %s status to reach status %s and runner status %s", name, status, runnerStatus)
+}
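
All three wait helpers above share the same poll-sleep-timeout shape. A minimal sketch of that pattern factored into a generic helper, assuming the 5-second interval used throughout (the condition closure is hypothetical):

func waitFor(condition func() (bool, error), timeout time.Duration) error {
    var waited time.Duration
    for waited < timeout {
        done, err := condition()
        if err != nil {
            return err
        }
        if done {
            // condition satisfied before the deadline
            return nil
        }
        time.Sleep(5 * time.Second)
        waited += 5 * time.Second
    }
    return fmt.Errorf("timed out after %s", timeout)
}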
diff --git a/test/integration/list_info_test.go b/test/integration/list_info_test.go
new file mode 100644
index 00000000..ddb3ff86
--- /dev/null
+++ b/test/integration/list_info_test.go
@@ -0,0 +1,85 @@
+//go:build integration
+// +build integration
+
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+package integration
+
+import (
+ "fmt"
+ "os"
+
+ "github.com/cloudbase/garm/params"
+)
+
+func (suite *GarmSuite) TestGetControllerInfo() {
+ controllerInfo := suite.GetControllerInfo()
+ suite.NotEmpty(controllerInfo.ControllerID, "controller ID is empty")
+}
+
+func (suite *GarmSuite) GetMetricsToken() {
+ t := suite.T()
+ t.Log("Get metrics token")
+ metricsToken, err := getMetricsToken(suite.cli, suite.authToken)
+ suite.NoError(err, "error getting metrics token")
+ suite.NotEmpty(metricsToken, "metrics token is empty")
+}
+
+func (suite *GarmSuite) GetControllerInfo() *params.ControllerInfo {
+ t := suite.T()
+ t.Log("Get controller info")
+ controllerInfo, err := getControllerInfo(suite.cli, suite.authToken)
+ suite.NoError(err, "error getting controller info")
+ err = suite.appendCtrlInfoToGitHubEnv(&controllerInfo)
+ suite.NoError(err, "error appending controller info to GitHub env")
+ err = printJSONResponse(controllerInfo)
+ suite.NoError(err, "error printing controller info")
+ return &controllerInfo
+}
+
+func (suite *GarmSuite) TestListCredentials() {
+ t := suite.T()
+ t.Log("List credentials")
+ credentials, err := listCredentials(suite.cli, suite.authToken)
+ suite.NoError(err, "error listing credentials")
+ suite.NotEmpty(credentials, "credentials list is empty")
+}
+
+func (suite *GarmSuite) TestListProviders() {
+ t := suite.T()
+ t.Log("List providers")
+ providers, err := listProviders(suite.cli, suite.authToken)
+ suite.NoError(err, "error listing providers")
+ suite.NotEmpty(providers, "providers list is empty")
+}
+
+func (suite *GarmSuite) appendCtrlInfoToGitHubEnv(controllerInfo *params.ControllerInfo) error {
+ t := suite.T()
+ envFile, found := os.LookupEnv("GITHUB_ENV")
+ if !found {
+ t.Log("GITHUB_ENV not set, skipping appending controller info")
+ return nil
+ }
+ file, err := os.OpenFile(envFile, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0o644)
+ if err != nil {
+ return err
+ }
+ t.Cleanup(func() {
+ file.Close()
+ })
+ if _, err := file.WriteString(fmt.Sprintf("export GARM_CONTROLLER_ID=%s\n", controllerInfo.ControllerID)); err != nil {
+ return err
+ }
+ return nil
+}
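
The export prefix written above implies the file is meant to be sourced by shell scripts later in the pipeline (the teardown script below does exactly that) rather than parsed through the runner's usual key=value GITHUB_ENV mechanism; the controller ID recorded here is what the gh_cleanup tool reads back via GARM_CONTROLLER_ID.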
diff --git a/test/integration/organizations_test.go b/test/integration/organizations_test.go
new file mode 100644
index 00000000..d587f4a5
--- /dev/null
+++ b/test/integration/organizations_test.go
@@ -0,0 +1,205 @@
+//go:build integration
+// +build integration
+
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+package integration
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/google/go-github/v72/github"
+
+ commonParams "github.com/cloudbase/garm-provider-common/params"
+ "github.com/cloudbase/garm/params"
+)
+
+func (suite *GarmSuite) TestOrganizations() {
+ organization := suite.CreateOrg(orgName, suite.credentialsName, orgWebhookSecret)
+ org := suite.UpdateOrg(organization.ID, fmt.Sprintf("%s-clone", suite.credentialsName))
+ suite.NotEqual(organization, org, "organization not updated")
+ orgHookInfo := suite.InstallOrgWebhook(org.ID)
+ suite.ValidateOrgWebhookInstalled(suite.ghToken, orgHookInfo.URL, orgName)
+ suite.UninstallOrgWebhook(org.ID)
+ suite.ValidateOrgWebhookUninstalled(suite.ghToken, orgHookInfo.URL, orgName)
+ _ = suite.InstallOrgWebhook(org.ID)
+ suite.ValidateOrgWebhookInstalled(suite.ghToken, orgHookInfo.URL, orgName)
+
+ orgPoolParams := params.CreatePoolParams{
+ MaxRunners: 2,
+ MinIdleRunners: 0,
+ Flavor: "default",
+ Image: "ubuntu:24.04",
+ OSType: commonParams.Linux,
+ OSArch: commonParams.Amd64,
+ ProviderName: "lxd_local",
+ Tags: []string{"org-runner"},
+ Enabled: true,
+ }
+ orgPool := suite.CreateOrgPool(org.ID, orgPoolParams)
+ orgPoolGot := suite.GetOrgPool(org.ID, orgPool.ID)
+ suite.Equal(orgPool, orgPoolGot, "organization pool mismatch")
+ suite.DeleteOrgPool(org.ID, orgPool.ID)
+
+ orgPool = suite.CreateOrgPool(org.ID, orgPoolParams)
+ orgPoolUpdated := suite.UpdateOrgPool(org.ID, orgPool.ID, orgPoolParams.MaxRunners, 1)
+ suite.NotEqual(orgPool, orgPoolUpdated, "organization pool not updated")
+
+ suite.WaitOrgRunningIdleInstances(org.ID, 6*time.Minute)
+}
+
+func (suite *GarmSuite) CreateOrg(orgName, credentialsName, orgWebhookSecret string) *params.Organization {
+ t := suite.T()
+ t.Logf("Create org with org_name %s", orgName)
+ orgParams := params.CreateOrgParams{
+ Name: orgName,
+ CredentialsName: credentialsName,
+ WebhookSecret: orgWebhookSecret,
+ }
+ org, err := createOrg(suite.cli, suite.authToken, orgParams)
+ suite.NoError(err, "error creating organization")
+ return org
+}
+
+func (suite *GarmSuite) UpdateOrg(id, credentialsName string) *params.Organization {
+ t := suite.T()
+ t.Logf("Update org with org_id %s", id)
+ updateParams := params.UpdateEntityParams{
+ CredentialsName: credentialsName,
+ }
+ org, err := updateOrg(suite.cli, suite.authToken, id, updateParams)
+ suite.NoError(err, "error updating organization")
+ return org
+}
+
+func (suite *GarmSuite) InstallOrgWebhook(id string) *params.HookInfo {
+ t := suite.T()
+ t.Logf("Install org webhook with org_id %s", id)
+ webhookParams := params.InstallWebhookParams{
+ WebhookEndpointType: params.WebhookEndpointDirect,
+ }
+ _, err := installOrgWebhook(suite.cli, suite.authToken, id, webhookParams)
+ suite.NoError(err, "error installing organization webhook")
+ webhookInfo, err := getOrgWebhook(suite.cli, suite.authToken, id)
+ suite.NoError(err, "error getting organization webhook")
+ return webhookInfo
+}
+
+func (suite *GarmSuite) ValidateOrgWebhookInstalled(ghToken, url, orgName string) {
+ hook, err := getGhOrgWebhook(url, ghToken, orgName)
+ suite.NoError(err, "error getting github webhook")
+ suite.NotNil(hook, "github webhook with url %s, for org %s was not properly installed", url, orgName)
+}
+
+func getGhOrgWebhook(url, ghToken, orgName string) (*github.Hook, error) {
+ client := getGithubClient(ghToken)
+ ghOrgHooks, _, err := client.Organizations.ListHooks(context.Background(), orgName, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, hook := range ghOrgHooks {
+ hookURL := hook.Config.GetURL()
+ if hookURL == url {
+ return hook, nil
+ }
+ }
+
+ return nil, nil
+}
+
+func (suite *GarmSuite) UninstallOrgWebhook(id string) {
+ t := suite.T()
+ t.Logf("Uninstall org webhook with org_id %s", id)
+ err := uninstallOrgWebhook(suite.cli, suite.authToken, id)
+ suite.NoError(err, "error uninstalling organization webhook")
+}
+
+func (suite *GarmSuite) ValidateOrgWebhookUninstalled(ghToken, url, orgName string) {
+ hook, err := getGhOrgWebhook(url, ghToken, orgName)
+ suite.NoError(err, "error getting github webhook")
+ suite.Nil(hook, "github webhook with url %s, for org %s was not properly uninstalled", url, orgName)
+}
+
+func (suite *GarmSuite) CreateOrgPool(orgID string, poolParams params.CreatePoolParams) *params.Pool {
+ t := suite.T()
+ t.Logf("Create org pool with org_id %s", orgID)
+ pool, err := createOrgPool(suite.cli, suite.authToken, orgID, poolParams)
+ suite.NoError(err, "error creating organization pool")
+ return pool
+}
+
+func (suite *GarmSuite) GetOrgPool(orgID, orgPoolID string) *params.Pool {
+ t := suite.T()
+ t.Logf("Get org pool with org_id %s and pool_id %s", orgID, orgPoolID)
+ pool, err := getOrgPool(suite.cli, suite.authToken, orgID, orgPoolID)
+ suite.NoError(err, "error getting organization pool")
+ return pool
+}
+
+func (suite *GarmSuite) DeleteOrgPool(orgID, orgPoolID string) {
+ t := suite.T()
+ t.Logf("Delete org pool with org_id %s and pool_id %s", orgID, orgPoolID)
+ err := deleteOrgPool(suite.cli, suite.authToken, orgID, orgPoolID)
+ suite.NoError(err, "error deleting organization pool")
+}
+
+func (suite *GarmSuite) UpdateOrgPool(orgID, orgPoolID string, maxRunners, minIdleRunners uint) *params.Pool {
+ t := suite.T()
+ t.Logf("Update org pool with org_id %s and pool_id %s", orgID, orgPoolID)
+ poolParams := params.UpdatePoolParams{
+ MinIdleRunners: &minIdleRunners,
+ MaxRunners: &maxRunners,
+ }
+ pool, err := updateOrgPool(suite.cli, suite.authToken, orgID, orgPoolID, poolParams)
+ suite.NoError(err, "error updating organization pool")
+ return pool
+}
+
+func (suite *GarmSuite) WaitOrgRunningIdleInstances(orgID string, timeout time.Duration) {
+ t := suite.T()
+ orgPools, err := listOrgPools(suite.cli, suite.authToken, orgID)
+ suite.NoError(err, "error listing organization pools")
+ for _, pool := range orgPools {
+ err := suite.WaitPoolInstances(pool.ID, commonParams.InstanceRunning, params.RunnerIdle, timeout)
+ if err != nil {
+ suite.dumpOrgInstancesDetails(orgID)
+ t.Errorf("timeout waiting for organization %s instances to reach status: %s and runner status: %s", orgID, commonParams.InstanceRunning, params.RunnerIdle)
+ }
+ }
+}
+
+func (suite *GarmSuite) dumpOrgInstancesDetails(orgID string) {
+ t := suite.T()
+ // print org details
+ t.Logf("Dumping org details with org_id %s", orgID)
+ org, err := getOrg(suite.cli, suite.authToken, orgID)
+ suite.NoError(err, "error getting organization")
+ err = printJSONResponse(org)
+ suite.NoError(err, "error printing organization")
+
+ // print org instances details
+ t.Logf("Dumping org instances details for org %s", orgID)
+ instances, err := listOrgInstances(suite.cli, suite.authToken, orgID)
+ suite.NoError(err, "error listing organization instances")
+ for _, instance := range instances {
+ instance, err := getInstance(suite.cli, suite.authToken, instance.Name)
+ suite.NoError(err, "error getting instance")
+ t.Logf("Instance info for instace %s", instance.Name)
+ err = printJSONResponse(instance)
+ suite.NoError(err, "error printing instance")
+ }
+}
diff --git a/test/integration/provider/garm-external-provider b/test/integration/provider/garm-external-provider
new file mode 100755
index 00000000..88e6f46e
--- /dev/null
+++ b/test/integration/provider/garm-external-provider
@@ -0,0 +1,56 @@
+#!/bin/bash
+
+set -e
+set -o pipefail
+
+if [ ! -t 0 ]
+then
+ INPUT=$(cat -)
+fi
+
+if [ -z "$GARM_PROVIDER_CONFIG_FILE" ]
+then
+ echo "no config file specified in env"
+ exit 1
+fi
+
+source "$GARM_PROVIDER_CONFIG_FILE"
+
+function CreateInstance() {
+ if [ -z "$INPUT" ]; then
+ echo "expected build params in stdin"
+ exit 1
+ fi
+
+ jq -rnc '{"provider_id": "test-provider-id", "name": "test-instance-name", "os_type": "linux", "os_name": "ubuntu", "os_version": "20.04", "os_arch": "x86_64", "status": "running"}'
+}
+
+case "$GARM_COMMAND" in
+ "CreateInstance")
+ CreateInstance
+ ;;
+ "DeleteInstance")
+ echo "RemoveAllInstances not implemented"
+ exit 1
+ ;;
+ "GetInstance")
+ echo "Get instance with id: ${GARM_INSTANCE_ID}"
+ ;;
+ "ListInstances")
+ echo "List instances with pool id: ${GARM_POOL_ID}"
+ ;;
+ "StartInstance")
+ echo "Start instance: ${GARM_INSTANCE_NAME} with id: ${GARM_INSTANCE_ID}"
+ ;;
+ "StopInstance")
+ echo "Stop instance: ${GARM_INSTANCE_NAME} with id: ${GARM_INSTANCE_ID}"
+ ;;
+ "RemoveAllInstances")
+ echo "RemoveAllInstances not implemented"
+ exit 1
+ ;;
+ *)
+ echo "Invalid GARM provider command: \"$GARM_COMMAND\""
+ exit 1
+ ;;
+esac
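
The stub above illustrates the external provider contract: GARM invokes the executable with the operation name in GARM_COMMAND and the provider config path in GARM_PROVIDER_CONFIG_FILE, feeds bootstrap parameters to CreateInstance on stdin, and expects a JSON instance object on stdout. A minimal Go sketch of the same contract, mirroring only the fields the bash stub emits (an illustration under those assumptions, not the full provider interface):

package main

import (
    "encoding/json"
    "fmt"
    "io"
    "os"
)

// instance mirrors the JSON object the bash stub prints for CreateInstance.
type instance struct {
    ProviderID string `json:"provider_id"`
    Name       string `json:"name"`
    OSType     string `json:"os_type"`
    OSName     string `json:"os_name"`
    OSVersion  string `json:"os_version"`
    OSArch     string `json:"os_arch"`
    Status     string `json:"status"`
}

func main() {
    switch os.Getenv("GARM_COMMAND") {
    case "CreateInstance":
        // bootstrap params arrive on stdin for CreateInstance
        if _, err := io.ReadAll(os.Stdin); err != nil {
            fmt.Fprintln(os.Stderr, "failed to read build params from stdin")
            os.Exit(1)
        }
        _ = json.NewEncoder(os.Stdout).Encode(instance{
            ProviderID: "test-provider-id",
            Name:       "test-instance-name",
            OSType:     "linux",
            OSName:     "ubuntu",
            OSVersion:  "20.04",
            OSArch:     "x86_64",
            Status:     "running",
        })
    default:
        fmt.Fprintf(os.Stderr, "invalid GARM provider command: %q\n", os.Getenv("GARM_COMMAND"))
        os.Exit(1)
    }
}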
diff --git a/test/integration/repositories_test.go b/test/integration/repositories_test.go
new file mode 100644
index 00000000..1b0558f9
--- /dev/null
+++ b/test/integration/repositories_test.go
@@ -0,0 +1,221 @@
+//go:build integration
+// +build integration
+
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+package integration
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/google/go-github/v72/github"
+ "golang.org/x/oauth2"
+
+ commonParams "github.com/cloudbase/garm-provider-common/params"
+ "github.com/cloudbase/garm/params"
+)
+
+func (suite *GarmSuite) EnsureTestCredentials(name string, oauthToken string, endpointName string) {
+ t := suite.T()
+ t.Log("Ensuring test credentials exist")
+ createCredsParams := params.CreateGithubCredentialsParams{
+ Name: name,
+ Endpoint: endpointName,
+ Description: "GARM test credentials",
+ AuthType: params.ForgeAuthTypePAT,
+ PAT: params.GithubPAT{
+ OAuth2Token: oauthToken,
+ },
+ }
+ suite.CreateGithubCredentials(createCredsParams)
+
+ createCredsParams.Name = fmt.Sprintf("%s-clone", name)
+ suite.CreateGithubCredentials(createCredsParams)
+}
+
+func (suite *GarmSuite) TestRepositories() {
+ t := suite.T()
+
+ t.Logf("Update repo with repo_id %s", suite.repo.ID)
+ updateParams := params.UpdateEntityParams{
+ CredentialsName: fmt.Sprintf("%s-clone", suite.credentialsName),
+ }
+ repo, err := updateRepo(suite.cli, suite.authToken, suite.repo.ID, updateParams)
+ suite.NoError(err, "error updating repository")
+ suite.Equal(fmt.Sprintf("%s-clone", suite.credentialsName), repo.CredentialsName, "credentials name mismatch")
+ suite.repo = repo
+
+ hookRepoInfo := suite.InstallRepoWebhook(suite.repo.ID)
+ suite.ValidateRepoWebhookInstalled(suite.ghToken, hookRepoInfo.URL, orgName, repoName)
+ suite.UninstallRepoWebhook(suite.repo.ID)
+ suite.ValidateRepoWebhookUninstalled(suite.ghToken, hookRepoInfo.URL, orgName, repoName)
+
+ suite.InstallRepoWebhook(suite.repo.ID)
+ suite.ValidateRepoWebhookInstalled(suite.ghToken, hookRepoInfo.URL, orgName, repoName)
+
+ repoPoolParams := params.CreatePoolParams{
+ MaxRunners: 2,
+ MinIdleRunners: 0,
+ Flavor: "default",
+ Image: "ubuntu:24.04",
+ OSType: commonParams.Linux,
+ OSArch: commonParams.Amd64,
+ ProviderName: "lxd_local",
+ Tags: []string{"repo-runner"},
+ Enabled: true,
+ }
+
+ repoPool := suite.CreateRepoPool(suite.repo.ID, repoPoolParams)
+ suite.Equal(repoPool.MaxRunners, repoPoolParams.MaxRunners, "max runners mismatch")
+ suite.Equal(repoPool.MinIdleRunners, repoPoolParams.MinIdleRunners, "min idle runners mismatch")
+
+ repoPoolGet := suite.GetRepoPool(suite.repo.ID, repoPool.ID)
+ suite.Equal(*repoPool, *repoPoolGet, "pool get mismatch")
+
+ suite.DeleteRepoPool(suite.repo.ID, repoPool.ID)
+
+ repoPool = suite.CreateRepoPool(suite.repo.ID, repoPoolParams)
+ updatedRepoPool := suite.UpdateRepoPool(suite.repo.ID, repoPool.ID, repoPoolParams.MaxRunners, 1)
+ suite.NotEqual(updatedRepoPool.MinIdleRunners, repoPool.MinIdleRunners, "min idle runners not updated")
+
+ suite.WaitRepoRunningIdleInstances(suite.repo.ID, 6*time.Minute)
+}
+
+func (suite *GarmSuite) InstallRepoWebhook(id string) *params.HookInfo {
+ t := suite.T()
+ t.Logf("Install repo webhook with repo_id %s", id)
+ webhookParams := params.InstallWebhookParams{
+ WebhookEndpointType: params.WebhookEndpointDirect,
+ }
+ _, err := installRepoWebhook(suite.cli, suite.authToken, id, webhookParams)
+ suite.NoError(err, "error installing repository webhook")
+
+ webhookInfo, err := getRepoWebhook(suite.cli, suite.authToken, id)
+ suite.NoError(err, "error getting repository webhook")
+ return webhookInfo
+}
+
+func (suite *GarmSuite) ValidateRepoWebhookInstalled(ghToken, url, orgName, repoName string) {
+ hook, err := getGhRepoWebhook(url, ghToken, orgName, repoName)
+ suite.NoError(err, "error getting github webhook")
+ suite.NotNil(hook, "github webhook with url %s, for repo %s/%s was not properly installed", url, orgName, repoName)
+}
+
+func getGhRepoWebhook(url, ghToken, orgName, repoName string) (*github.Hook, error) {
+ client := getGithubClient(ghToken)
+ ghRepoHooks, _, err := client.Repositories.ListHooks(context.Background(), orgName, repoName, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, hook := range ghRepoHooks {
+ hookURL := hook.Config.GetURL()
+ if hookURL == url {
+ return hook, nil
+ }
+ }
+
+ return nil, nil
+}
+
+func getGithubClient(oauthToken string) *github.Client {
+ ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: oauthToken})
+ tc := oauth2.NewClient(context.Background(), ts)
+ return github.NewClient(tc)
+}
+
+func (suite *GarmSuite) UninstallRepoWebhook(id string) {
+ t := suite.T()
+ t.Logf("Uninstall repo webhook with repo_id %s", id)
+ err := uninstallRepoWebhook(suite.cli, suite.authToken, id)
+ suite.NoError(err, "error uninstalling repository webhook")
+}
+
+func (suite *GarmSuite) ValidateRepoWebhookUninstalled(ghToken, url, orgName, repoName string) {
+ hook, err := getGhRepoWebhook(url, ghToken, orgName, repoName)
+ suite.NoError(err, "error getting github webhook")
+ suite.Nil(hook, "github webhook with url %s, for repo %s/%s was not properly uninstalled", url, orgName, repoName)
+}
+
+func (suite *GarmSuite) CreateRepoPool(repoID string, poolParams params.CreatePoolParams) *params.Pool {
+ t := suite.T()
+ t.Logf("Create repo pool with repo_id %s and pool_params %+v", repoID, poolParams)
+ pool, err := createRepoPool(suite.cli, suite.authToken, repoID, poolParams)
+ suite.NoError(err, "error creating repository pool")
+ return pool
+}
+
+func (suite *GarmSuite) GetRepoPool(repoID, repoPoolID string) *params.Pool {
+ t := suite.T()
+ t.Logf("Get repo pool repo_id %s and pool_id %s", repoID, repoPoolID)
+ pool, err := getRepoPool(suite.cli, suite.authToken, repoID, repoPoolID)
+ suite.NoError(err, "error getting repository pool")
+ return pool
+}
+
+func (suite *GarmSuite) DeleteRepoPool(repoID, repoPoolID string) {
+ t := suite.T()
+ t.Logf("Delete repo pool with repo_id %s and pool_id %s", repoID, repoPoolID)
+ err := deleteRepoPool(suite.cli, suite.authToken, repoID, repoPoolID)
+ suite.NoError(err, "error deleting repository pool")
+}
+
+func (suite *GarmSuite) UpdateRepoPool(repoID, repoPoolID string, maxRunners, minIdleRunners uint) *params.Pool {
+ t := suite.T()
+ t.Logf("Update repo pool with repo_id %s and pool_id %s", repoID, repoPoolID)
+ poolParams := params.UpdatePoolParams{
+ MinIdleRunners: &minIdleRunners,
+ MaxRunners: &maxRunners,
+ }
+ pool, err := updateRepoPool(suite.cli, suite.authToken, repoID, repoPoolID, poolParams)
+ suite.NoError(err, "error updating repository pool")
+ return pool
+}
+
+func (suite *GarmSuite) WaitRepoRunningIdleInstances(repoID string, timeout time.Duration) {
+ t := suite.T()
+ repoPools, err := listRepoPools(suite.cli, suite.authToken, repoID)
+ suite.NoError(err, "error listing repo pools")
+ for _, pool := range repoPools {
+ err := suite.WaitPoolInstances(pool.ID, commonParams.InstanceRunning, params.RunnerIdle, timeout)
+ if err != nil {
+ suite.dumpRepoInstancesDetails(repoID)
+ t.Errorf("error waiting for pool instances to be running idle: %v", err)
+ }
+ }
+}
+
+func (suite *GarmSuite) dumpRepoInstancesDetails(repoID string) {
+ t := suite.T()
+ // print repo details
+ t.Logf("Dumping repo details for repo %s", repoID)
+ repo, err := getRepo(suite.cli, suite.authToken, repoID)
+ suite.NoError(err, "error getting repo")
+ err = printJSONResponse(repo)
+ suite.NoError(err, "error printing repo")
+
+ // print repo instances details
+ t.Logf("Dumping repo instances details for repo %s", repoID)
+ instances, err := listRepoInstances(suite.cli, suite.authToken, repoID)
+ suite.NoError(err, "error listing repo instances")
+ for _, instance := range instances {
+ instance, err := getInstance(suite.cli, suite.authToken, instance.Name)
+ suite.NoError(err, "error getting instance")
+ t.Logf("Instance info for instance %s", instance.Name)
+ err = printJSONResponse(instance)
+ suite.NoError(err, "error printing instance")
+ }
+}
diff --git a/test/integration/scripts/setup-garm.sh b/test/integration/scripts/setup-garm.sh
new file mode 100755
index 00000000..40a61943
--- /dev/null
+++ b/test/integration/scripts/setup-garm.sh
@@ -0,0 +1,109 @@
+#!/usr/bin/env bash
+set -o errexit
+
+DIR="$(dirname $0)"
+BINARIES_DIR="$PWD/bin"
+CONTRIB_DIR="$PWD/contrib"
+export CONFIG_DIR="$PWD/test/integration/config"
+export CONFIG_DIR_PROV="$PWD/test/integration/provider"
+export GARM_CONFIG_DIR=${GARM_CONFIG_DIR:-$(mktemp -d)}
+export PROVIDER_BIN_DIR="$GARM_CONFIG_DIR/providers.d/lxd"
+export IS_GH_WORKFLOW=${IS_GH_WORKFLOW:-"true"}
+export LXD_PROVIDER_LOCATION=${LXD_PROVIDER_LOCATION:-""}
+export RUN_USER=${RUN_USER:-$USER}
+export GARM_PORT=${GARM_PORT:-"9997"}
+export GARM_SERVICE_NAME=${GARM_SERVICE_NAME:-"garm"}
+export GARM_CONFIG_FILE=${GARM_CONFIG_FILE:-"${GARM_CONFIG_DIR}/config.toml"}
+export LXD_REMOTE_SERVER=${LXD_REMOTE_SERVER:-"https://cloud-images.ubuntu.com/releases"}
+
+if [ -f "$GITHUB_ENV" ];then
+ echo "export GARM_CONFIG_DIR=${GARM_CONFIG_DIR}" >> $GITHUB_ENV
+ echo "export GARM_SERVICE_NAME=${GARM_SERVICE_NAME}" >> $GITHUB_ENV
+fi
+
+if [[ ! -f $BINARIES_DIR/garm ]] || [[ ! -f $BINARIES_DIR/garm-cli ]]; then
+ echo "ERROR: Please build GARM binaries first"
+ exit 1
+fi
+
+
+if [[ -z $GH_TOKEN ]]; then echo "ERROR: The env variable GH_TOKEN is not set"; exit 1; fi
+if [[ -z $CREDENTIALS_NAME ]]; then echo "ERROR: The env variable CREDENTIALS_NAME is not set"; exit 1; fi
+if [[ -z $GARM_BASE_URL ]]; then echo "ERROR: The env variable GARM_BASE_URL is not set"; exit 1; fi
+
+# Generate a random 32-char secret for JWT_AUTH_SECRET and DB_PASSPHRASE.
+function generate_secret() {
+ (tr -dc 'a-zA-Z0-9!@#$%^&*()_+?><~\`;' < /dev/urandom | head -c 32) 2>/dev/null
+}
+
+# Wait for a port to open at a given address.
+function wait_open_port() {
+ local ADDRESS="$1"
+ local PORT="$2"
+ local TIMEOUT=30
+ SECONDS=0
+ while true; do
+ if [[ $SECONDS -gt $TIMEOUT ]]; then
+ echo "ERROR: Port $PORT didn't open at $ADDRESS within $TIMEOUT seconds"
+ return 1
+ fi
+ nc -v -w 5 -z "$ADDRESS" "$PORT" &>/dev/null && break || sleep 1
+ done
+ echo "Port $PORT at address $ADDRESS is open"
+}
+
+export JWT_AUTH_SECRET="$(generate_secret)"
+export DB_PASSPHRASE="$(generate_secret)"
+
+if [ "$IS_GH_WORKFLOW" == "true" ]; then
+ # Group "adm" is the LXD daemon group as set by the "canonical/setup-lxd" GitHub action.
+ sudo useradd --shell /usr/bin/false --system --groups adm --no-create-home garm
+fi
+
+sudo mkdir -p ${GARM_CONFIG_DIR}
+sudo mkdir -p $PROVIDER_BIN_DIR
+sudo chown -R $RUN_USER:$RUN_USER ${PROVIDER_BIN_DIR}
+sudo chown -R $RUN_USER:$RUN_USER ${GARM_CONFIG_DIR}
+
+export LXD_PROVIDER_EXECUTABLE="$PROVIDER_BIN_DIR/garm-provider-lxd"
+export LXD_PROVIDER_CONFIG="${GARM_CONFIG_DIR}/garm-provider-lxd.toml"
+cat $CONFIG_DIR/garm-provider-lxd.toml | envsubst | sudo tee $LXD_PROVIDER_CONFIG > /dev/null
+
+function clone_and_build_lxd_provider() {
+ git clone https://github.com/cloudbase/garm-provider-lxd ~/garm-provider-lxd
+ pushd ~/garm-provider-lxd
+ CGO_ENABLED=1 go build -o $LXD_PROVIDER_EXECUTABLE
+ popd
+}
+
+if [ "$IS_GH_WORKFLOW" == "true" ]; then
+ clone_and_build_lxd_provider
+else
+ if [ -z "$LXD_PROVIDER_LOCATION" ];then
+ clone_and_build_lxd_provider
+ else
+ cp $LXD_PROVIDER_LOCATION $LXD_PROVIDER_EXECUTABLE
+ fi
+
+fi
+
+cat $CONFIG_DIR/config.toml | envsubst | sudo tee ${GARM_CONFIG_DIR}/config.toml > /dev/null
+sudo chown -R $RUN_USER:$RUN_USER ${GARM_CONFIG_DIR}
+
+sudo mkdir -p ${GARM_CONFIG_DIR}/test-provider
+sudo touch $CONFIG_DIR_PROV/config
+sudo cp $CONFIG_DIR_PROV/* ${GARM_CONFIG_DIR}/test-provider
+
+sudo mv $BINARIES_DIR/* /usr/local/bin/
+mkdir -p $HOME/.local/share/systemd/user/
+cat $CONFIG_DIR/garm.service | envsubst | sudo tee /lib/systemd/system/${GARM_SERVICE_NAME}@.service > /dev/null
+sudo chown -R $RUN_USER:$RUN_USER ${GARM_CONFIG_DIR}
+
+sudo systemctl daemon-reload
+sudo systemctl enable ${GARM_SERVICE_NAME}@${RUN_USER}
+sudo systemctl restart ${GARM_SERVICE_NAME}@${RUN_USER}
+wait_open_port 127.0.0.1 ${GARM_PORT}
+
+echo "GARM is up and running"
+echo "GARM config file is $GARM_CONFIG_FILE"
+echo "GARM service name is $GARM_SERVICE_NAME"
diff --git a/test/integration/scripts/taredown_garm.sh b/test/integration/scripts/taredown_garm.sh
new file mode 100755
index 00000000..c7b80a69
--- /dev/null
+++ b/test/integration/scripts/taredown_garm.sh
@@ -0,0 +1,26 @@
+#!/bin/bash
+
+if [ -f "$GITHUB_ENV" ];then
+ source $GITHUB_ENV
+fi
+
+if [ -z "$GARM_CONFIG_DIR" ]; then
+ echo "ERROR: GARM_CONFIG_DIR is not set"
+ exit 1
+fi
+
+if [ -z "$GARM_SERVICE_NAME" ]; then
+ echo "ERROR: GARM_SERVICE_NAME is not set"
+ exit 1
+fi
+
+if [ -f "$HOME/.local/share/systemd/user/${GARM_SERVICE_NAME}.service" ];then
+ sudo systemctl stop $GARM_SERVICE_NAME@${RUN_USER}
+ sudo systemctl disable $GARM_SERVICE_NAME@${RUN_USER}
+ sudo rm /lib/systemd/system/${GARM_SERVICE_NAME}@.service
+ sudo systemctl daemon-reload
+fi
+
+if [ -d "$GARM_CONFIG_DIR" ] && [ -f "$GARM_CONFIG_DIR/config.toml" ] && [ -f "$GARM_CONFIG_DIR/garm-provider-lxd.toml" ];then
+ rm -rf ${GARM_CONFIG_DIR}
+fi
\ No newline at end of file
diff --git a/test/integration/suite_test.go b/test/integration/suite_test.go
new file mode 100644
index 00000000..ca6b3030
--- /dev/null
+++ b/test/integration/suite_test.go
@@ -0,0 +1,225 @@
+//go:build integration
+// +build integration
+
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+package integration
+
+import (
+ "context"
+ "fmt"
+ "net/url"
+ "os"
+ "testing"
+ "time"
+
+ "github.com/go-openapi/runtime"
+ openapiRuntimeClient "github.com/go-openapi/runtime/client"
+ "github.com/stretchr/testify/suite"
+
+ "github.com/cloudbase/garm/client"
+ "github.com/cloudbase/garm/params"
+)
+
+var (
+ orgName string
+ repoName string
+ orgWebhookSecret string
+ workflowFileName string
+)
+
+type GarmSuite struct {
+ suite.Suite
+ cli *client.GarmAPI
+ authToken runtime.ClientAuthInfoWriter
+ ghToken string
+ credentialsName string
+ repo *params.Repository
+}
+
+func (suite *GarmSuite) SetupSuite() {
+ t := suite.T()
+ suite.ghToken = os.Getenv("GH_TOKEN")
+ orgWebhookSecret = os.Getenv("ORG_WEBHOOK_SECRET")
+ workflowFileName = os.Getenv("WORKFLOW_FILE_NAME")
+ baseURL := os.Getenv("GARM_BASE_URL")
+ adminPassword := os.Getenv("GARM_PASSWORD")
+ adminUsername := os.Getenv("GARM_ADMIN_USERNAME")
+ adminFullName := "GARM Admin"
+ adminEmail := "admin@example.com"
+ garmURL, err := url.Parse(baseURL)
+ suite.NoError(err, "error parsing GARM_BASE_URL")
+
+ apiPath, err := url.JoinPath(garmURL.Path, client.DefaultBasePath)
+ suite.NoError(err, "error joining path")
+
+ transportCfg := client.DefaultTransportConfig().
+ WithHost(garmURL.Host).
+ WithBasePath(apiPath).
+ WithSchemes([]string{garmURL.Scheme})
+ suite.cli = client.NewHTTPClientWithConfig(nil, transportCfg)
+
+ t.Log("First run")
+ newUser := params.NewUserParams{
+ Username: adminUsername,
+ Password: adminPassword,
+ FullName: adminFullName,
+ Email: adminEmail,
+ }
+ _, err = firstRun(suite.cli, newUser)
+ suite.NoError(err, "error at first run")
+
+ t.Log("Login")
+ loginParams := params.PasswordLoginParams{
+ Username: adminUsername,
+ Password: adminPassword,
+ }
+ token, err := login(suite.cli, loginParams)
+ suite.NoError(err, "error at login")
+ suite.authToken = openapiRuntimeClient.BearerToken(token)
+ t.Log("Log in successful")
+
+ suite.credentialsName = os.Getenv("CREDENTIALS_NAME")
+ suite.EnsureTestCredentials(suite.credentialsName, suite.ghToken, "github.com")
+
+ t.Log("Create repository")
+ orgName = os.Getenv("ORG_NAME")
+ repoName = os.Getenv("REPO_NAME")
+ repoWebhookSecret := os.Getenv("REPO_WEBHOOK_SECRET")
+ createParams := params.CreateRepoParams{
+ Owner: orgName,
+ Name: repoName,
+ CredentialsName: suite.credentialsName,
+ WebhookSecret: repoWebhookSecret,
+ }
+ suite.repo, err = createRepo(suite.cli, suite.authToken, createParams)
+ suite.NoError(err, "error creating repository")
+ suite.Equal(orgName, suite.repo.Owner, "owner name mismatch")
+ suite.Equal(repoName, suite.repo.Name, "repo name mismatch")
+ suite.Equal(suite.credentialsName, suite.repo.CredentialsName, "credentials name mismatch")
+}
+
+func (suite *GarmSuite) TearDownSuite() {
+ t := suite.T()
+ t.Log("Graceful cleanup")
+ // disable all the pools
+ pools, err := listPools(suite.cli, suite.authToken)
+ suite.NoError(err, "error listing pools")
+ enabled := false
+ poolParams := params.UpdatePoolParams{Enabled: &enabled}
+ for _, pool := range pools {
+ _, err := updatePool(suite.cli, suite.authToken, pool.ID, poolParams)
+ suite.NoError(err, "error disabling pool")
+ t.Logf("Pool %s disabled during stage graceful_cleanup", pool.ID)
+ }
+
+ // delete all the instances
+ for _, pool := range pools {
+ poolInstances, err := listPoolInstances(suite.cli, suite.authToken, pool.ID)
+ suite.NoError(err, "error listing pool instances")
+ for _, instance := range poolInstances {
+ err := deleteInstance(suite.cli, suite.authToken, instance.Name, false, false)
+ suite.NoError(err, "error deleting instance")
+ t.Logf("Instance deletion initiated for instace %s during stage graceful_cleanup", instance.Name)
+ }
+ }
+
+ // wait for all instances to be deleted
+ for _, pool := range pools {
+ err := suite.waitPoolNoInstances(pool.ID, 3*time.Minute)
+ suite.NoError(err, "error waiting for pool to have no instances")
+ }
+
+ // delete all the pools
+ for _, pool := range pools {
+ err := deletePool(suite.cli, suite.authToken, pool.ID)
+ suite.NoError(err, "error deleting pool")
+ t.Logf("Pool %s deleted during stage graceful_cleanup", pool.ID)
+ }
+
+ // delete all the repositories
+ repos, err := listRepos(suite.cli, suite.authToken)
+ suite.NoError(err, "error listing repositories")
+ for _, repo := range repos {
+ err := deleteRepo(suite.cli, suite.authToken, repo.ID)
+ suite.NoError(err, "error deleting repository")
+ t.Logf("Repo %s deleted during stage graceful_cleanup", repo.ID)
+ }
+
+ // delete all the organizations
+ orgs, err := listOrgs(suite.cli, suite.authToken)
+ suite.NoError(err, "error listing organizations")
+ for _, org := range orgs {
+ err := deleteOrg(suite.cli, suite.authToken, org.ID)
+ suite.NoError(err, "error deleting organization")
+ t.Logf("Org %s deleted during stage graceful_cleanup", org.ID)
+ }
+}
+
+func TestGarmTestSuite(t *testing.T) {
+ suite.Run(t, new(GarmSuite))
+}
+
+func (suite *GarmSuite) waitPoolNoInstances(id string, timeout time.Duration) error {
+ t := suite.T()
+ var timeWaited time.Duration // default is 0
+ var pool *params.Pool
+ var err error
+
+ t.Logf("Wait until pool with id %s has no instances", id)
+ for timeWaited < timeout {
+ pool, err = getPool(suite.cli, suite.authToken, id)
+ suite.NoError(err, "error getting pool")
+ t.Logf("Current pool has %d instances", len(pool.Instances))
+ if len(pool.Instances) == 0 {
+ return nil
+ }
+ time.Sleep(5 * time.Second)
+ timeWaited += 5 * time.Second
+ }
+
+ err = suite.dumpPoolInstancesDetails(pool.ID)
+ suite.NoError(err, "error dumping pool instances details")
+
+ return fmt.Errorf("failed to wait for pool %s to have no instances", pool.ID)
+}
+
+func (suite *GarmSuite) GhOrgRunnersCleanup(ghToken, orgName, controllerID string) error {
+ t := suite.T()
+ t.Logf("Cleanup Github runners for controller %s and org %s", controllerID, orgName)
+
+ client := getGithubClient(ghToken)
+ ghOrgRunners, _, err := client.Actions.ListOrganizationRunners(context.Background(), orgName, nil)
+ if err != nil {
+ return err
+ }
+
+ // Remove organization runners
+ controllerLabel := fmt.Sprintf("runner-controller-id:%s", controllerID)
+ for _, orgRunner := range ghOrgRunners.Runners {
+ for _, label := range orgRunner.Labels {
+ if label.GetName() == controllerLabel {
+ if _, err := client.Actions.RemoveOrganizationRunner(context.Background(), orgName, orgRunner.GetID()); err != nil {
+ // We don't fail if we can't remove a single runner. This is a
+ // best-effort attempt to remove all the orphaned runners.
+ t.Logf("Failed to remove organization runner %s: %v", orgRunner.GetName(), err)
+ break
+ }
+ t.Logf("Removed organization runner %s", orgRunner.GetName())
+ break
+ }
+ }
+ }
+ return nil
+}
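
Because the test files in this package carry the integration build tag, the suite is invisible to a plain go test run; it is presumably invoked with something like go test -v -tags integration ./test/integration/... once setup-garm.sh has brought a server up and GH_TOKEN, CREDENTIALS_NAME, ORG_NAME, REPO_NAME and the related variables are exported.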
diff --git a/test/integration/utils.go b/test/integration/utils.go
new file mode 100644
index 00000000..1fa35b5e
--- /dev/null
+++ b/test/integration/utils.go
@@ -0,0 +1,48 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+package integration
+
+import (
+ "encoding/json"
+ "fmt"
+ "log/slog"
+)
+
+func printJSONResponse(resp interface{}) error {
+ b, err := json.MarshalIndent(resp, "", " ")
+ if err != nil {
+ return err
+ }
+ slog.Info(string(b))
+ return nil
+}
+
+type apiCodeGetter interface {
+ IsCode(code int) bool
+}
+
+func expectAPIStatusCode(err error, expectedCode int) error {
+ if err == nil {
+ return fmt.Errorf("expected error, got nil")
+ }
+ apiErr, ok := err.(apiCodeGetter)
+ if !ok {
+ return fmt.Errorf("expected API error, got %v (%T)", err, err)
+ }
+ if !apiErr.IsCode(expectedCode) {
+ return fmt.Errorf("expected status code %d: %v", expectedCode, err)
+ }
+
+ return nil
+}
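
A hedged usage sketch for expectAPIStatusCode, e.g. asserting from inside a suite method that fetching a nonexistent instance surfaces a 404 (the instance name is hypothetical):

_, err := getInstance(suite.cli, suite.authToken, "no-such-instance") // hypothetical name
if checkErr := expectAPIStatusCode(err, 404); checkErr != nil {
    suite.T().Fatal(checkErr)
}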
diff --git a/testdata/config.toml b/testdata/config.toml
index c0532d8e..337c0dd6 100644
--- a/testdata/config.toml
+++ b/testdata/config.toml
@@ -1,31 +1,42 @@
[default]
-# This URL is used by instances to send back status messages as they install
-# the github actions runner. Status messages can be seen by querying the
-# runner status in garm.
-# Note: If you're using a reverse proxy in front of your garm installation,
-# this URL needs to point to the address of the reverse proxy. Using TLS is
-# highly encouraged.
-callback_url = "https://garm.example.com/api/v1/callbacks/status"
-
-# This URL is used by instances to retrieve information they need to set themselves
-# up. Access to this URL is granted using the same JWT token used to send back
-# status updates. Once the instance transitions to "installed" or "failed" state,
-# access to both the status and metadata endpoints is disabled.
-# Note: If you're using a reverse proxy in front of your garm installation,
-# this URL needs to point to the address of the reverse proxy. Using TLS is
-# highly encouraged.
-metadata_url = "https://garm.example.com/api/v1/metadata"
+# This option enables GARM to manage webhooks for repositories and organizations. Set this
+# to false to disable the API routes that manage webhooks.
+#
+# When managing webhooks, the PAT you're using must have the necessary access to create/list/delete
+# webhooks for repositories or organizations.
+enable_webhook_management = true
+# DEPRECATED: Use the [logging] section to set this option.
# Uncomment this line if you'd like to log to a file instead of standard output.
# log_file = "/tmp/runner-manager.log"
+# DEPRECATED: Use the [logging] section to set this option.
# Enable streaming logs via web sockets. Use garm-cli debug-log.
enable_log_streamer = false
# Enable the golang debug server. See the documentation in the "doc" folder for more information.
debug_server = false
+
+[logging]
+# Uncomment this line if you'd like to log to a file instead of standard output.
+# log_file = "/tmp/runner-manager.log"
+
+# enable_log_streamer enables streaming the logs over websockets
+enable_log_streamer = true
+# log_format is the output format of the logs. GARM uses structured logging and can
+# output as "text" or "json"
+log_format = "text"
+# log_level is the logging level GARM will output. Available log levels are:
+# * debug
+# * info
+# * warn
+# * error
+log_level = "debug"
+# log_source will output information about the function that generated the log line.
+log_source = false
+
[metrics]
# Toggle metrics. If set to false, the API endpoint for metrics collection will
# be disabled.
@@ -71,6 +82,8 @@ time_to_live = "8760h"
certificate = ""
# The path on disk to the corresponding private key for the certificate.
key = ""
+ [apiserver.webui]
+ enable = true
[database]
# Turn on/off debugging for database queries.
@@ -86,6 +99,11 @@ time_to_live = "8760h"
[database.sqlite3]
# Path on disk to the sqlite3 database file.
db_file = "/etc/garm/garm.db"
+ # busy_timeout_seconds is an optional parameter that will set the
+ # sqlite3_busy_timeout to the specified value. This is useful when
+ # GARM may be under heavy load and the database is locked by some
+ # other go routine. The default value is 0.
+ busy_timeout_seconds = 5
# Currently, providers are defined statically in the config. This is due to the fact
# that we have not yet added support for storing secrets in something like Barbican
@@ -94,15 +112,21 @@ time_to_live = "8760h"
# provider must not be changed, or the pool will no longer work. Make sure you remove any
# pools before removing or changing a provider.
[[provider]]
- # An arbitrary string describing this provider.
- name = "lxd_local"
- # Provider type. Garm is designed to allow creating providers which are used to spin
- # up compute resources, which in turn will run the github runner software.
- # Currently, LXD is the only supprted provider, but more will be written in the future.
- provider_type = "lxd"
- # A short description of this provider. The name, description and provider types will
- # be included in the information returned by the API when listing available providers.
- description = "Local LXD installation"
+# An arbitrary string describing this provider.
+name = "lxd_local"
+# Provider type. Garm is designed to allow creating providers which are used to spin
+# up compute resources, which in turn will run the github runner software.
+# Currently, LXD is the only supported provider, but more will be written in the future.
+provider_type = "lxd"
+# A short description of this provider. The name, description and provider types will
+# be included in the information returned by the API when listing available providers.
+description = "Local LXD installation"
+# DisableJITConfig explicitly disables JIT configuration and forces runner registration
+# tokens to be used. This may happen if a provider has not yet been updated to support
+# JIT configuration.
+#
+# Set this to true if your provider does not support JIT configuration.
+disable_jit_config = false
[provider.lxd]
# the path to the unix socket that LXD is listening on. This works if garm and LXD
# are on the same system, and this option takes precedence over the "url" option,
@@ -173,6 +197,12 @@ time_to_live = "8760h"
name = "openstack_external"
description = "external openstack provider"
provider_type = "external"
+# DisableJITConfig explicitly disables JIT configuration and forces runner registration
+# tokens to be used. This may happen if a provider has not yet been updated to support
+# JIT configuration.
+#
+# Set this to true if your provider does not support JIT configuration.
+disable_jit_config = false
[provider.external]
# config file passed to the executable via GARM_PROVIDER_CONFIG_FILE environment variable
config_file = "/etc/garm/providers.d/openstack/keystonerc"
@@ -185,6 +215,12 @@ provider_type = "external"
name = "azure_external"
description = "external azure provider"
provider_type = "external"
+# DisableJITConfig explicitly disables JIT configuration and forces runner registration
+# tokens to be used. This may happen if a provider has not yet been updated to support
+# JIT configuration.
+#
+# Set this to true if your provider does not support JIT configuration.
+disable_jit_config = false
[provider.external]
# config file passed to the executable via GARM_PROVIDER_CONFIG_FILE environment variable
config_file = "/etc/garm/providers.d/azure/config.sh"
@@ -192,32 +228,3 @@ provider_type = "external"
# anything (bash, a binary, python, etc). See documentation in this repo on how to write an
# external provider.
provider_executable = "/etc/garm/providers.d/azure/garm-external-provider"
-
-# This is a list of credentials that you can define as part of the repository
-# or organization definitions. They are not saved inside the database, as there
-# is no Vault integration (yet). This will change in the future.
-# Credentials defined here can be listed using the API. Obviously, only the name
-# and descriptions are returned.
-[[github]]
- name = "gabriel"
- description = "github token or user gabriel"
- # This is a personal token with access to the repositories and organizations
- # you plan on adding to garm. The "workflow" option needs to be selected in order
- # to work with repositories, and the admin:org needs to be set if you plan on
- # adding an organization.
- oauth2_token = "super secret token"
- # base_url (optional) is the URL at which your GitHub Enterprise Server can be accessed.
- # If these credentials are for github.com, leave this setting blank
- base_url = "https://ghe.example.com"
- # api_base_url (optional) is the base URL where the GitHub Enterprise Server API can be accessed.
- # Leave this blank if these credentials are for github.com.
- api_base_url = "https://ghe.example.com"
- # upload_base_url (optional) is the base URL where the GitHub Enterprise Server upload API can be accessed.
- # Leave this blank if these credentials are for github.com, or if you don't have a separate URL
- # for the upload API.
- upload_base_url = "https://api.ghe.example.com"
- # ca_cert_bundle (optional) is the CA certificate bundle in PEM format that will be used by the github
- # client to talk to the API. This bundle will also be sent to all runners as bootstrap params.
- # Use this option if you're using a self signed certificate.
- # Leave this blank if you're using github.com or if your certificare is signed by a valid CA.
- ca_cert_bundle = "/etc/garm/ghe.crt"
diff --git a/testdata/db/v0.1.4/garm.db b/testdata/db/v0.1.4/garm.db
new file mode 100644
index 00000000..7308e31f
Binary files /dev/null and b/testdata/db/v0.1.4/garm.db differ
diff --git a/util/appdefaults/appdefaults.go b/util/appdefaults/appdefaults.go
index d0d86976..cc53f794 100644
--- a/util/appdefaults/appdefaults.go
+++ b/util/appdefaults/appdefaults.go
@@ -1,3 +1,16 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
package appdefaults
import "time"
@@ -27,4 +40,16 @@ const (
 // uploadBaseURL is the default URL for github uploads.
GithubDefaultUploadBaseURL = "https://uploads.github.com/"
+
+ // DefaultMetricsUpdateInterval is the default interval at which metrics data is updated.
+ DefaultMetricsUpdateInterval = 60 * time.Second
)
+
+var Version string
+
+func GetVersion() string {
+ if Version == "" {
+ Version = "v0.0.0-unknown"
+ }
+ return Version
+}
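
The exported Version variable is presumably injected at link time, e.g. with go build -ldflags "-X github.com/cloudbase/garm/util/appdefaults.Version=$(git describe --tags)"; GetVersion simply falls back to the v0.0.0-unknown sentinel when nothing was injected.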
diff --git a/util/github/client.go b/util/github/client.go
new file mode 100644
index 00000000..b4ca32e5
--- /dev/null
+++ b/util/github/client.go
@@ -0,0 +1,628 @@
+// Copyright 2024 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package github
+
+import (
+ "context"
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "log/slog"
+ "net/http"
+ "net/url"
+ "strings"
+
+ "github.com/google/go-github/v72/github"
+
+ runnerErrors "github.com/cloudbase/garm-provider-common/errors"
+ "github.com/cloudbase/garm/cache"
+ "github.com/cloudbase/garm/metrics"
+ "github.com/cloudbase/garm/params"
+ "github.com/cloudbase/garm/runner/common"
+)
+
+type githubClient struct {
+ *github.ActionsService
+ org *github.OrganizationsService
+ repo *github.RepositoriesService
+ enterprise *github.EnterpriseService
+ rateLimit *github.RateLimitService
+
+ entity params.ForgeEntity
+ cli *github.Client
+}
+
+func (g *githubClient) ListEntityHooks(ctx context.Context, opts *github.ListOptions) (ret []*github.Hook, response *github.Response, err error) {
+ metrics.GithubOperationCount.WithLabelValues(
+ "ListHooks", // label: operation
+ g.entity.LabelScope(), // label: scope
+ ).Inc()
+ defer func() {
+ if err != nil {
+ metrics.GithubOperationFailedCount.WithLabelValues(
+ "ListHooks", // label: operation
+ g.entity.LabelScope(), // label: scope
+ ).Inc()
+ }
+ }()
+ switch g.entity.EntityType {
+ case params.ForgeEntityTypeRepository:
+ ret, response, err = g.repo.ListHooks(ctx, g.entity.Owner, g.entity.Name, opts)
+ case params.ForgeEntityTypeOrganization:
+ ret, response, err = g.org.ListHooks(ctx, g.entity.Owner, opts)
+ default:
+ return nil, nil, fmt.Errorf("invalid entity type: %s", g.entity.EntityType)
+ }
+ return ret, response, err
+}
+
+func (g *githubClient) GetEntityHook(ctx context.Context, id int64) (ret *github.Hook, err error) {
+ metrics.GithubOperationCount.WithLabelValues(
+ "GetHook", // label: operation
+ g.entity.LabelScope(), // label: scope
+ ).Inc()
+ defer func() {
+ if err != nil {
+ metrics.GithubOperationFailedCount.WithLabelValues(
+ "GetHook", // label: operation
+ g.entity.LabelScope(), // label: scope
+ ).Inc()
+ }
+ }()
+ switch g.entity.EntityType {
+ case params.ForgeEntityTypeRepository:
+ ret, _, err = g.repo.GetHook(ctx, g.entity.Owner, g.entity.Name, id)
+ case params.ForgeEntityTypeOrganization:
+ ret, _, err = g.org.GetHook(ctx, g.entity.Owner, id)
+ default:
+ return nil, errors.New("invalid entity type")
+ }
+ return ret, err
+}
+
+func (g *githubClient) createGithubEntityHook(ctx context.Context, hook *github.Hook) (ret *github.Hook, err error) {
+ metrics.GithubOperationCount.WithLabelValues(
+ "CreateHook", // label: operation
+ g.entity.LabelScope(), // label: scope
+ ).Inc()
+ defer func() {
+ if err != nil {
+ metrics.GithubOperationFailedCount.WithLabelValues(
+ "CreateHook", // label: operation
+ g.entity.LabelScope(), // label: scope
+ ).Inc()
+ }
+ }()
+ switch g.entity.EntityType {
+ case params.ForgeEntityTypeRepository:
+ ret, _, err = g.repo.CreateHook(ctx, g.entity.Owner, g.entity.Name, hook)
+ case params.ForgeEntityTypeOrganization:
+ ret, _, err = g.org.CreateHook(ctx, g.entity.Owner, hook)
+ default:
+ return nil, errors.New("invalid entity type")
+ }
+ return ret, err
+}
+
+func (g *githubClient) CreateEntityHook(ctx context.Context, hook *github.Hook) (ret *github.Hook, err error) {
+ switch g.entity.Credentials.ForgeType {
+ case params.GithubEndpointType:
+ return g.createGithubEntityHook(ctx, hook)
+ case params.GiteaEndpointType:
+ return g.createGiteaEntityHook(ctx, hook)
+ default:
+ return nil, errors.New("invalid forge type")
+ }
+}
+
+func (g *githubClient) DeleteEntityHook(ctx context.Context, id int64) (ret *github.Response, err error) {
+ metrics.GithubOperationCount.WithLabelValues(
+ "DeleteHook", // label: operation
+ g.entity.LabelScope(), // label: scope
+ ).Inc()
+ defer func() {
+ if err != nil {
+ metrics.GithubOperationFailedCount.WithLabelValues(
+ "DeleteHook", // label: operation
+ g.entity.LabelScope(), // label: scope
+ ).Inc()
+ }
+ }()
+ switch g.entity.EntityType {
+ case params.ForgeEntityTypeRepository:
+ ret, err = g.repo.DeleteHook(ctx, g.entity.Owner, g.entity.Name, id)
+ case params.ForgeEntityTypeOrganization:
+ ret, err = g.org.DeleteHook(ctx, g.entity.Owner, id)
+ default:
+ return nil, errors.New("invalid entity type")
+ }
+ return ret, err
+}
+
+func (g *githubClient) PingEntityHook(ctx context.Context, id int64) (ret *github.Response, err error) {
+ metrics.GithubOperationCount.WithLabelValues(
+ "PingHook", // label: operation
+ g.entity.LabelScope(), // label: scope
+ ).Inc()
+ defer func() {
+ if err != nil {
+ metrics.GithubOperationFailedCount.WithLabelValues(
+ "PingHook", // label: operation
+ g.entity.LabelScope(), // label: scope
+ ).Inc()
+ }
+ }()
+ switch g.entity.EntityType {
+ case params.ForgeEntityTypeRepository:
+ ret, err = g.repo.PingHook(ctx, g.entity.Owner, g.entity.Name, id)
+ case params.ForgeEntityTypeOrganization:
+ ret, err = g.org.PingHook(ctx, g.entity.Owner, id)
+ default:
+ return nil, errors.New("invalid entity type")
+ }
+ return ret, err
+}
+
+func (g *githubClient) ListEntityRunners(ctx context.Context, opts *github.ListRunnersOptions) (*github.Runners, *github.Response, error) {
+ var ret *github.Runners
+ var response *github.Response
+ var err error
+
+ metrics.GithubOperationCount.WithLabelValues(
+ "ListEntityRunners", // label: operation
+ g.entity.LabelScope(), // label: scope
+ ).Inc()
+ defer func() {
+ if err != nil {
+ metrics.GithubOperationFailedCount.WithLabelValues(
+ "ListEntityRunners", // label: operation
+ g.entity.LabelScope(), // label: scope
+ ).Inc()
+ }
+ }()
+
+ switch g.entity.EntityType {
+ case params.ForgeEntityTypeRepository:
+ ret, response, err = g.ListRunners(ctx, g.entity.Owner, g.entity.Name, opts)
+ case params.ForgeEntityTypeOrganization:
+ ret, response, err = g.ListOrganizationRunners(ctx, g.entity.Owner, opts)
+ case params.ForgeEntityTypeEnterprise:
+ ret, response, err = g.enterprise.ListRunners(ctx, g.entity.Owner, opts)
+ default:
+ return nil, nil, errors.New("invalid entity type")
+ }
+
+ return ret, response, err
+}
+
+func (g *githubClient) ListEntityRunnerApplicationDownloads(ctx context.Context) ([]*github.RunnerApplicationDownload, *github.Response, error) {
+ var ret []*github.RunnerApplicationDownload
+ var response *github.Response
+ var err error
+
+ metrics.GithubOperationCount.WithLabelValues(
+ "ListEntityRunnerApplicationDownloads", // label: operation
+ g.entity.LabelScope(), // label: scope
+ ).Inc()
+ defer func() {
+ if err != nil {
+ metrics.GithubOperationFailedCount.WithLabelValues(
+ "ListEntityRunnerApplicationDownloads", // label: operation
+ g.entity.LabelScope(), // label: scope
+ ).Inc()
+ }
+ }()
+
+ switch g.entity.EntityType {
+ case params.ForgeEntityTypeRepository:
+ ret, response, err = g.ListRunnerApplicationDownloads(ctx, g.entity.Owner, g.entity.Name)
+ case params.ForgeEntityTypeOrganization:
+ ret, response, err = g.ListOrganizationRunnerApplicationDownloads(ctx, g.entity.Owner)
+ case params.ForgeEntityTypeEnterprise:
+ ret, response, err = g.enterprise.ListRunnerApplicationDownloads(ctx, g.entity.Owner)
+ default:
+ return nil, nil, errors.New("invalid entity type")
+ }
+
+ return ret, response, err
+}
+
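+// parseError maps an API response (or error) from GitHub/Gitea to one of
+// the well known runnerErrors sentinel values, so callers can use errors.Is.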
+func parseError(response *github.Response, err error) error {
+ var statusCode int
+ if response != nil {
+ statusCode = response.StatusCode
+ }
+
+ switch statusCode {
+ case http.StatusNotFound:
+ return runnerErrors.ErrNotFound
+ case http.StatusUnauthorized:
+ return runnerErrors.ErrUnauthorized
+ case http.StatusUnprocessableEntity:
+ return runnerErrors.ErrBadRequest
+ default:
+ if statusCode >= 100 && statusCode < 300 {
+ return nil
+ }
+ if err != nil {
+ errResp := &github.ErrorResponse{}
+ if errors.As(err, &errResp) && errResp.Response != nil {
+ switch errResp.Response.StatusCode {
+ case http.StatusNotFound:
+ return runnerErrors.ErrNotFound
+ case http.StatusUnauthorized:
+ return runnerErrors.ErrUnauthorized
+ case http.StatusUnprocessableEntity:
+ return runnerErrors.ErrBadRequest
+ default:
+ // ugly hack. Gitea returns 500 if we try to remove a runner that does not exist.
+ if strings.Contains(err.Error(), "does not exist") {
+ return runnerErrors.ErrNotFound
+ }
+ return err
+ }
+ }
+ return err
+ }
+ return errors.New("unknown error")
+ }
+}
+
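+// RemoveEntityRunner removes a runner from the entity, normalizing API
+// errors through parseError so a missing runner yields ErrNotFound.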
+func (g *githubClient) RemoveEntityRunner(ctx context.Context, runnerID int64) error {
+ var response *github.Response
+ var err error
+
+ metrics.GithubOperationCount.WithLabelValues(
+ "RemoveEntityRunner", // label: operation
+ g.entity.LabelScope(), // label: scope
+ ).Inc()
+ defer func() {
+ if err != nil {
+ metrics.GithubOperationFailedCount.WithLabelValues(
+ "RemoveEntityRunner", // label: operation
+ g.entity.LabelScope(), // label: scope
+ ).Inc()
+ }
+ }()
+
+ switch g.entity.EntityType {
+ case params.ForgeEntityTypeRepository:
+ response, err = g.RemoveRunner(ctx, g.entity.Owner, g.entity.Name, runnerID)
+ case params.ForgeEntityTypeOrganization:
+ response, err = g.RemoveOrganizationRunner(ctx, g.entity.Owner, runnerID)
+ case params.ForgeEntityTypeEnterprise:
+ response, err = g.enterprise.RemoveRunner(ctx, g.entity.Owner, runnerID)
+ default:
+ return errors.New("invalid entity type")
+ }
+
+ if err := parseError(response, err); err != nil {
+ return fmt.Errorf("error removing runner %d: %w", runnerID, err)
+ }
+
+ return nil
+}
+
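+// CreateEntityRegistrationToken creates a runner registration token at the
+// repository, organization or enterprise level.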
+func (g *githubClient) CreateEntityRegistrationToken(ctx context.Context) (*github.RegistrationToken, *github.Response, error) {
+ var ret *github.RegistrationToken
+ var response *github.Response
+ var err error
+
+ metrics.GithubOperationCount.WithLabelValues(
+ "CreateEntityRegistrationToken", // label: operation
+ g.entity.LabelScope(), // label: scope
+ ).Inc()
+ defer func() {
+ if err != nil {
+ metrics.GithubOperationFailedCount.WithLabelValues(
+ "CreateEntityRegistrationToken", // label: operation
+ g.entity.LabelScope(), // label: scope
+ ).Inc()
+ }
+ }()
+
+ switch g.entity.EntityType {
+ case params.ForgeEntityTypeRepository:
+ ret, response, err = g.CreateRegistrationToken(ctx, g.entity.Owner, g.entity.Name)
+ case params.ForgeEntityTypeOrganization:
+ ret, response, err = g.CreateOrganizationRegistrationToken(ctx, g.entity.Owner)
+ case params.ForgeEntityTypeEnterprise:
+ ret, response, err = g.enterprise.CreateRegistrationToken(ctx, g.entity.Owner)
+ default:
+ return nil, nil, errors.New("invalid entity type")
+ }
+
+ return ret, response, err
+}
+
+func (g *githubClient) getOrganizationRunnerGroupIDByName(ctx context.Context, entity params.ForgeEntity, rgName string) (int64, error) {
+ opts := github.ListOrgRunnerGroupOptions{
+ ListOptions: github.ListOptions{
+ PerPage: 100,
+ },
+ }
+
+ for {
+ metrics.GithubOperationCount.WithLabelValues(
+ "ListOrganizationRunnerGroups", // label: operation
+ entity.LabelScope(), // label: scope
+ ).Inc()
+ runnerGroups, ghResp, err := g.ListOrganizationRunnerGroups(ctx, entity.Owner, &opts)
+ if err != nil {
+ metrics.GithubOperationFailedCount.WithLabelValues(
+ "ListOrganizationRunnerGroups", // label: operation
+ entity.LabelScope(), // label: scope
+ ).Inc()
+ if ghResp != nil && ghResp.StatusCode == http.StatusUnauthorized {
+ return 0, fmt.Errorf("error fetching runners: %w", runnerErrors.ErrUnauthorized)
+ }
+ return 0, fmt.Errorf("error fetching runners: %w", err)
+ }
+ for _, runnerGroup := range runnerGroups.RunnerGroups {
+ if runnerGroup.Name != nil && *runnerGroup.Name == rgName {
+ return *runnerGroup.ID, nil
+ }
+ }
+ if ghResp.NextPage == 0 {
+ break
+ }
+ opts.Page = ghResp.NextPage
+ }
+ return 0, runnerErrors.NewNotFoundError("runner group %s not found", rgName)
+}
+
+func (g *githubClient) getEnterpriseRunnerGroupIDByName(ctx context.Context, entity params.ForgeEntity, rgName string) (int64, error) {
+ opts := github.ListEnterpriseRunnerGroupOptions{
+ ListOptions: github.ListOptions{
+ PerPage: 100,
+ },
+ }
+
+ for {
+ metrics.GithubOperationCount.WithLabelValues(
+ "ListRunnerGroups", // label: operation
+ entity.LabelScope(), // label: scope
+ ).Inc()
+ runnerGroups, ghResp, err := g.enterprise.ListRunnerGroups(ctx, entity.Owner, &opts)
+ if err != nil {
+ metrics.GithubOperationFailedCount.WithLabelValues(
+ "ListRunnerGroups", // label: operation
+ entity.LabelScope(), // label: scope
+ ).Inc()
+ if ghResp != nil && ghResp.StatusCode == http.StatusUnauthorized {
+ return 0, fmt.Errorf("error fetching runners: %w", runnerErrors.ErrUnauthorized)
+ }
+ return 0, fmt.Errorf("error fetching runners: %w", err)
+ }
+ for _, runnerGroup := range runnerGroups.RunnerGroups {
+ if runnerGroup.Name != nil && *runnerGroup.Name == rgName {
+ return *runnerGroup.ID, nil
+ }
+ }
+ if ghResp.NextPage == 0 {
+ break
+ }
+ opts.Page = ghResp.NextPage
+ }
+ return 0, runnerErrors.NewNotFoundError("runner group %s not found", rgName)
+}
+
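+// GetEntityRunnerGroupIDByName resolves a runner group name to its ID at
+// the org or enterprise level. Repositories only support the default
+// runner group, so ID 1 is returned early for them.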
+func (g *githubClient) GetEntityRunnerGroupIDByName(ctx context.Context, runnerGroupName string) (int64, error) {
+ var rgID int64 = 1
+
+ if g.entity.EntityType == params.ForgeEntityTypeRepository {
+ // This is a repository. Runner groups are supported at the org and
+ // enterprise levels. Return the default runner group id, early.
+ return rgID, nil
+ }
+
+ var ok bool
+ var err error
+ // Attempt to get the runner group ID from the cache. Cached entries expire after 1 hour.
+ if runnerGroupName != "" && !strings.EqualFold(runnerGroupName, "default") {
+ rgID, ok = cache.GetEntityRunnerGroup(g.entity.ID, runnerGroupName)
+ if !ok || rgID == 0 {
+ switch g.entity.EntityType {
+ case params.ForgeEntityTypeOrganization:
+ rgID, err = g.getOrganizationRunnerGroupIDByName(ctx, g.entity, runnerGroupName)
+ case params.ForgeEntityTypeEnterprise:
+ rgID, err = g.getEnterpriseRunnerGroupIDByName(ctx, g.entity, runnerGroupName)
+ }
+
+ if err != nil {
+ return 0, fmt.Errorf("getting runner group ID: %w", err)
+ }
+ }
+ // Refresh the cache entry so we avoid fetching the same runner group more than once an hour.
+ cache.SetEntityRunnerGroup(g.entity.ID, runnerGroupName, rgID)
+ }
+ return rgID, nil
+}
+
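+// GetEntityJITConfig creates a just-in-time runner configuration for the
+// given instance. If a later step fails after the runner was created, the
+// runner is removed so we do not leak registrations.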
+func (g *githubClient) GetEntityJITConfig(ctx context.Context, instance string, pool params.Pool, labels []string) (jitConfigMap map[string]string, runner *github.Runner, err error) {
+ rgID, err := g.GetEntityRunnerGroupIDByName(ctx, pool.GitHubRunnerGroup)
+ if err != nil {
+ return nil, nil, fmt.Errorf("failed to get runner group: %w", err)
+ }
+ slog.DebugContext(ctx, "using runner group", "group_name", pool.GitHubRunnerGroup, "runner_group_id", rgID)
+ req := github.GenerateJITConfigRequest{
+ Name: instance,
+ RunnerGroupID: rgID,
+ Labels: labels,
+ // nolint:golangci-lint,godox
+ // TODO(gabriel-samfira): Should we make this configurable?
+ WorkFolder: github.Ptr("_work"),
+ }
+
+ metrics.GithubOperationCount.WithLabelValues(
+ "GetEntityJITConfig", // label: operation
+ g.entity.LabelScope(), // label: scope
+ ).Inc()
+
+ var ret *github.JITRunnerConfig
+ var response *github.Response
+
+ switch g.entity.EntityType {
+ case params.ForgeEntityTypeRepository:
+ ret, response, err = g.GenerateRepoJITConfig(ctx, g.entity.Owner, g.entity.Name, &req)
+ case params.ForgeEntityTypeOrganization:
+ ret, response, err = g.GenerateOrgJITConfig(ctx, g.entity.Owner, &req)
+ case params.ForgeEntityTypeEnterprise:
+ ret, response, err = g.enterprise.GenerateEnterpriseJITConfig(ctx, g.entity.Owner, &req)
+ default:
+ return nil, nil, errors.New("invalid entity type")
+ }
+ if err != nil {
+ metrics.GithubOperationFailedCount.WithLabelValues(
+ "GetEntityJITConfig", // label: operation
+ g.entity.LabelScope(), // label: scope
+ ).Inc()
+ if response != nil && response.StatusCode == http.StatusUnauthorized {
+ return nil, nil, fmt.Errorf("failed to get JIT config: %w", runnerErrors.ErrUnauthorized)
+ }
+ return nil, nil, fmt.Errorf("failed to get JIT config: %w", err)
+ }
+
+ defer func(run *github.Runner) {
+ if err != nil && run != nil {
+ if innerErr := g.RemoveEntityRunner(ctx, run.GetID()); innerErr != nil {
+ slog.With(slog.Any("error", innerErr)).ErrorContext(
+ ctx, "failed to remove runner",
+ "runner_id", run.GetID(), string(g.entity.EntityType), g.entity.String())
+ }
+ }
+ }(ret.Runner)
+
+ decoded, err := base64.StdEncoding.DecodeString(ret.GetEncodedJITConfig())
+ if err != nil {
+ return nil, nil, fmt.Errorf("failed to decode JIT config: %w", err)
+ }
+
+ var jitConfig map[string]string
+ if err := json.Unmarshal(decoded, &jitConfig); err != nil {
+ return nil, nil, fmt.Errorf("failed to unmarshal JIT config: %w", err)
+ }
+
+ return jitConfig, ret.Runner, nil
+}
+
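+// RateLimit returns the current rate limits for the credentials in use.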
+func (g *githubClient) RateLimit(ctx context.Context) (*github.RateLimits, error) {
+ metrics.GithubOperationCount.WithLabelValues(
+ "GetRateLimit", // label: operation
+ g.entity.LabelScope(), // label: scope
+ ).Inc()
+ limits, resp, err := g.rateLimit.Get(ctx)
+ if err != nil {
+ metrics.GithubOperationFailedCount.WithLabelValues(
+ "GetRateLimit", // label: operation
+ g.entity.LabelScope(), // label: scope
+ ).Inc()
+ }
+ if err := parseError(resp, err); err != nil {
+ return nil, fmt.Errorf("getting rate limit: %w", err)
+ }
+ return limits, nil
+}
+
+func (g *githubClient) GetEntity() params.ForgeEntity {
+ return g.entity
+}
+
+func (g *githubClient) GithubBaseURL() *url.URL {
+ return g.cli.BaseURL
+}
+
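+// NewRateLimitClient returns a minimal client that can only query rate
+// limit information for the given credentials.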
+func NewRateLimitClient(ctx context.Context, credentials params.ForgeCredentials) (common.RateLimitClient, error) {
+ httpClient, err := credentials.GetHTTPClient(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("error fetching http client: %w", err)
+ }
+
+ slog.DebugContext(
+ ctx, "creating rate limit client",
+ "base_url", credentials.APIBaseURL,
+ "upload_url", credentials.UploadBaseURL)
+
+ ghClient, err := github.NewClient(httpClient).WithEnterpriseURLs(
+ credentials.APIBaseURL, credentials.UploadBaseURL)
+ if err != nil {
+ return nil, fmt.Errorf("error fetching github client: %w", err)
+ }
+ cli := &githubClient{
+ rateLimit: ghClient.RateLimit,
+ cli: ghClient,
+ }
+
+ return cli, nil
+}
+
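+// withGiteaURLs points the go-github client at a Gitea server, making sure
+// the base path ends in /api/v1/, where Gitea exposes its
+// github-compatible API.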
+func withGiteaURLs(client *github.Client, apiBaseURL string) (*github.Client, error) {
+ if client == nil {
+ return nil, errors.New("client is nil")
+ }
+
+ if apiBaseURL == "" {
+ return nil, errors.New("invalid gitea URLs")
+ }
+
+ parsedBaseURL, err := url.ParseRequestURI(apiBaseURL)
+ if err != nil {
+ return nil, fmt.Errorf("error parsing gitea base URL: %w", err)
+ }
+
+ if !strings.HasSuffix(parsedBaseURL.Path, "/") {
+ parsedBaseURL.Path += "/"
+ }
+
+ if !strings.HasSuffix(parsedBaseURL.Path, "/api/v1/") {
+ parsedBaseURL.Path += "api/v1/"
+ }
+
+ client.BaseURL = parsedBaseURL
+ client.UploadURL = parsedBaseURL
+
+ return client, nil
+}
+
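+// Client returns a GithubClient for the given entity, wiring up the
+// appropriate base URLs for either GitHub (enterprise URLs) or Gitea
+// endpoints.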
+func Client(ctx context.Context, entity params.ForgeEntity) (common.GithubClient, error) {
+ httpClient, err := entity.Credentials.GetHTTPClient(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("error fetching http client: %w", err)
+ }
+
+ slog.DebugContext(
+ ctx, "creating client for entity",
+ "entity", entity.String(), "base_url", entity.Credentials.APIBaseURL,
+ "upload_url", entity.Credentials.UploadBaseURL)
+
+ ghClient := github.NewClient(httpClient)
+ switch entity.Credentials.ForgeType {
+ case params.GithubEndpointType:
+ ghClient, err = ghClient.WithEnterpriseURLs(entity.Credentials.APIBaseURL, entity.Credentials.UploadBaseURL)
+ case params.GiteaEndpointType:
+ ghClient, err = withGiteaURLs(ghClient, entity.Credentials.APIBaseURL)
+ }
+
+ if err != nil {
+ return nil, fmt.Errorf("error fetching github client: %w", err)
+ }
+
+ cli := &githubClient{
+ ActionsService: ghClient.Actions,
+ org: ghClient.Organizations,
+ repo: ghClient.Repositories,
+ enterprise: ghClient.Enterprise,
+ rateLimit: ghClient.RateLimit,
+ cli: ghClient,
+ entity: entity,
+ }
+
+ return cli, nil
+}
diff --git a/util/github/gitea.go b/util/github/gitea.go
new file mode 100644
index 00000000..5d35190b
--- /dev/null
+++ b/util/github/gitea.go
@@ -0,0 +1,116 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package github
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "net/http"
+
+ "github.com/google/go-github/v72/github"
+
+ "github.com/cloudbase/garm/metrics"
+ "github.com/cloudbase/garm/params"
+)
+
+type createGiteaHookOptions struct {
+ Type string `json:"type"`
+ Config map[string]string `json:"config"`
+ Events []string `json:"events"`
+ BranchFilter string `json:"branch_filter"`
+ Active bool `json:"active"`
+ AuthorizationHeader string `json:"authorization_header"`
+}
+
+func (g *githubClient) createGiteaRepoHook(ctx context.Context, owner, name string, hook *github.Hook) (ret *github.Hook, err error) {
+ u := fmt.Sprintf("repos/%v/%v/hooks", owner, name)
+ createOpts := &createGiteaHookOptions{
+ Type: "gitea",
+ Events: hook.Events,
+ Active: hook.GetActive(),
+ BranchFilter: "*",
+ Config: map[string]string{
+ "content_type": hook.GetConfig().GetContentType(),
+ "url": hook.GetConfig().GetURL(),
+ "http_method": "post",
+ "secret": hook.GetConfig().GetSecret(),
+ },
+ }
+
+ req, err := g.cli.NewRequest(http.MethodPost, u, createOpts)
+ if err != nil {
+ return nil, fmt.Errorf("failed to construct request: %w", err)
+ }
+
+ ret = new(github.Hook)
+ _, err = g.cli.Do(ctx, req, ret)
+ if err != nil {
+ return nil, fmt.Errorf("request failed for %s: %w", req.URL.String(), err)
+ }
+ return ret, nil
+}
+
+func (g *githubClient) createGiteaOrgHook(ctx context.Context, owner string, hook *github.Hook) (ret *github.Hook, err error) {
+ u := fmt.Sprintf("orgs/%v/hooks", owner)
+ createOpts := &createGiteaHookOptions{
+ Type: "gitea",
+ Events: hook.Events,
+ Active: hook.GetActive(),
+ BranchFilter: "*",
+ Config: map[string]string{
+ "content_type": hook.GetConfig().GetContentType(),
+ "url": hook.GetConfig().GetURL(),
+ "http_method": "post",
+ "secret": hook.GetConfig().GetSecret(),
+ },
+ }
+
+ req, err := g.cli.NewRequest(http.MethodPost, u, createOpts)
+ if err != nil {
+ return nil, fmt.Errorf("failed to construct request: %w", err)
+ }
+
+ ret = new(github.Hook)
+ _, err = g.cli.Do(ctx, req, ret)
+ if err != nil {
+ return nil, fmt.Errorf("request failed for %s: %w", req.URL.String(), err)
+ }
+ return ret, nil
+}
+
+func (g *githubClient) createGiteaEntityHook(ctx context.Context, hook *github.Hook) (ret *github.Hook, err error) {
+ metrics.GithubOperationCount.WithLabelValues(
+ "CreateHook", // label: operation
+ g.entity.LabelScope(), // label: scope
+ ).Inc()
+ defer func() {
+ if err != nil {
+ metrics.GithubOperationFailedCount.WithLabelValues(
+ "CreateHook", // label: operation
+ g.entity.LabelScope(), // label: scope
+ ).Inc()
+ }
+ }()
+ switch g.entity.EntityType {
+ case params.ForgeEntityTypeRepository:
+ ret, err = g.createGiteaRepoHook(ctx, g.entity.Owner, g.entity.Name, hook)
+ case params.ForgeEntityTypeOrganization:
+ ret, err = g.createGiteaOrgHook(ctx, g.entity.Owner, hook)
+ default:
+ return nil, errors.New("invalid entity type")
+ }
+ return ret, err
+}
diff --git a/util/github/scalesets/client.go b/util/github/scalesets/client.go
new file mode 100644
index 00000000..6b4b1bab
--- /dev/null
+++ b/util/github/scalesets/client.go
@@ -0,0 +1,104 @@
+// Copyright 2024 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package scalesets
+
+import (
+ "fmt"
+ "io"
+ "net/http"
+ "sync"
+
+ "github.com/google/go-github/v72/github"
+
+ runnerErrors "github.com/cloudbase/garm-provider-common/errors"
+ "github.com/cloudbase/garm/params"
+ "github.com/cloudbase/garm/runner/common"
+)
+
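+// NewClient returns a ScaleSetClient that talks to the Actions service
+// endpoints used by runner scale sets, on behalf of the given entity.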
+func NewClient(cli common.GithubClient) (*ScaleSetClient, error) {
+ return &ScaleSetClient{
+ ghCli: cli,
+ httpClient: &http.Client{},
+ }, nil
+}
+
+type ScaleSetClient struct {
+ ghCli common.GithubClient
+ httpClient *http.Client
+
+ // Scale sets are apparently available through the same security
+ // context that a normal runner would use. We connect to the same
+ // API endpoint a runner would connect to, in order to fetch jobs.
+ // To do this, we use a runner registration token.
+ runnerRegistrationToken *github.RegistrationToken
+ // actionsServiceInfo holds the pipeline URL and the JWT token to
+ // access it. The pipeline URL is the base URL where we can access
+ // the scale set endpoints.
+ actionsServiceInfo *params.ActionsServiceAdminInfoResponse
+
+ mux sync.Mutex
+}
+
+func (s *ScaleSetClient) SetGithubClient(cli common.GithubClient) {
+ s.mux.Lock()
+ defer s.mux.Unlock()
+ s.ghCli = cli
+}
+
+func (s *ScaleSetClient) GetGithubClient() (common.GithubClient, error) {
+ s.mux.Lock()
+ defer s.mux.Unlock()
+ if s.ghCli == nil {
+ return nil, fmt.Errorf("github client is not set in scaleset client")
+ }
+ return s.ghCli, nil
+}
+
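+// Do dispatches the request and converts non-2xx responses into the well
+// known runnerErrors values, returning the raw response on success.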
+func (s *ScaleSetClient) Do(req *http.Request) (*http.Response, error) {
+ if s.httpClient == nil {
+ return nil, fmt.Errorf("http client is not initialized")
+ }
+
+ resp, err := s.httpClient.Do(req)
+ if err != nil {
+ return nil, fmt.Errorf("failed to dispatch HTTP request: %w", err)
+ }
+
+ if resp.StatusCode >= 200 && resp.StatusCode < 300 {
+ return resp, nil
+ }
+
+ defer resp.Body.Close()
+ body, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read body: %w", err)
+ }
+
+ switch resp.StatusCode {
+ case http.StatusNotFound:
+ return nil, runnerErrors.NewNotFoundError("resource %s not found: %q", req.URL.String(), string(body))
+ case http.StatusBadRequest:
+ return nil, runnerErrors.NewBadRequestError("bad request while calling %s: %q", req.URL.String(), string(body))
+ case http.StatusConflict:
+ return nil, runnerErrors.NewConflictError("conflict while calling %s: %q", req.URL.String(), string(body))
+ case http.StatusUnauthorized, http.StatusForbidden:
+ return nil, runnerErrors.ErrUnauthorized
+ default:
+ return nil, fmt.Errorf("request to %s failed with status code %d: %q", req.URL.String(), resp.StatusCode, string(body))
+ }
+}
diff --git a/util/github/scalesets/jobs.go b/util/github/scalesets/jobs.go
new file mode 100644
index 00000000..defc9506
--- /dev/null
+++ b/util/github/scalesets/jobs.go
@@ -0,0 +1,88 @@
+// Copyright 2024 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package scalesets
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "net/http"
+
+ "github.com/cloudbase/garm/params"
+)
+
+type acquireJobsResult struct {
+ Count int `json:"count"`
+ Value []int64 `json:"value"`
+}
+
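+// AcquireJobs attempts to acquire the job requests with the given IDs for
+// this scale set and returns the IDs that were actually acquired.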
+func (s *ScaleSetClient) AcquireJobs(ctx context.Context, runnerScaleSetID int, messageQueueAccessToken string, requestIDs []int64) ([]int64, error) {
+ u := fmt.Sprintf("%s/%d/acquirejobs?api-version=6.0-preview", scaleSetEndpoint, runnerScaleSetID)
+
+ body, err := json.Marshal(requestIDs)
+ if err != nil {
+ return nil, err
+ }
+
+ req, err := s.newActionsRequest(ctx, http.MethodPost, u, bytes.NewBuffer(body))
+ if err != nil {
+ return nil, fmt.Errorf("failed to construct request: %w", err)
+ }
+
+ req.Header.Set("Content-Type", "application/json")
+ req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", messageQueueAccessToken))
+
+ resp, err := s.Do(req)
+ if err != nil {
+ return nil, fmt.Errorf("request failed for %s: %w", req.URL.String(), err)
+ }
+ defer resp.Body.Close()
+
+ var acquiredJobs acquireJobsResult
+ err = json.NewDecoder(resp.Body).Decode(&acquiredJobs)
+ if err != nil {
+ return nil, fmt.Errorf("failed to decode response: %w", err)
+ }
+
+ return acquiredJobs.Value, nil
+}
+
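+// GetAcquirableJobs lists the jobs that can currently be acquired by the
+// given scale set. A 204 response means there are no jobs available.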
+func (s *ScaleSetClient) GetAcquirableJobs(ctx context.Context, runnerScaleSetID int) (params.AcquirableJobList, error) {
+ path := fmt.Sprintf("%s/%d/acquirablejobs", scaleSetEndpoint, runnerScaleSetID)
+
+ req, err := s.newActionsRequest(ctx, http.MethodGet, path, nil)
+ if err != nil {
+ return params.AcquirableJobList{}, fmt.Errorf("failed to construct request: %w", err)
+ }
+
+ resp, err := s.Do(req)
+ if err != nil {
+ return params.AcquirableJobList{}, fmt.Errorf("request failed for %s: %w", req.URL.String(), err)
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode == http.StatusNoContent {
+ return params.AcquirableJobList{Count: 0, Jobs: []params.AcquirableJob{}}, nil
+ }
+
+ var acquirableJobList params.AcquirableJobList
+ err = json.NewDecoder(resp.Body).Decode(&acquirableJobList)
+ if err != nil {
+ return params.AcquirableJobList{}, fmt.Errorf("failed to decode response: %w", err)
+ }
+
+ return acquirableJobList, nil
+}
diff --git a/util/github/scalesets/message_sessions.go b/util/github/scalesets/message_sessions.go
new file mode 100644
index 00000000..8fafc2c4
--- /dev/null
+++ b/util/github/scalesets/message_sessions.go
@@ -0,0 +1,291 @@
+// Copyright 2024 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package scalesets
+
+import (
+ "bytes"
+ "context"
+ "crypto/rand"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "log/slog"
+ "math/big"
+ "net/http"
+ "net/url"
+ "strconv"
+ "sync"
+ "time"
+
+ runnerErrors "github.com/cloudbase/garm-provider-common/errors"
+ "github.com/cloudbase/garm/params"
+ garmUtil "github.com/cloudbase/garm/util"
+)
+
+const maxCapacityHeader = "X-ScaleSetMaxCapacity"
+
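+// MessageSession represents a long poll session against the scale set
+// message queue. A background loop keeps the session token refreshed until
+// the session is closed or its context is done.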
+type MessageSession struct {
+ ssCli *ScaleSetClient
+ session *params.RunnerScaleSetSession
+ ctx context.Context
+
+ done chan struct{}
+ closed bool
+ lastErr error
+
+ mux sync.Mutex
+}
+
+func (m *MessageSession) Close() error {
+ m.mux.Lock()
+ defer m.mux.Unlock()
+ if m.closed {
+ return nil
+ }
+ close(m.done)
+ m.closed = true
+ return nil
+}
+
+func (m *MessageSession) MessageQueueAccessToken() string {
+ return m.session.MessageQueueAccessToken
+}
+
+func (m *MessageSession) LastError() error {
+ return m.lastErr
+}
+
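+// loop periodically refreshes the message queue token until the session is
+// closed. Refresh errors are retried on the next tick and surfaced via
+// LastError.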
+func (m *MessageSession) loop() {
+ slog.DebugContext(m.ctx, "starting message session refresh loop", "session_id", m.session.SessionID.String())
+ timer := time.NewTicker(1 * time.Minute)
+ defer timer.Stop()
+ defer m.Close()
+
+ m.mux.Lock()
+ closed := m.closed
+ m.mux.Unlock()
+ if closed {
+ slog.DebugContext(m.ctx, "message session refresh loop closed")
+ return
+ }
+ for {
+ select {
+ case <-m.ctx.Done():
+ slog.DebugContext(m.ctx, "message session refresh loop context done")
+ return
+ case <-m.done:
+ slog.DebugContext(m.ctx, "message session refresh loop done")
+ return
+ case <-timer.C:
+ if err := m.maybeRefreshToken(m.ctx); err != nil {
+ // We endlessly retry. If it's a transient error, it should eventually
+ // work, if it's credentials issues, users can update them.
+ slog.With(slog.Any("error", err)).ErrorContext(m.ctx, "failed to refresh message queue token")
+ m.lastErr = err
+ continue
+ }
+ m.lastErr = nil
+ }
+ }
+}
+
+func (m *MessageSession) SessionsRelativeURL() (string, error) {
+ if m.session == nil {
+ return "", fmt.Errorf("session is nil")
+ }
+ if m.session.RunnerScaleSet == nil {
+ return "", fmt.Errorf("runner scale set is nil")
+ }
+ relativePath := fmt.Sprintf("%s/%d/sessions/%s", scaleSetEndpoint, m.session.RunnerScaleSet.ID, m.session.SessionID.String())
+ return relativePath, nil
+}
+
+func (m *MessageSession) Refresh(ctx context.Context) error {
+ slog.DebugContext(ctx, "refreshing message session token", "session_id", m.session.SessionID.String())
+ m.mux.Lock()
+ defer m.mux.Unlock()
+
+ relPath, err := m.SessionsRelativeURL()
+ if err != nil {
+ return fmt.Errorf("failed to get session URL: %w", err)
+ }
+ req, err := m.ssCli.newActionsRequest(ctx, http.MethodPatch, relPath, nil)
+ if err != nil {
+ return fmt.Errorf("failed to create session refresh request: %w", err)
+ }
+ resp, err := m.ssCli.Do(req)
+ if err != nil {
+ return fmt.Errorf("failed to refresh message session: %w", err)
+ }
+ defer resp.Body.Close()
+
+ var refreshedSession params.RunnerScaleSetSession
+ if err := json.NewDecoder(resp.Body).Decode(&refreshedSession); err != nil {
+ return fmt.Errorf("failed to decode response: %w", err)
+ }
+ slog.DebugContext(ctx, "refreshed message session token")
+ m.session = &refreshedSession
+ return nil
+}
+
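+// maybeRefreshToken refreshes the session when it is set to expire within
+// the next ~10 minutes, with up to 30 seconds of jitter added to spread
+// out refreshes.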
+func (m *MessageSession) maybeRefreshToken(ctx context.Context) error {
+ if m.session == nil {
+ return fmt.Errorf("session is nil")
+ }
+
+ expiresAt, err := m.session.ExiresAt()
+ if err != nil {
+ return fmt.Errorf("failed to get expires at: %w", err)
+ }
+ // add up to 30 seconds of jitter on top of the refresh window
+ randInt, err := rand.Int(rand.Reader, big.NewInt(30))
+ if err != nil {
+ return fmt.Errorf("failed to generate jitter: %w", err)
+ }
+ expiresIn := time.Duration(randInt.Int64())*time.Second + 10*time.Minute
+ slog.DebugContext(ctx, "checking if message session token needs refresh", "expires_at", expiresAt)
+ if m.session.ExpiresIn(expiresIn) {
+ if err := m.Refresh(ctx); err != nil {
+ return fmt.Errorf("failed to refresh message queue token: %w", err)
+ }
+ }
+
+ return nil
+}
+
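+// GetMessage long polls the message queue for the next message after
+// lastMessageID, advertising maxCapacity to the service. A 202 response
+// means there is no message available and an empty message is returned.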
+func (m *MessageSession) GetMessage(ctx context.Context, lastMessageID int64, maxCapacity uint) (params.RunnerScaleSetMessage, error) {
+ u, err := url.Parse(m.session.MessageQueueURL)
+ if err != nil {
+ return params.RunnerScaleSetMessage{}, err
+ }
+
+ if lastMessageID > 0 {
+ q := u.Query()
+ q.Set("lastMessageId", strconv.FormatInt(lastMessageID, 10))
+ u.RawQuery = q.Encode()
+ }
+
+ req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil)
+ if err != nil {
+ return params.RunnerScaleSetMessage{}, fmt.Errorf("failed to create request: %w", err)
+ }
+
+ req.Header.Set("Accept", "application/json; api-version=6.0-preview")
+ req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", m.session.MessageQueueAccessToken))
+ req.Header.Set(maxCapacityHeader, fmt.Sprintf("%d", maxCapacity))
+
+ resp, err := m.ssCli.Do(req)
+ if err != nil {
+ return params.RunnerScaleSetMessage{}, fmt.Errorf("request to %s failed: %w", req.URL.String(), err)
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode == http.StatusAccepted {
+ slog.DebugContext(ctx, "no messages available in queue")
+ return params.RunnerScaleSetMessage{}, nil
+ }
+
+ var message params.RunnerScaleSetMessage
+ if err := json.NewDecoder(resp.Body).Decode(&message); err != nil {
+ return params.RunnerScaleSetMessage{}, fmt.Errorf("failed to decode response: %w", err)
+ }
+ return message, nil
+}
+
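+// DeleteMessage acknowledges a message by deleting it from the queue.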
+func (m *MessageSession) DeleteMessage(ctx context.Context, messageID int64) error {
+ u, err := url.Parse(m.session.MessageQueueURL)
+ if err != nil {
+ return err
+ }
+
+ u.Path = fmt.Sprintf("%s/%d", u.Path, messageID)
+
+ req, err := http.NewRequestWithContext(ctx, http.MethodDelete, u.String(), nil)
+ if err != nil {
+ return err
+ }
+
+ req.Header.Set("Content-Type", "application/json")
+ req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", m.session.MessageQueueAccessToken))
+
+ resp, err := m.ssCli.Do(req)
+ if err != nil {
+ return err
+ }
+ resp.Body.Close()
+
+ return nil
+}
+
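+// CreateMessageSession creates a message queue session for the given scale
+// set and starts the background refresh loop for it.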
+func (s *ScaleSetClient) CreateMessageSession(ctx context.Context, runnerScaleSetID int, owner string) (*MessageSession, error) {
+ path := fmt.Sprintf("%s/%d/sessions", scaleSetEndpoint, runnerScaleSetID)
+
+ newSession := params.RunnerScaleSetSession{
+ OwnerName: owner,
+ }
+
+ requestData, err := json.Marshal(newSession)
+ if err != nil {
+ return nil, fmt.Errorf("failed to marshal session data: %w", err)
+ }
+
+ req, err := s.newActionsRequest(ctx, http.MethodPost, path, bytes.NewBuffer(requestData))
+ if err != nil {
+ return nil, fmt.Errorf("failed to create request: %w", err)
+ }
+
+ resp, err := s.Do(req)
+ if err != nil {
+ return nil, fmt.Errorf("failed to execute request to %s: %w", req.URL.String(), err)
+ }
+ defer resp.Body.Close()
+
+ var createdSession params.RunnerScaleSetSession
+ if err := json.NewDecoder(resp.Body).Decode(&createdSession); err != nil {
+ return nil, fmt.Errorf("failed to decode response: %w", err)
+ }
+
+ msgSessionCtx := garmUtil.WithSlogContext(
+ ctx,
+ slog.Any("session_id", createdSession.SessionID.String()))
+ sess := &MessageSession{
+ ssCli: s,
+ session: &createdSession,
+ ctx: msgSessionCtx,
+ done: make(chan struct{}),
+ closed: false,
+ }
+ go sess.loop()
+
+ return sess, nil
+}
+
+func (s *ScaleSetClient) DeleteMessageSession(ctx context.Context, session *MessageSession) error {
+ path, err := session.SessionsRelativeURL()
+ if err != nil {
+ return fmt.Errorf("failed to delete session: %w", err)
+ }
+
+ req, err := s.newActionsRequest(ctx, http.MethodDelete, path, nil)
+ if err != nil {
+ return fmt.Errorf("failed to create message delete request: %w", err)
+ }
+
+ resp, err := s.Do(req)
+ if err != nil {
+ if errors.Is(err, runnerErrors.ErrNotFound) {
+ // the session is already gone; nothing to clean up.
+ return nil
+ }
+ return fmt.Errorf("failed to delete message session: %w", err)
+ }
+ resp.Body.Close()
+ return nil
+}
diff --git a/util/github/scalesets/runners.go b/util/github/scalesets/runners.go
new file mode 100644
index 00000000..79c321bc
--- /dev/null
+++ b/util/github/scalesets/runners.go
@@ -0,0 +1,154 @@
+// Copyright 2024 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package scalesets
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "net/http"
+
+ runnerErrors "github.com/cloudbase/garm-provider-common/errors"
+ "github.com/cloudbase/garm/params"
+)
+
+type scaleSetJitRunnerConfig struct {
+ Name string `json:"name"`
+ WorkFolder string `json:"workFolder"`
+}
+
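+// GenerateJitRunnerConfig creates a JIT runner config for a scale set
+// runner with the given name. The work folder is currently hardcoded to
+// "_work".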
+func (s *ScaleSetClient) GenerateJitRunnerConfig(ctx context.Context, runnerName string, scaleSetID int) (params.RunnerScaleSetJitRunnerConfig, error) {
+ runnerSettings := scaleSetJitRunnerConfig{
+ Name: runnerName,
+ WorkFolder: "_work",
+ }
+
+ body, err := json.Marshal(runnerSettings)
+ if err != nil {
+ return params.RunnerScaleSetJitRunnerConfig{}, err
+ }
+
+ if err := s.ensureAdminInfo(ctx); err != nil {
+ return params.RunnerScaleSetJitRunnerConfig{}, fmt.Errorf("failed to ensure admin info: %w", err)
+ }
+
+ jitConfigPath := fmt.Sprintf("%s/%d/generatejitconfig", scaleSetEndpoint, scaleSetID)
+ req, err := s.newActionsRequest(ctx, http.MethodPost, jitConfigPath, bytes.NewBuffer(body))
+ if err != nil {
+ return params.RunnerScaleSetJitRunnerConfig{}, fmt.Errorf("failed to create request: %w", err)
+ }
+
+ resp, err := s.Do(req)
+ if err != nil {
+ return params.RunnerScaleSetJitRunnerConfig{}, fmt.Errorf("request failed for %s: %w", req.URL.String(), err)
+ }
+ defer resp.Body.Close()
+
+ var runnerJitConfig params.RunnerScaleSetJitRunnerConfig
+ if err := json.NewDecoder(resp.Body).Decode(&runnerJitConfig); err != nil {
+ return params.RunnerScaleSetJitRunnerConfig{}, fmt.Errorf("failed to decode response: %w", err)
+ }
+ return runnerJitConfig, nil
+}
+
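+// GetRunner fetches a runner registered with the Actions service by its ID.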
+func (s *ScaleSetClient) GetRunner(ctx context.Context, runnerID int64) (params.RunnerReference, error) {
+ path := fmt.Sprintf("%s/%d", runnerEndpoint, runnerID)
+
+ req, err := s.newActionsRequest(ctx, http.MethodGet, path, nil)
+ if err != nil {
+ return params.RunnerReference{}, fmt.Errorf("failed to construct request: %w", err)
+ }
+
+ resp, err := s.Do(req)
+ if err != nil {
+ return params.RunnerReference{}, fmt.Errorf("request failed for %s: %w", req.URL.String(), err)
+ }
+ defer resp.Body.Close()
+
+ var runnerReference params.RunnerReference
+ if err := json.NewDecoder(resp.Body).Decode(&runnerReference); err != nil {
+ return params.RunnerReference{}, fmt.Errorf("failed to decode response: %w", err)
+ }
+
+ return runnerReference, nil
+}
+
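+// ListAllRunners lists all runners registered in the Actions service pool.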
+func (s *ScaleSetClient) ListAllRunners(ctx context.Context) (params.RunnerReferenceList, error) {
+ req, err := s.newActionsRequest(ctx, http.MethodGet, runnerEndpoint, nil)
+ if err != nil {
+ return params.RunnerReferenceList{}, fmt.Errorf("failed to construct request: %w", err)
+ }
+
+ resp, err := s.Do(req)
+ if err != nil {
+ return params.RunnerReferenceList{}, fmt.Errorf("request failed for %s: %w", req.URL.String(), err)
+ }
+ defer resp.Body.Close()
+
+ var runnerList params.RunnerReferenceList
+ if err := json.NewDecoder(resp.Body).Decode(&runnerList); err != nil {
+ return params.RunnerReferenceList{}, fmt.Errorf("failed to decode response: %w", err)
+ }
+
+ return runnerList, nil
+}
+
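+// GetRunnerByName looks up a runner by its agent name. Exactly one match
+// is expected.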
+func (s *ScaleSetClient) GetRunnerByName(ctx context.Context, runnerName string) (params.RunnerReference, error) {
+ path := fmt.Sprintf("%s?agentName=%s", runnerEndpoint, runnerName)
+
+ req, err := s.newActionsRequest(ctx, http.MethodGet, path, nil)
+ if err != nil {
+ return params.RunnerReference{}, fmt.Errorf("failed to construct request: %w", err)
+ }
+
+ resp, err := s.Do(req)
+ if err != nil {
+ return params.RunnerReference{}, fmt.Errorf("request failed for %s: %w", req.URL.String(), err)
+ }
+ defer resp.Body.Close()
+
+ var runnerList params.RunnerReferenceList
+ if err := json.NewDecoder(resp.Body).Decode(&runnerList); err != nil {
+ return params.RunnerReference{}, fmt.Errorf("failed to decode response: %w", err)
+ }
+
+ if runnerList.Count == 0 {
+ return params.RunnerReference{}, fmt.Errorf("could not find runner with name %q: %w", runnerName, runnerErrors.ErrNotFound)
+ }
+
+ if runnerList.Count > 1 {
+ return params.RunnerReference{}, fmt.Errorf("multiple runners found with name %q", runnerName)
+ }
+
+ return runnerList.RunnerReferences[0], nil
+}
+
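+// RemoveRunner removes a runner from the Actions service pool by its ID.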
+func (s *ScaleSetClient) RemoveRunner(ctx context.Context, runnerID int64) error {
+ path := fmt.Sprintf("%s/%d", runnerEndpoint, runnerID)
+
+ req, err := s.newActionsRequest(ctx, http.MethodDelete, path, nil)
+ if err != nil {
+ return fmt.Errorf("failed to construct request: %w", err)
+ }
+
+ resp, err := s.Do(req)
+ if err != nil {
+ return fmt.Errorf("request failed for %s: %w", req.URL.String(), err)
+ }
+
+ resp.Body.Close()
+ return nil
+}
diff --git a/util/github/scalesets/scalesets.go b/util/github/scalesets/scalesets.go
new file mode 100644
index 00000000..2aae493a
--- /dev/null
+++ b/util/github/scalesets/scalesets.go
@@ -0,0 +1,209 @@
+// Copyright 2024 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package scalesets
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "net/http/httputil"
+
+ runnerErrors "github.com/cloudbase/garm-provider-common/errors"
+ "github.com/cloudbase/garm/params"
+)
+
+const (
+ runnerEndpoint = "_apis/distributedtask/pools/0/agents"
+ scaleSetEndpoint = "_apis/runtime/runnerscalesets"
+)
+
+const (
+ HeaderActionsActivityID = "ActivityId"
+ HeaderGitHubRequestID = "X-GitHub-Request-Id"
+)
+
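+// GetRunnerScaleSetByNameAndRunnerGroup fetches the scale set with the
+// given name inside a runner group. Scale set names are unique within a
+// group, so at most one result is expected.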
+func (s *ScaleSetClient) GetRunnerScaleSetByNameAndRunnerGroup(ctx context.Context, runnerGroupID int, name string) (params.RunnerScaleSet, error) {
+ path := fmt.Sprintf("%s?runnerGroupId=%d&name=%s", scaleSetEndpoint, runnerGroupID, name)
+ req, err := s.newActionsRequest(ctx, http.MethodGet, path, nil)
+ if err != nil {
+ return params.RunnerScaleSet{}, err
+ }
+
+ resp, err := s.Do(req)
+ if err != nil {
+ return params.RunnerScaleSet{}, err
+ }
+ defer resp.Body.Close()
+
+ var runnerScaleSetList *params.RunnerScaleSetsResponse
+ if err := json.NewDecoder(resp.Body).Decode(&runnerScaleSetList); err != nil {
+ return params.RunnerScaleSet{}, fmt.Errorf("failed to decode response: %w", err)
+ }
+ if runnerScaleSetList.Count == 0 {
+ return params.RunnerScaleSet{}, runnerErrors.NewNotFoundError("runner scale set with name %s and runner group ID %d was not found", name, runnerGroupID)
+ }
+
+ // Runner scale sets must have a unique name. Attempting to create a runner scale set with the same name as
+ // an existing scale set will result in a Bad Request (400) error.
+ return runnerScaleSetList.RunnerScaleSets[0], nil
+}
+
+func (s *ScaleSetClient) GetRunnerScaleSetByID(ctx context.Context, runnerScaleSetID int) (params.RunnerScaleSet, error) {
+ path := fmt.Sprintf("%s/%d", scaleSetEndpoint, runnerScaleSetID)
+ req, err := s.newActionsRequest(ctx, http.MethodGet, path, nil)
+ if err != nil {
+ return params.RunnerScaleSet{}, err
+ }
+
+ resp, err := s.Do(req)
+ if err != nil {
+ return params.RunnerScaleSet{}, fmt.Errorf("failed to get runner scaleset with ID %d: %w", runnerScaleSetID, err)
+ }
+ defer resp.Body.Close()
+
+ var runnerScaleSet params.RunnerScaleSet
+ if err := json.NewDecoder(resp.Body).Decode(&runnerScaleSet); err != nil {
+ return params.RunnerScaleSet{}, fmt.Errorf("failed to decode response: %w", err)
+ }
+ return runnerScaleSet, nil
+}
+
+// ListRunnerScaleSets lists all runner scale sets in a github entity.
+func (s *ScaleSetClient) ListRunnerScaleSets(ctx context.Context) (*params.RunnerScaleSetsResponse, error) {
+ req, err := s.newActionsRequest(ctx, http.MethodGet, scaleSetEndpoint, nil)
+ if err != nil {
+ return nil, err
+ }
+ resp, err := s.Do(req)
+ if err != nil {
+ return nil, fmt.Errorf("failed to list runner scale sets: %w", err)
+ }
+ defer resp.Body.Close()
+
+ var runnerScaleSetList params.RunnerScaleSetsResponse
+ if err := json.NewDecoder(resp.Body).Decode(&runnerScaleSetList); err != nil {
+ return nil, fmt.Errorf("failed to decode response: %w", err)
+ }
+
+ return &runnerScaleSetList, nil
+}
+
+// CreateRunnerScaleSet creates a new runner scale set in the target GitHub entity.
+func (s *ScaleSetClient) CreateRunnerScaleSet(ctx context.Context, runnerScaleSet *params.RunnerScaleSet) (params.RunnerScaleSet, error) {
+ body, err := json.Marshal(runnerScaleSet)
+ if err != nil {
+ return params.RunnerScaleSet{}, err
+ }
+
+ req, err := s.newActionsRequest(ctx, http.MethodPost, scaleSetEndpoint, bytes.NewReader(body))
+ if err != nil {
+ return params.RunnerScaleSet{}, err
+ }
+
+ resp, err := s.Do(req)
+ if err != nil {
+ return params.RunnerScaleSet{}, fmt.Errorf("failed to create runner scale set: %w", err)
+ }
+ defer resp.Body.Close()
+
+ var createdRunnerScaleSet params.RunnerScaleSet
+ if err := json.NewDecoder(resp.Body).Decode(&createdRunnerScaleSet); err != nil {
+ return params.RunnerScaleSet{}, fmt.Errorf("failed to decode response: %w", err)
+ }
+ return createdRunnerScaleSet, nil
+}
+
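+// UpdateRunnerScaleSet patches an existing runner scale set and returns
+// the updated object.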
+func (s *ScaleSetClient) UpdateRunnerScaleSet(ctx context.Context, runnerScaleSetID int, runnerScaleSet params.RunnerScaleSet) (params.RunnerScaleSet, error) {
+ path := fmt.Sprintf("%s/%d", scaleSetEndpoint, runnerScaleSetID)
+
+ body, err := json.Marshal(runnerScaleSet)
+ if err != nil {
+ return params.RunnerScaleSet{}, fmt.Errorf("failed to marshal request: %w", err)
+ }
+
+ req, err := s.newActionsRequest(ctx, http.MethodPatch, path, bytes.NewReader(body))
+ if err != nil {
+ return params.RunnerScaleSet{}, fmt.Errorf("failed to create request: %w", err)
+ }
+
+ resp, err := s.Do(req)
+ if err != nil {
+ return params.RunnerScaleSet{}, fmt.Errorf("failed to make request: %w", err)
+ }
+ defer resp.Body.Close()
+
+ var ret params.RunnerScaleSet
+ if err := json.NewDecoder(resp.Body).Decode(&ret); err != nil {
+ return params.RunnerScaleSet{}, fmt.Errorf("failed to decode response: %w", err)
+ }
+ return ret, nil
+}
+
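+// DeleteRunnerScaleSet removes a runner scale set by ID. Anything other
+// than a 204 response is treated as an error.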
+func (s *ScaleSetClient) DeleteRunnerScaleSet(ctx context.Context, runnerScaleSetID int) error {
+ path := fmt.Sprintf("%s/%d", scaleSetEndpoint, runnerScaleSetID)
+ req, err := s.newActionsRequest(ctx, http.MethodDelete, path, nil)
+ if err != nil {
+ return err
+ }
+
+ client := &http.Client{}
+ resp, err := client.Do(req)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode != http.StatusNoContent {
+ return fmt.Errorf("failed to delete scale set with code %d", resp.StatusCode)
+ }
+
+ return nil
+}
+
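+// GetRunnerGroupByName resolves a runner group by name, erroring when the
+// group is missing or when multiple groups share the same name.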
+func (s *ScaleSetClient) GetRunnerGroupByName(ctx context.Context, runnerGroup string) (params.RunnerGroup, error) {
+ path := fmt.Sprintf("_apis/runtime/runnergroups/?groupName=%s", runnerGroup)
+ req, err := s.newActionsRequest(ctx, http.MethodGet, path, nil)
+ if err != nil {
+ return params.RunnerGroup{}, err
+ }
+
+ resp, err := s.Do(req)
+ if err != nil {
+ return params.RunnerGroup{}, fmt.Errorf("failed to make request: %w", err)
+ }
+ defer resp.Body.Close()
+
+ var runnerGroupList params.RunnerGroupList
+ err = json.NewDecoder(resp.Body).Decode(&runnerGroupList)
+ if err != nil {
+ return params.RunnerGroup{}, fmt.Errorf("failed to decode response: %w", err)
+ }
+
+ if runnerGroupList.Count == 0 {
+ return params.RunnerGroup{}, runnerErrors.NewNotFoundError("runner group %s does not exist", runnerGroup)
+ }
+
+ if runnerGroupList.Count > 1 {
+ return params.RunnerGroup{}, runnerErrors.NewConflictError("multiple runner groups exist with the same name (%s)", runnerGroup)
+ }
+
+ return runnerGroupList.RunnerGroups[0], nil
+}
diff --git a/util/github/scalesets/token.go b/util/github/scalesets/token.go
new file mode 100644
index 00000000..1491b748
--- /dev/null
+++ b/util/github/scalesets/token.go
@@ -0,0 +1,105 @@
+// Copyright 2024 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package scalesets
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+ "time"
+
+ "github.com/cloudbase/garm/params"
+)
+
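+// getActionServiceInfo exchanges the runner registration token for the
+// Actions service (pipeline) URL and the JWT needed to access it, via the
+// runner-registration endpoint.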
+func (s *ScaleSetClient) getActionServiceInfo(ctx context.Context) (params.ActionsServiceAdminInfoResponse, error) {
+ regPath := "/actions/runner-registration"
+ baseURL := s.ghCli.GithubBaseURL()
+ url, err := baseURL.Parse(regPath)
+ if err != nil {
+ return params.ActionsServiceAdminInfoResponse{}, fmt.Errorf("failed to parse url: %w", err)
+ }
+
+ entity := s.ghCli.GetEntity()
+ body := params.ActionsServiceAdminInfoRequest{
+ URL: entity.ForgeURL(),
+ RunnerEvent: "register",
+ }
+
+ buf := &bytes.Buffer{}
+ enc := json.NewEncoder(buf)
+ enc.SetEscapeHTML(false)
+
+ if err := enc.Encode(body); err != nil {
+ return params.ActionsServiceAdminInfoResponse{}, err
+ }
+ req, err := http.NewRequestWithContext(ctx, http.MethodPost, url.String(), buf)
+ if err != nil {
+ return params.ActionsServiceAdminInfoResponse{}, fmt.Errorf("failed to create request: %w", err)
+ }
+
+ req.Header.Set("Content-Type", "application/json")
+ req.Header.Set("Authorization", fmt.Sprintf("RemoteAuth %s", *s.runnerRegistrationToken.Token))
+
+ resp, err := s.Do(req)
+ if err != nil {
+ return params.ActionsServiceAdminInfoResponse{}, fmt.Errorf("failed to get actions service admin info: %w", err)
+ }
+ defer resp.Body.Close()
+
+ data, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return params.ActionsServiceAdminInfoResponse{}, fmt.Errorf("failed to read response body: %w", err)
+ }
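+ // the service may prepend a UTF-8 BOM to the response body; strip it
+ // before unmarshaling.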
+ data = bytes.TrimPrefix(data, []byte("\xef\xbb\xbf"))
+
+ var info params.ActionsServiceAdminInfoResponse
+ if err := json.Unmarshal(data, &info); err != nil {
+ return params.ActionsServiceAdminInfoResponse{}, fmt.Errorf("failed to decode response: %w", err)
+ }
+
+ return info, nil
+}
+
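+// ensureAdminInfo makes sure we hold a valid runner registration token and
+// fresh Actions service admin info, refreshing either when it is within 2
+// minutes of expiry.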
+func (s *ScaleSetClient) ensureAdminInfo(ctx context.Context) error {
+ s.mux.Lock()
+ defer s.mux.Unlock()
+
+ var expiresAt time.Time
+ if s.runnerRegistrationToken != nil {
+ expiresAt = s.runnerRegistrationToken.GetExpiresAt().Time
+ }
+
+ now := time.Now().UTC().Add(2 * time.Minute)
+ if now.After(expiresAt) || s.runnerRegistrationToken == nil {
+ token, _, err := s.ghCli.CreateEntityRegistrationToken(ctx)
+ if err != nil {
+ return fmt.Errorf("failed to fetch runner registration token: %w", err)
+ }
+ s.runnerRegistrationToken = token
+ }
+
+ if s.actionsServiceInfo == nil || s.actionsServiceInfo.ExpiresIn(2*time.Minute) {
+ info, err := s.getActionServiceInfo(ctx)
+ if err != nil {
+ return fmt.Errorf("failed to get action service info: %w", err)
+ }
+ s.actionsServiceInfo = &info
+ }
+
+ return nil
+}
diff --git a/util/github/scalesets/util.go b/util/github/scalesets/util.go
new file mode 100644
index 00000000..e8387e63
--- /dev/null
+++ b/util/github/scalesets/util.go
@@ -0,0 +1,65 @@
+// Copyright 2024 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package scalesets
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "path"
+)
+
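+// newActionsRequest builds a request against the Actions service pipeline
+// URL, merging query parameters, defaulting api-version to 6.0-preview and
+// attaching the admin bearer token.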
+func (s *ScaleSetClient) newActionsRequest(ctx context.Context, method, uriPath string, body io.Reader) (*http.Request, error) {
+ if err := s.ensureAdminInfo(ctx); err != nil {
+ return nil, fmt.Errorf("failed to update token: %w", err)
+ }
+
+ actionsURI, err := s.actionsServiceInfo.GetURL()
+ if err != nil {
+ return nil, fmt.Errorf("failed to get pipeline URL: %w", err)
+ }
+
+ pathURI, err := url.Parse(uriPath)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse path: %w", err)
+ }
+ pathQuery := pathURI.Query()
+ baseQuery := actionsURI.Query()
+ for k, values := range pathQuery {
+ if baseQuery.Get(k) == "" {
+ for _, val := range values {
+ baseQuery.Add(k, val)
+ }
+ }
+ }
+ if baseQuery.Get("api-version") == "" {
+ baseQuery.Set("api-version", "6.0-preview")
+ }
+
+ actionsURI.Path = path.Join(actionsURI.Path, pathURI.Path)
+ actionsURI.RawQuery = baseQuery.Encode()
+
+ req, err := http.NewRequestWithContext(ctx, method, actionsURI.String(), body)
+ if err != nil {
+ return nil, err
+ }
+
+ req.Header.Set("Content-Type", "application/json")
+ req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", s.actionsServiceInfo.Token))
+
+ return req, nil
+}
diff --git a/util/logging.go b/util/logging.go
new file mode 100644
index 00000000..99c69da7
--- /dev/null
+++ b/util/logging.go
@@ -0,0 +1,82 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package util
+
+import (
+ "context"
+ "log/slog"
+)
+
+type slogContextKey string
+
+const (
+ slogCtxFields slogContextKey = "slog_ctx_fields"
+)
+
+var _ slog.Handler = &SlogMultiHandler{}
+
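+// WithSlogContext stores slog attributes in the context. SlogMultiHandler
+// appends them to every record logged with that context.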
+func WithSlogContext(ctx context.Context, attrs ...slog.Attr) context.Context {
+ return context.WithValue(ctx, slogCtxFields, attrs)
+}
+
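+// SlogMultiHandler fans out log records to multiple slog handlers and
+// injects any attributes stored in the context via WithSlogContext.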
+type SlogMultiHandler struct {
+ Handlers []slog.Handler
+}
+
+func (m *SlogMultiHandler) Enabled(ctx context.Context, level slog.Level) bool {
+ // Enabled if any handler is enabled
+ for _, h := range m.Handlers {
+ if h.Enabled(ctx, level) {
+ return true
+ }
+ }
+ return false
+}
+
+func (m *SlogMultiHandler) Handle(ctx context.Context, r slog.Record) error {
+ record := r.Clone()
+ attrs, ok := ctx.Value(slogCtxFields).([]slog.Attr)
+ if ok {
+ for _, v := range attrs {
+ record.AddAttrs(v)
+ }
+ }
+
+ var firstErr error
+ for _, h := range m.Handlers {
+ if err := h.Handle(ctx, record); err != nil && firstErr == nil {
+ firstErr = err
+ }
+ }
+ return firstErr
+}
+
+func (m *SlogMultiHandler) WithAttrs(attrs []slog.Attr) slog.Handler {
+ hs := make([]slog.Handler, len(m.Handlers))
+ for i, h := range m.Handlers {
+ hs[i] = h.WithAttrs(attrs)
+ }
+ return &SlogMultiHandler{
+ Handlers: hs,
+ }
+}
+
+func (m *SlogMultiHandler) WithGroup(name string) slog.Handler {
+ hs := make([]slog.Handler, len(m.Handlers))
+ for i, h := range m.Handlers {
+ hs[i] = h.WithGroup(name)
+ }
+ return &SlogMultiHandler{Handlers: hs}
+}
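+
+// A minimal usage sketch (illustrative only: the logFile writer and the
+// handler choices are assumptions, not part of this package):
+//
+//	multi := &SlogMultiHandler{Handlers: []slog.Handler{
+//		slog.NewTextHandler(os.Stderr, nil),
+//		slog.NewJSONHandler(logFile, nil),
+//	}}
+//	slog.SetDefault(slog.New(multi))
+//
+//	ctx := WithSlogContext(context.Background(), slog.String("pool_id", "demo"))
+//	slog.InfoContext(ctx, "scaling pool") // pool_id is appended by Handle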
diff --git a/util/util.go b/util/util.go
index db2b86f0..dc92ce0e 100644
--- a/util/util.go
+++ b/util/util.go
@@ -16,45 +16,97 @@ package util
import (
"context"
- "crypto/tls"
- "crypto/x509"
"fmt"
"net/http"
+ "unicode/utf8"
- "github.com/cloudbase/garm/params"
+ runnerErrors "github.com/cloudbase/garm-provider-common/errors"
+ commonParams "github.com/cloudbase/garm-provider-common/params"
"github.com/cloudbase/garm/runner/common"
-
- "github.com/google/go-github/v53/github"
- "github.com/pkg/errors"
- "golang.org/x/oauth2"
)
-func GithubClient(ctx context.Context, token string, credsDetails params.GithubCredentials) (common.GithubClient, common.GithubEnterpriseClient, error) {
- var roots *x509.CertPool
- if credsDetails.CABundle != nil && len(credsDetails.CABundle) > 0 {
- roots = x509.NewCertPool()
- ok := roots.AppendCertsFromPEM(credsDetails.CABundle)
- if !ok {
- return nil, nil, fmt.Errorf("failed to parse CA cert")
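+// FetchTools lists the runner application downloads published for the entity
+// the client is scoped to, translating an HTTP 401 from GitHub into
+// runnerErrors.ErrUnauthorized so callers can detect credential problems.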
+func FetchTools(ctx context.Context, cli common.GithubClient) ([]commonParams.RunnerApplicationDownload, error) {
+ tools, ghResp, err := cli.ListEntityRunnerApplicationDownloads(ctx)
+ if err != nil {
+ if ghResp != nil && ghResp.StatusCode == http.StatusUnauthorized {
+ return nil, fmt.Errorf("error fetching tools: %w", runnerErrors.ErrUnauthorized)
+ }
+ return nil, fmt.Errorf("error fetching runner tools: %w", err)
+ }
+
+ ret := []commonParams.RunnerApplicationDownload{}
+ for _, tool := range tools {
+ if tool == nil {
+ continue
+ }
+ ret = append(ret, commonParams.RunnerApplicationDownload(*tool))
+ }
+ return ret, nil
+}
+
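+// ASCIIEqualFold reports whether s and t are equal under ASCII-only case
+// folding: bytes 'A' through 'Z' compare equal to their lowercase forms,
+// while non-ASCII runes must match byte for byte. This is the comparison
+// CORS origin checks need; full Unicode folding (strings.EqualFold) would
+// equate visually confusable origins, e.g. the Kelvin sign 'K' (U+212A)
+// folds to ASCII 'k'.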
+func ASCIIEqualFold(s, t string) bool {
+ // Fast ASCII path for equal-length ASCII strings
+ if len(s) == len(t) && isASCII(s) && isASCII(t) {
+ for i := 0; i < len(s); i++ {
+ a, b := s[i], t[i]
+ if a != b {
+ if 'A' <= a && a <= 'Z' {
+ a = a + 'a' - 'A'
+ }
+ if 'A' <= b && b <= 'Z' {
+ b = b + 'a' - 'A'
+ }
+ if a != b {
+ return false
+ }
+ }
+ }
+ return true
+ }
+
+ // UTF-8 path - handle different byte lengths correctly
+ i, j := 0, 0
+ for i < len(s) && j < len(t) {
+ sr, sizeS := utf8.DecodeRuneInString(s[i:])
+ tr, sizeT := utf8.DecodeRuneInString(t[j:])
+
+ // Handle invalid UTF-8 - they must be identical
+ if sr == utf8.RuneError || tr == utf8.RuneError {
+ // For invalid UTF-8, compare the raw bytes
+ if sr == utf8.RuneError && tr == utf8.RuneError {
+ if sizeS == sizeT && s[i:i+sizeS] == t[j:j+sizeT] {
+ i += sizeS
+ j += sizeT
+ continue
+ }
+ }
+ return false
+ }
+
+ if sr != tr {
+ // Apply ASCII case folding only
+ if 'A' <= sr && sr <= 'Z' {
+ sr = sr + 'a' - 'A'
+ }
+ if 'A' <= tr && tr <= 'Z' {
+ tr = tr + 'a' - 'A'
+ }
+ if sr != tr {
+ return false
+ }
+ }
+
+ i += sizeS
+ j += sizeT
+ }
+ return i == len(s) && j == len(t)
+}
+
+func isASCII(s string) bool {
+ for i := 0; i < len(s); i++ {
+ if s[i] >= 0x80 {
+ return false
}
}
- httpTransport := &http.Transport{
- TLSClientConfig: &tls.Config{
- ClientCAs: roots,
- },
- }
- httpClient := &http.Client{Transport: httpTransport}
- ctx = context.WithValue(ctx, oauth2.HTTPClient, httpClient)
-
- ts := oauth2.StaticTokenSource(
- &oauth2.Token{AccessToken: token},
- )
- tc := oauth2.NewClient(ctx, ts)
-
- ghClient, err := github.NewEnterpriseClient(credsDetails.APIBaseURL, credsDetails.UploadBaseURL, tc)
- if err != nil {
- return nil, nil, errors.Wrap(err, "fetching github client")
- }
-
- return ghClient.Actions, ghClient.Enterprise, nil
+ return true
}
diff --git a/util/util_test.go b/util/util_test.go
new file mode 100644
index 00000000..f04dab84
--- /dev/null
+++ b/util/util_test.go
@@ -0,0 +1,394 @@
+// Copyright 2022 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package util
+
+import (
+ "testing"
+)
+
+func TestASCIIEqualFold(t *testing.T) {
+ tests := []struct {
+ name string
+ s string
+ t string
+ expected bool
+ reason string
+ }{
+ // Basic ASCII case folding tests
+ {
+ name: "identical strings",
+ s: "hello",
+ t: "hello",
+ expected: true,
+ reason: "identical strings should match",
+ },
+ {
+ name: "simple case difference",
+ s: "Hello",
+ t: "hello",
+ expected: true,
+ reason: "ASCII case folding should match H/h",
+ },
+ {
+ name: "all uppercase vs lowercase",
+ s: "HELLO",
+ t: "hello",
+ expected: true,
+ reason: "ASCII case folding should match all cases",
+ },
+ {
+ name: "mixed case",
+ s: "HeLLo",
+ t: "hEllO",
+ expected: true,
+ reason: "mixed case should match after folding",
+ },
+
+ // Empty string tests
+ {
+ name: "both empty",
+ s: "",
+ t: "",
+ expected: true,
+ reason: "empty strings should match",
+ },
+ {
+ name: "one empty",
+ s: "hello",
+ t: "",
+ expected: false,
+ reason: "different length strings should not match",
+ },
+ {
+ name: "other empty",
+ s: "",
+ t: "hello",
+ expected: false,
+ reason: "different length strings should not match",
+ },
+
+ // Different content tests
+ {
+ name: "different strings same case",
+ s: "hello",
+ t: "world",
+ expected: false,
+ reason: "different content should not match",
+ },
+ {
+ name: "different strings different case",
+ s: "Hello",
+ t: "World",
+ expected: false,
+ reason: "different content should not match regardless of case",
+ },
+ {
+ name: "different length",
+ s: "hello",
+ t: "hello world",
+ expected: false,
+ reason: "different length strings should not match",
+ },
+
+ // ASCII non-alphabetic characters
+ {
+ name: "numbers and symbols",
+ s: "Hello123!@#",
+ t: "hello123!@#",
+ expected: true,
+ reason: "numbers and symbols should be preserved, only letters folded",
+ },
+ {
+ name: "different numbers",
+ s: "Hello123",
+ t: "Hello124",
+ expected: false,
+ reason: "different numbers should not match",
+ },
+ {
+ name: "different symbols",
+ s: "Hello!",
+ t: "Hello?",
+ expected: false,
+ reason: "different symbols should not match",
+ },
+
+ // URL-specific tests (CORS security focus)
+ {
+ name: "HTTP scheme case",
+ s: "HTTP://example.com",
+ t: "http://example.com",
+ expected: true,
+ reason: "HTTP scheme should be case-insensitive",
+ },
+ {
+ name: "HTTPS scheme case",
+ s: "HTTPS://EXAMPLE.COM",
+ t: "https://example.com",
+ expected: true,
+ reason: "HTTPS scheme and domain should be case-insensitive",
+ },
+ {
+ name: "complex URL case",
+ s: "HTTPS://API.EXAMPLE.COM:8080/PATH",
+ t: "https://api.example.com:8080/path",
+ expected: true,
+ reason: "entire URL should be case-insensitive for ASCII",
+ },
+ {
+ name: "subdomain case",
+ s: "https://API.SUB.EXAMPLE.COM",
+ t: "https://api.sub.example.com",
+ expected: true,
+ reason: "subdomains should be case-insensitive",
+ },
+
+ // Unicode security tests (homograph attack prevention)
+ {
+ name: "cyrillic homograph attack",
+ s: "https://еxample.com", // Cyrillic 'е' (U+0435)
+ t: "https://example.com", // Latin 'e' (U+0065)
+ expected: false,
+ reason: "should block Cyrillic homograph attack",
+ },
+ {
+ name: "mixed cyrillic attack",
+ s: "https://ехample.com", // Cyrillic 'е' and 'х'
+ t: "https://example.com", // Latin 'e' and 'x'
+ expected: false,
+ reason: "should block mixed Cyrillic homograph attack",
+ },
+ {
+ name: "cyrillic 'а' attack",
+ s: "https://exаmple.com", // Cyrillic 'а' (U+0430)
+ t: "https://example.com", // Latin 'a' (U+0061)
+ expected: false,
+ reason: "should block Cyrillic 'а' homograph attack",
+ },
+
+ // Unicode case folding security tests
+ {
+ name: "unicode case folding attack",
+ s: "https://CAFÉ.com", // Latin É (U+00C9)
+ t: "https://café.com", // Latin é (U+00E9)
+ expected: false,
+ reason: "should NOT perform Unicode case folding (security)",
+ },
+ {
+ name: "turkish i attack",
+ s: "https://İSTANBUL.com", // Turkish İ (U+0130)
+ t: "https://istanbul.com", // Latin i
+ expected: false,
+ reason: "should NOT perform Turkish case folding",
+ },
+ {
+ name: "german sharp s",
+ s: "https://GROß.com", // German ß (U+00DF)
+ t: "https://gross.com", // Expanded form
+ expected: false,
+ reason: "should NOT perform German ß expansion",
+ },
+
+ // Valid Unicode exact matches
+ {
+ name: "identical unicode",
+ s: "https://café.com",
+ t: "https://café.com",
+ expected: true,
+ reason: "identical Unicode strings should match",
+ },
+ {
+ name: "identical cyrillic",
+ s: "https://пример.com", // Russian
+ t: "https://пример.com", // Russian
+ expected: true,
+ reason: "identical Cyrillic strings should match",
+ },
+ {
+ name: "ascii part of unicode domain",
+ s: "HTTPS://café.COM", // ASCII parts should fold
+ t: "https://café.com",
+ expected: true,
+ reason: "ASCII parts should fold even in Unicode strings",
+ },
+
+ // Edge cases with UTF-8
+ {
+ name: "different UTF-8 byte length same rune count",
+ s: "Café", // é is 2 bytes
+ t: "Café", // é is 2 bytes (same)
+ expected: true,
+ reason: "same Unicode content should match",
+ },
+ {
+ name: "UTF-8 normalization difference",
+ s: "café\u0301", // é as e + combining acute (3 bytes for é part)
+ t: "café", // é as single character (2 bytes for é part)
+ expected: false,
+ reason: "different Unicode normalization should not match",
+ },
+ {
+ name: "CRITICAL: current implementation flaw",
+ s: "ABC" + string([]byte{0xC3, 0xA9}), // ABC + é (2 bytes) = 5 bytes
+ t: "abc" + string([]byte{0xC3, 0xA9}), // abc + é (2 bytes) = 5 bytes
+ expected: true,
+ reason: "should match after ASCII folding (this should pass with correct implementation)",
+ },
+ {
+ name: "invalid UTF-8 sequence",
+ s: "hello\xff", // Invalid UTF-8
+ t: "hello\xff", // Invalid UTF-8
+ expected: true,
+ reason: "identical invalid UTF-8 should match",
+ },
+ {
+ name: "different invalid UTF-8",
+ s: "hello\xff", // Invalid UTF-8
+ t: "hello\xfe", // Different invalid UTF-8
+ expected: false,
+ reason: "different invalid UTF-8 should not match",
+ },
+
+ // ASCII boundary tests
+ {
+ name: "ascii boundary characters",
+ s: "A@Z[`a{z", // Test boundaries around A-Z
+ t: "a@z[`A{Z",
+ expected: true,
+ reason: "only A-Z should be folded, not punctuation",
+ },
+ {
+ name: "digit boundaries",
+ s: "Test123ABC",
+ t: "test123abc",
+ expected: true,
+ reason: "digits should not be folded, only letters",
+ },
+
+ // Long string performance tests
+ {
+ name: "long ascii string",
+ s: "HTTP://" + repeatString("ABCDEFGHIJKLMNOPQRSTUVWXYZ", 100) + ".COM",
+ t: "http://" + repeatString("abcdefghijklmnopqrstuvwxyz", 100) + ".com",
+ expected: true,
+ reason: "long ASCII strings should be handled efficiently",
+ },
+ {
+ name: "long unicode string",
+ s: repeatString("CAFÉ", 100),
+ t: repeatString("CAFÉ", 100), // Same case - should match
+ expected: true,
+ reason: "long identical Unicode strings should match",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ result := ASCIIEqualFold(tt.s, tt.t)
+ if result != tt.expected {
+ t.Errorf("ASCIIEqualFold(%q, %q) = %v, expected %v\nReason: %s",
+ tt.s, tt.t, result, tt.expected, tt.reason)
+ }
+ })
+ }
+}
+
+// Helper function for generating long test strings
+func repeatString(s string, count int) string {
+ if count <= 0 {
+ return ""
+ }
+ result := make([]byte, 0, len(s)*count)
+ for i := 0; i < count; i++ {
+ result = append(result, s...)
+ }
+ return string(result)
+}
+
+// Benchmark tests for performance verification
+func BenchmarkASCIIEqualFold(b *testing.B) {
+ benchmarks := []struct {
+ name string
+ s string
+ t string
+ }{
+ {
+ name: "short_ascii_match",
+ s: "HTTP://EXAMPLE.COM",
+ t: "http://example.com",
+ },
+ {
+ name: "short_ascii_nomatch",
+ s: "HTTP://EXAMPLE.COM",
+ t: "http://different.com",
+ },
+ {
+ name: "long_ascii_match",
+ s: "HTTP://" + repeatString("ABCDEFGHIJKLMNOPQRSTUVWXYZ", 100) + ".COM",
+ t: "http://" + repeatString("abcdefghijklmnopqrstuvwxyz", 100) + ".com",
+ },
+ {
+ name: "unicode_nomatch",
+ s: "https://café.com",
+ t: "https://CAFÉ.com",
+ },
+ {
+ name: "unicode_exact_match",
+ s: "https://café.com",
+ t: "https://café.com",
+ },
+ }
+
+ for _, bm := range benchmarks {
+ b.Run(bm.name, func(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ ASCIIEqualFold(bm.s, bm.t)
+ }
+ })
+ }
+}
+
+// Fuzzing test to catch edge cases
+func FuzzASCIIEqualFold(f *testing.F) {
+ // Seed with interesting test cases
+ seeds := [][]string{
+ {"hello", "HELLO"},
+ {"", ""},
+ {"café", "CAFÉ"},
+ {"https://example.com", "HTTPS://EXAMPLE.COM"},
+ {"еxample", "example"}, // Cyrillic attack
+ {string([]byte{0xff}), string([]byte{0xfe})}, // Invalid UTF-8
+ }
+
+ for _, seed := range seeds {
+ f.Add(seed[0], seed[1])
+ }
+
+ f.Fuzz(func(t *testing.T, s1, s2 string) {
+ // Just ensure it doesn't panic and returns a boolean
+ result := ASCIIEqualFold(s1, s2)
+ _ = result // Use the result to prevent optimization
+
+ // Property: function should be symmetric
+ if ASCIIEqualFold(s1, s2) != ASCIIEqualFold(s2, s1) {
+ t.Errorf("ASCIIEqualFold is not symmetric: (%q, %q)", s1, s2)
+ }
+
+ // Property: identical strings should always match
+ if s1 == s2 && !ASCIIEqualFold(s1, s2) {
+ t.Errorf("identical strings should match: %q", s1)
+ }
+ })
+}
diff --git a/vendor/github.com/ProtonMail/go-crypto/LICENSE b/vendor/filippo.io/edwards25519/LICENSE
similarity index 100%
rename from vendor/github.com/ProtonMail/go-crypto/LICENSE
rename to vendor/filippo.io/edwards25519/LICENSE
diff --git a/vendor/filippo.io/edwards25519/README.md b/vendor/filippo.io/edwards25519/README.md
new file mode 100644
index 00000000..24e2457d
--- /dev/null
+++ b/vendor/filippo.io/edwards25519/README.md
@@ -0,0 +1,14 @@
+# filippo.io/edwards25519
+
+```
+import "filippo.io/edwards25519"
+```
+
+This library implements the edwards25519 elliptic curve, exposing the necessary APIs to build a wide array of higher-level primitives.
+Read the docs at [pkg.go.dev/filippo.io/edwards25519](https://pkg.go.dev/filippo.io/edwards25519).
+
+The code is originally derived from Adam Langley's internal implementation in the Go standard library, and includes George Tankersley's [performance improvements](https://golang.org/cl/71950). It was then further developed by Henry de Valence for use in ristretto255, and was finally [merged back into the Go standard library](https://golang.org/cl/276272) as of Go 1.17. It now tracks the upstream codebase and extends it with additional functionality.
+
+Most users don't need this package, and should instead use `crypto/ed25519` for signatures, `golang.org/x/crypto/curve25519` for Diffie-Hellman, or `github.com/gtank/ristretto255` for prime order group logic. However, for anyone currently using a fork of `crypto/internal/edwards25519`/`crypto/ed25519/internal/edwards25519` or `github.com/agl/edwards25519`, this package should be a safer, faster, and more powerful alternative.
+
+Since this package is meant to curb proliferation of edwards25519 implementations in the Go ecosystem, it welcomes requests for new APIs or reviewable performance improvements.
diff --git a/vendor/filippo.io/edwards25519/doc.go b/vendor/filippo.io/edwards25519/doc.go
new file mode 100644
index 00000000..ab6aaebc
--- /dev/null
+++ b/vendor/filippo.io/edwards25519/doc.go
@@ -0,0 +1,20 @@
+// Copyright (c) 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package edwards25519 implements group logic for the twisted Edwards curve
+//
+// -x^2 + y^2 = 1 - (121665/121666)*x^2*y^2
+//
+// This is better known as the Edwards curve equivalent to Curve25519, and is
+// the curve used by the Ed25519 signature scheme.
+//
+// Most users don't need this package, and should instead use crypto/ed25519 for
+// signatures, golang.org/x/crypto/curve25519 for Diffie-Hellman, or
+// github.com/gtank/ristretto255 for prime order group logic.
+//
+// However, developers who do need to interact with low-level edwards25519
+// operations can use this package, which is an extended version of
+// crypto/internal/edwards25519 from the standard library repackaged as
+// an importable module.
+package edwards25519
diff --git a/vendor/filippo.io/edwards25519/edwards25519.go b/vendor/filippo.io/edwards25519/edwards25519.go
new file mode 100644
index 00000000..a744da2c
--- /dev/null
+++ b/vendor/filippo.io/edwards25519/edwards25519.go
@@ -0,0 +1,427 @@
+// Copyright (c) 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package edwards25519
+
+import (
+ "errors"
+
+ "filippo.io/edwards25519/field"
+)
+
+// Point types.
+
+type projP1xP1 struct {
+ X, Y, Z, T field.Element
+}
+
+type projP2 struct {
+ X, Y, Z field.Element
+}
+
+// Point represents a point on the edwards25519 curve.
+//
+// This type works similarly to math/big.Int, and all arguments and receivers
+// are allowed to alias.
+//
+// The zero value is NOT valid, and it may be used only as a receiver.
+type Point struct {
+ // Make the type not comparable (i.e. not usable with == or as a map key), as
+ // equivalent points can be represented by different Go values.
+ _ incomparable
+
+ // The point is internally represented in extended coordinates (X, Y, Z, T)
+ // where x = X/Z, y = Y/Z, and xy = T/Z per https://eprint.iacr.org/2008/522.
+ x, y, z, t field.Element
+}
+
+type incomparable [0]func()
+
+func checkInitialized(points ...*Point) {
+ for _, p := range points {
+ if p.x == (field.Element{}) && p.y == (field.Element{}) {
+ panic("edwards25519: use of uninitialized Point")
+ }
+ }
+}
+
+type projCached struct {
+ YplusX, YminusX, Z, T2d field.Element
+}
+
+type affineCached struct {
+ YplusX, YminusX, T2d field.Element
+}
+
+// Constructors.
+
+func (v *projP2) Zero() *projP2 {
+ v.X.Zero()
+ v.Y.One()
+ v.Z.One()
+ return v
+}
+
+// identity is the point at infinity.
+var identity, _ = new(Point).SetBytes([]byte{
+ 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0})
+
+// NewIdentityPoint returns a new Point set to the identity.
+func NewIdentityPoint() *Point {
+ return new(Point).Set(identity)
+}
+
+// generator is the canonical curve basepoint. See TestGenerator for the
+// correspondence of this encoding with the values in RFC 8032.
+var generator, _ = new(Point).SetBytes([]byte{
+ 0x58, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66,
+ 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66,
+ 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66,
+ 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66})
+
+// NewGeneratorPoint returns a new Point set to the canonical generator.
+func NewGeneratorPoint() *Point {
+ return new(Point).Set(generator)
+}
+
+func (v *projCached) Zero() *projCached {
+ v.YplusX.One()
+ v.YminusX.One()
+ v.Z.One()
+ v.T2d.Zero()
+ return v
+}
+
+func (v *affineCached) Zero() *affineCached {
+ v.YplusX.One()
+ v.YminusX.One()
+ v.T2d.Zero()
+ return v
+}
+
+// Assignments.
+
+// Set sets v = u, and returns v.
+func (v *Point) Set(u *Point) *Point {
+ *v = *u
+ return v
+}
+
+// Encoding.
+
+// Bytes returns the canonical 32-byte encoding of v, according to RFC 8032,
+// Section 5.1.2.
+func (v *Point) Bytes() []byte {
+ // This function is outlined to make the allocations inline in the caller
+ // rather than happen on the heap.
+ var buf [32]byte
+ return v.bytes(&buf)
+}
+
+func (v *Point) bytes(buf *[32]byte) []byte {
+ checkInitialized(v)
+
+ var zInv, x, y field.Element
+ zInv.Invert(&v.z) // zInv = 1 / Z
+ x.Multiply(&v.x, &zInv) // x = X / Z
+ y.Multiply(&v.y, &zInv) // y = Y / Z
+
+ out := copyFieldElement(buf, &y)
+ out[31] |= byte(x.IsNegative() << 7)
+ return out
+}
+
+var feOne = new(field.Element).One()
+
+// SetBytes sets v = x, where x is a 32-byte encoding of v. If x does not
+// represent a valid point on the curve, SetBytes returns nil and an error and
+// the receiver is unchanged. Otherwise, SetBytes returns v.
+//
+// Note that SetBytes accepts all non-canonical encodings of valid points.
+// That is, it follows decoding rules that match most implementations in
+// the ecosystem rather than RFC 8032.
+func (v *Point) SetBytes(x []byte) (*Point, error) {
+ // Specifically, the non-canonical encodings that are accepted are
+ // 1) the ones where the field element is not reduced (see the
+ // (*field.Element).SetBytes docs) and
+ // 2) the ones where the x-coordinate is zero and the sign bit is set.
+ //
+ // Read more at https://hdevalence.ca/blog/2020-10-04-its-25519am,
+ // specifically the "Canonical A, R" section.
+
+ y, err := new(field.Element).SetBytes(x)
+ if err != nil {
+ return nil, errors.New("edwards25519: invalid point encoding length")
+ }
+
+ // -x² + y² = 1 + dx²y²
+ // x² + dx²y² = x²(dy² + 1) = y² - 1
+ // x² = (y² - 1) / (dy² + 1)
+
+ // u = y² - 1
+ y2 := new(field.Element).Square(y)
+ u := new(field.Element).Subtract(y2, feOne)
+
+ // v = dy² + 1
+ vv := new(field.Element).Multiply(y2, d)
+ vv = vv.Add(vv, feOne)
+
+ // x = +√(u/v)
+ xx, wasSquare := new(field.Element).SqrtRatio(u, vv)
+ if wasSquare == 0 {
+ return nil, errors.New("edwards25519: invalid point encoding")
+ }
+
+ // Select the negative square root if the sign bit is set.
+ xxNeg := new(field.Element).Negate(xx)
+ xx = xx.Select(xxNeg, xx, int(x[31]>>7))
+
+ v.x.Set(xx)
+ v.y.Set(y)
+ v.z.One()
+ v.t.Multiply(xx, y) // xy = T / Z
+
+ return v, nil
+}
+
+func copyFieldElement(buf *[32]byte, v *field.Element) []byte {
+ copy(buf[:], v.Bytes())
+ return buf[:]
+}
+
+// Conversions.
+
+func (v *projP2) FromP1xP1(p *projP1xP1) *projP2 {
+ v.X.Multiply(&p.X, &p.T)
+ v.Y.Multiply(&p.Y, &p.Z)
+ v.Z.Multiply(&p.Z, &p.T)
+ return v
+}
+
+func (v *projP2) FromP3(p *Point) *projP2 {
+ v.X.Set(&p.x)
+ v.Y.Set(&p.y)
+ v.Z.Set(&p.z)
+ return v
+}
+
+func (v *Point) fromP1xP1(p *projP1xP1) *Point {
+ v.x.Multiply(&p.X, &p.T)
+ v.y.Multiply(&p.Y, &p.Z)
+ v.z.Multiply(&p.Z, &p.T)
+ v.t.Multiply(&p.X, &p.Y)
+ return v
+}
+
+func (v *Point) fromP2(p *projP2) *Point {
+ v.x.Multiply(&p.X, &p.Z)
+ v.y.Multiply(&p.Y, &p.Z)
+ v.z.Square(&p.Z)
+ v.t.Multiply(&p.X, &p.Y)
+ return v
+}
+
+// d is a constant in the curve equation.
+var d, _ = new(field.Element).SetBytes([]byte{
+ 0xa3, 0x78, 0x59, 0x13, 0xca, 0x4d, 0xeb, 0x75,
+ 0xab, 0xd8, 0x41, 0x41, 0x4d, 0x0a, 0x70, 0x00,
+ 0x98, 0xe8, 0x79, 0x77, 0x79, 0x40, 0xc7, 0x8c,
+ 0x73, 0xfe, 0x6f, 0x2b, 0xee, 0x6c, 0x03, 0x52})
+var d2 = new(field.Element).Add(d, d)
+
+func (v *projCached) FromP3(p *Point) *projCached {
+ v.YplusX.Add(&p.y, &p.x)
+ v.YminusX.Subtract(&p.y, &p.x)
+ v.Z.Set(&p.z)
+ v.T2d.Multiply(&p.t, d2)
+ return v
+}
+
+func (v *affineCached) FromP3(p *Point) *affineCached {
+ v.YplusX.Add(&p.y, &p.x)
+ v.YminusX.Subtract(&p.y, &p.x)
+ v.T2d.Multiply(&p.t, d2)
+
+ var invZ field.Element
+ invZ.Invert(&p.z)
+ v.YplusX.Multiply(&v.YplusX, &invZ)
+ v.YminusX.Multiply(&v.YminusX, &invZ)
+ v.T2d.Multiply(&v.T2d, &invZ)
+ return v
+}
+
+// (Re)addition and subtraction.
+
+// Add sets v = p + q, and returns v.
+func (v *Point) Add(p, q *Point) *Point {
+ checkInitialized(p, q)
+ qCached := new(projCached).FromP3(q)
+ result := new(projP1xP1).Add(p, qCached)
+ return v.fromP1xP1(result)
+}
+
+// Subtract sets v = p - q, and returns v.
+func (v *Point) Subtract(p, q *Point) *Point {
+ checkInitialized(p, q)
+ qCached := new(projCached).FromP3(q)
+ result := new(projP1xP1).Sub(p, qCached)
+ return v.fromP1xP1(result)
+}
+
+func (v *projP1xP1) Add(p *Point, q *projCached) *projP1xP1 {
+ var YplusX, YminusX, PP, MM, TT2d, ZZ2 field.Element
+
+ YplusX.Add(&p.y, &p.x)
+ YminusX.Subtract(&p.y, &p.x)
+
+ PP.Multiply(&YplusX, &q.YplusX)
+ MM.Multiply(&YminusX, &q.YminusX)
+ TT2d.Multiply(&p.t, &q.T2d)
+ ZZ2.Multiply(&p.z, &q.Z)
+
+ ZZ2.Add(&ZZ2, &ZZ2)
+
+ v.X.Subtract(&PP, &MM)
+ v.Y.Add(&PP, &MM)
+ v.Z.Add(&ZZ2, &TT2d)
+ v.T.Subtract(&ZZ2, &TT2d)
+ return v
+}
+
+func (v *projP1xP1) Sub(p *Point, q *projCached) *projP1xP1 {
+ var YplusX, YminusX, PP, MM, TT2d, ZZ2 field.Element
+
+ YplusX.Add(&p.y, &p.x)
+ YminusX.Subtract(&p.y, &p.x)
+
+ PP.Multiply(&YplusX, &q.YminusX) // flipped sign
+ MM.Multiply(&YminusX, &q.YplusX) // flipped sign
+ TT2d.Multiply(&p.t, &q.T2d)
+ ZZ2.Multiply(&p.z, &q.Z)
+
+ ZZ2.Add(&ZZ2, &ZZ2)
+
+ v.X.Subtract(&PP, &MM)
+ v.Y.Add(&PP, &MM)
+ v.Z.Subtract(&ZZ2, &TT2d) // flipped sign
+ v.T.Add(&ZZ2, &TT2d) // flipped sign
+ return v
+}
+
+func (v *projP1xP1) AddAffine(p *Point, q *affineCached) *projP1xP1 {
+ var YplusX, YminusX, PP, MM, TT2d, Z2 field.Element
+
+ YplusX.Add(&p.y, &p.x)
+ YminusX.Subtract(&p.y, &p.x)
+
+ PP.Multiply(&YplusX, &q.YplusX)
+ MM.Multiply(&YminusX, &q.YminusX)
+ TT2d.Multiply(&p.t, &q.T2d)
+
+ Z2.Add(&p.z, &p.z)
+
+ v.X.Subtract(&PP, &MM)
+ v.Y.Add(&PP, &MM)
+ v.Z.Add(&Z2, &TT2d)
+ v.T.Subtract(&Z2, &TT2d)
+ return v
+}
+
+func (v *projP1xP1) SubAffine(p *Point, q *affineCached) *projP1xP1 {
+ var YplusX, YminusX, PP, MM, TT2d, Z2 field.Element
+
+ YplusX.Add(&p.y, &p.x)
+ YminusX.Subtract(&p.y, &p.x)
+
+ PP.Multiply(&YplusX, &q.YminusX) // flipped sign
+ MM.Multiply(&YminusX, &q.YplusX) // flipped sign
+ TT2d.Multiply(&p.t, &q.T2d)
+
+ Z2.Add(&p.z, &p.z)
+
+ v.X.Subtract(&PP, &MM)
+ v.Y.Add(&PP, &MM)
+ v.Z.Subtract(&Z2, &TT2d) // flipped sign
+ v.T.Add(&Z2, &TT2d) // flipped sign
+ return v
+}
+
+// Doubling.
+
+func (v *projP1xP1) Double(p *projP2) *projP1xP1 {
+ var XX, YY, ZZ2, XplusYsq field.Element
+
+ XX.Square(&p.X)
+ YY.Square(&p.Y)
+ ZZ2.Square(&p.Z)
+ ZZ2.Add(&ZZ2, &ZZ2)
+ XplusYsq.Add(&p.X, &p.Y)
+ XplusYsq.Square(&XplusYsq)
+
+ v.Y.Add(&YY, &XX)
+ v.Z.Subtract(&YY, &XX)
+
+ v.X.Subtract(&XplusYsq, &v.Y)
+ v.T.Subtract(&ZZ2, &v.Z)
+ return v
+}
+
+// Negation.
+
+// Negate sets v = -p, and returns v.
+func (v *Point) Negate(p *Point) *Point {
+ checkInitialized(p)
+ v.x.Negate(&p.x)
+ v.y.Set(&p.y)
+ v.z.Set(&p.z)
+ v.t.Negate(&p.t)
+ return v
+}
+
+// Equal returns 1 if v is equivalent to u, and 0 otherwise.
+func (v *Point) Equal(u *Point) int {
+ checkInitialized(v, u)
+
+ var t1, t2, t3, t4 field.Element
+ t1.Multiply(&v.x, &u.z)
+ t2.Multiply(&u.x, &v.z)
+ t3.Multiply(&v.y, &u.z)
+ t4.Multiply(&u.y, &v.z)
+
+ return t1.Equal(&t2) & t3.Equal(&t4)
+}
+
+// Constant-time operations
+
+// Select sets v to a if cond == 1 and to b if cond == 0.
+func (v *projCached) Select(a, b *projCached, cond int) *projCached {
+ v.YplusX.Select(&a.YplusX, &b.YplusX, cond)
+ v.YminusX.Select(&a.YminusX, &b.YminusX, cond)
+ v.Z.Select(&a.Z, &b.Z, cond)
+ v.T2d.Select(&a.T2d, &b.T2d, cond)
+ return v
+}
+
+// Select sets v to a if cond == 1 and to b if cond == 0.
+func (v *affineCached) Select(a, b *affineCached, cond int) *affineCached {
+ v.YplusX.Select(&a.YplusX, &b.YplusX, cond)
+ v.YminusX.Select(&a.YminusX, &b.YminusX, cond)
+ v.T2d.Select(&a.T2d, &b.T2d, cond)
+ return v
+}
+
+// CondNeg negates v if cond == 1 and leaves it unchanged if cond == 0.
+func (v *projCached) CondNeg(cond int) *projCached {
+ v.YplusX.Swap(&v.YminusX, cond)
+ v.T2d.Select(new(field.Element).Negate(&v.T2d), &v.T2d, cond)
+ return v
+}
+
+// CondNeg negates v if cond == 1 and leaves it unchanged if cond == 0.
+func (v *affineCached) CondNeg(cond int) *affineCached {
+ v.YplusX.Swap(&v.YminusX, cond)
+ v.T2d.Select(new(field.Element).Negate(&v.T2d), &v.T2d, cond)
+ return v
+}
diff --git a/vendor/filippo.io/edwards25519/extra.go b/vendor/filippo.io/edwards25519/extra.go
new file mode 100644
index 00000000..d152d68f
--- /dev/null
+++ b/vendor/filippo.io/edwards25519/extra.go
@@ -0,0 +1,349 @@
+// Copyright (c) 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package edwards25519
+
+// This file contains additional functionality that is not included in the
+// upstream crypto/internal/edwards25519 package.
+
+import (
+ "errors"
+
+ "filippo.io/edwards25519/field"
+)
+
+// ExtendedCoordinates returns v in extended coordinates (X:Y:Z:T) where
+// x = X/Z, y = Y/Z, and xy = T/Z as in https://eprint.iacr.org/2008/522.
+func (v *Point) ExtendedCoordinates() (X, Y, Z, T *field.Element) {
+ // This function is outlined to make the allocations inline in the caller
+ // rather than happen on the heap. Don't change the style without making
+ // sure it doesn't increase the inliner cost.
+ var e [4]field.Element
+ X, Y, Z, T = v.extendedCoordinates(&e)
+ return
+}
+
+func (v *Point) extendedCoordinates(e *[4]field.Element) (X, Y, Z, T *field.Element) {
+ checkInitialized(v)
+ X = e[0].Set(&v.x)
+ Y = e[1].Set(&v.y)
+ Z = e[2].Set(&v.z)
+ T = e[3].Set(&v.t)
+ return
+}
+
+// SetExtendedCoordinates sets v = (X:Y:Z:T) in extended coordinates where
+// x = X/Z, y = Y/Z, and xy = T/Z as in https://eprint.iacr.org/2008/522.
+//
+// If the coordinates are invalid or don't represent a valid point on the curve,
+// SetExtendedCoordinates returns nil and an error and the receiver is
+// unchanged. Otherwise, SetExtendedCoordinates returns v.
+func (v *Point) SetExtendedCoordinates(X, Y, Z, T *field.Element) (*Point, error) {
+ if !isOnCurve(X, Y, Z, T) {
+ return nil, errors.New("edwards25519: invalid point coordinates")
+ }
+ v.x.Set(X)
+ v.y.Set(Y)
+ v.z.Set(Z)
+ v.t.Set(T)
+ return v, nil
+}
+
+func isOnCurve(X, Y, Z, T *field.Element) bool {
+ var lhs, rhs field.Element
+ XX := new(field.Element).Square(X)
+ YY := new(field.Element).Square(Y)
+ ZZ := new(field.Element).Square(Z)
+ TT := new(field.Element).Square(T)
+ // -x² + y² = 1 + dx²y²
+ // -(X/Z)² + (Y/Z)² = 1 + d(T/Z)²
+ // -X² + Y² = Z² + dT²
+ lhs.Subtract(YY, XX)
+ rhs.Multiply(d, TT).Add(&rhs, ZZ)
+ if lhs.Equal(&rhs) != 1 {
+ return false
+ }
+ // xy = T/Z
+ // XY/Z² = T/Z
+ // XY = TZ
+ lhs.Multiply(X, Y)
+ rhs.Multiply(T, Z)
+ return lhs.Equal(&rhs) == 1
+}
+
+// BytesMontgomery converts v to a point on the birationally-equivalent
+// Curve25519 Montgomery curve, and returns its canonical 32 bytes encoding
+// according to RFC 7748.
+//
+// Note that BytesMontgomery only encodes the u-coordinate, so v and -v encode
+// to the same value. If v is the identity point, BytesMontgomery returns 32
+// zero bytes, analogously to the X25519 function.
+//
+// The lack of an inverse operation (such as SetMontgomeryBytes) is deliberate:
+// while every valid edwards25519 point has a unique u-coordinate Montgomery
+// encoding, X25519 accepts inputs on the quadratic twist, which don't correspond
+// to any edwards25519 point, and every other X25519 input corresponds to two
+// edwards25519 points.
+func (v *Point) BytesMontgomery() []byte {
+ // This function is outlined to make the allocations inline in the caller
+ // rather than happen on the heap.
+ var buf [32]byte
+ return v.bytesMontgomery(&buf)
+}
+
+func (v *Point) bytesMontgomery(buf *[32]byte) []byte {
+ checkInitialized(v)
+
+ // RFC 7748, Section 4.1 provides the bilinear map to calculate the
+ // Montgomery u-coordinate
+ //
+ // u = (1 + y) / (1 - y)
+ //
+ // where y = Y / Z.
+
+ var y, recip, u field.Element
+
+ y.Multiply(&v.y, y.Invert(&v.z)) // y = Y / Z
+ recip.Invert(recip.Subtract(feOne, &y)) // r = 1/(1 - y)
+ u.Multiply(u.Add(feOne, &y), &recip) // u = (1 + y)*r
+
+ return copyFieldElement(buf, &u)
+}
+
+// MultByCofactor sets v = 8 * p, and returns v.
+func (v *Point) MultByCofactor(p *Point) *Point {
+ checkInitialized(p)
+ result := projP1xP1{}
+ pp := (&projP2{}).FromP3(p)
+ result.Double(pp)
+ pp.FromP1xP1(&result)
+ result.Double(pp)
+ pp.FromP1xP1(&result)
+ result.Double(pp)
+ return v.fromP1xP1(&result)
+}
+
+// Given k > 0, set s = s**(2**k).
+func (s *Scalar) pow2k(k int) {
+ for i := 0; i < k; i++ {
+ s.Multiply(s, s)
+ }
+}
+
+// Invert sets s to the inverse of a nonzero scalar v, and returns s.
+//
+// If t is zero, Invert returns zero.
+func (s *Scalar) Invert(t *Scalar) *Scalar {
+ // Uses a hardcoded sliding window of width 4.
+ var table [8]Scalar
+ var tt Scalar
+ tt.Multiply(t, t)
+ table[0] = *t
+ for i := 0; i < 7; i++ {
+ table[i+1].Multiply(&table[i], &tt)
+ }
+ // Now table = [t**1, t**3, t**5, t**7, t**9, t**11, t**13, t**15]
+ // so t**k = t[k/2] for odd k
+
+ // To compute the sliding window digits, use the following Sage script:
+
+ // sage: import itertools
+ // sage: def sliding_window(w,k):
+ // ....: digits = []
+ // ....: while k > 0:
+ // ....: if k % 2 == 1:
+ // ....: kmod = k % (2**w)
+ // ....: digits.append(kmod)
+ // ....: k = k - kmod
+ // ....: else:
+ // ....: digits.append(0)
+ // ....: k = k // 2
+ // ....: return digits
+
+ // Now we can compute s roughly as follows:
+
+ // sage: s = 1
+ // sage: for coeff in reversed(sliding_window(4,l-2)):
+ // ....: s = s*s
+ // ....: if coeff > 0 :
+ // ....: s = s*t**coeff
+
+ // This works on one bit at a time, with many runs of zeros.
+ // The digits can be collapsed into [(count, coeff)] as follows:
+
+ // sage: [(len(list(group)),d) for d,group in itertools.groupby(sliding_window(4,l-2))]
+
+ // Entries of the form (k, 0) turn into pow2k(k)
+ // Entries of the form (1, coeff) turn into a squaring and then a table lookup.
+ // We can fold the squaring into the previous pow2k(k) as pow2k(k+1).
+
+ *s = table[1/2]
+ s.pow2k(127 + 1)
+ s.Multiply(s, &table[1/2])
+ s.pow2k(4 + 1)
+ s.Multiply(s, &table[9/2])
+ s.pow2k(3 + 1)
+ s.Multiply(s, &table[11/2])
+ s.pow2k(3 + 1)
+ s.Multiply(s, &table[13/2])
+ s.pow2k(3 + 1)
+ s.Multiply(s, &table[15/2])
+ s.pow2k(4 + 1)
+ s.Multiply(s, &table[7/2])
+ s.pow2k(4 + 1)
+ s.Multiply(s, &table[15/2])
+ s.pow2k(3 + 1)
+ s.Multiply(s, &table[5/2])
+ s.pow2k(3 + 1)
+ s.Multiply(s, &table[1/2])
+ s.pow2k(4 + 1)
+ s.Multiply(s, &table[15/2])
+ s.pow2k(4 + 1)
+ s.Multiply(s, &table[15/2])
+ s.pow2k(4 + 1)
+ s.Multiply(s, &table[7/2])
+ s.pow2k(3 + 1)
+ s.Multiply(s, &table[3/2])
+ s.pow2k(4 + 1)
+ s.Multiply(s, &table[11/2])
+ s.pow2k(5 + 1)
+ s.Multiply(s, &table[11/2])
+ s.pow2k(9 + 1)
+ s.Multiply(s, &table[9/2])
+ s.pow2k(3 + 1)
+ s.Multiply(s, &table[3/2])
+ s.pow2k(4 + 1)
+ s.Multiply(s, &table[3/2])
+ s.pow2k(4 + 1)
+ s.Multiply(s, &table[3/2])
+ s.pow2k(4 + 1)
+ s.Multiply(s, &table[9/2])
+ s.pow2k(3 + 1)
+ s.Multiply(s, &table[7/2])
+ s.pow2k(3 + 1)
+ s.Multiply(s, &table[3/2])
+ s.pow2k(3 + 1)
+ s.Multiply(s, &table[13/2])
+ s.pow2k(3 + 1)
+ s.Multiply(s, &table[7/2])
+ s.pow2k(4 + 1)
+ s.Multiply(s, &table[9/2])
+ s.pow2k(3 + 1)
+ s.Multiply(s, &table[15/2])
+ s.pow2k(4 + 1)
+ s.Multiply(s, &table[11/2])
+
+ return s
+}
+
+// MultiScalarMult sets v = sum(scalars[i] * points[i]), and returns v.
+//
+// Execution time depends only on the lengths of the two slices, which must match.
+func (v *Point) MultiScalarMult(scalars []*Scalar, points []*Point) *Point {
+ if len(scalars) != len(points) {
+ panic("edwards25519: called MultiScalarMult with different size inputs")
+ }
+ checkInitialized(points...)
+
+ // Proceed as in the single-base case, but share doublings
+ // between each point in the multiscalar equation.
+
+ // Build lookup tables for each point
+ tables := make([]projLookupTable, len(points))
+ for i := range tables {
+ tables[i].FromP3(points[i])
+ }
+ // Compute signed radix-16 digits for each scalar
+ digits := make([][64]int8, len(scalars))
+ for i := range digits {
+ digits[i] = scalars[i].signedRadix16()
+ }
+
+ // Unwrap first loop iteration to save computing 16*identity
+ multiple := &projCached{}
+ tmp1 := &projP1xP1{}
+ tmp2 := &projP2{}
+ // Lookup-and-add the appropriate multiple of each input point
+ for j := range tables {
+ tables[j].SelectInto(multiple, digits[j][63])
+ tmp1.Add(v, multiple) // tmp1 = v + x_(j,63)*Q in P1xP1 coords
+ v.fromP1xP1(tmp1) // update v
+ }
+ tmp2.FromP3(v) // set up tmp2 = v in P2 coords for next iteration
+ for i := 62; i >= 0; i-- {
+ tmp1.Double(tmp2) // tmp1 = 2*(prev) in P1xP1 coords
+ tmp2.FromP1xP1(tmp1) // tmp2 = 2*(prev) in P2 coords
+ tmp1.Double(tmp2) // tmp1 = 4*(prev) in P1xP1 coords
+ tmp2.FromP1xP1(tmp1) // tmp2 = 4*(prev) in P2 coords
+ tmp1.Double(tmp2) // tmp1 = 8*(prev) in P1xP1 coords
+ tmp2.FromP1xP1(tmp1) // tmp2 = 8*(prev) in P2 coords
+ tmp1.Double(tmp2) // tmp1 = 16*(prev) in P1xP1 coords
+ v.fromP1xP1(tmp1) // v = 16*(prev) in P3 coords
+ // Lookup-and-add the appropriate multiple of each input point
+ for j := range tables {
+ tables[j].SelectInto(multiple, digits[j][i])
+ tmp1.Add(v, multiple) // tmp1 = v + x_(j,i)*Q in P1xP1 coords
+ v.fromP1xP1(tmp1) // update v
+ }
+ tmp2.FromP3(v) // set up tmp2 = v in P2 coords for next iteration
+ }
+ return v
+}
+
+// VarTimeMultiScalarMult sets v = sum(scalars[i] * points[i]), and returns v.
+//
+// Execution time depends on the inputs.
+func (v *Point) VarTimeMultiScalarMult(scalars []*Scalar, points []*Point) *Point {
+ if len(scalars) != len(points) {
+ panic("edwards25519: called VarTimeMultiScalarMult with different size inputs")
+ }
+ checkInitialized(points...)
+
+ // Generalize double-base NAF computation to arbitrary sizes.
+ // Here all the points are dynamic, so we only use the smaller
+ // tables.
+
+ // Build lookup tables for each point
+ tables := make([]nafLookupTable5, len(points))
+ for i := range tables {
+ tables[i].FromP3(points[i])
+ }
+ // Compute a NAF for each scalar
+ nafs := make([][256]int8, len(scalars))
+ for i := range nafs {
+ nafs[i] = scalars[i].nonAdjacentForm(5)
+ }
+
+ multiple := &projCached{}
+ tmp1 := &projP1xP1{}
+ tmp2 := &projP2{}
+ tmp2.Zero()
+
+ // Move from high to low bits, doubling the accumulator
+ // at each iteration and checking whether there is a nonzero
+ // coefficient to look up a multiple of.
+ //
+ // Skip trying to find the first nonzero coefficient, because
+ // searching might be more work than a few extra doublings.
+ for i := 255; i >= 0; i-- {
+ tmp1.Double(tmp2)
+
+ for j := range nafs {
+ if nafs[j][i] > 0 {
+ v.fromP1xP1(tmp1)
+ tables[j].SelectInto(multiple, nafs[j][i])
+ tmp1.Add(v, multiple)
+ } else if nafs[j][i] < 0 {
+ v.fromP1xP1(tmp1)
+ tables[j].SelectInto(multiple, -nafs[j][i])
+ tmp1.Sub(v, multiple)
+ }
+ }
+
+ tmp2.FromP1xP1(tmp1)
+ }
+
+ v.fromP2(tmp2)
+ return v
+}
diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe.go b/vendor/filippo.io/edwards25519/field/fe.go
similarity index 90%
rename from vendor/golang.org/x/crypto/curve25519/internal/field/fe.go
rename to vendor/filippo.io/edwards25519/field/fe.go
index ca841ad9..5518ef2b 100644
--- a/vendor/golang.org/x/crypto/curve25519/internal/field/fe.go
+++ b/vendor/filippo.io/edwards25519/field/fe.go
@@ -8,6 +8,7 @@ package field
import (
"crypto/subtle"
"encoding/binary"
+ "errors"
"math/bits"
)
@@ -92,7 +93,7 @@ func (v *Element) Add(a, b *Element) *Element {
// Using the generic implementation here is actually faster than the
// assembly. Probably because the body of this function is so simple that
// the compiler can figure out better optimizations by inlining the carry
- // propagation. TODO
+ // propagation.
return v.carryPropagateGeneric()
}
@@ -186,14 +187,17 @@ func (v *Element) Set(a *Element) *Element {
return v
}
-// SetBytes sets v to x, which must be a 32-byte little-endian encoding.
+// SetBytes sets v to x, where x is a 32-byte little-endian encoding. If x is
+// not of the right length, SetBytes returns nil and an error, and the
+// receiver is unchanged.
//
// Consistent with RFC 7748, the most significant bit (the high bit of the
// last byte) is ignored, and non-canonical values (2^255-19 through 2^255-1)
-// are accepted. Note that this is laxer than specified by RFC 8032.
-func (v *Element) SetBytes(x []byte) *Element {
+// are accepted. Note that this is laxer than specified by RFC 8032, but
+// consistent with most Ed25519 implementations.
+func (v *Element) SetBytes(x []byte) (*Element, error) {
if len(x) != 32 {
- panic("edwards25519: invalid field element input size")
+ return nil, errors.New("edwards25519: invalid field element input size")
}
// Bits 0:51 (bytes 0:8, bits 0:64, shift 0, mask 51).
@@ -208,12 +212,12 @@ func (v *Element) SetBytes(x []byte) *Element {
// Bits 153:204 (bytes 19:27, bits 152:216, shift 1, mask 51).
v.l3 = binary.LittleEndian.Uint64(x[19:27]) >> 1
v.l3 &= maskLow51Bits
- // Bits 204:251 (bytes 24:32, bits 192:256, shift 12, mask 51).
+ // Bits 204:255 (bytes 24:32, bits 192:256, shift 12, mask 51).
// Note: not bytes 25:33, shift 4, to avoid overread.
v.l4 = binary.LittleEndian.Uint64(x[24:32]) >> 12
v.l4 &= maskLow51Bits
- return v
+ return v, nil
}
// Bytes returns the canonical 32-byte little-endian encoding of v.
@@ -391,26 +395,26 @@ var sqrtM1 = &Element{1718705420411056, 234908883556509,
// If u/v is square, SqrtRatio returns r and 1. If u/v is not square, SqrtRatio
// sets r according to Section 4.3 of draft-irtf-cfrg-ristretto255-decaf448-00,
// and returns r and 0.
-func (r *Element) SqrtRatio(u, v *Element) (rr *Element, wasSquare int) {
- var a, b Element
+func (r *Element) SqrtRatio(u, v *Element) (R *Element, wasSquare int) {
+ t0 := new(Element)
// r = (u * v3) * (u * v7)^((p-5)/8)
- v2 := a.Square(v)
- uv3 := b.Multiply(u, b.Multiply(v2, v))
- uv7 := a.Multiply(uv3, a.Square(v2))
- r.Multiply(uv3, r.Pow22523(uv7))
+ v2 := new(Element).Square(v)
+ uv3 := new(Element).Multiply(u, t0.Multiply(v2, v))
+ uv7 := new(Element).Multiply(uv3, t0.Square(v2))
+ rr := new(Element).Multiply(uv3, t0.Pow22523(uv7))
- check := a.Multiply(v, a.Square(r)) // check = v * r^2
+ check := new(Element).Multiply(v, t0.Square(rr)) // check = v * r^2
- uNeg := b.Negate(u)
+ uNeg := new(Element).Negate(u)
correctSignSqrt := check.Equal(u)
flippedSignSqrt := check.Equal(uNeg)
- flippedSignSqrtI := check.Equal(uNeg.Multiply(uNeg, sqrtM1))
+ flippedSignSqrtI := check.Equal(t0.Multiply(uNeg, sqrtM1))
- rPrime := b.Multiply(r, sqrtM1) // r_prime = SQRT_M1 * r
+ rPrime := new(Element).Multiply(rr, sqrtM1) // r_prime = SQRT_M1 * r
// r = CT_SELECT(r_prime IF flipped_sign_sqrt | flipped_sign_sqrt_i ELSE r)
- r.Select(rPrime, r, flippedSignSqrt|flippedSignSqrtI)
+ rr.Select(rPrime, rr, flippedSignSqrt|flippedSignSqrtI)
- r.Absolute(r) // Choose the nonnegative square root.
+ r.Absolute(rr) // Choose the nonnegative square root.
return r, correctSignSqrt | flippedSignSqrt
}
diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.go b/vendor/filippo.io/edwards25519/field/fe_amd64.go
similarity index 100%
rename from vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.go
rename to vendor/filippo.io/edwards25519/field/fe_amd64.go
diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.s b/vendor/filippo.io/edwards25519/field/fe_amd64.s
similarity index 100%
rename from vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.s
rename to vendor/filippo.io/edwards25519/field/fe_amd64.s
diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64_noasm.go b/vendor/filippo.io/edwards25519/field/fe_amd64_noasm.go
similarity index 100%
rename from vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64_noasm.go
rename to vendor/filippo.io/edwards25519/field/fe_amd64_noasm.go
diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.go b/vendor/filippo.io/edwards25519/field/fe_arm64.go
similarity index 100%
rename from vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.go
rename to vendor/filippo.io/edwards25519/field/fe_arm64.go
diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.s b/vendor/filippo.io/edwards25519/field/fe_arm64.s
similarity index 97%
rename from vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.s
rename to vendor/filippo.io/edwards25519/field/fe_arm64.s
index 5c91e458..3126a434 100644
--- a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.s
+++ b/vendor/filippo.io/edwards25519/field/fe_arm64.s
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build arm64 && gc && !purego
-// +build arm64,gc,!purego
#include "textflag.h"
diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64_noasm.go b/vendor/filippo.io/edwards25519/field/fe_arm64_noasm.go
similarity index 100%
rename from vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64_noasm.go
rename to vendor/filippo.io/edwards25519/field/fe_arm64_noasm.go
diff --git a/vendor/filippo.io/edwards25519/field/fe_extra.go b/vendor/filippo.io/edwards25519/field/fe_extra.go
new file mode 100644
index 00000000..1ef503b9
--- /dev/null
+++ b/vendor/filippo.io/edwards25519/field/fe_extra.go
@@ -0,0 +1,50 @@
+// Copyright (c) 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package field
+
+import "errors"
+
+// This file contains additional functionality that is not included in the
+// upstream crypto/ed25519/edwards25519/field package.
+
+// SetWideBytes sets v to x, where x is a 64-byte little-endian encoding, which
+// is reduced modulo the field order. If x is not of the right length,
+// SetWideBytes returns nil and an error, and the receiver is unchanged.
+//
+// SetWideBytes is not necessary to select a uniformly distributed value, and is
+// only provided for compatibility: SetBytes can be used instead as the chance
+// of bias is less than 2⁻²⁵⁰.
+func (v *Element) SetWideBytes(x []byte) (*Element, error) {
+ if len(x) != 64 {
+ return nil, errors.New("edwards25519: invalid SetWideBytes input size")
+ }
+
+ // Split the 64 bytes into two elements, and extract the most significant
+ // bit of each, which is ignored by SetBytes.
+ lo, _ := new(Element).SetBytes(x[:32])
+ loMSB := uint64(x[31] >> 7)
+ hi, _ := new(Element).SetBytes(x[32:])
+ hiMSB := uint64(x[63] >> 7)
+
+ // The output we want is
+ //
+ // v = lo + loMSB * 2²⁵⁵ + hi * 2²⁵⁶ + hiMSB * 2⁵¹¹
+ //
+ // which applying the reduction identity comes out to
+ //
+ // v = lo + loMSB * 19 + hi * 2 * 19 + hiMSB * 2 * 19²
+ //
+ // l0 will be the sum of a 52 bits value (lo.l0), plus a 5 bits value
+ // (loMSB * 19), a 6 bits value (hi.l0 * 2 * 19), and a 10 bits value
+ // (hiMSB * 2 * 19²), so it fits in a uint64.
+
+ v.l0 = lo.l0 + loMSB*19 + hi.l0*2*19 + hiMSB*2*19*19
+ v.l1 = lo.l1 + hi.l1*2*19
+ v.l2 = lo.l2 + hi.l2*2*19
+ v.l3 = lo.l3 + hi.l3*2*19
+ v.l4 = lo.l4 + hi.l4*2*19
+
+ return v.carryPropagate(), nil
+}
diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_generic.go b/vendor/filippo.io/edwards25519/field/fe_generic.go
similarity index 96%
rename from vendor/golang.org/x/crypto/curve25519/internal/field/fe_generic.go
rename to vendor/filippo.io/edwards25519/field/fe_generic.go
index 2671217d..86f5fd95 100644
--- a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_generic.go
+++ b/vendor/filippo.io/edwards25519/field/fe_generic.go
@@ -156,7 +156,7 @@ func feMulGeneric(v, a, b *Element) {
rr4 := r4.lo&maskLow51Bits + c3
// Now all coefficients fit into 64-bit registers but are still too large to
- // be passed around as a Element. We therefore do one last carry chain,
+ // be passed around as an Element. We therefore do one last carry chain,
// where the carries will be small enough to fit in the wiggle room above 2⁵¹.
*v = Element{rr0, rr1, rr2, rr3, rr4}
v.carryPropagate()
@@ -246,7 +246,7 @@ func feSquareGeneric(v, a *Element) {
}
// carryPropagateGeneric brings the limbs below 52 bits by applying the reduction
-// identity (a * 2²⁵⁵ + b = a * 19 + b) to the l4 carry. TODO inline
+// identity (a * 2²⁵⁵ + b = a * 19 + b) to the l4 carry.
func (v *Element) carryPropagateGeneric() *Element {
c0 := v.l0 >> 51
c1 := v.l1 >> 51
@@ -254,6 +254,8 @@ func (v *Element) carryPropagateGeneric() *Element {
c3 := v.l3 >> 51
c4 := v.l4 >> 51
+ // c4 is at most 64 - 51 = 13 bits, so c4*19 is at most 18 bits, and
+ // the final l0 will be at most 52 bits. Similarly for the rest.
v.l0 = v.l0&maskLow51Bits + c4*19
v.l1 = v.l1&maskLow51Bits + c0
v.l2 = v.l2&maskLow51Bits + c1
diff --git a/vendor/filippo.io/edwards25519/scalar.go b/vendor/filippo.io/edwards25519/scalar.go
new file mode 100644
index 00000000..3fd16538
--- /dev/null
+++ b/vendor/filippo.io/edwards25519/scalar.go
@@ -0,0 +1,343 @@
+// Copyright (c) 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package edwards25519
+
+import (
+ "encoding/binary"
+ "errors"
+)
+
+// A Scalar is an integer modulo
+//
+// l = 2^252 + 27742317777372353535851937790883648493
+//
+// which is the prime order of the edwards25519 group.
+//
+// This type works similarly to math/big.Int, and all arguments and
+// receivers are allowed to alias.
+//
+// The zero value is a valid zero element.
+type Scalar struct {
+ // s is the scalar in the Montgomery domain, in the format of the
+ // fiat-crypto implementation.
+ s fiatScalarMontgomeryDomainFieldElement
+}
+
+// The field implementation in scalar_fiat.go is generated by the fiat-crypto
+// project (https://github.com/mit-plv/fiat-crypto) at version v0.0.9 (23d2dbc)
+// from a formally verified model.
+//
+// fiat-crypto code comes under the following license.
+//
+// Copyright (c) 2015-2020 The fiat-crypto Authors. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// THIS SOFTWARE IS PROVIDED BY the fiat-crypto authors "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL Berkeley Software Design,
+// Inc. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+
+// NewScalar returns a new zero Scalar.
+func NewScalar() *Scalar {
+ return &Scalar{}
+}
+
+// MultiplyAdd sets s = x * y + z mod l, and returns s. It is equivalent to
+// using Multiply and then Add.
+func (s *Scalar) MultiplyAdd(x, y, z *Scalar) *Scalar {
+ // Make a copy of z in case it aliases s.
+ zCopy := new(Scalar).Set(z)
+ return s.Multiply(x, y).Add(s, zCopy)
+}
+
+// Add sets s = x + y mod l, and returns s.
+func (s *Scalar) Add(x, y *Scalar) *Scalar {
+ // s = 1 * x + y mod l
+ fiatScalarAdd(&s.s, &x.s, &y.s)
+ return s
+}
+
+// Subtract sets s = x - y mod l, and returns s.
+func (s *Scalar) Subtract(x, y *Scalar) *Scalar {
+ // s = -1 * y + x mod l
+ fiatScalarSub(&s.s, &x.s, &y.s)
+ return s
+}
+
+// Negate sets s = -x mod l, and returns s.
+func (s *Scalar) Negate(x *Scalar) *Scalar {
+ // s = -1 * x + 0 mod l
+ fiatScalarOpp(&s.s, &x.s)
+ return s
+}
+
+// Multiply sets s = x * y mod l, and returns s.
+func (s *Scalar) Multiply(x, y *Scalar) *Scalar {
+ // s = x * y + 0 mod l
+ fiatScalarMul(&s.s, &x.s, &y.s)
+ return s
+}
+
+// Set sets s = x, and returns s.
+func (s *Scalar) Set(x *Scalar) *Scalar {
+ *s = *x
+ return s
+}
+
+// SetUniformBytes sets s = x mod l, where x is a 64-byte little-endian integer.
+// If x is not of the right length, SetUniformBytes returns nil and an error,
+// and the receiver is unchanged.
+//
+// SetUniformBytes can be used to set s to a uniformly distributed value given
+// 64 uniformly distributed random bytes.
+func (s *Scalar) SetUniformBytes(x []byte) (*Scalar, error) {
+ if len(x) != 64 {
+ return nil, errors.New("edwards25519: invalid SetUniformBytes input length")
+ }
+
+ // We have a value x of 512 bits, but our fiatScalarFromBytes function
+ // expects an input lower than l, which is a little over 252 bits.
+ //
+ // Instead of writing a reduction function that operates on wider inputs, we
+ // can interpret x as the sum of three shorter values a, b, and c.
+ //
+ // x = a + b * 2^168 + c * 2^336 mod l
+ //
+ // We then precompute 2^168 and 2^336 modulo l, and perform the reduction
+ // with two multiplications and two additions.
+
+ s.setShortBytes(x[:21])
+ t := new(Scalar).setShortBytes(x[21:42])
+ s.Add(s, t.Multiply(t, scalarTwo168))
+ t.setShortBytes(x[42:])
+ s.Add(s, t.Multiply(t, scalarTwo336))
+
+ return s, nil
+}
+
+// scalarTwo168 and scalarTwo336 are 2^168 and 2^336 modulo l, encoded as a
+// fiatScalarMontgomeryDomainFieldElement, which is a little-endian 4-limb value
+// in the 2^256 Montgomery domain.
+var scalarTwo168 = &Scalar{s: [4]uint64{0x5b8ab432eac74798, 0x38afddd6de59d5d7,
+ 0xa2c131b399411b7c, 0x6329a7ed9ce5a30}}
+var scalarTwo336 = &Scalar{s: [4]uint64{0xbd3d108e2b35ecc5, 0x5c3a3718bdf9c90b,
+ 0x63aa97a331b4f2ee, 0x3d217f5be65cb5c}}
+
+// setShortBytes sets s = x mod l, where x is a little-endian integer shorter
+// than 32 bytes.
+func (s *Scalar) setShortBytes(x []byte) *Scalar {
+ if len(x) >= 32 {
+ panic("edwards25519: internal error: setShortBytes called with a long string")
+ }
+ var buf [32]byte
+ copy(buf[:], x)
+ fiatScalarFromBytes((*[4]uint64)(&s.s), &buf)
+ fiatScalarToMontgomery(&s.s, (*fiatScalarNonMontgomeryDomainFieldElement)(&s.s))
+ return s
+}
+
+// SetCanonicalBytes sets s = x, where x is a 32-byte little-endian encoding of
+// s, and returns s. If x is not a canonical encoding of s, SetCanonicalBytes
+// returns nil and an error, and the receiver is unchanged.
+func (s *Scalar) SetCanonicalBytes(x []byte) (*Scalar, error) {
+ if len(x) != 32 {
+ return nil, errors.New("invalid scalar length")
+ }
+ if !isReduced(x) {
+ return nil, errors.New("invalid scalar encoding")
+ }
+
+ fiatScalarFromBytes((*[4]uint64)(&s.s), (*[32]byte)(x))
+ fiatScalarToMontgomery(&s.s, (*fiatScalarNonMontgomeryDomainFieldElement)(&s.s))
+
+ return s, nil
+}
+
+// scalarMinusOneBytes is l - 1 in little endian.
+var scalarMinusOneBytes = [32]byte{236, 211, 245, 92, 26, 99, 18, 88, 214, 156, 247, 162, 222, 249, 222, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16}
+
+// isReduced returns whether the given scalar in 32-byte little endian encoded
+// form is reduced modulo l.
+func isReduced(s []byte) bool {
+ if len(s) != 32 {
+ return false
+ }
+
+ for i := len(s) - 1; i >= 0; i-- {
+ switch {
+ case s[i] > scalarMinusOneBytes[i]:
+ return false
+ case s[i] < scalarMinusOneBytes[i]:
+ return true
+ }
+ }
+ return true
+}
+
+// SetBytesWithClamping applies the buffer pruning described in RFC 8032,
+// Section 5.1.5 (also known as clamping) and sets s to the result. The input
+// must be 32 bytes, and it is not modified. If x is not of the right length,
+// SetBytesWithClamping returns nil and an error, and the receiver is unchanged.
+//
+// Note that since Scalar values are always reduced modulo the prime order of
+// the curve, the resulting value will not preserve any of the cofactor-clearing
+// properties that clamping is meant to provide. It will however work as
+// expected as long as it is applied to points on the prime order subgroup, like
+// in Ed25519. In fact, it is lost to history why RFC 8032 adopted the
+// irrelevant RFC 7748 clamping, but it is now required for compatibility.
+func (s *Scalar) SetBytesWithClamping(x []byte) (*Scalar, error) {
+ // The description above omits the purpose of the high bits of the clamping
+ // for brevity, but those are also lost to reductions, and are also
+ // irrelevant to edwards25519 as they protect against a specific
+ // implementation bug that was once observed in a generic Montgomery ladder.
+ if len(x) != 32 {
+ return nil, errors.New("edwards25519: invalid SetBytesWithClamping input length")
+ }
+
+ // We need to use the wide reduction from SetUniformBytes, since clamping
+ // sets the 2^254 bit, making the value higher than the order.
+ var wideBytes [64]byte
+ copy(wideBytes[:], x[:])
+ wideBytes[0] &= 248
+ wideBytes[31] &= 63
+ wideBytes[31] |= 64
+ return s.SetUniformBytes(wideBytes[:])
+}
+
+// Bytes returns the canonical 32-byte little-endian encoding of s.
+func (s *Scalar) Bytes() []byte {
+ // This function is outlined to make the allocations inline in the caller
+ // rather than happen on the heap.
+ var encoded [32]byte
+ return s.bytes(&encoded)
+}
+
+func (s *Scalar) bytes(out *[32]byte) []byte {
+ var ss fiatScalarNonMontgomeryDomainFieldElement
+ fiatScalarFromMontgomery(&ss, &s.s)
+ fiatScalarToBytes(out, (*[4]uint64)(&ss))
+ return out[:]
+}
+
+// Equal returns 1 if s and t are equal, and 0 otherwise.
+func (s *Scalar) Equal(t *Scalar) int {
+ var diff fiatScalarMontgomeryDomainFieldElement
+ fiatScalarSub(&diff, &s.s, &t.s)
+ var nonzero uint64
+ fiatScalarNonzero(&nonzero, (*[4]uint64)(&diff))
+ nonzero |= nonzero >> 32
+ nonzero |= nonzero >> 16
+ nonzero |= nonzero >> 8
+ nonzero |= nonzero >> 4
+ nonzero |= nonzero >> 2
+ nonzero |= nonzero >> 1
+ return int(^nonzero) & 1
+}
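
The shift cascade in `Equal` OR-folds every bit of the 64-bit difference into bit 0, so the final `int(^nonzero) & 1` is 1 exactly when the difference was zero. The same trick on a bare word (`isZeroWord` is a hypothetical name for illustration):

```go
package main

import "fmt"

// isZeroWord reports, as 0 or 1 and without branching, whether w == 0,
// by OR-folding all 64 bits into bit 0 and inverting.
func isZeroWord(w uint64) int {
	w |= w >> 32
	w |= w >> 16
	w |= w >> 8
	w |= w >> 4
	w |= w >> 2
	w |= w >> 1
	return int(^w) & 1
}

func main() {
	fmt.Println(isZeroWord(0), isZeroWord(1), isZeroWord(1<<63)) // 1 0 0
}
```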
+
+// nonAdjacentForm computes a width-w non-adjacent form for this scalar.
+//
+// w must be between 2 and 8, or nonAdjacentForm will panic.
+func (s *Scalar) nonAdjacentForm(w uint) [256]int8 {
+ // This implementation is adapted from the one
+ // in curve25519-dalek and is documented there:
+ // https://github.com/dalek-cryptography/curve25519-dalek/blob/f630041af28e9a405255f98a8a93adca18e4315b/src/scalar.rs#L800-L871
+ b := s.Bytes()
+ if b[31] > 127 {
+ panic("scalar has high bit set illegally")
+ }
+ if w < 2 {
+ panic("w must be at least 2 by the definition of NAF")
+ } else if w > 8 {
+ panic("NAF digits must fit in int8")
+ }
+
+ var naf [256]int8
+ var digits [5]uint64
+
+ for i := 0; i < 4; i++ {
+ digits[i] = binary.LittleEndian.Uint64(b[i*8:])
+ }
+
+ width := uint64(1 << w)
+ windowMask := uint64(width - 1)
+
+ pos := uint(0)
+ carry := uint64(0)
+ for pos < 256 {
+ indexU64 := pos / 64
+ indexBit := pos % 64
+ var bitBuf uint64
+ if indexBit < 64-w {
+ // This window's bits are contained in a single u64
+ bitBuf = digits[indexU64] >> indexBit
+ } else {
+ // Combine the current 64 bits with bits from the next 64
+ bitBuf = (digits[indexU64] >> indexBit) | (digits[1+indexU64] << (64 - indexBit))
+ }
+
+ // Add carry into the current window
+ window := carry + (bitBuf & windowMask)
+
+ if window&1 == 0 {
+ // If the window value is even, preserve the carry and continue.
+ // Why is the carry preserved?
+ // If carry == 0 and window & 1 == 0,
+ // then the next carry should be 0
+ // If carry == 1 and window & 1 == 0,
+ // then bit_buf & 1 == 1 so the next carry should be 1
+ pos += 1
+ continue
+ }
+
+ if window < width/2 {
+ carry = 0
+ naf[pos] = int8(window)
+ } else {
+ carry = 1
+ naf[pos] = int8(window) - int8(width)
+ }
+
+ pos += w
+ }
+ return naf
+}
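
The invariant this maintains: every digit is zero or odd with magnitude below 2^(w-1), and the digits reconstruct the scalar as sum(naf[i] * 2^i). A toy width-w NAF over a small int64 makes that checkable (illustrative only; `naf` is a hypothetical helper, not the 256-bit routine above):

```go
package main

import "fmt"

// naf computes a width-w non-adjacent form of a small positive n:
// each digit is zero or odd with |d| < 2^(w-1), and n == Σ d[i]·2^i.
func naf(n int64, w uint) []int64 {
	width := int64(1) << w
	var digits []int64
	for n != 0 {
		var d int64
		if n&1 == 1 {
			d = n % width
			if d >= width/2 {
				d -= width // recenter into (-2^(w-1), 2^(w-1))
			}
			n -= d
		}
		digits = append(digits, d)
		n >>= 1
	}
	return digits
}

func main() {
	n, w := int64(1000003), uint(4)
	d := naf(n, w)
	var sum, pow int64 = 0, 1
	for _, di := range d {
		sum += di * pow
		pow <<= 1
	}
	fmt.Println(sum == n) // true
}
```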
+
+func (s *Scalar) signedRadix16() [64]int8 {
+ b := s.Bytes()
+ if b[31] > 127 {
+ panic("scalar has high bit set illegally")
+ }
+
+ var digits [64]int8
+
+ // Compute unsigned radix-16 digits:
+ for i := 0; i < 32; i++ {
+ digits[2*i] = int8(b[i] & 15)
+ digits[2*i+1] = int8((b[i] >> 4) & 15)
+ }
+
+ // Recenter coefficients:
+ for i := 0; i < 63; i++ {
+ carry := (digits[i] + 8) >> 4
+ digits[i] -= carry << 4
+ digits[i+1] += carry
+ }
+
+ return digits
+}
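
A worked single-byte example of the recentring step (the real function then carries across all 64 digits): 0xF9 splits into unsigned nibbles 9 and 15, and recentring the low digit into [-8, 8) pushes a carry into the next one.

```go
package main

import "fmt"

func main() {
	b := byte(0xF9) // 249 = 9 + 15·16
	d0 := int8(b & 15)
	d1 := int8((b >> 4) & 15)

	// Recenter d0 into [-8, 8), as in the loop above.
	carry := (d0 + 8) >> 4
	d0 -= carry << 4
	d1 += carry

	// In the full function, d1 would itself be recentred next.
	fmt.Println(d0, d1, int(d0)+16*int(d1)) // -7 16 249
}
```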
diff --git a/vendor/filippo.io/edwards25519/scalar_fiat.go b/vendor/filippo.io/edwards25519/scalar_fiat.go
new file mode 100644
index 00000000..2e5782b6
--- /dev/null
+++ b/vendor/filippo.io/edwards25519/scalar_fiat.go
@@ -0,0 +1,1147 @@
+// Code generated by Fiat Cryptography. DO NOT EDIT.
+//
+// Autogenerated: word_by_word_montgomery --lang Go --cmovznz-by-mul --relax-primitive-carry-to-bitwidth 32,64 --public-function-case camelCase --public-type-case camelCase --private-function-case camelCase --private-type-case camelCase --doc-text-before-function-name '' --doc-newline-before-package-declaration --doc-prepend-header 'Code generated by Fiat Cryptography. DO NOT EDIT.' --package-name edwards25519 Scalar 64 '2^252 + 27742317777372353535851937790883648493' mul add sub opp nonzero from_montgomery to_montgomery to_bytes from_bytes
+//
+// curve description: Scalar
+//
+// machine_wordsize = 64 (from "64")
+//
+// requested operations: mul, add, sub, opp, nonzero, from_montgomery, to_montgomery, to_bytes, from_bytes
+//
+// m = 0x1000000000000000000000000000000014def9dea2f79cd65812631a5cf5d3ed (from "2^252 + 27742317777372353535851937790883648493")
+//
+//
+//
+// NOTE: In addition to the bounds specified above each function, all
+//
+// functions synthesized for this Montgomery arithmetic require the
+//
+// input to be strictly less than the prime modulus (m), and also
+//
+// require the input to be in the unique saturated representation.
+//
+// All functions also ensure that these two properties are true of
+//
+// return values.
+//
+//
+//
+// Computed values:
+//
+// eval z = z[0] + (z[1] << 64) + (z[2] << 128) + (z[3] << 192)
+//
+// bytes_eval z = z[0] + (z[1] << 8) + (z[2] << 16) + (z[3] << 24) + (z[4] << 32) + (z[5] << 40) + (z[6] << 48) + (z[7] << 56) + (z[8] << 64) + (z[9] << 72) + (z[10] << 80) + (z[11] << 88) + (z[12] << 96) + (z[13] << 104) + (z[14] << 112) + (z[15] << 120) + (z[16] << 128) + (z[17] << 136) + (z[18] << 144) + (z[19] << 152) + (z[20] << 160) + (z[21] << 168) + (z[22] << 176) + (z[23] << 184) + (z[24] << 192) + (z[25] << 200) + (z[26] << 208) + (z[27] << 216) + (z[28] << 224) + (z[29] << 232) + (z[30] << 240) + (z[31] << 248)
+//
+// twos_complement_eval z = let x1 := z[0] + (z[1] << 64) + (z[2] << 128) + (z[3] << 192) in
+//
+// if x1 & (2^256-1) < 2^255 then x1 & (2^256-1) else (x1 & (2^256-1)) - 2^256
+
+package edwards25519
+
+import "math/bits"
+
+type fiatScalarUint1 uint64 // We use uint64 instead of a more narrow type for performance reasons; see https://github.com/mit-plv/fiat-crypto/pull/1006#issuecomment-892625927
+type fiatScalarInt1 int64 // We use int64 instead of a more narrow type for performance reasons; see https://github.com/mit-plv/fiat-crypto/pull/1006#issuecomment-892625927
+
+// The type fiatScalarMontgomeryDomainFieldElement is a field element in the Montgomery domain.
+//
+// Bounds: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
+type fiatScalarMontgomeryDomainFieldElement [4]uint64
+
+// The type fiatScalarNonMontgomeryDomainFieldElement is a field element NOT in the Montgomery domain.
+//
+// Bounds: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
+type fiatScalarNonMontgomeryDomainFieldElement [4]uint64
+
+// fiatScalarCmovznzU64 is a single-word conditional move.
+//
+// Postconditions:
+//
+// out1 = (if arg1 = 0 then arg2 else arg3)
+//
+// Input Bounds:
+//
+// arg1: [0x0 ~> 0x1]
+// arg2: [0x0 ~> 0xffffffffffffffff]
+// arg3: [0x0 ~> 0xffffffffffffffff]
+//
+// Output Bounds:
+//
+// out1: [0x0 ~> 0xffffffffffffffff]
+func fiatScalarCmovznzU64(out1 *uint64, arg1 fiatScalarUint1, arg2 uint64, arg3 uint64) {
+ x1 := (uint64(arg1) * 0xffffffffffffffff)
+ x2 := ((x1 & arg3) | ((^x1) & arg2))
+ *out1 = x2
+}
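
The selection works by expanding the 0/1 condition into an all-zeros or all-ones mask with a wrapping multiply, then blending the two operands; a standalone sketch (`cmovznz` is a hypothetical name):

```go
package main

import "fmt"

// cmovznz returns arg2 when cond == 0 and arg3 when cond == 1,
// without branching: cond * 0xffff... is 0 or an all-ones mask.
func cmovznz(cond, arg2, arg3 uint64) uint64 {
	mask := cond * 0xffffffffffffffff
	return (mask & arg3) | (^mask & arg2)
}

func main() {
	fmt.Println(cmovznz(0, 10, 20), cmovznz(1, 10, 20)) // 10 20
}
```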
+
+// fiatScalarMul multiplies two field elements in the Montgomery domain.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+// 0 ≤ eval arg2 < m
+//
+// Postconditions:
+//
+// eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) * eval (from_montgomery arg2)) mod m
+// 0 ≤ eval out1 < m
+func fiatScalarMul(out1 *fiatScalarMontgomeryDomainFieldElement, arg1 *fiatScalarMontgomeryDomainFieldElement, arg2 *fiatScalarMontgomeryDomainFieldElement) {
+ x1 := arg1[1]
+ x2 := arg1[2]
+ x3 := arg1[3]
+ x4 := arg1[0]
+ var x5 uint64
+ var x6 uint64
+ x6, x5 = bits.Mul64(x4, arg2[3])
+ var x7 uint64
+ var x8 uint64
+ x8, x7 = bits.Mul64(x4, arg2[2])
+ var x9 uint64
+ var x10 uint64
+ x10, x9 = bits.Mul64(x4, arg2[1])
+ var x11 uint64
+ var x12 uint64
+ x12, x11 = bits.Mul64(x4, arg2[0])
+ var x13 uint64
+ var x14 uint64
+ x13, x14 = bits.Add64(x12, x9, uint64(0x0))
+ var x15 uint64
+ var x16 uint64
+ x15, x16 = bits.Add64(x10, x7, uint64(fiatScalarUint1(x14)))
+ var x17 uint64
+ var x18 uint64
+ x17, x18 = bits.Add64(x8, x5, uint64(fiatScalarUint1(x16)))
+ x19 := (uint64(fiatScalarUint1(x18)) + x6)
+ var x20 uint64
+ _, x20 = bits.Mul64(x11, 0xd2b51da312547e1b)
+ var x22 uint64
+ var x23 uint64
+ x23, x22 = bits.Mul64(x20, 0x1000000000000000)
+ var x24 uint64
+ var x25 uint64
+ x25, x24 = bits.Mul64(x20, 0x14def9dea2f79cd6)
+ var x26 uint64
+ var x27 uint64
+ x27, x26 = bits.Mul64(x20, 0x5812631a5cf5d3ed)
+ var x28 uint64
+ var x29 uint64
+ x28, x29 = bits.Add64(x27, x24, uint64(0x0))
+ x30 := (uint64(fiatScalarUint1(x29)) + x25)
+ var x32 uint64
+ _, x32 = bits.Add64(x11, x26, uint64(0x0))
+ var x33 uint64
+ var x34 uint64
+ x33, x34 = bits.Add64(x13, x28, uint64(fiatScalarUint1(x32)))
+ var x35 uint64
+ var x36 uint64
+ x35, x36 = bits.Add64(x15, x30, uint64(fiatScalarUint1(x34)))
+ var x37 uint64
+ var x38 uint64
+ x37, x38 = bits.Add64(x17, x22, uint64(fiatScalarUint1(x36)))
+ var x39 uint64
+ var x40 uint64
+ x39, x40 = bits.Add64(x19, x23, uint64(fiatScalarUint1(x38)))
+ var x41 uint64
+ var x42 uint64
+ x42, x41 = bits.Mul64(x1, arg2[3])
+ var x43 uint64
+ var x44 uint64
+ x44, x43 = bits.Mul64(x1, arg2[2])
+ var x45 uint64
+ var x46 uint64
+ x46, x45 = bits.Mul64(x1, arg2[1])
+ var x47 uint64
+ var x48 uint64
+ x48, x47 = bits.Mul64(x1, arg2[0])
+ var x49 uint64
+ var x50 uint64
+ x49, x50 = bits.Add64(x48, x45, uint64(0x0))
+ var x51 uint64
+ var x52 uint64
+ x51, x52 = bits.Add64(x46, x43, uint64(fiatScalarUint1(x50)))
+ var x53 uint64
+ var x54 uint64
+ x53, x54 = bits.Add64(x44, x41, uint64(fiatScalarUint1(x52)))
+ x55 := (uint64(fiatScalarUint1(x54)) + x42)
+ var x56 uint64
+ var x57 uint64
+ x56, x57 = bits.Add64(x33, x47, uint64(0x0))
+ var x58 uint64
+ var x59 uint64
+ x58, x59 = bits.Add64(x35, x49, uint64(fiatScalarUint1(x57)))
+ var x60 uint64
+ var x61 uint64
+ x60, x61 = bits.Add64(x37, x51, uint64(fiatScalarUint1(x59)))
+ var x62 uint64
+ var x63 uint64
+ x62, x63 = bits.Add64(x39, x53, uint64(fiatScalarUint1(x61)))
+ var x64 uint64
+ var x65 uint64
+ x64, x65 = bits.Add64(uint64(fiatScalarUint1(x40)), x55, uint64(fiatScalarUint1(x63)))
+ var x66 uint64
+ _, x66 = bits.Mul64(x56, 0xd2b51da312547e1b)
+ var x68 uint64
+ var x69 uint64
+ x69, x68 = bits.Mul64(x66, 0x1000000000000000)
+ var x70 uint64
+ var x71 uint64
+ x71, x70 = bits.Mul64(x66, 0x14def9dea2f79cd6)
+ var x72 uint64
+ var x73 uint64
+ x73, x72 = bits.Mul64(x66, 0x5812631a5cf5d3ed)
+ var x74 uint64
+ var x75 uint64
+ x74, x75 = bits.Add64(x73, x70, uint64(0x0))
+ x76 := (uint64(fiatScalarUint1(x75)) + x71)
+ var x78 uint64
+ _, x78 = bits.Add64(x56, x72, uint64(0x0))
+ var x79 uint64
+ var x80 uint64
+ x79, x80 = bits.Add64(x58, x74, uint64(fiatScalarUint1(x78)))
+ var x81 uint64
+ var x82 uint64
+ x81, x82 = bits.Add64(x60, x76, uint64(fiatScalarUint1(x80)))
+ var x83 uint64
+ var x84 uint64
+ x83, x84 = bits.Add64(x62, x68, uint64(fiatScalarUint1(x82)))
+ var x85 uint64
+ var x86 uint64
+ x85, x86 = bits.Add64(x64, x69, uint64(fiatScalarUint1(x84)))
+ x87 := (uint64(fiatScalarUint1(x86)) + uint64(fiatScalarUint1(x65)))
+ var x88 uint64
+ var x89 uint64
+ x89, x88 = bits.Mul64(x2, arg2[3])
+ var x90 uint64
+ var x91 uint64
+ x91, x90 = bits.Mul64(x2, arg2[2])
+ var x92 uint64
+ var x93 uint64
+ x93, x92 = bits.Mul64(x2, arg2[1])
+ var x94 uint64
+ var x95 uint64
+ x95, x94 = bits.Mul64(x2, arg2[0])
+ var x96 uint64
+ var x97 uint64
+ x96, x97 = bits.Add64(x95, x92, uint64(0x0))
+ var x98 uint64
+ var x99 uint64
+ x98, x99 = bits.Add64(x93, x90, uint64(fiatScalarUint1(x97)))
+ var x100 uint64
+ var x101 uint64
+ x100, x101 = bits.Add64(x91, x88, uint64(fiatScalarUint1(x99)))
+ x102 := (uint64(fiatScalarUint1(x101)) + x89)
+ var x103 uint64
+ var x104 uint64
+ x103, x104 = bits.Add64(x79, x94, uint64(0x0))
+ var x105 uint64
+ var x106 uint64
+ x105, x106 = bits.Add64(x81, x96, uint64(fiatScalarUint1(x104)))
+ var x107 uint64
+ var x108 uint64
+ x107, x108 = bits.Add64(x83, x98, uint64(fiatScalarUint1(x106)))
+ var x109 uint64
+ var x110 uint64
+ x109, x110 = bits.Add64(x85, x100, uint64(fiatScalarUint1(x108)))
+ var x111 uint64
+ var x112 uint64
+ x111, x112 = bits.Add64(x87, x102, uint64(fiatScalarUint1(x110)))
+ var x113 uint64
+ _, x113 = bits.Mul64(x103, 0xd2b51da312547e1b)
+ var x115 uint64
+ var x116 uint64
+ x116, x115 = bits.Mul64(x113, 0x1000000000000000)
+ var x117 uint64
+ var x118 uint64
+ x118, x117 = bits.Mul64(x113, 0x14def9dea2f79cd6)
+ var x119 uint64
+ var x120 uint64
+ x120, x119 = bits.Mul64(x113, 0x5812631a5cf5d3ed)
+ var x121 uint64
+ var x122 uint64
+ x121, x122 = bits.Add64(x120, x117, uint64(0x0))
+ x123 := (uint64(fiatScalarUint1(x122)) + x118)
+ var x125 uint64
+ _, x125 = bits.Add64(x103, x119, uint64(0x0))
+ var x126 uint64
+ var x127 uint64
+ x126, x127 = bits.Add64(x105, x121, uint64(fiatScalarUint1(x125)))
+ var x128 uint64
+ var x129 uint64
+ x128, x129 = bits.Add64(x107, x123, uint64(fiatScalarUint1(x127)))
+ var x130 uint64
+ var x131 uint64
+ x130, x131 = bits.Add64(x109, x115, uint64(fiatScalarUint1(x129)))
+ var x132 uint64
+ var x133 uint64
+ x132, x133 = bits.Add64(x111, x116, uint64(fiatScalarUint1(x131)))
+ x134 := (uint64(fiatScalarUint1(x133)) + uint64(fiatScalarUint1(x112)))
+ var x135 uint64
+ var x136 uint64
+ x136, x135 = bits.Mul64(x3, arg2[3])
+ var x137 uint64
+ var x138 uint64
+ x138, x137 = bits.Mul64(x3, arg2[2])
+ var x139 uint64
+ var x140 uint64
+ x140, x139 = bits.Mul64(x3, arg2[1])
+ var x141 uint64
+ var x142 uint64
+ x142, x141 = bits.Mul64(x3, arg2[0])
+ var x143 uint64
+ var x144 uint64
+ x143, x144 = bits.Add64(x142, x139, uint64(0x0))
+ var x145 uint64
+ var x146 uint64
+ x145, x146 = bits.Add64(x140, x137, uint64(fiatScalarUint1(x144)))
+ var x147 uint64
+ var x148 uint64
+ x147, x148 = bits.Add64(x138, x135, uint64(fiatScalarUint1(x146)))
+ x149 := (uint64(fiatScalarUint1(x148)) + x136)
+ var x150 uint64
+ var x151 uint64
+ x150, x151 = bits.Add64(x126, x141, uint64(0x0))
+ var x152 uint64
+ var x153 uint64
+ x152, x153 = bits.Add64(x128, x143, uint64(fiatScalarUint1(x151)))
+ var x154 uint64
+ var x155 uint64
+ x154, x155 = bits.Add64(x130, x145, uint64(fiatScalarUint1(x153)))
+ var x156 uint64
+ var x157 uint64
+ x156, x157 = bits.Add64(x132, x147, uint64(fiatScalarUint1(x155)))
+ var x158 uint64
+ var x159 uint64
+ x158, x159 = bits.Add64(x134, x149, uint64(fiatScalarUint1(x157)))
+ var x160 uint64
+ _, x160 = bits.Mul64(x150, 0xd2b51da312547e1b)
+ var x162 uint64
+ var x163 uint64
+ x163, x162 = bits.Mul64(x160, 0x1000000000000000)
+ var x164 uint64
+ var x165 uint64
+ x165, x164 = bits.Mul64(x160, 0x14def9dea2f79cd6)
+ var x166 uint64
+ var x167 uint64
+ x167, x166 = bits.Mul64(x160, 0x5812631a5cf5d3ed)
+ var x168 uint64
+ var x169 uint64
+ x168, x169 = bits.Add64(x167, x164, uint64(0x0))
+ x170 := (uint64(fiatScalarUint1(x169)) + x165)
+ var x172 uint64
+ _, x172 = bits.Add64(x150, x166, uint64(0x0))
+ var x173 uint64
+ var x174 uint64
+ x173, x174 = bits.Add64(x152, x168, uint64(fiatScalarUint1(x172)))
+ var x175 uint64
+ var x176 uint64
+ x175, x176 = bits.Add64(x154, x170, uint64(fiatScalarUint1(x174)))
+ var x177 uint64
+ var x178 uint64
+ x177, x178 = bits.Add64(x156, x162, uint64(fiatScalarUint1(x176)))
+ var x179 uint64
+ var x180 uint64
+ x179, x180 = bits.Add64(x158, x163, uint64(fiatScalarUint1(x178)))
+ x181 := (uint64(fiatScalarUint1(x180)) + uint64(fiatScalarUint1(x159)))
+ var x182 uint64
+ var x183 uint64
+ x182, x183 = bits.Sub64(x173, 0x5812631a5cf5d3ed, uint64(0x0))
+ var x184 uint64
+ var x185 uint64
+ x184, x185 = bits.Sub64(x175, 0x14def9dea2f79cd6, uint64(fiatScalarUint1(x183)))
+ var x186 uint64
+ var x187 uint64
+ x186, x187 = bits.Sub64(x177, uint64(0x0), uint64(fiatScalarUint1(x185)))
+ var x188 uint64
+ var x189 uint64
+ x188, x189 = bits.Sub64(x179, 0x1000000000000000, uint64(fiatScalarUint1(x187)))
+ var x191 uint64
+ _, x191 = bits.Sub64(x181, uint64(0x0), uint64(fiatScalarUint1(x189)))
+ var x192 uint64
+ fiatScalarCmovznzU64(&x192, fiatScalarUint1(x191), x182, x173)
+ var x193 uint64
+ fiatScalarCmovznzU64(&x193, fiatScalarUint1(x191), x184, x175)
+ var x194 uint64
+ fiatScalarCmovznzU64(&x194, fiatScalarUint1(x191), x186, x177)
+ var x195 uint64
+ fiatScalarCmovznzU64(&x195, fiatScalarUint1(x191), x188, x179)
+ out1[0] = x192
+ out1[1] = x193
+ out1[2] = x194
+ out1[3] = x195
+}
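
Each reduction round multiplies the running low limb by 0xd2b51da312547e1b before folding in multiples of m's limbs (0x5812631a5cf5d3ed, 0x14def9dea2f79cd6, 0, 0x1000000000000000). In word-by-word Montgomery reduction that constant should be -m⁻¹ mod 2^64; a quick math/big check of that reading (a sketch, not part of the generated code):

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	m, _ := new(big.Int).SetString(
		"1000000000000000000000000000000014def9dea2f79cd65812631a5cf5d3ed", 16)
	w := new(big.Int).Lsh(big.NewInt(1), 64) // word size 2^64

	mInv := new(big.Int).ModInverse(m, w)
	mPrime := new(big.Int).Sub(w, mInv) // -m⁻¹ mod 2^64

	fmt.Printf("%#x\n", mPrime) // expected: 0xd2b51da312547e1b
}
```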
+
+// fiatScalarAdd adds two field elements in the Montgomery domain.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+// 0 ≤ eval arg2 < m
+//
+// Postconditions:
+//
+// eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) + eval (from_montgomery arg2)) mod m
+// 0 ≤ eval out1 < m
+func fiatScalarAdd(out1 *fiatScalarMontgomeryDomainFieldElement, arg1 *fiatScalarMontgomeryDomainFieldElement, arg2 *fiatScalarMontgomeryDomainFieldElement) {
+ var x1 uint64
+ var x2 uint64
+ x1, x2 = bits.Add64(arg1[0], arg2[0], uint64(0x0))
+ var x3 uint64
+ var x4 uint64
+ x3, x4 = bits.Add64(arg1[1], arg2[1], uint64(fiatScalarUint1(x2)))
+ var x5 uint64
+ var x6 uint64
+ x5, x6 = bits.Add64(arg1[2], arg2[2], uint64(fiatScalarUint1(x4)))
+ var x7 uint64
+ var x8 uint64
+ x7, x8 = bits.Add64(arg1[3], arg2[3], uint64(fiatScalarUint1(x6)))
+ var x9 uint64
+ var x10 uint64
+ x9, x10 = bits.Sub64(x1, 0x5812631a5cf5d3ed, uint64(0x0))
+ var x11 uint64
+ var x12 uint64
+ x11, x12 = bits.Sub64(x3, 0x14def9dea2f79cd6, uint64(fiatScalarUint1(x10)))
+ var x13 uint64
+ var x14 uint64
+ x13, x14 = bits.Sub64(x5, uint64(0x0), uint64(fiatScalarUint1(x12)))
+ var x15 uint64
+ var x16 uint64
+ x15, x16 = bits.Sub64(x7, 0x1000000000000000, uint64(fiatScalarUint1(x14)))
+ var x18 uint64
+ _, x18 = bits.Sub64(uint64(fiatScalarUint1(x8)), uint64(0x0), uint64(fiatScalarUint1(x16)))
+ var x19 uint64
+ fiatScalarCmovznzU64(&x19, fiatScalarUint1(x18), x9, x1)
+ var x20 uint64
+ fiatScalarCmovznzU64(&x20, fiatScalarUint1(x18), x11, x3)
+ var x21 uint64
+ fiatScalarCmovznzU64(&x21, fiatScalarUint1(x18), x13, x5)
+ var x22 uint64
+ fiatScalarCmovznzU64(&x22, fiatScalarUint1(x18), x15, x7)
+ out1[0] = x19
+ out1[1] = x20
+ out1[2] = x21
+ out1[3] = x22
+}
+
+// fiatScalarSub subtracts two field elements in the Montgomery domain.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+// 0 ≤ eval arg2 < m
+//
+// Postconditions:
+//
+// eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) - eval (from_montgomery arg2)) mod m
+// 0 ≤ eval out1 < m
+func fiatScalarSub(out1 *fiatScalarMontgomeryDomainFieldElement, arg1 *fiatScalarMontgomeryDomainFieldElement, arg2 *fiatScalarMontgomeryDomainFieldElement) {
+ var x1 uint64
+ var x2 uint64
+ x1, x2 = bits.Sub64(arg1[0], arg2[0], uint64(0x0))
+ var x3 uint64
+ var x4 uint64
+ x3, x4 = bits.Sub64(arg1[1], arg2[1], uint64(fiatScalarUint1(x2)))
+ var x5 uint64
+ var x6 uint64
+ x5, x6 = bits.Sub64(arg1[2], arg2[2], uint64(fiatScalarUint1(x4)))
+ var x7 uint64
+ var x8 uint64
+ x7, x8 = bits.Sub64(arg1[3], arg2[3], uint64(fiatScalarUint1(x6)))
+ var x9 uint64
+ fiatScalarCmovznzU64(&x9, fiatScalarUint1(x8), uint64(0x0), 0xffffffffffffffff)
+ var x10 uint64
+ var x11 uint64
+ x10, x11 = bits.Add64(x1, (x9 & 0x5812631a5cf5d3ed), uint64(0x0))
+ var x12 uint64
+ var x13 uint64
+ x12, x13 = bits.Add64(x3, (x9 & 0x14def9dea2f79cd6), uint64(fiatScalarUint1(x11)))
+ var x14 uint64
+ var x15 uint64
+ x14, x15 = bits.Add64(x5, uint64(0x0), uint64(fiatScalarUint1(x13)))
+ var x16 uint64
+ x16, _ = bits.Add64(x7, (x9 & 0x1000000000000000), uint64(fiatScalarUint1(x15)))
+ out1[0] = x10
+ out1[1] = x12
+ out1[2] = x14
+ out1[3] = x16
+}
+
+// fiatScalarOpp negates a field element in the Montgomery domain.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+//
+// Postconditions:
+//
+// eval (from_montgomery out1) mod m = -eval (from_montgomery arg1) mod m
+// 0 ≤ eval out1 < m
+func fiatScalarOpp(out1 *fiatScalarMontgomeryDomainFieldElement, arg1 *fiatScalarMontgomeryDomainFieldElement) {
+ var x1 uint64
+ var x2 uint64
+ x1, x2 = bits.Sub64(uint64(0x0), arg1[0], uint64(0x0))
+ var x3 uint64
+ var x4 uint64
+ x3, x4 = bits.Sub64(uint64(0x0), arg1[1], uint64(fiatScalarUint1(x2)))
+ var x5 uint64
+ var x6 uint64
+ x5, x6 = bits.Sub64(uint64(0x0), arg1[2], uint64(fiatScalarUint1(x4)))
+ var x7 uint64
+ var x8 uint64
+ x7, x8 = bits.Sub64(uint64(0x0), arg1[3], uint64(fiatScalarUint1(x6)))
+ var x9 uint64
+ fiatScalarCmovznzU64(&x9, fiatScalarUint1(x8), uint64(0x0), 0xffffffffffffffff)
+ var x10 uint64
+ var x11 uint64
+ x10, x11 = bits.Add64(x1, (x9 & 0x5812631a5cf5d3ed), uint64(0x0))
+ var x12 uint64
+ var x13 uint64
+ x12, x13 = bits.Add64(x3, (x9 & 0x14def9dea2f79cd6), uint64(fiatScalarUint1(x11)))
+ var x14 uint64
+ var x15 uint64
+ x14, x15 = bits.Add64(x5, uint64(0x0), uint64(fiatScalarUint1(x13)))
+ var x16 uint64
+ x16, _ = bits.Add64(x7, (x9 & 0x1000000000000000), uint64(fiatScalarUint1(x15)))
+ out1[0] = x10
+ out1[1] = x12
+ out1[2] = x14
+ out1[3] = x16
+}
+
+// fiatScalarNonzero outputs a single non-zero word if the input is non-zero and zero otherwise.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+//
+// Postconditions:
+//
+// out1 = 0 ↔ eval (from_montgomery arg1) mod m = 0
+//
+// Input Bounds:
+//
+// arg1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
+//
+// Output Bounds:
+//
+// out1: [0x0 ~> 0xffffffffffffffff]
+func fiatScalarNonzero(out1 *uint64, arg1 *[4]uint64) {
+ x1 := (arg1[0] | (arg1[1] | (arg1[2] | arg1[3])))
+ *out1 = x1
+}
+
+// fiatScalarFromMontgomery translates a field element out of the Montgomery domain.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+//
+// Postconditions:
+//
+// eval out1 mod m = (eval arg1 * ((2^64)⁻¹ mod m)^4) mod m
+// 0 ≤ eval out1 < m
+func fiatScalarFromMontgomery(out1 *fiatScalarNonMontgomeryDomainFieldElement, arg1 *fiatScalarMontgomeryDomainFieldElement) {
+ x1 := arg1[0]
+ var x2 uint64
+ _, x2 = bits.Mul64(x1, 0xd2b51da312547e1b)
+ var x4 uint64
+ var x5 uint64
+ x5, x4 = bits.Mul64(x2, 0x1000000000000000)
+ var x6 uint64
+ var x7 uint64
+ x7, x6 = bits.Mul64(x2, 0x14def9dea2f79cd6)
+ var x8 uint64
+ var x9 uint64
+ x9, x8 = bits.Mul64(x2, 0x5812631a5cf5d3ed)
+ var x10 uint64
+ var x11 uint64
+ x10, x11 = bits.Add64(x9, x6, uint64(0x0))
+ var x13 uint64
+ _, x13 = bits.Add64(x1, x8, uint64(0x0))
+ var x14 uint64
+ var x15 uint64
+ x14, x15 = bits.Add64(uint64(0x0), x10, uint64(fiatScalarUint1(x13)))
+ var x16 uint64
+ var x17 uint64
+ x16, x17 = bits.Add64(x14, arg1[1], uint64(0x0))
+ var x18 uint64
+ _, x18 = bits.Mul64(x16, 0xd2b51da312547e1b)
+ var x20 uint64
+ var x21 uint64
+ x21, x20 = bits.Mul64(x18, 0x1000000000000000)
+ var x22 uint64
+ var x23 uint64
+ x23, x22 = bits.Mul64(x18, 0x14def9dea2f79cd6)
+ var x24 uint64
+ var x25 uint64
+ x25, x24 = bits.Mul64(x18, 0x5812631a5cf5d3ed)
+ var x26 uint64
+ var x27 uint64
+ x26, x27 = bits.Add64(x25, x22, uint64(0x0))
+ var x29 uint64
+ _, x29 = bits.Add64(x16, x24, uint64(0x0))
+ var x30 uint64
+ var x31 uint64
+ x30, x31 = bits.Add64((uint64(fiatScalarUint1(x17)) + (uint64(fiatScalarUint1(x15)) + (uint64(fiatScalarUint1(x11)) + x7))), x26, uint64(fiatScalarUint1(x29)))
+ var x32 uint64
+ var x33 uint64
+ x32, x33 = bits.Add64(x4, (uint64(fiatScalarUint1(x27)) + x23), uint64(fiatScalarUint1(x31)))
+ var x34 uint64
+ var x35 uint64
+ x34, x35 = bits.Add64(x5, x20, uint64(fiatScalarUint1(x33)))
+ var x36 uint64
+ var x37 uint64
+ x36, x37 = bits.Add64(x30, arg1[2], uint64(0x0))
+ var x38 uint64
+ var x39 uint64
+ x38, x39 = bits.Add64(x32, uint64(0x0), uint64(fiatScalarUint1(x37)))
+ var x40 uint64
+ var x41 uint64
+ x40, x41 = bits.Add64(x34, uint64(0x0), uint64(fiatScalarUint1(x39)))
+ var x42 uint64
+ _, x42 = bits.Mul64(x36, 0xd2b51da312547e1b)
+ var x44 uint64
+ var x45 uint64
+ x45, x44 = bits.Mul64(x42, 0x1000000000000000)
+ var x46 uint64
+ var x47 uint64
+ x47, x46 = bits.Mul64(x42, 0x14def9dea2f79cd6)
+ var x48 uint64
+ var x49 uint64
+ x49, x48 = bits.Mul64(x42, 0x5812631a5cf5d3ed)
+ var x50 uint64
+ var x51 uint64
+ x50, x51 = bits.Add64(x49, x46, uint64(0x0))
+ var x53 uint64
+ _, x53 = bits.Add64(x36, x48, uint64(0x0))
+ var x54 uint64
+ var x55 uint64
+ x54, x55 = bits.Add64(x38, x50, uint64(fiatScalarUint1(x53)))
+ var x56 uint64
+ var x57 uint64
+ x56, x57 = bits.Add64(x40, (uint64(fiatScalarUint1(x51)) + x47), uint64(fiatScalarUint1(x55)))
+ var x58 uint64
+ var x59 uint64
+ x58, x59 = bits.Add64((uint64(fiatScalarUint1(x41)) + (uint64(fiatScalarUint1(x35)) + x21)), x44, uint64(fiatScalarUint1(x57)))
+ var x60 uint64
+ var x61 uint64
+ x60, x61 = bits.Add64(x54, arg1[3], uint64(0x0))
+ var x62 uint64
+ var x63 uint64
+ x62, x63 = bits.Add64(x56, uint64(0x0), uint64(fiatScalarUint1(x61)))
+ var x64 uint64
+ var x65 uint64
+ x64, x65 = bits.Add64(x58, uint64(0x0), uint64(fiatScalarUint1(x63)))
+ var x66 uint64
+ _, x66 = bits.Mul64(x60, 0xd2b51da312547e1b)
+ var x68 uint64
+ var x69 uint64
+ x69, x68 = bits.Mul64(x66, 0x1000000000000000)
+ var x70 uint64
+ var x71 uint64
+ x71, x70 = bits.Mul64(x66, 0x14def9dea2f79cd6)
+ var x72 uint64
+ var x73 uint64
+ x73, x72 = bits.Mul64(x66, 0x5812631a5cf5d3ed)
+ var x74 uint64
+ var x75 uint64
+ x74, x75 = bits.Add64(x73, x70, uint64(0x0))
+ var x77 uint64
+ _, x77 = bits.Add64(x60, x72, uint64(0x0))
+ var x78 uint64
+ var x79 uint64
+ x78, x79 = bits.Add64(x62, x74, uint64(fiatScalarUint1(x77)))
+ var x80 uint64
+ var x81 uint64
+ x80, x81 = bits.Add64(x64, (uint64(fiatScalarUint1(x75)) + x71), uint64(fiatScalarUint1(x79)))
+ var x82 uint64
+ var x83 uint64
+ x82, x83 = bits.Add64((uint64(fiatScalarUint1(x65)) + (uint64(fiatScalarUint1(x59)) + x45)), x68, uint64(fiatScalarUint1(x81)))
+ x84 := (uint64(fiatScalarUint1(x83)) + x69)
+ var x85 uint64
+ var x86 uint64
+ x85, x86 = bits.Sub64(x78, 0x5812631a5cf5d3ed, uint64(0x0))
+ var x87 uint64
+ var x88 uint64
+ x87, x88 = bits.Sub64(x80, 0x14def9dea2f79cd6, uint64(fiatScalarUint1(x86)))
+ var x89 uint64
+ var x90 uint64
+ x89, x90 = bits.Sub64(x82, uint64(0x0), uint64(fiatScalarUint1(x88)))
+ var x91 uint64
+ var x92 uint64
+ x91, x92 = bits.Sub64(x84, 0x1000000000000000, uint64(fiatScalarUint1(x90)))
+ var x94 uint64
+ _, x94 = bits.Sub64(uint64(0x0), uint64(0x0), uint64(fiatScalarUint1(x92)))
+ var x95 uint64
+ fiatScalarCmovznzU64(&x95, fiatScalarUint1(x94), x85, x78)
+ var x96 uint64
+ fiatScalarCmovznzU64(&x96, fiatScalarUint1(x94), x87, x80)
+ var x97 uint64
+ fiatScalarCmovznzU64(&x97, fiatScalarUint1(x94), x89, x82)
+ var x98 uint64
+ fiatScalarCmovznzU64(&x98, fiatScalarUint1(x94), x91, x84)
+ out1[0] = x95
+ out1[1] = x96
+ out1[2] = x97
+ out1[3] = x98
+}
+
+// fiatScalarToMontgomery translates a field element into the Montgomery domain.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+//
+// Postconditions:
+//
+// eval (from_montgomery out1) mod m = eval arg1 mod m
+// 0 ≤ eval out1 < m
+func fiatScalarToMontgomery(out1 *fiatScalarMontgomeryDomainFieldElement, arg1 *fiatScalarNonMontgomeryDomainFieldElement) {
+ x1 := arg1[1]
+ x2 := arg1[2]
+ x3 := arg1[3]
+ x4 := arg1[0]
+ var x5 uint64
+ var x6 uint64
+ x6, x5 = bits.Mul64(x4, 0x399411b7c309a3d)
+ var x7 uint64
+ var x8 uint64
+ x8, x7 = bits.Mul64(x4, 0xceec73d217f5be65)
+ var x9 uint64
+ var x10 uint64
+ x10, x9 = bits.Mul64(x4, 0xd00e1ba768859347)
+ var x11 uint64
+ var x12 uint64
+ x12, x11 = bits.Mul64(x4, 0xa40611e3449c0f01)
+ var x13 uint64
+ var x14 uint64
+ x13, x14 = bits.Add64(x12, x9, uint64(0x0))
+ var x15 uint64
+ var x16 uint64
+ x15, x16 = bits.Add64(x10, x7, uint64(fiatScalarUint1(x14)))
+ var x17 uint64
+ var x18 uint64
+ x17, x18 = bits.Add64(x8, x5, uint64(fiatScalarUint1(x16)))
+ var x19 uint64
+ _, x19 = bits.Mul64(x11, 0xd2b51da312547e1b)
+ var x21 uint64
+ var x22 uint64
+ x22, x21 = bits.Mul64(x19, 0x1000000000000000)
+ var x23 uint64
+ var x24 uint64
+ x24, x23 = bits.Mul64(x19, 0x14def9dea2f79cd6)
+ var x25 uint64
+ var x26 uint64
+ x26, x25 = bits.Mul64(x19, 0x5812631a5cf5d3ed)
+ var x27 uint64
+ var x28 uint64
+ x27, x28 = bits.Add64(x26, x23, uint64(0x0))
+ var x30 uint64
+ _, x30 = bits.Add64(x11, x25, uint64(0x0))
+ var x31 uint64
+ var x32 uint64
+ x31, x32 = bits.Add64(x13, x27, uint64(fiatScalarUint1(x30)))
+ var x33 uint64
+ var x34 uint64
+ x33, x34 = bits.Add64(x15, (uint64(fiatScalarUint1(x28)) + x24), uint64(fiatScalarUint1(x32)))
+ var x35 uint64
+ var x36 uint64
+ x35, x36 = bits.Add64(x17, x21, uint64(fiatScalarUint1(x34)))
+ var x37 uint64
+ var x38 uint64
+ x38, x37 = bits.Mul64(x1, 0x399411b7c309a3d)
+ var x39 uint64
+ var x40 uint64
+ x40, x39 = bits.Mul64(x1, 0xceec73d217f5be65)
+ var x41 uint64
+ var x42 uint64
+ x42, x41 = bits.Mul64(x1, 0xd00e1ba768859347)
+ var x43 uint64
+ var x44 uint64
+ x44, x43 = bits.Mul64(x1, 0xa40611e3449c0f01)
+ var x45 uint64
+ var x46 uint64
+ x45, x46 = bits.Add64(x44, x41, uint64(0x0))
+ var x47 uint64
+ var x48 uint64
+ x47, x48 = bits.Add64(x42, x39, uint64(fiatScalarUint1(x46)))
+ var x49 uint64
+ var x50 uint64
+ x49, x50 = bits.Add64(x40, x37, uint64(fiatScalarUint1(x48)))
+ var x51 uint64
+ var x52 uint64
+ x51, x52 = bits.Add64(x31, x43, uint64(0x0))
+ var x53 uint64
+ var x54 uint64
+ x53, x54 = bits.Add64(x33, x45, uint64(fiatScalarUint1(x52)))
+ var x55 uint64
+ var x56 uint64
+ x55, x56 = bits.Add64(x35, x47, uint64(fiatScalarUint1(x54)))
+ var x57 uint64
+ var x58 uint64
+ x57, x58 = bits.Add64(((uint64(fiatScalarUint1(x36)) + (uint64(fiatScalarUint1(x18)) + x6)) + x22), x49, uint64(fiatScalarUint1(x56)))
+ var x59 uint64
+ _, x59 = bits.Mul64(x51, 0xd2b51da312547e1b)
+ var x61 uint64
+ var x62 uint64
+ x62, x61 = bits.Mul64(x59, 0x1000000000000000)
+ var x63 uint64
+ var x64 uint64
+ x64, x63 = bits.Mul64(x59, 0x14def9dea2f79cd6)
+ var x65 uint64
+ var x66 uint64
+ x66, x65 = bits.Mul64(x59, 0x5812631a5cf5d3ed)
+ var x67 uint64
+ var x68 uint64
+ x67, x68 = bits.Add64(x66, x63, uint64(0x0))
+ var x70 uint64
+ _, x70 = bits.Add64(x51, x65, uint64(0x0))
+ var x71 uint64
+ var x72 uint64
+ x71, x72 = bits.Add64(x53, x67, uint64(fiatScalarUint1(x70)))
+ var x73 uint64
+ var x74 uint64
+ x73, x74 = bits.Add64(x55, (uint64(fiatScalarUint1(x68)) + x64), uint64(fiatScalarUint1(x72)))
+ var x75 uint64
+ var x76 uint64
+ x75, x76 = bits.Add64(x57, x61, uint64(fiatScalarUint1(x74)))
+ var x77 uint64
+ var x78 uint64
+ x78, x77 = bits.Mul64(x2, 0x399411b7c309a3d)
+ var x79 uint64
+ var x80 uint64
+ x80, x79 = bits.Mul64(x2, 0xceec73d217f5be65)
+ var x81 uint64
+ var x82 uint64
+ x82, x81 = bits.Mul64(x2, 0xd00e1ba768859347)
+ var x83 uint64
+ var x84 uint64
+ x84, x83 = bits.Mul64(x2, 0xa40611e3449c0f01)
+ var x85 uint64
+ var x86 uint64
+ x85, x86 = bits.Add64(x84, x81, uint64(0x0))
+ var x87 uint64
+ var x88 uint64
+ x87, x88 = bits.Add64(x82, x79, uint64(fiatScalarUint1(x86)))
+ var x89 uint64
+ var x90 uint64
+ x89, x90 = bits.Add64(x80, x77, uint64(fiatScalarUint1(x88)))
+ var x91 uint64
+ var x92 uint64
+ x91, x92 = bits.Add64(x71, x83, uint64(0x0))
+ var x93 uint64
+ var x94 uint64
+ x93, x94 = bits.Add64(x73, x85, uint64(fiatScalarUint1(x92)))
+ var x95 uint64
+ var x96 uint64
+ x95, x96 = bits.Add64(x75, x87, uint64(fiatScalarUint1(x94)))
+ var x97 uint64
+ var x98 uint64
+ x97, x98 = bits.Add64(((uint64(fiatScalarUint1(x76)) + (uint64(fiatScalarUint1(x58)) + (uint64(fiatScalarUint1(x50)) + x38))) + x62), x89, uint64(fiatScalarUint1(x96)))
+ var x99 uint64
+ _, x99 = bits.Mul64(x91, 0xd2b51da312547e1b)
+ var x101 uint64
+ var x102 uint64
+ x102, x101 = bits.Mul64(x99, 0x1000000000000000)
+ var x103 uint64
+ var x104 uint64
+ x104, x103 = bits.Mul64(x99, 0x14def9dea2f79cd6)
+ var x105 uint64
+ var x106 uint64
+ x106, x105 = bits.Mul64(x99, 0x5812631a5cf5d3ed)
+ var x107 uint64
+ var x108 uint64
+ x107, x108 = bits.Add64(x106, x103, uint64(0x0))
+ var x110 uint64
+ _, x110 = bits.Add64(x91, x105, uint64(0x0))
+ var x111 uint64
+ var x112 uint64
+ x111, x112 = bits.Add64(x93, x107, uint64(fiatScalarUint1(x110)))
+ var x113 uint64
+ var x114 uint64
+ x113, x114 = bits.Add64(x95, (uint64(fiatScalarUint1(x108)) + x104), uint64(fiatScalarUint1(x112)))
+ var x115 uint64
+ var x116 uint64
+ x115, x116 = bits.Add64(x97, x101, uint64(fiatScalarUint1(x114)))
+ var x117 uint64
+ var x118 uint64
+ x118, x117 = bits.Mul64(x3, 0x399411b7c309a3d)
+ var x119 uint64
+ var x120 uint64
+ x120, x119 = bits.Mul64(x3, 0xceec73d217f5be65)
+ var x121 uint64
+ var x122 uint64
+ x122, x121 = bits.Mul64(x3, 0xd00e1ba768859347)
+ var x123 uint64
+ var x124 uint64
+ x124, x123 = bits.Mul64(x3, 0xa40611e3449c0f01)
+ var x125 uint64
+ var x126 uint64
+ x125, x126 = bits.Add64(x124, x121, uint64(0x0))
+ var x127 uint64
+ var x128 uint64
+ x127, x128 = bits.Add64(x122, x119, uint64(fiatScalarUint1(x126)))
+ var x129 uint64
+ var x130 uint64
+ x129, x130 = bits.Add64(x120, x117, uint64(fiatScalarUint1(x128)))
+ var x131 uint64
+ var x132 uint64
+ x131, x132 = bits.Add64(x111, x123, uint64(0x0))
+ var x133 uint64
+ var x134 uint64
+ x133, x134 = bits.Add64(x113, x125, uint64(fiatScalarUint1(x132)))
+ var x135 uint64
+ var x136 uint64
+ x135, x136 = bits.Add64(x115, x127, uint64(fiatScalarUint1(x134)))
+ var x137 uint64
+ var x138 uint64
+ x137, x138 = bits.Add64(((uint64(fiatScalarUint1(x116)) + (uint64(fiatScalarUint1(x98)) + (uint64(fiatScalarUint1(x90)) + x78))) + x102), x129, uint64(fiatScalarUint1(x136)))
+ var x139 uint64
+ _, x139 = bits.Mul64(x131, 0xd2b51da312547e1b)
+ var x141 uint64
+ var x142 uint64
+ x142, x141 = bits.Mul64(x139, 0x1000000000000000)
+ var x143 uint64
+ var x144 uint64
+ x144, x143 = bits.Mul64(x139, 0x14def9dea2f79cd6)
+ var x145 uint64
+ var x146 uint64
+ x146, x145 = bits.Mul64(x139, 0x5812631a5cf5d3ed)
+ var x147 uint64
+ var x148 uint64
+ x147, x148 = bits.Add64(x146, x143, uint64(0x0))
+ var x150 uint64
+ _, x150 = bits.Add64(x131, x145, uint64(0x0))
+ var x151 uint64
+ var x152 uint64
+ x151, x152 = bits.Add64(x133, x147, uint64(fiatScalarUint1(x150)))
+ var x153 uint64
+ var x154 uint64
+ x153, x154 = bits.Add64(x135, (uint64(fiatScalarUint1(x148)) + x144), uint64(fiatScalarUint1(x152)))
+ var x155 uint64
+ var x156 uint64
+ x155, x156 = bits.Add64(x137, x141, uint64(fiatScalarUint1(x154)))
+ x157 := ((uint64(fiatScalarUint1(x156)) + (uint64(fiatScalarUint1(x138)) + (uint64(fiatScalarUint1(x130)) + x118))) + x142)
+ var x158 uint64
+ var x159 uint64
+ x158, x159 = bits.Sub64(x151, 0x5812631a5cf5d3ed, uint64(0x0))
+ var x160 uint64
+ var x161 uint64
+ x160, x161 = bits.Sub64(x153, 0x14def9dea2f79cd6, uint64(fiatScalarUint1(x159)))
+ var x162 uint64
+ var x163 uint64
+ x162, x163 = bits.Sub64(x155, uint64(0x0), uint64(fiatScalarUint1(x161)))
+ var x164 uint64
+ var x165 uint64
+ x164, x165 = bits.Sub64(x157, 0x1000000000000000, uint64(fiatScalarUint1(x163)))
+ var x167 uint64
+ _, x167 = bits.Sub64(uint64(0x0), uint64(0x0), uint64(fiatScalarUint1(x165)))
+ var x168 uint64
+ fiatScalarCmovznzU64(&x168, fiatScalarUint1(x167), x158, x151)
+ var x169 uint64
+ fiatScalarCmovznzU64(&x169, fiatScalarUint1(x167), x160, x153)
+ var x170 uint64
+ fiatScalarCmovznzU64(&x170, fiatScalarUint1(x167), x162, x155)
+ var x171 uint64
+ fiatScalarCmovznzU64(&x171, fiatScalarUint1(x167), x164, x157)
+ out1[0] = x168
+ out1[1] = x169
+ out1[2] = x170
+ out1[3] = x171
+}
+
+// fiatScalarToBytes serializes a field element NOT in the Montgomery domain to bytes in little-endian order.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+//
+// Postconditions:
+//
+// out1 = map (λ x, ⌊((eval arg1 mod m) mod 2^(8 * (x + 1))) / 2^(8 * x)⌋) [0..31]
+//
+// Input Bounds:
+//
+// arg1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0x1fffffffffffffff]]
+//
+// Output Bounds:
+//
+// out1: [[0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0x1f]]
+func fiatScalarToBytes(out1 *[32]uint8, arg1 *[4]uint64) {
+ x1 := arg1[3]
+ x2 := arg1[2]
+ x3 := arg1[1]
+ x4 := arg1[0]
+ x5 := (uint8(x4) & 0xff)
+ x6 := (x4 >> 8)
+ x7 := (uint8(x6) & 0xff)
+ x8 := (x6 >> 8)
+ x9 := (uint8(x8) & 0xff)
+ x10 := (x8 >> 8)
+ x11 := (uint8(x10) & 0xff)
+ x12 := (x10 >> 8)
+ x13 := (uint8(x12) & 0xff)
+ x14 := (x12 >> 8)
+ x15 := (uint8(x14) & 0xff)
+ x16 := (x14 >> 8)
+ x17 := (uint8(x16) & 0xff)
+ x18 := uint8((x16 >> 8))
+ x19 := (uint8(x3) & 0xff)
+ x20 := (x3 >> 8)
+ x21 := (uint8(x20) & 0xff)
+ x22 := (x20 >> 8)
+ x23 := (uint8(x22) & 0xff)
+ x24 := (x22 >> 8)
+ x25 := (uint8(x24) & 0xff)
+ x26 := (x24 >> 8)
+ x27 := (uint8(x26) & 0xff)
+ x28 := (x26 >> 8)
+ x29 := (uint8(x28) & 0xff)
+ x30 := (x28 >> 8)
+ x31 := (uint8(x30) & 0xff)
+ x32 := uint8((x30 >> 8))
+ x33 := (uint8(x2) & 0xff)
+ x34 := (x2 >> 8)
+ x35 := (uint8(x34) & 0xff)
+ x36 := (x34 >> 8)
+ x37 := (uint8(x36) & 0xff)
+ x38 := (x36 >> 8)
+ x39 := (uint8(x38) & 0xff)
+ x40 := (x38 >> 8)
+ x41 := (uint8(x40) & 0xff)
+ x42 := (x40 >> 8)
+ x43 := (uint8(x42) & 0xff)
+ x44 := (x42 >> 8)
+ x45 := (uint8(x44) & 0xff)
+ x46 := uint8((x44 >> 8))
+ x47 := (uint8(x1) & 0xff)
+ x48 := (x1 >> 8)
+ x49 := (uint8(x48) & 0xff)
+ x50 := (x48 >> 8)
+ x51 := (uint8(x50) & 0xff)
+ x52 := (x50 >> 8)
+ x53 := (uint8(x52) & 0xff)
+ x54 := (x52 >> 8)
+ x55 := (uint8(x54) & 0xff)
+ x56 := (x54 >> 8)
+ x57 := (uint8(x56) & 0xff)
+ x58 := (x56 >> 8)
+ x59 := (uint8(x58) & 0xff)
+ x60 := uint8((x58 >> 8))
+ out1[0] = x5
+ out1[1] = x7
+ out1[2] = x9
+ out1[3] = x11
+ out1[4] = x13
+ out1[5] = x15
+ out1[6] = x17
+ out1[7] = x18
+ out1[8] = x19
+ out1[9] = x21
+ out1[10] = x23
+ out1[11] = x25
+ out1[12] = x27
+ out1[13] = x29
+ out1[14] = x31
+ out1[15] = x32
+ out1[16] = x33
+ out1[17] = x35
+ out1[18] = x37
+ out1[19] = x39
+ out1[20] = x41
+ out1[21] = x43
+ out1[22] = x45
+ out1[23] = x46
+ out1[24] = x47
+ out1[25] = x49
+ out1[26] = x51
+ out1[27] = x53
+ out1[28] = x55
+ out1[29] = x57
+ out1[30] = x59
+ out1[31] = x60
+}
+
+// fiatScalarFromBytes deserializes a field element NOT in the Montgomery domain from bytes in little-endian order.
+//
+// Preconditions:
+//
+// 0 ≤ bytes_eval arg1 < m
+//
+// Postconditions:
+//
+// eval out1 mod m = bytes_eval arg1 mod m
+// 0 ≤ eval out1 < m
+//
+// Input Bounds:
+//
+// arg1: [[0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0x1f]]
+//
+// Output Bounds:
+//
+// out1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0x1fffffffffffffff]]
+func fiatScalarFromBytes(out1 *[4]uint64, arg1 *[32]uint8) {
+ x1 := (uint64(arg1[31]) << 56)
+ x2 := (uint64(arg1[30]) << 48)
+ x3 := (uint64(arg1[29]) << 40)
+ x4 := (uint64(arg1[28]) << 32)
+ x5 := (uint64(arg1[27]) << 24)
+ x6 := (uint64(arg1[26]) << 16)
+ x7 := (uint64(arg1[25]) << 8)
+ x8 := arg1[24]
+ x9 := (uint64(arg1[23]) << 56)
+ x10 := (uint64(arg1[22]) << 48)
+ x11 := (uint64(arg1[21]) << 40)
+ x12 := (uint64(arg1[20]) << 32)
+ x13 := (uint64(arg1[19]) << 24)
+ x14 := (uint64(arg1[18]) << 16)
+ x15 := (uint64(arg1[17]) << 8)
+ x16 := arg1[16]
+ x17 := (uint64(arg1[15]) << 56)
+ x18 := (uint64(arg1[14]) << 48)
+ x19 := (uint64(arg1[13]) << 40)
+ x20 := (uint64(arg1[12]) << 32)
+ x21 := (uint64(arg1[11]) << 24)
+ x22 := (uint64(arg1[10]) << 16)
+ x23 := (uint64(arg1[9]) << 8)
+ x24 := arg1[8]
+ x25 := (uint64(arg1[7]) << 56)
+ x26 := (uint64(arg1[6]) << 48)
+ x27 := (uint64(arg1[5]) << 40)
+ x28 := (uint64(arg1[4]) << 32)
+ x29 := (uint64(arg1[3]) << 24)
+ x30 := (uint64(arg1[2]) << 16)
+ x31 := (uint64(arg1[1]) << 8)
+ x32 := arg1[0]
+ x33 := (x31 + uint64(x32))
+ x34 := (x30 + x33)
+ x35 := (x29 + x34)
+ x36 := (x28 + x35)
+ x37 := (x27 + x36)
+ x38 := (x26 + x37)
+ x39 := (x25 + x38)
+ x40 := (x23 + uint64(x24))
+ x41 := (x22 + x40)
+ x42 := (x21 + x41)
+ x43 := (x20 + x42)
+ x44 := (x19 + x43)
+ x45 := (x18 + x44)
+ x46 := (x17 + x45)
+ x47 := (x15 + uint64(x16))
+ x48 := (x14 + x47)
+ x49 := (x13 + x48)
+ x50 := (x12 + x49)
+ x51 := (x11 + x50)
+ x52 := (x10 + x51)
+ x53 := (x9 + x52)
+ x54 := (x7 + uint64(x8))
+ x55 := (x6 + x54)
+ x56 := (x5 + x55)
+ x57 := (x4 + x56)
+ x58 := (x3 + x57)
+ x59 := (x2 + x58)
+ x60 := (x1 + x59)
+ out1[0] = x39
+ out1[1] = x46
+ out1[2] = x53
+ out1[3] = x60
+}
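
Stepping back from the generated limb arithmetic, the representation all of these functions share is easy to state with math/big: R = 2^256 (four 64-bit limbs), `to_montgomery` stores a·R mod m, and a Montgomery product of a·R and b·R yields (a·b)·R mod m. A sketch of that algebra (using big.Int division where the real code uses the constant-time reductions above):

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	m, _ := new(big.Int).SetString(
		"1000000000000000000000000000000014def9dea2f79cd65812631a5cf5d3ed", 16)
	R := new(big.Int).Lsh(big.NewInt(1), 256)
	Rinv := new(big.Int).ModInverse(R, m)

	a, b := big.NewInt(123456789), big.NewInt(987654321)

	aR := new(big.Int).Mod(new(big.Int).Mul(a, R), m) // to_montgomery(a)
	bR := new(big.Int).Mod(new(big.Int).Mul(b, R), m) // to_montgomery(b)

	// Montgomery product: aR·bR·R⁻¹ mod m == (a·b)·R mod m.
	abR := new(big.Int).Mul(aR, bR)
	abR.Mul(abR, Rinv).Mod(abR, m)

	// from_montgomery: multiply by R⁻¹ once more.
	ab := new(big.Int).Mul(abR, Rinv)
	ab.Mod(ab, m)

	fmt.Println(ab.Cmp(new(big.Int).Mul(a, b)) == 0) // true
}
```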
diff --git a/vendor/filippo.io/edwards25519/scalarmult.go b/vendor/filippo.io/edwards25519/scalarmult.go
new file mode 100644
index 00000000..f7ca3cef
--- /dev/null
+++ b/vendor/filippo.io/edwards25519/scalarmult.go
@@ -0,0 +1,214 @@
+// Copyright (c) 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package edwards25519
+
+import "sync"
+
+// basepointTable is a set of 32 affineLookupTables, where table i is generated
+// from 256i * basepoint. It is precomputed the first time it's used.
+func basepointTable() *[32]affineLookupTable {
+ basepointTablePrecomp.initOnce.Do(func() {
+ p := NewGeneratorPoint()
+ for i := 0; i < 32; i++ {
+ basepointTablePrecomp.table[i].FromP3(p)
+ for j := 0; j < 8; j++ {
+ p.Add(p, p)
+ }
+ }
+ })
+ return &basepointTablePrecomp.table
+}
+
+var basepointTablePrecomp struct {
+ table [32]affineLookupTable
+ initOnce sync.Once
+}
+
+// ScalarBaseMult sets v = x * B, where B is the canonical generator, and
+// returns v.
+//
+// The scalar multiplication is done in constant time.
+func (v *Point) ScalarBaseMult(x *Scalar) *Point {
+ basepointTable := basepointTable()
+
+ // Write x = sum(x_i * 16^i) so x*B = sum( B*x_i*16^i )
+ // as described in the Ed25519 paper
+ //
+ // Group even and odd coefficients
+ // x*B = x_0*16^0*B + x_2*16^2*B + ... + x_62*16^62*B
+ // + x_1*16^1*B + x_3*16^3*B + ... + x_63*16^63*B
+ // x*B = x_0*16^0*B + x_2*16^2*B + ... + x_62*16^62*B
+ // + 16*( x_1*16^0*B + x_3*16^2*B + ... + x_63*16^62*B)
+ //
+ // We use a lookup table for each i to get x_i*16^(2*i)*B
+ // and do four doublings to multiply by 16.
+ digits := x.signedRadix16()
+
+ multiple := &affineCached{}
+ tmp1 := &projP1xP1{}
+ tmp2 := &projP2{}
+
+ // Accumulate the odd components first
+ v.Set(NewIdentityPoint())
+ for i := 1; i < 64; i += 2 {
+ basepointTable[i/2].SelectInto(multiple, digits[i])
+ tmp1.AddAffine(v, multiple)
+ v.fromP1xP1(tmp1)
+ }
+
+ // Multiply by 16
+ tmp2.FromP3(v) // tmp2 = v in P2 coords
+ tmp1.Double(tmp2) // tmp1 = 2*v in P1xP1 coords
+ tmp2.FromP1xP1(tmp1) // tmp2 = 2*v in P2 coords
+ tmp1.Double(tmp2) // tmp1 = 4*v in P1xP1 coords
+ tmp2.FromP1xP1(tmp1) // tmp2 = 4*v in P2 coords
+ tmp1.Double(tmp2) // tmp1 = 8*v in P1xP1 coords
+ tmp2.FromP1xP1(tmp1) // tmp2 = 8*v in P2 coords
+ tmp1.Double(tmp2) // tmp1 = 16*v in P1xP1 coords
+ v.fromP1xP1(tmp1) // now v = 16*(odd components)
+
+ // Accumulate the even components
+ for i := 0; i < 64; i += 2 {
+ basepointTable[i/2].SelectInto(multiple, digits[i])
+ tmp1.AddAffine(v, multiple)
+ v.fromP1xP1(tmp1)
+ }
+
+ return v
+}
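
A quick consistency check through the module's public API (assuming the exported `NewScalar`; the point constructors appear above): the table-driven fixed-base path must agree with the generic `ScalarMult` applied to the generator.

```go
package main

import (
	"crypto/rand"
	"fmt"

	"filippo.io/edwards25519"
)

func main() {
	var wide [64]byte
	if _, err := rand.Read(wide[:]); err != nil {
		panic(err)
	}
	x, err := edwards25519.NewScalar().SetUniformBytes(wide[:])
	if err != nil {
		panic(err)
	}

	p1 := edwards25519.NewIdentityPoint().ScalarBaseMult(x)
	p2 := edwards25519.NewIdentityPoint().ScalarMult(x, edwards25519.NewGeneratorPoint())

	fmt.Println(p1.Equal(p2) == 1) // true
}
```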
+
+// ScalarMult sets v = x * q, and returns v.
+//
+// The scalar multiplication is done in constant time.
+func (v *Point) ScalarMult(x *Scalar, q *Point) *Point {
+ checkInitialized(q)
+
+ var table projLookupTable
+ table.FromP3(q)
+
+ // Write x = sum(x_i * 16^i)
+ // so x*Q = sum( Q*x_i*16^i )
+ // = Q*x_0 + 16*(Q*x_1 + 16*( ... + Q*x_63) ... )
+ // <------compute inside out---------
+ //
+ // We use the lookup table to get the x_i*Q values
+ // and do four doublings to compute 16*Q
+ digits := x.signedRadix16()
+
+ // Unwrap first loop iteration to save computing 16*identity
+ multiple := &projCached{}
+ tmp1 := &projP1xP1{}
+ tmp2 := &projP2{}
+ table.SelectInto(multiple, digits[63])
+
+ v.Set(NewIdentityPoint())
+ tmp1.Add(v, multiple) // tmp1 = x_63*Q in P1xP1 coords
+ for i := 62; i >= 0; i-- {
+ tmp2.FromP1xP1(tmp1) // tmp2 = (prev) in P2 coords
+ tmp1.Double(tmp2) // tmp1 = 2*(prev) in P1xP1 coords
+ tmp2.FromP1xP1(tmp1) // tmp2 = 2*(prev) in P2 coords
+ tmp1.Double(tmp2) // tmp1 = 4*(prev) in P1xP1 coords
+ tmp2.FromP1xP1(tmp1) // tmp2 = 4*(prev) in P2 coords
+ tmp1.Double(tmp2) // tmp1 = 8*(prev) in P1xP1 coords
+ tmp2.FromP1xP1(tmp1) // tmp2 = 8*(prev) in P2 coords
+ tmp1.Double(tmp2) // tmp1 = 16*(prev) in P1xP1 coords
+ v.fromP1xP1(tmp1) // v = 16*(prev) in P3 coords
+ table.SelectInto(multiple, digits[i])
+ tmp1.Add(v, multiple) // tmp1 = x_i*Q + 16*(prev) in P1xP1 coords
+ }
+ v.fromP1xP1(tmp1)
+ return v
+}
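
The "compute inside out" comment is Horner's rule in radix 16; in plain integers the same shape looks like this (toy digits, with the four doublings collapsed into one multiply by 16):

```go
package main

import "fmt"

func main() {
	digits := []int{3, 7, 11, 2} // n = 3 + 7·16 + 11·16² + 2·16³
	acc := 0
	for i := len(digits) - 1; i >= 0; i-- {
		acc = acc*16 + digits[i] // "multiply by 16", then add digit i
	}
	fmt.Println(acc) // 11123
}
```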
+
+// basepointNafTable is the nafLookupTable8 for the basepoint.
+// It is precomputed the first time it's used.
+func basepointNafTable() *nafLookupTable8 {
+ basepointNafTablePrecomp.initOnce.Do(func() {
+ basepointNafTablePrecomp.table.FromP3(NewGeneratorPoint())
+ })
+ return &basepointNafTablePrecomp.table
+}
+
+var basepointNafTablePrecomp struct {
+ table nafLookupTable8
+ initOnce sync.Once
+}
+
+// VarTimeDoubleScalarBaseMult sets v = a * A + b * B, where B is the canonical
+// generator, and returns v.
+//
+// Execution time depends on the inputs.
+func (v *Point) VarTimeDoubleScalarBaseMult(a *Scalar, A *Point, b *Scalar) *Point {
+ checkInitialized(A)
+
+ // Similarly to the single variable-base approach, we compute
+ // digits and use them with a lookup table. However, because
+ // we are allowed to do variable-time operations, we don't
+ // need constant-time lookups or constant-time digit
+ // computations.
+ //
+ // So we use a non-adjacent form of some width w instead of
+ // radix 16. This is like a binary representation (one digit
+ // for each binary place) but we allow the digits to grow in
+ // magnitude up to 2^{w-1} so that the nonzero digits are as
+ // sparse as possible. Intuitively, this "condenses" the
+ // "mass" of the scalar onto sparse coefficients (meaning
+ // fewer additions).
+
+ basepointNafTable := basepointNafTable()
+ var aTable nafLookupTable5
+ aTable.FromP3(A)
+ // Because the basepoint is fixed, we can use a wider NAF
+ // corresponding to a bigger table.
+ aNaf := a.nonAdjacentForm(5)
+ bNaf := b.nonAdjacentForm(8)
+
+ // Find the first nonzero coefficient.
+ i := 255
+ for j := i; j >= 0; j-- {
+ if aNaf[j] != 0 || bNaf[j] != 0 {
+ break
+ }
+ }
+
+ multA := &projCached{}
+ multB := &affineCached{}
+ tmp1 := &projP1xP1{}
+ tmp2 := &projP2{}
+ tmp2.Zero()
+
+ // Move from high to low bits, doubling the accumulator
+ // at each iteration and checking whether there is a nonzero
+ // coefficient to look up a multiple of.
+ for ; i >= 0; i-- {
+ tmp1.Double(tmp2)
+
+ // Only update v if we have a nonzero coeff to add in.
+ if aNaf[i] > 0 {
+ v.fromP1xP1(tmp1)
+ aTable.SelectInto(multA, aNaf[i])
+ tmp1.Add(v, multA)
+ } else if aNaf[i] < 0 {
+ v.fromP1xP1(tmp1)
+ aTable.SelectInto(multA, -aNaf[i])
+ tmp1.Sub(v, multA)
+ }
+
+ if bNaf[i] > 0 {
+ v.fromP1xP1(tmp1)
+ basepointNafTable.SelectInto(multB, bNaf[i])
+ tmp1.AddAffine(v, multB)
+ } else if bNaf[i] < 0 {
+ v.fromP1xP1(tmp1)
+ basepointNafTable.SelectInto(multB, -bNaf[i])
+ tmp1.SubAffine(v, multB)
+ }
+
+ tmp2.FromP1xP1(tmp1)
+ }
+
+ v.fromP2(tmp2)
+ return v
+}
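
One observation on the scan above: the "find the first nonzero coefficient" loop iterates over `j` but never writes it back to `i`, so the main loop always starts at 255. That is harmless (leading zero digits only double the identity) but means the intended skip-ahead never fires. Functionally the method can be sanity-checked against the two constant-time paths, a sketch via the public API (assuming the exported `NewScalar`):

```go
package main

import (
	"crypto/rand"
	"fmt"

	"filippo.io/edwards25519"
)

func randomScalar() *edwards25519.Scalar {
	var wide [64]byte
	if _, err := rand.Read(wide[:]); err != nil {
		panic(err)
	}
	s, err := edwards25519.NewScalar().SetUniformBytes(wide[:])
	if err != nil {
		panic(err)
	}
	return s
}

func main() {
	a, b := randomScalar(), randomScalar()
	A := edwards25519.NewIdentityPoint().ScalarBaseMult(randomScalar())

	got := edwards25519.NewIdentityPoint().VarTimeDoubleScalarBaseMult(a, A, b)

	aA := edwards25519.NewIdentityPoint().ScalarMult(a, A)
	bB := edwards25519.NewIdentityPoint().ScalarBaseMult(b)
	want := edwards25519.NewIdentityPoint().Add(aA, bB)

	fmt.Println(got.Equal(want) == 1) // true
}
```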
diff --git a/vendor/filippo.io/edwards25519/tables.go b/vendor/filippo.io/edwards25519/tables.go
new file mode 100644
index 00000000..83234bbc
--- /dev/null
+++ b/vendor/filippo.io/edwards25519/tables.go
@@ -0,0 +1,129 @@
+// Copyright (c) 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package edwards25519
+
+import (
+ "crypto/subtle"
+)
+
+// A dynamic lookup table for variable-base, constant-time scalar muls.
+type projLookupTable struct {
+ points [8]projCached
+}
+
+// A precomputed lookup table for fixed-base, constant-time scalar muls.
+type affineLookupTable struct {
+ points [8]affineCached
+}
+
+// A dynamic lookup table for variable-base, variable-time scalar muls.
+type nafLookupTable5 struct {
+ points [8]projCached
+}
+
+// A precomputed lookup table for fixed-base, variable-time scalar muls.
+type nafLookupTable8 struct {
+ points [64]affineCached
+}
+
+// Constructors.
+
+// Builds a lookup table at runtime. Fast.
+func (v *projLookupTable) FromP3(q *Point) {
+ // Goal: v.points[i] = (i+1)*Q, i.e., Q, 2Q, ..., 8Q
+ // This allows lookup of -8Q, ..., -Q, 0, Q, ..., 8Q
+ v.points[0].FromP3(q)
+ tmpP3 := Point{}
+ tmpP1xP1 := projP1xP1{}
+ for i := 0; i < 7; i++ {
+ // Compute (i+1)*Q as Q + i*Q and convert to a projCached
+ // This is needlessly complicated because the API has explicit
+ // receivers instead of creating stack objects and relying on RVO
+ v.points[i+1].FromP3(tmpP3.fromP1xP1(tmpP1xP1.Add(q, &v.points[i])))
+ }
+}
+
+// This is not optimised for speed; fixed-base tables should be precomputed.
+func (v *affineLookupTable) FromP3(q *Point) {
+ // Goal: v.points[i] = (i+1)*Q, i.e., Q, 2Q, ..., 8Q
+ // This allows lookup of -8Q, ..., -Q, 0, Q, ..., 8Q
+ v.points[0].FromP3(q)
+ tmpP3 := Point{}
+ tmpP1xP1 := projP1xP1{}
+ for i := 0; i < 7; i++ {
+ // Compute (i+1)*Q as Q + i*Q and convert to affineCached
+ v.points[i+1].FromP3(tmpP3.fromP1xP1(tmpP1xP1.AddAffine(q, &v.points[i])))
+ }
+}
+
+// Builds a lookup table at runtime. Fast.
+func (v *nafLookupTable5) FromP3(q *Point) {
+ // Goal: v.points[i] = (2*i+1)*Q, i.e., Q, 3Q, 5Q, ..., 15Q
+ // This allows lookup of -15Q, ..., -3Q, -Q, 0, Q, 3Q, ..., 15Q
+ v.points[0].FromP3(q)
+ q2 := Point{}
+ q2.Add(q, q)
+ tmpP3 := Point{}
+ tmpP1xP1 := projP1xP1{}
+ for i := 0; i < 7; i++ {
+ v.points[i+1].FromP3(tmpP3.fromP1xP1(tmpP1xP1.Add(&q2, &v.points[i])))
+ }
+}
+
+// This is not optimised for speed; fixed-base tables should be precomputed.
+func (v *nafLookupTable8) FromP3(q *Point) {
+ v.points[0].FromP3(q)
+ q2 := Point{}
+ q2.Add(q, q)
+ tmpP3 := Point{}
+ tmpP1xP1 := projP1xP1{}
+ for i := 0; i < 63; i++ {
+ v.points[i+1].FromP3(tmpP3.fromP1xP1(tmpP1xP1.AddAffine(&q2, &v.points[i])))
+ }
+}
+
+// Selectors.
+
+// Set dest to x*Q, where -8 <= x <= 8, in constant time.
+func (v *projLookupTable) SelectInto(dest *projCached, x int8) {
+ // Compute xabs = |x|
+ xmask := x >> 7
+ xabs := uint8((x + xmask) ^ xmask)
+
+ dest.Zero()
+ for j := 1; j <= 8; j++ {
+ // Set dest = j*Q if |x| = j
+ cond := subtle.ConstantTimeByteEq(xabs, uint8(j))
+ dest.Select(&v.points[j-1], dest, cond)
+ }
+ // Now dest = |x|*Q, conditionally negate to get x*Q
+ dest.CondNeg(int(xmask & 1))
+}
+
+// Set dest to x*Q, where -8 <= x <= 8, in constant time.
+func (v *affineLookupTable) SelectInto(dest *affineCached, x int8) {
+ // Compute xabs = |x|
+ xmask := x >> 7
+ xabs := uint8((x + xmask) ^ xmask)
+
+ dest.Zero()
+ for j := 1; j <= 8; j++ {
+ // Set dest = j*Q if |x| = j
+ cond := subtle.ConstantTimeByteEq(xabs, uint8(j))
+ dest.Select(&v.points[j-1], dest, cond)
+ }
+ // Now dest = |x|*Q, conditionally negate to get x*Q
+ dest.CondNeg(int(xmask & 1))
+}
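
Both constant-time selectors open with the same branchless absolute value: an arithmetic shift turns the sign into a 0 or all-ones mask, and `(x + xmask) ^ xmask` is x for non-negative inputs and -x otherwise. Isolated (`abs8` is a hypothetical name):

```go
package main

import "fmt"

// abs8 computes |x| without branching, as SelectInto does above.
func abs8(x int8) uint8 {
	xmask := x >> 7 // arithmetic shift: 0 for x >= 0, -1 for x < 0
	return uint8((x + xmask) ^ xmask)
}

func main() {
	fmt.Println(abs8(5), abs8(-5), abs8(-8)) // 5 5 8
}
```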
+
+// Given odd x with 0 < x < 2^4, return x*Q (in variable time).
+func (v *nafLookupTable5) SelectInto(dest *projCached, x int8) {
+ *dest = v.points[x/2]
+}
+
+// Given odd x with 0 < x < 2^7, return x*Q (in variable time).
+func (v *nafLookupTable8) SelectInto(dest *affineCached, x int8) {
+ *dest = v.points[x/2]
+}
diff --git a/vendor/github.com/BurntSushi/toml/README.md b/vendor/github.com/BurntSushi/toml/README.md
index 3651cfa9..235496ee 100644
--- a/vendor/github.com/BurntSushi/toml/README.md
+++ b/vendor/github.com/BurntSushi/toml/README.md
@@ -3,13 +3,13 @@ reflection interface similar to Go's standard library `json` and `xml` packages.
Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0).
-Documentation: https://godocs.io/github.com/BurntSushi/toml
+Documentation: https://pkg.go.dev/github.com/BurntSushi/toml
See the [releases page](https://github.com/BurntSushi/toml/releases) for a
changelog; this information is also in the git tag annotations (e.g. `git show
v0.4.0`).
-This library requires Go 1.13 or newer; add it to your go.mod with:
+This library requires Go 1.18 or newer; add it to your go.mod with:
% go get github.com/BurntSushi/toml@latest
diff --git a/vendor/github.com/BurntSushi/toml/decode.go b/vendor/github.com/BurntSushi/toml/decode.go
index 0ca1dc4f..3fa516ca 100644
--- a/vendor/github.com/BurntSushi/toml/decode.go
+++ b/vendor/github.com/BurntSushi/toml/decode.go
@@ -6,7 +6,7 @@ import (
"encoding/json"
"fmt"
"io"
- "io/ioutil"
+ "io/fs"
"math"
"os"
"reflect"
@@ -18,13 +18,13 @@ import (
// Unmarshaler is the interface implemented by objects that can unmarshal a
// TOML description of themselves.
type Unmarshaler interface {
- UnmarshalTOML(interface{}) error
+ UnmarshalTOML(any) error
}
// Unmarshal decodes the contents of data in TOML format into a pointer v.
//
// See [Decoder] for a description of the decoding process.
-func Unmarshal(data []byte, v interface{}) error {
+func Unmarshal(data []byte, v any) error {
_, err := NewDecoder(bytes.NewReader(data)).Decode(v)
return err
}
@@ -32,12 +32,12 @@ func Unmarshal(data []byte, v interface{}) error {
// Decode the TOML data in to the pointer v.
//
// See [Decoder] for a description of the decoding process.
-func Decode(data string, v interface{}) (MetaData, error) {
+func Decode(data string, v any) (MetaData, error) {
return NewDecoder(strings.NewReader(data)).Decode(v)
}
// DecodeFile reads the contents of a file and decodes it with [Decode].
-func DecodeFile(path string, v interface{}) (MetaData, error) {
+func DecodeFile(path string, v any) (MetaData, error) {
fp, err := os.Open(path)
if err != nil {
return MetaData{}, err
@@ -46,6 +46,17 @@ func DecodeFile(path string, v interface{}) (MetaData, error) {
return NewDecoder(fp).Decode(v)
}
+// DecodeFS reads the contents of a file from [fs.FS] and decodes it with
+// [Decode].
+func DecodeFS(fsys fs.FS, path string, v any) (MetaData, error) {
+ fp, err := fsys.Open(path)
+ if err != nil {
+ return MetaData{}, err
+ }
+ defer fp.Close()
+ return NewDecoder(fp).Decode(v)
+}
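
A usage sketch for the new `DecodeFS`, pairing it with an `embed.FS`; the `config.toml` file and the `Config` fields here are hypothetical:

```go
package main

import (
	"embed"
	"fmt"

	"github.com/BurntSushi/toml"
)

//go:embed config.toml
var configFS embed.FS

type Config struct {
	Name string
	Port int
}

func main() {
	var cfg Config
	if _, err := toml.DecodeFS(configFS, "config.toml", &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.Name, cfg.Port)
}
```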
+
// Primitive is a TOML value that hasn't been decoded into a Go value.
//
// This type can be used for any value, which will cause decoding to be delayed.
@@ -58,7 +69,7 @@ func DecodeFile(path string, v interface{}) (MetaData, error) {
// overhead of reflection. They can be useful when you don't know the exact type
// of TOML data until runtime.
type Primitive struct {
- undecoded interface{}
+ undecoded any
context Key
}
@@ -91,7 +102,7 @@ const (
// UnmarshalText method. See the Unmarshaler example for a demonstration with
// email addresses.
//
-// ### Key mapping
+// # Key mapping
//
// TOML keys can map to either keys in a Go map or field names in a Go struct.
// The special `toml` struct tag can be used to map TOML keys to struct fields
@@ -122,7 +133,7 @@ var (
)
// Decode TOML data in to the pointer `v`.
-func (dec *Decoder) Decode(v interface{}) (MetaData, error) {
+func (dec *Decoder) Decode(v any) (MetaData, error) {
rv := reflect.ValueOf(v)
if rv.Kind() != reflect.Ptr {
s := "%q"
@@ -136,8 +147,8 @@ func (dec *Decoder) Decode(v interface{}) (MetaData, error) {
return MetaData{}, fmt.Errorf("toml: cannot decode to nil value of %q", reflect.TypeOf(v))
}
- // Check if this is a supported type: struct, map, interface{}, or something
- // that implements UnmarshalTOML or UnmarshalText.
+ // Check if this is a supported type: struct, map, any, or something that
+ // implements UnmarshalTOML or UnmarshalText.
rv = indirect(rv)
rt := rv.Type()
if rv.Kind() != reflect.Struct && rv.Kind() != reflect.Map &&
@@ -148,7 +159,7 @@ func (dec *Decoder) Decode(v interface{}) (MetaData, error) {
// TODO: parser should read from io.Reader? Or at the very least, make it
// read from []byte rather than string
- data, err := ioutil.ReadAll(dec.r)
+ data, err := io.ReadAll(dec.r)
if err != nil {
return MetaData{}, err
}
@@ -179,18 +190,31 @@ func (dec *Decoder) Decode(v interface{}) (MetaData, error) {
// will only reflect keys that were decoded. Namely, any keys hidden behind a
// Primitive will be considered undecoded. Executing this method will update the
// undecoded keys in the meta data. (See the example.)
-func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {
+func (md *MetaData) PrimitiveDecode(primValue Primitive, v any) error {
md.context = primValue.context
defer func() { md.context = nil }()
return md.unify(primValue.undecoded, rvalue(v))
}
+// markDecodedRecursive is a helper to mark any key under the given tmap as
+// decoded, recursing as needed
+func markDecodedRecursive(md *MetaData, tmap map[string]any) {
+ for key := range tmap {
+ md.decoded[md.context.add(key).String()] = struct{}{}
+ if tmap, ok := tmap[key].(map[string]any); ok {
+ md.context = append(md.context, key)
+ markDecodedRecursive(md, tmap)
+ md.context = md.context[0 : len(md.context)-1]
+ }
+ }
+}
+
// unify performs a sort of type unification based on the structure of `rv`,
// which is the client representation.
//
// Any type mismatch produces an error. Finding a type that we don't know
// how to handle produces an unsupported type error.
-func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
+func (md *MetaData) unify(data any, rv reflect.Value) error {
// Special case. Look for a `Primitive` value.
// TODO: #76 would make this superfluous after implemented.
if rv.Type() == primitiveType {
@@ -207,7 +231,21 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
rvi := rv.Interface()
if v, ok := rvi.(Unmarshaler); ok {
- return v.UnmarshalTOML(data)
+ err := v.UnmarshalTOML(data)
+ if err != nil {
+ return md.parseErr(err)
+ }
+ // Assume the Unmarshaler decoded everything, so mark all keys under
+ // this table as decoded.
+ if tmap, ok := data.(map[string]any); ok {
+ markDecodedRecursive(md, tmap)
+ }
+ if aot, ok := data.([]map[string]any); ok {
+ for _, tmap := range aot {
+ markDecodedRecursive(md, tmap)
+ }
+ }
+ return nil
}
if v, ok := rvi.(encoding.TextUnmarshaler); ok {
return md.unifyText(data, v)
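The practical effect of the change above, sketched below: keys consumed by a custom Unmarshaler no longer show up in MetaData.Undecoded(). The port type is made up for illustration.

package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

type port struct{ n int64 }

// UnmarshalTOML consumes the whole [port] table itself.
func (p *port) UnmarshalTOML(v any) error {
	m, ok := v.(map[string]any)
	if !ok {
		return fmt.Errorf("expected table, got %T", v)
	}
	p.n, _ = m["number"].(int64)
	return nil
}

func main() {
	var c struct {
		Port port `toml:"port"`
	}
	md, err := toml.Decode("[port]\nnumber = 8080\n", &c)
	if err != nil {
		panic(err)
	}
	// With markDecodedRecursive in place, "port.number" is now reported as
	// decoded, so Undecoded() returns an empty slice.
	fmt.Println(c.Port.n, md.Undecoded()) // 8080 []
}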
@@ -227,14 +265,6 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
return md.unifyInt(data, rv)
}
switch k {
- case reflect.Ptr:
- elem := reflect.New(rv.Type().Elem())
- err := md.unify(data, reflect.Indirect(elem))
- if err != nil {
- return err
- }
- rv.Set(elem)
- return nil
case reflect.Struct:
return md.unifyStruct(data, rv)
case reflect.Map:
@@ -248,7 +278,7 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
case reflect.Bool:
return md.unifyBool(data, rv)
case reflect.Interface:
- if rv.NumMethod() > 0 { // Only support empty interfaces are supported.
+ if rv.NumMethod() > 0 { /// Only empty interfaces are supported.
return md.e("unsupported type %s", rv.Type())
}
return md.unifyAnything(data, rv)
@@ -258,14 +288,13 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
return md.e("unsupported type %s", rv.Kind())
}
-func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
- tmap, ok := mapping.(map[string]interface{})
+func (md *MetaData) unifyStruct(mapping any, rv reflect.Value) error {
+ tmap, ok := mapping.(map[string]any)
if !ok {
if mapping == nil {
return nil
}
- return md.e("type mismatch for %s: expected table but found %T",
- rv.Type().String(), mapping)
+ return md.e("type mismatch for %s: expected table but found %s", rv.Type().String(), fmtType(mapping))
}
for key, datum := range tmap {
@@ -304,14 +333,14 @@ func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
return nil
}
-func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error {
+func (md *MetaData) unifyMap(mapping any, rv reflect.Value) error {
keyType := rv.Type().Key().Kind()
if keyType != reflect.String && keyType != reflect.Interface {
return fmt.Errorf("toml: cannot decode to a map with non-string key type (%s in %q)",
keyType, rv.Type())
}
- tmap, ok := mapping.(map[string]interface{})
+ tmap, ok := mapping.(map[string]any)
if !ok {
if tmap == nil {
return nil
@@ -347,7 +376,7 @@ func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error {
return nil
}
-func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error {
+func (md *MetaData) unifyArray(data any, rv reflect.Value) error {
datav := reflect.ValueOf(data)
if datav.Kind() != reflect.Slice {
if !datav.IsValid() {
@@ -361,7 +390,7 @@ func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error {
return md.unifySliceArray(datav, rv)
}
-func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error {
+func (md *MetaData) unifySlice(data any, rv reflect.Value) error {
datav := reflect.ValueOf(data)
if datav.Kind() != reflect.Slice {
if !datav.IsValid() {
@@ -388,7 +417,7 @@ func (md *MetaData) unifySliceArray(data, rv reflect.Value) error {
return nil
}
-func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error {
+func (md *MetaData) unifyString(data any, rv reflect.Value) error {
_, ok := rv.Interface().(json.Number)
if ok {
if i, ok := data.(int64); ok {
@@ -408,7 +437,7 @@ func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error {
return md.badtype("string", data)
}
-func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error {
+func (md *MetaData) unifyFloat64(data any, rv reflect.Value) error {
rvk := rv.Kind()
if num, ok := data.(float64); ok {
@@ -429,7 +458,7 @@ func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error {
if num, ok := data.(int64); ok {
if (rvk == reflect.Float32 && (num < -maxSafeFloat32Int || num > maxSafeFloat32Int)) ||
(rvk == reflect.Float64 && (num < -maxSafeFloat64Int || num > maxSafeFloat64Int)) {
- return md.parseErr(errParseRange{i: num, size: rvk.String()})
+ return md.parseErr(errUnsafeFloat{i: num, size: rvk.String()})
}
rv.SetFloat(float64(num))
return nil
@@ -438,7 +467,7 @@ func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error {
return md.badtype("float", data)
}
-func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error {
+func (md *MetaData) unifyInt(data any, rv reflect.Value) error {
_, ok := rv.Interface().(time.Duration)
if ok {
// Parse as string duration, and fall back to regular integer parsing
@@ -481,7 +510,7 @@ func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error {
return nil
}
-func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error {
+func (md *MetaData) unifyBool(data any, rv reflect.Value) error {
if b, ok := data.(bool); ok {
rv.SetBool(b)
return nil
@@ -489,12 +518,12 @@ func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error {
return md.badtype("boolean", data)
}
-func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error {
+func (md *MetaData) unifyAnything(data any, rv reflect.Value) error {
rv.Set(reflect.ValueOf(data))
return nil
}
-func (md *MetaData) unifyText(data interface{}, v encoding.TextUnmarshaler) error {
+func (md *MetaData) unifyText(data any, v encoding.TextUnmarshaler) error {
var s string
switch sdata := data.(type) {
case Marshaler:
@@ -523,27 +552,29 @@ func (md *MetaData) unifyText(data interface{}, v encoding.TextUnmarshaler) erro
return md.badtype("primitive (string-like)", data)
}
if err := v.UnmarshalText([]byte(s)); err != nil {
- return err
+ return md.parseErr(err)
}
return nil
}
-func (md *MetaData) badtype(dst string, data interface{}) error {
- return md.e("incompatible types: TOML value has type %T; destination has type %s", data, dst)
+func (md *MetaData) badtype(dst string, data any) error {
+ return md.e("incompatible types: TOML value has type %s; destination has type %s", fmtType(data), dst)
}
func (md *MetaData) parseErr(err error) error {
k := md.context.String()
+ d := string(md.data)
return ParseError{
- LastKey: k,
- Position: md.keyInfo[k].pos,
- Line: md.keyInfo[k].pos.Line,
+ Message: err.Error(),
err: err,
- input: string(md.data),
+ LastKey: k,
+ Position: md.keyInfo[k].pos.withCol(d),
+ Line: md.keyInfo[k].pos.Line,
+ input: d,
}
}
-func (md *MetaData) e(format string, args ...interface{}) error {
+func (md *MetaData) e(format string, args ...any) error {
f := "toml: "
if len(md.context) > 0 {
f = fmt.Sprintf("toml: (last key %q): ", md.context)
@@ -556,7 +587,7 @@ func (md *MetaData) e(format string, args ...interface{}) error {
}
// rvalue returns a reflect.Value of `v`. All pointers are resolved.
-func rvalue(v interface{}) reflect.Value {
+func rvalue(v any) reflect.Value {
return indirect(reflect.ValueOf(v))
}
@@ -600,3 +631,8 @@ func isUnifiable(rv reflect.Value) bool {
}
return false
}
+
+// fmtType formats like fmt's %T, but with "interface {}" replaced by "any",
+// which is far more readable.
+func fmtType(t any) string {
+ return strings.ReplaceAll(fmt.Sprintf("%T", t), "interface {}", "any")
+}
diff --git a/vendor/github.com/BurntSushi/toml/decode_go116.go b/vendor/github.com/BurntSushi/toml/decode_go116.go
deleted file mode 100644
index 086d0b68..00000000
--- a/vendor/github.com/BurntSushi/toml/decode_go116.go
+++ /dev/null
@@ -1,19 +0,0 @@
-//go:build go1.16
-// +build go1.16
-
-package toml
-
-import (
- "io/fs"
-)
-
-// DecodeFS reads the contents of a file from [fs.FS] and decodes it with
-// [Decode].
-func DecodeFS(fsys fs.FS, path string, v interface{}) (MetaData, error) {
- fp, err := fsys.Open(path)
- if err != nil {
- return MetaData{}, err
- }
- defer fp.Close()
- return NewDecoder(fp).Decode(v)
-}
diff --git a/vendor/github.com/BurntSushi/toml/deprecated.go b/vendor/github.com/BurntSushi/toml/deprecated.go
index c6af3f23..155709a8 100644
--- a/vendor/github.com/BurntSushi/toml/deprecated.go
+++ b/vendor/github.com/BurntSushi/toml/deprecated.go
@@ -5,17 +5,25 @@ import (
"io"
)
+// TextMarshaler is an alias for encoding.TextMarshaler.
+//
// Deprecated: use encoding.TextMarshaler
type TextMarshaler encoding.TextMarshaler
+// TextUnmarshaler is an alias for encoding.TextUnmarshaler.
+//
// Deprecated: use encoding.TextUnmarshaler
type TextUnmarshaler encoding.TextUnmarshaler
+// DecodeReader is an alias for NewDecoder(r).Decode(v).
+//
+// Deprecated: use NewDecoder(reader).Decode(&value).
+func DecodeReader(r io.Reader, v any) (MetaData, error) { return NewDecoder(r).Decode(v) }
+
+// PrimitiveDecode is an alias for MetaData.PrimitiveDecode().
+//
// Deprecated: use MetaData.PrimitiveDecode.
-func PrimitiveDecode(primValue Primitive, v interface{}) error {
+func PrimitiveDecode(primValue Primitive, v any) error {
md := MetaData{decoded: make(map[string]struct{})}
return md.unify(primValue.undecoded, rvalue(v))
}
-
-// Deprecated: use NewDecoder(reader).Decode(&value).
-func DecodeReader(r io.Reader, v interface{}) (MetaData, error) { return NewDecoder(r).Decode(v) }
diff --git a/vendor/github.com/BurntSushi/toml/doc.go b/vendor/github.com/BurntSushi/toml/doc.go
index 81a7c0fe..82c90a90 100644
--- a/vendor/github.com/BurntSushi/toml/doc.go
+++ b/vendor/github.com/BurntSushi/toml/doc.go
@@ -2,9 +2,6 @@
//
// This package supports TOML v1.0.0, as specified at https://toml.io
//
-// There is also support for delaying decoding with the Primitive type, and
-// querying the set of keys in a TOML document with the MetaData type.
-//
// The github.com/BurntSushi/toml/cmd/tomlv package implements a TOML validator,
// and can be used to verify whether a TOML document is valid. It can also be used to
// print the type of each key.
diff --git a/vendor/github.com/BurntSushi/toml/encode.go b/vendor/github.com/BurntSushi/toml/encode.go
index 930e1d52..ac196e7d 100644
--- a/vendor/github.com/BurntSushi/toml/encode.go
+++ b/vendor/github.com/BurntSushi/toml/encode.go
@@ -2,6 +2,7 @@ package toml
import (
"bufio"
+ "bytes"
"encoding"
"encoding/json"
"errors"
@@ -76,6 +77,17 @@ type Marshaler interface {
MarshalTOML() ([]byte, error)
}
+// Marshal returns a TOML representation of the Go value.
+//
+// See [Encoder] for a description of the encoding process.
+func Marshal(v any) ([]byte, error) {
+ buff := new(bytes.Buffer)
+ if err := NewEncoder(buff).Encode(v); err != nil {
+ return nil, err
+ }
+ return buff.Bytes(), nil
+}
+
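Usage sketch for the new Marshal helper; the struct and values are illustrative.

package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	b, err := toml.Marshal(struct {
		Name string `toml:"name"`
		Port int    `toml:"port"`
	}{Name: "garm", Port: 9997})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(b))
	// name = "garm"
	// port = 9997
}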
// Encoder encodes a Go value to a TOML document.
//
// The mapping between Go values and TOML values should be precisely the same as
@@ -115,28 +127,24 @@ type Marshaler interface {
// NOTE: only exported keys are encoded due to the use of reflection. Unexported
// keys are silently discarded.
type Encoder struct {
- // String to use for a single indentation level; default is two spaces.
- Indent string
-
+ Indent string // string for a single indentation level; default is two spaces.
+ hasWritten bool // written any output to w yet?
w *bufio.Writer
- hasWritten bool // written any output to w yet?
}
// NewEncoder create a new Encoder.
func NewEncoder(w io.Writer) *Encoder {
- return &Encoder{
- w: bufio.NewWriter(w),
- Indent: " ",
- }
+ return &Encoder{w: bufio.NewWriter(w), Indent: " "}
}
// Encode writes a TOML representation of the Go value to the [Encoder]'s writer.
//
// An error is returned if the value given cannot be encoded to a valid TOML
// document.
-func (enc *Encoder) Encode(v interface{}) error {
+func (enc *Encoder) Encode(v any) error {
rv := eindirect(reflect.ValueOf(v))
- if err := enc.safeEncode(Key([]string{}), rv); err != nil {
+ err := enc.safeEncode(Key([]string{}), rv)
+ if err != nil {
return err
}
return enc.w.Flush()
@@ -279,18 +287,30 @@ func (enc *Encoder) eElement(rv reflect.Value) {
case reflect.Float32:
f := rv.Float()
if math.IsNaN(f) {
+ if math.Signbit(f) {
+ enc.wf("-")
+ }
enc.wf("nan")
} else if math.IsInf(f, 0) {
- enc.wf("%cinf", map[bool]byte{true: '-', false: '+'}[math.Signbit(f)])
+ if math.Signbit(f) {
+ enc.wf("-")
+ }
+ enc.wf("inf")
} else {
enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 32)))
}
case reflect.Float64:
f := rv.Float()
if math.IsNaN(f) {
+ if math.Signbit(f) {
+ enc.wf("-")
+ }
enc.wf("nan")
} else if math.IsInf(f, 0) {
- enc.wf("%cinf", map[bool]byte{true: '-', false: '+'}[math.Signbit(f)])
+ if math.Signbit(f) {
+ enc.wf("-")
+ }
+ enc.wf("inf")
} else {
enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 64)))
}
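What the change above produces, assuming a plain float map: positive infinity is now written as bare "inf" rather than "+inf", and the sign is preserved for both inf and nan.

package main

import (
	"fmt"
	"math"

	"github.com/BurntSushi/toml"
)

func main() {
	b, err := toml.Marshal(map[string]float64{
		"nan":  math.NaN(),
		"ninf": math.Inf(-1),
		"pinf": math.Inf(1),
	})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(b)) // map keys are written sorted
	// nan = nan
	// ninf = -inf
	// pinf = inf
}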
@@ -303,7 +323,7 @@ func (enc *Encoder) eElement(rv reflect.Value) {
case reflect.Interface:
enc.eElement(rv.Elem())
default:
- encPanic(fmt.Errorf("unexpected type: %T", rv.Interface()))
+ encPanic(fmt.Errorf("unexpected type: %s", fmtType(rv.Interface())))
}
}
@@ -382,31 +402,30 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) {
// Sort keys so that we have deterministic output. And write keys directly
// underneath this key first, before writing sub-structs or sub-maps.
- var mapKeysDirect, mapKeysSub []string
+ var mapKeysDirect, mapKeysSub []reflect.Value
for _, mapKey := range rv.MapKeys() {
- k := mapKey.String()
if typeIsTable(tomlTypeOfGo(eindirect(rv.MapIndex(mapKey)))) {
- mapKeysSub = append(mapKeysSub, k)
+ mapKeysSub = append(mapKeysSub, mapKey)
} else {
- mapKeysDirect = append(mapKeysDirect, k)
+ mapKeysDirect = append(mapKeysDirect, mapKey)
}
}
- var writeMapKeys = func(mapKeys []string, trailC bool) {
- sort.Strings(mapKeys)
+ writeMapKeys := func(mapKeys []reflect.Value, trailC bool) {
+ sort.Slice(mapKeys, func(i, j int) bool { return mapKeys[i].String() < mapKeys[j].String() })
for i, mapKey := range mapKeys {
- val := eindirect(rv.MapIndex(reflect.ValueOf(mapKey)))
+ val := eindirect(rv.MapIndex(mapKey))
if isNil(val) {
continue
}
if inline {
- enc.writeKeyValue(Key{mapKey}, val, true)
+ enc.writeKeyValue(Key{mapKey.String()}, val, true)
if trailC || i != len(mapKeys)-1 {
enc.wf(", ")
}
} else {
- enc.encode(key.add(mapKey), val)
+ enc.encode(key.add(mapKey.String()), val)
}
}
}
@@ -421,8 +440,6 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) {
}
}
-const is32Bit = (32 << (^uint(0) >> 63)) == 32
-
func pointerTo(t reflect.Type) reflect.Type {
if t.Kind() == reflect.Ptr {
return pointerTo(t.Elem())
@@ -457,6 +474,15 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
frv := eindirect(rv.Field(i))
+	// Need to make a copy of start: append() can reuse its backing array
+	// across iterations, letting a later append overwrite index paths that
+	// were already stored in fieldsDirect/fieldsSub.
+ //
+ // Done for: https://github.com/BurntSushi/toml/issues/430
+ // Previously only on 32bit for: https://github.com/BurntSushi/toml/issues/314
+ copyStart := make([]int, len(start))
+ copy(copyStart, start)
+ start = copyStart
+
// Treat anonymous struct fields with tag names as though they are
// not anonymous, like encoding/json does.
//
@@ -471,50 +497,43 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
if typeIsTable(tomlTypeOfGo(frv)) {
fieldsSub = append(fieldsSub, append(start, f.Index...))
} else {
- // Copy so it works correct on 32bit archs; not clear why this
- // is needed. See #314, and https://www.reddit.com/r/golang/comments/pnx8v4
- // This also works fine on 64bit, but 32bit archs are somewhat
- // rare and this is a wee bit faster.
- if is32Bit {
- copyStart := make([]int, len(start))
- copy(copyStart, start)
- fieldsDirect = append(fieldsDirect, append(copyStart, f.Index...))
- } else {
- fieldsDirect = append(fieldsDirect, append(start, f.Index...))
- }
+ fieldsDirect = append(fieldsDirect, append(start, f.Index...))
}
}
}
addFields(rt, rv, nil)
- writeFields := func(fields [][]int) {
+ writeFields := func(fields [][]int, totalFields int) {
for _, fieldIndex := range fields {
fieldType := rt.FieldByIndex(fieldIndex)
- fieldVal := eindirect(rv.FieldByIndex(fieldIndex))
-
- if isNil(fieldVal) { /// Don't write anything for nil fields.
- continue
- }
+ fieldVal := rv.FieldByIndex(fieldIndex)
opts := getOptions(fieldType.Tag)
if opts.skip {
continue
}
+ if opts.omitempty && isEmpty(fieldVal) {
+ continue
+ }
+
+ fieldVal = eindirect(fieldVal)
+
+ if isNil(fieldVal) { /// Don't write anything for nil fields.
+ continue
+ }
+
keyName := fieldType.Name
if opts.name != "" {
keyName = opts.name
}
- if opts.omitempty && enc.isEmpty(fieldVal) {
- continue
- }
if opts.omitzero && isZero(fieldVal) {
continue
}
if inline {
enc.writeKeyValue(Key{keyName}, fieldVal, true)
- if fieldIndex[0] != len(fields)-1 {
+ if fieldIndex[0] != totalFields-1 {
enc.wf(", ")
}
} else {
@@ -526,8 +545,10 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
if inline {
enc.wf("{")
}
- writeFields(fieldsDirect)
- writeFields(fieldsSub)
+
+ l := len(fieldsDirect) + len(fieldsSub)
+ writeFields(fieldsDirect, l)
+ writeFields(fieldsSub, l)
if inline {
enc.wf("}")
}
@@ -649,7 +670,7 @@ func isZero(rv reflect.Value) bool {
return false
}
-func (enc *Encoder) isEmpty(rv reflect.Value) bool {
+func isEmpty(rv reflect.Value) bool {
switch rv.Kind() {
case reflect.Array, reflect.Slice, reflect.Map, reflect.String:
return rv.Len() == 0
@@ -664,13 +685,15 @@ func (enc *Encoder) isEmpty(rv reflect.Value) bool {
// type b struct{ s []string }
// s := a{field: b{s: []string{"AAA"}}}
for i := 0; i < rv.NumField(); i++ {
- if !enc.isEmpty(rv.Field(i)) {
+ if !isEmpty(rv.Field(i)) {
return false
}
}
return true
case reflect.Bool:
return !rv.Bool()
+ case reflect.Ptr:
+ return rv.IsNil()
}
return false
}
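How the reordered checks above play out for struct tags, as a sketch with made-up field names; note that omitempty is now evaluated before the pointer is dereferenced, and nil pointers count as empty.

package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

type conf struct {
	Name  string  `toml:"name"`
	Notes string  `toml:"notes,omitempty"` // skipped: len == 0
	Ratio float64 `toml:"ratio,omitzero"`  // skipped: zero value
	Debug *bool   `toml:"debug,omitempty"` // skipped: nil pointer
}

func main() {
	b, err := toml.Marshal(conf{Name: "garm"})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(b))
	// name = "garm"
}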
@@ -693,8 +716,11 @@ func (enc *Encoder) newline() {
// v v v v vv
// key = {k = 1, k2 = 2}
func (enc *Encoder) writeKeyValue(key Key, val reflect.Value, inline bool) {
+ /// Marshaler used on top-level document; call eElement() to just call
+ /// Marshal{TOML,Text}.
if len(key) == 0 {
- encPanic(errNoKey)
+ enc.eElement(val)
+ return
}
enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1))
enc.eElement(val)
@@ -703,7 +729,7 @@ func (enc *Encoder) writeKeyValue(key Key, val reflect.Value, inline bool) {
}
}
-func (enc *Encoder) wf(format string, v ...interface{}) {
+func (enc *Encoder) wf(format string, v ...any) {
_, err := fmt.Fprintf(enc.w, format, v...)
if err != nil {
encPanic(err)
diff --git a/vendor/github.com/BurntSushi/toml/error.go b/vendor/github.com/BurntSushi/toml/error.go
index f4f390e6..b7077d3a 100644
--- a/vendor/github.com/BurntSushi/toml/error.go
+++ b/vendor/github.com/BurntSushi/toml/error.go
@@ -67,24 +67,39 @@ type ParseError struct {
// Position of an error.
type Position struct {
Line int // Line number, starting at 1.
+ Col int // Error column, starting at 1.
Start int // Start of error, as byte offset starting at 0.
- Len int // Lenght in bytes.
+ Len int // Length of the error in bytes.
+}
+
+func (p Position) withCol(tomlFile string) Position {
+ var (
+ pos int
+ lines = strings.Split(tomlFile, "\n")
+ )
+ for i := range lines {
+ ll := len(lines[i]) + 1 // +1 for the removed newline
+ if pos+ll >= p.Start {
+ p.Col = p.Start - pos + 1
+ if p.Col < 1 { // Should never happen, but just in case.
+ p.Col = 1
+ }
+ break
+ }
+ pos += ll
+ }
+ return p
}
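A worked example of the column arithmetic above, re-implemented for illustration; this is not the vendored function itself.

package main

import (
	"fmt"
	"strings"
)

// colOf mirrors withCol: walk the lines, and once the error's byte offset
// falls inside the current line, the 1-based column is offset-pos+1.
func colOf(input string, start int) int {
	pos := 0
	for _, line := range strings.Split(input, "\n") {
		ll := len(line) + 1 // +1 for the removed newline
		if pos+ll >= start {
			return start - pos + 1
		}
		pos += ll
	}
	return 1
}

func main() {
	// The '?' sits at byte offset 10; line 1 spans offsets 0-5, so the
	// column is 10 - 6 + 1 = 5.
	fmt.Println(colOf("a = 1\nb = ?", 10)) // 5
}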
func (pe ParseError) Error() string {
- msg := pe.Message
- if msg == "" { // Error from errorf()
- msg = pe.err.Error()
- }
-
if pe.LastKey == "" {
- return fmt.Sprintf("toml: line %d: %s", pe.Position.Line, msg)
+ return fmt.Sprintf("toml: line %d: %s", pe.Position.Line, pe.Message)
}
return fmt.Sprintf("toml: line %d (last key %q): %s",
- pe.Position.Line, pe.LastKey, msg)
+ pe.Position.Line, pe.LastKey, pe.Message)
}
-// ErrorWithUsage() returns the error with detailed location context.
+// ErrorWithPosition returns the error with detailed location context.
//
// See the documentation on [ParseError].
func (pe ParseError) ErrorWithPosition() string {
@@ -92,39 +107,41 @@ func (pe ParseError) ErrorWithPosition() string {
return pe.Error()
}
- var (
- lines = strings.Split(pe.input, "\n")
- col = pe.column(lines)
- b = new(strings.Builder)
- )
-
- msg := pe.Message
- if msg == "" {
- msg = pe.err.Error()
- }
-
// TODO: don't show control characters as literals? This may not show up
// well everywhere.
+ var (
+ lines = strings.Split(pe.input, "\n")
+ b = new(strings.Builder)
+ )
if pe.Position.Len == 1 {
fmt.Fprintf(b, "toml: error: %s\n\nAt line %d, column %d:\n\n",
- msg, pe.Position.Line, col+1)
+ pe.Message, pe.Position.Line, pe.Position.Col)
} else {
fmt.Fprintf(b, "toml: error: %s\n\nAt line %d, column %d-%d:\n\n",
- msg, pe.Position.Line, col, col+pe.Position.Len)
+ pe.Message, pe.Position.Line, pe.Position.Col, pe.Position.Col+pe.Position.Len-1)
}
if pe.Position.Line > 2 {
- fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-2, lines[pe.Position.Line-3])
+ fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-2, expandTab(lines[pe.Position.Line-3]))
}
if pe.Position.Line > 1 {
- fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-1, lines[pe.Position.Line-2])
+ fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-1, expandTab(lines[pe.Position.Line-2]))
}
- fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line, lines[pe.Position.Line-1])
- fmt.Fprintf(b, "% 10s%s%s\n", "", strings.Repeat(" ", col), strings.Repeat("^", pe.Position.Len))
+
+ /// Expand tabs, so that the ^^^s are at the correct position, but leave
+ /// "column 10-13" intact. Adjusting this to the visual column would be
+ /// better, but we don't know the tabsize of the user in their editor, which
+ /// can be 8, 4, 2, or something else. We can't know. So leaving it as the
+ /// character index is probably the "most correct".
+ expanded := expandTab(lines[pe.Position.Line-1])
+ diff := len(expanded) - len(lines[pe.Position.Line-1])
+
+ fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line, expanded)
+ fmt.Fprintf(b, "% 10s%s%s\n", "", strings.Repeat(" ", pe.Position.Col-1+diff), strings.Repeat("^", pe.Position.Len))
return b.String()
}
-// ErrorWithUsage() returns the error with detailed location context and usage
+// ErrorWithUsage returns the error with detailed location context and usage
// guidance.
//
// See the documentation on [ParseError].
@@ -142,34 +159,47 @@ func (pe ParseError) ErrorWithUsage() string {
return m
}
-func (pe ParseError) column(lines []string) int {
- var pos, col int
- for i := range lines {
- ll := len(lines[i]) + 1 // +1 for the removed newline
- if pos+ll >= pe.Position.Start {
- col = pe.Position.Start - pos
- if col < 0 { // Should never happen, but just in case.
- col = 0
+func expandTab(s string) string {
+ var (
+ b strings.Builder
+ l int
+ fill = func(n int) string {
+ b := make([]byte, n)
+ for i := range b {
+ b[i] = ' '
}
- break
+ return string(b)
+ }
+ )
+ b.Grow(len(s))
+ for _, r := range s {
+ switch r {
+ case '\t':
+ tw := 8 - l%8
+ b.WriteString(fill(tw))
+ l += tw
+ default:
+ b.WriteRune(r)
+ l += 1
}
- pos += ll
}
-
- return col
+ return b.String()
}
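A sketch of the 8-column tab expansion, re-implemented here for illustration; it is what keeps the "^^^" marker aligned with the source line as printed.

package main

import (
	"fmt"
	"strings"
)

func expand(s string) string {
	var b strings.Builder
	col := 0
	for _, r := range s {
		if r == '\t' {
			n := 8 - col%8 // advance to the next multiple of 8
			b.WriteString(strings.Repeat(" ", n))
			col += n
			continue
		}
		b.WriteRune(r)
		col++
	}
	return b.String()
}

func main() {
	fmt.Printf("%q\n", expand("a\tb")) // "a       b": the tab pads to column 8
}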
type (
errLexControl struct{ r rune }
errLexEscape struct{ r rune }
errLexUTF8 struct{ b byte }
- errLexInvalidNum struct{ v string }
- errLexInvalidDate struct{ v string }
+ errParseDate struct{ v string }
errLexInlineTableNL struct{}
errLexStringNL struct{}
errParseRange struct {
- i interface{} // int or float
- size string // "int64", "uint16", etc.
+ i any // int or float
+ size string // "int64", "uint16", etc.
+ }
+ errUnsafeFloat struct {
+ i any // float32 or float64
+ size string // "float32" or "float64"
}
errParseDuration struct{ d string }
)
@@ -183,18 +213,20 @@ func (e errLexEscape) Error() string { return fmt.Sprintf(`invalid escape
func (e errLexEscape) Usage() string { return usageEscape }
func (e errLexUTF8) Error() string { return fmt.Sprintf("invalid UTF-8 byte: 0x%02x", e.b) }
func (e errLexUTF8) Usage() string { return "" }
-func (e errLexInvalidNum) Error() string { return fmt.Sprintf("invalid number: %q", e.v) }
-func (e errLexInvalidNum) Usage() string { return "" }
-func (e errLexInvalidDate) Error() string { return fmt.Sprintf("invalid date: %q", e.v) }
-func (e errLexInvalidDate) Usage() string { return "" }
+func (e errParseDate) Error() string { return fmt.Sprintf("invalid datetime: %q", e.v) }
+func (e errParseDate) Usage() string { return usageDate }
func (e errLexInlineTableNL) Error() string { return "newlines not allowed within inline tables" }
func (e errLexInlineTableNL) Usage() string { return usageInlineNewline }
func (e errLexStringNL) Error() string { return "strings cannot contain newlines" }
func (e errLexStringNL) Usage() string { return usageStringNewline }
func (e errParseRange) Error() string { return fmt.Sprintf("%v is out of range for %s", e.i, e.size) }
func (e errParseRange) Usage() string { return usageIntOverflow }
-func (e errParseDuration) Error() string { return fmt.Sprintf("invalid duration: %q", e.d) }
-func (e errParseDuration) Usage() string { return usageDuration }
+func (e errUnsafeFloat) Error() string {
+ return fmt.Sprintf("%v is out of the safe %s range", e.i, e.size)
+}
+func (e errUnsafeFloat) Usage() string { return usageUnsafeFloat }
+func (e errParseDuration) Error() string { return fmt.Sprintf("invalid duration: %q", e.d) }
+func (e errParseDuration) Usage() string { return usageDuration }
const usageEscape = `
A '\' inside a "-delimited string is interpreted as an escape character.
@@ -251,19 +283,35 @@ bug in the program that uses too small of an integer.
The maximum and minimum values are:
size │ lowest │ highest
- ───────┼────────────────┼──────────
+ ───────┼────────────────┼──────────────
int8 │ -128 │ 127
int16 │ -32,768 │ 32,767
int32 │ -2,147,483,648 │ 2,147,483,647
int64 │ -9.2 × 10¹⁷ │ 9.2 × 10¹⁷
uint8 │ 0 │ 255
- uint16 │ 0 │ 65535
- uint32 │ 0 │ 4294967295
+ uint16 │ 0 │ 65,535
+ uint32 │ 0 │ 4,294,967,295
uint64 │ 0 │ 1.8 × 10¹⁸
int refers to int32 on 32-bit systems and int64 on 64-bit systems.
`
+const usageUnsafeFloat = `
+This number is outside the "safe" range for floating point numbers; whole
+(non-fractional) numbers outside the range below cannot always be represented
+accurately in a float, leading to some loss of accuracy.
+
+Explicitly mark a number as a fractional unit by adding ".0", which will incur
+some loss of accuracy; for example:
+
+ f = 2_000_000_000.0
+
+Accuracy ranges:
+
+ float32 = 16,777,215
+ float64 = 9,007,199,254,740,991
+`
+
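The cutoffs in the usage text above are 2^24-1 and 2^53-1; past them, consecutive integers stop being representable, as this small check shows:

package main

import "fmt"

func main() {
	n := int64(16777217) // 2^24 + 1: first integer a float32 cannot hold
	fmt.Println(int64(float32(n))) // 16777216: rounded, accuracy lost
	m := int64(16777215)
	fmt.Println(int64(float32(m))) // 16777215: still exact
}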
const usageDuration = `
A duration must be as "number", without any spaces. Valid units are:
@@ -277,3 +325,23 @@ A duration must be as "number", without any spaces. Valid units are:
You can combine multiple units; for example "5m10s" for 5 minutes and 10
seconds.
`
+
+const usageDate = `
+A TOML datetime must be in one of the following formats:
+
+ 2006-01-02T15:04:05Z07:00 Date and time, with timezone.
+ 2006-01-02T15:04:05 Date and time, but without timezone.
+ 2006-01-02 Date without a time or timezone.
+ 15:04:05 Just a time, without any timezone.
+
+Seconds may optionally have a fraction, up to nanosecond precision:
+
+ 15:04:05.123
+ 15:04:05.856018510
+`
+
+// TOML 1.1:
+// The seconds part in times is optional, and may be omitted:
+// 2006-01-02T15:04Z07:00
+// 2006-01-02T15:04
+// 15:04
diff --git a/vendor/github.com/BurntSushi/toml/lex.go b/vendor/github.com/BurntSushi/toml/lex.go
index d4d70871..1c3b4770 100644
--- a/vendor/github.com/BurntSushi/toml/lex.go
+++ b/vendor/github.com/BurntSushi/toml/lex.go
@@ -17,6 +17,7 @@ const (
itemEOF
itemText
itemString
+ itemStringEsc
itemRawString
itemMultilineString
itemRawMultilineString
@@ -46,12 +47,14 @@ func (p Position) String() string {
}
type lexer struct {
- input string
- start int
- pos int
- line int
- state stateFn
- items chan item
+ input string
+ start int
+ pos int
+ line int
+ state stateFn
+ items chan item
+ tomlNext bool
+ esc bool
// Allow for backing up up to 4 runes. This is necessary because TOML
// contains 3-rune tokens (""" and ''').
@@ -87,13 +90,14 @@ func (lx *lexer) nextItem() item {
}
}
-func lex(input string) *lexer {
+func lex(input string, tomlNext bool) *lexer {
lx := &lexer{
- input: input,
- state: lexTop,
- items: make(chan item, 10),
- stack: make([]stateFn, 0, 10),
- line: 1,
+ input: input,
+ state: lexTop,
+ items: make(chan item, 10),
+ stack: make([]stateFn, 0, 10),
+ line: 1,
+ tomlNext: tomlNext,
}
return lx
}
@@ -162,7 +166,7 @@ func (lx *lexer) next() (r rune) {
}
r, w := utf8.DecodeRuneInString(lx.input[lx.pos:])
- if r == utf8.RuneError {
+ if r == utf8.RuneError && w == 1 {
lx.error(errLexUTF8{lx.input[lx.pos]})
return utf8.RuneError
}
@@ -268,10 +272,12 @@ func (lx *lexer) errorPos(start, length int, err error) stateFn {
}
// errorf is like error, and creates a new error.
-func (lx *lexer) errorf(format string, values ...interface{}) stateFn {
+func (lx *lexer) errorf(format string, values ...any) stateFn {
if lx.atEOF {
pos := lx.getPos()
- pos.Line--
+ if lx.pos >= 1 && lx.input[lx.pos-1] == '\n' {
+ pos.Line--
+ }
pos.Len = 1
pos.Start = lx.pos - 1
lx.items <- item{typ: itemError, pos: pos, err: fmt.Errorf(format, values...)}
@@ -331,9 +337,7 @@ func lexTopEnd(lx *lexer) stateFn {
lx.emit(itemEOF)
return nil
}
- return lx.errorf(
- "expected a top-level item to end with a newline, comment, or EOF, but got %q instead",
- r)
+ return lx.errorf("expected a top-level item to end with a newline, comment, or EOF, but got %q instead", r)
}
// lexTable lexes the beginning of a table. Namely, it makes sure that
@@ -408,7 +412,7 @@ func lexTableNameEnd(lx *lexer) stateFn {
// Lexes only one part, e.g. only 'a' inside 'a.b'.
func lexBareName(lx *lexer) stateFn {
r := lx.next()
- if isBareKeyChar(r) {
+ if isBareKeyChar(r, lx.tomlNext) {
return lexBareName
}
lx.backup()
@@ -490,6 +494,9 @@ func lexKeyEnd(lx *lexer) stateFn {
lx.emit(itemKeyEnd)
return lexSkip(lx, lexValue)
default:
+ if r == '\n' {
+ return lx.errorPrevLine(fmt.Errorf("expected '.' or '=', but got %q instead", r))
+ }
return lx.errorf("expected '.' or '=', but got %q instead", r)
}
}
@@ -558,6 +565,9 @@ func lexValue(lx *lexer) stateFn {
if r == eof {
return lx.errorf("unexpected EOF; expected value")
}
+ if r == '\n' {
+ return lx.errorPrevLine(fmt.Errorf("expected value but found %q instead", r))
+ }
return lx.errorf("expected value but found %q instead", r)
}
@@ -618,6 +628,9 @@ func lexInlineTableValue(lx *lexer) stateFn {
case isWhitespace(r):
return lexSkip(lx, lexInlineTableValue)
case isNL(r):
+ if lx.tomlNext {
+ return lexSkip(lx, lexInlineTableValue)
+ }
return lx.errorPrevLine(errLexInlineTableNL{})
case r == '#':
lx.push(lexInlineTableValue)
@@ -640,6 +653,9 @@ func lexInlineTableValueEnd(lx *lexer) stateFn {
case isWhitespace(r):
return lexSkip(lx, lexInlineTableValueEnd)
case isNL(r):
+ if lx.tomlNext {
+ return lexSkip(lx, lexInlineTableValueEnd)
+ }
return lx.errorPrevLine(errLexInlineTableNL{})
case r == '#':
lx.push(lexInlineTableValueEnd)
@@ -648,6 +664,9 @@ func lexInlineTableValueEnd(lx *lexer) stateFn {
lx.ignore()
lx.skip(isWhitespace)
if lx.peek() == '}' {
+ if lx.tomlNext {
+ return lexInlineTableValueEnd
+ }
return lx.errorf("trailing comma not allowed in inline tables")
}
return lexInlineTableValue
@@ -687,7 +706,12 @@ func lexString(lx *lexer) stateFn {
return lexStringEscape
case r == '"':
lx.backup()
- lx.emit(itemString)
+ if lx.esc {
+ lx.esc = false
+ lx.emit(itemStringEsc)
+ } else {
+ lx.emit(itemString)
+ }
lx.next()
lx.ignore()
return lx.pop()
@@ -737,6 +761,7 @@ func lexMultilineString(lx *lexer) stateFn {
lx.backup() /// backup: don't include the """ in the item.
lx.backup()
lx.backup()
+ lx.esc = false
lx.emit(itemMultilineString)
lx.next() /// Read over ''' again and discard it.
lx.next()
@@ -770,8 +795,8 @@ func lexRawString(lx *lexer) stateFn {
}
}
-// lexMultilineRawString consumes a raw string. Nothing can be escaped in such
-// a string. It assumes that the beginning ''' has already been consumed and
+// lexMultilineRawString consumes a raw string. Nothing can be escaped in such a
+// string. It assumes that the beginning triple-' has already been consumed and
// ignored.
func lexMultilineRawString(lx *lexer) stateFn {
r := lx.next()
@@ -826,8 +851,14 @@ func lexMultilineStringEscape(lx *lexer) stateFn {
}
func lexStringEscape(lx *lexer) stateFn {
+ lx.esc = true
r := lx.next()
switch r {
+ case 'e':
+ if !lx.tomlNext {
+ return lx.error(errLexEscape{r})
+ }
+ fallthrough
case 'b':
fallthrough
case 't':
@@ -846,6 +877,11 @@ func lexStringEscape(lx *lexer) stateFn {
fallthrough
case '\\':
return lx.pop()
+ case 'x':
+ if !lx.tomlNext {
+ return lx.error(errLexEscape{r})
+ }
+ return lexHexEscape
case 'u':
return lexShortUnicodeEscape
case 'U':
@@ -854,14 +890,23 @@ func lexStringEscape(lx *lexer) stateFn {
return lx.error(errLexEscape{r})
}
+func lexHexEscape(lx *lexer) stateFn {
+ var r rune
+ for i := 0; i < 2; i++ {
+ r = lx.next()
+ if !isHex(r) {
+ return lx.errorf(`expected two hexadecimal digits after '\x', but got %q instead`, lx.current())
+ }
+ }
+ return lx.pop()
+}
+
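The new \x escape, like the other tomlNext paths in this file, only takes effect when the BURNTSUSHI_TOML_110 environment variable is set (checked in parse.go further down). A sketch:

package main

import (
	"fmt"
	"os"

	"github.com/BurntSushi/toml"
)

func main() {
	// Opt in to the experimental TOML 1.1 behaviour; only the variable's
	// presence matters, the value is ignored.
	os.Setenv("BURNTSUSHI_TOML_110", "")

	var v struct{ S string }
	if _, err := toml.Decode(`s = "\x41"`, &v); err != nil {
		panic(err)
	}
	fmt.Println(v.S) // A
}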
func lexShortUnicodeEscape(lx *lexer) stateFn {
var r rune
for i := 0; i < 4; i++ {
r = lx.next()
- if !isHexadecimal(r) {
- return lx.errorf(
- `expected four hexadecimal digits after '\u', but got %q instead`,
- lx.current())
+ if !isHex(r) {
+ return lx.errorf(`expected four hexadecimal digits after '\u', but got %q instead`, lx.current())
}
}
return lx.pop()
@@ -871,10 +916,8 @@ func lexLongUnicodeEscape(lx *lexer) stateFn {
var r rune
for i := 0; i < 8; i++ {
r = lx.next()
- if !isHexadecimal(r) {
- return lx.errorf(
- `expected eight hexadecimal digits after '\U', but got %q instead`,
- lx.current())
+ if !isHex(r) {
+ return lx.errorf(`expected eight hexadecimal digits after '\U', but got %q instead`, lx.current())
}
}
return lx.pop()
@@ -941,7 +984,7 @@ func lexDatetime(lx *lexer) stateFn {
// lexHexInteger consumes a hexadecimal integer after seeing the '0x' prefix.
func lexHexInteger(lx *lexer) stateFn {
r := lx.next()
- if isHexadecimal(r) {
+ if isHex(r) {
return lexHexInteger
}
switch r {
@@ -1075,8 +1118,8 @@ func lexBaseNumberOrDate(lx *lexer) stateFn {
return lexOctalInteger
case 'x':
r = lx.peek()
- if !isHexadecimal(r) {
- lx.errorf("not a hexidecimal number: '%s%c'", lx.current(), r)
+ if !isHex(r) {
+ lx.errorf("not a hexadecimal number: '%s%c'", lx.current(), r)
}
return lexHexInteger
}
@@ -1173,7 +1216,7 @@ func (itype itemType) String() string {
return "EOF"
case itemText:
return "Text"
- case itemString, itemRawString, itemMultilineString, itemRawMultilineString:
+ case itemString, itemStringEsc, itemRawString, itemMultilineString, itemRawMultilineString:
return "String"
case itemBool:
return "Bool"
@@ -1206,7 +1249,7 @@ func (itype itemType) String() string {
}
func (item item) String() string {
- return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val)
+ return fmt.Sprintf("(%s, %s)", item.typ, item.val)
}
func isWhitespace(r rune) bool { return r == '\t' || r == ' ' }
@@ -1222,12 +1265,8 @@ func isControl(r rune) bool { // Control characters except \t, \r, \n
func isDigit(r rune) bool { return r >= '0' && r <= '9' }
func isBinary(r rune) bool { return r == '0' || r == '1' }
func isOctal(r rune) bool { return r >= '0' && r <= '7' }
-func isHexadecimal(r rune) bool {
- return (r >= '0' && r <= '9') || (r >= 'a' && r <= 'f') || (r >= 'A' && r <= 'F')
-}
-func isBareKeyChar(r rune) bool {
- return (r >= 'A' && r <= 'Z') ||
- (r >= 'a' && r <= 'z') ||
- (r >= '0' && r <= '9') ||
- r == '_' || r == '-'
+func isHex(r rune) bool { return (r >= '0' && r <= '9') || (r|0x20 >= 'a' && r|0x20 <= 'f') }
+func isBareKeyChar(r rune, tomlNext bool) bool {
+ return (r >= 'A' && r <= 'Z') || (r >= 'a' && r <= 'z') ||
+ (r >= '0' && r <= '9') || r == '_' || r == '-'
}
diff --git a/vendor/github.com/BurntSushi/toml/meta.go b/vendor/github.com/BurntSushi/toml/meta.go
index 71847a04..0d337026 100644
--- a/vendor/github.com/BurntSushi/toml/meta.go
+++ b/vendor/github.com/BurntSushi/toml/meta.go
@@ -13,7 +13,7 @@ type MetaData struct {
context Key // Used only during decoding.
keyInfo map[string]keyInfo
- mapping map[string]interface{}
+ mapping map[string]any
keys []Key
decoded map[string]struct{}
data []byte // Input file; for errors.
@@ -31,12 +31,12 @@ func (md *MetaData) IsDefined(key ...string) bool {
}
var (
- hash map[string]interface{}
+ hash map[string]any
ok bool
- hashOrVal interface{} = md.mapping
+ hashOrVal any = md.mapping
)
for _, k := range key {
- if hash, ok = hashOrVal.(map[string]interface{}); !ok {
+ if hash, ok = hashOrVal.(map[string]any); !ok {
return false
}
if hashOrVal, ok = hash[k]; !ok {
@@ -94,28 +94,52 @@ func (md *MetaData) Undecoded() []Key {
type Key []string
func (k Key) String() string {
- ss := make([]string, len(k))
- for i := range k {
- ss[i] = k.maybeQuoted(i)
+ // This is called quite often, so it's written a bit funky to make it faster.
+ var b strings.Builder
+ b.Grow(len(k) * 25)
+outer:
+ for i, kk := range k {
+ if i > 0 {
+ b.WriteByte('.')
+ }
+ if kk == "" {
+ b.WriteString(`""`)
+ } else {
+ for _, r := range kk {
+ // "Inline" isBareKeyChar
+ if !((r >= 'A' && r <= 'Z') || (r >= 'a' && r <= 'z') || (r >= '0' && r <= '9') || r == '_' || r == '-') {
+ b.WriteByte('"')
+ b.WriteString(dblQuotedReplacer.Replace(kk))
+ b.WriteByte('"')
+ continue outer
+ }
+ }
+ b.WriteString(kk)
+ }
}
- return strings.Join(ss, ".")
+ return b.String()
}
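The behaviour is unchanged from the strings.Join version: bare parts are written as-is and everything else is quoted. For reference:

package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	k := toml.Key{"servers", "alpha beta", ""}
	fmt.Println(k.String()) // servers."alpha beta".""
}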
func (k Key) maybeQuoted(i int) string {
if k[i] == "" {
return `""`
}
- for _, c := range k[i] {
- if !isBareKeyChar(c) {
- return `"` + dblQuotedReplacer.Replace(k[i]) + `"`
+ for _, r := range k[i] {
+ if (r >= 'A' && r <= 'Z') || (r >= 'a' && r <= 'z') || (r >= '0' && r <= '9') || r == '_' || r == '-' {
+ continue
}
+ return `"` + dblQuotedReplacer.Replace(k[i]) + `"`
}
return k[i]
}
+// Like append(), but only increases the cap by 1.
func (k Key) add(piece string) Key {
newKey := make(Key, len(k)+1)
copy(newKey, k)
newKey[len(k)] = piece
return newKey
}
+
+func (k Key) parent() Key { return k[:len(k)-1] } // all except the last piece.
+func (k Key) last() string { return k[len(k)-1] } // last piece of this key.
diff --git a/vendor/github.com/BurntSushi/toml/parse.go b/vendor/github.com/BurntSushi/toml/parse.go
index d2542d6f..e3ea8a9a 100644
--- a/vendor/github.com/BurntSushi/toml/parse.go
+++ b/vendor/github.com/BurntSushi/toml/parse.go
@@ -2,6 +2,8 @@ package toml
import (
"fmt"
+ "math"
+ "os"
"strconv"
"strings"
"time"
@@ -15,12 +17,13 @@ type parser struct {
context Key // Full key for the current hash in scope.
currentKey string // Base key name for everything except hashes.
pos Position // Current position in the TOML file.
+ tomlNext bool
ordered []Key // List of keys in the order that they appear in the TOML data.
- keyInfo map[string]keyInfo // Map keyname → info about the TOML key.
- mapping map[string]interface{} // Map keyname → key value.
- implicits map[string]struct{} // Record implicit keys (e.g. "key.group.names").
+ keyInfo map[string]keyInfo // Map keyname → info about the TOML key.
+ mapping map[string]any // Map keyname → key value.
+ implicits map[string]struct{} // Record implicit keys (e.g. "key.group.names").
}
type keyInfo struct {
@@ -29,6 +32,8 @@ type keyInfo struct {
}
func parse(data string) (p *parser, err error) {
+ _, tomlNext := os.LookupEnv("BURNTSUSHI_TOML_110")
+
defer func() {
if r := recover(); r != nil {
if pErr, ok := r.(ParseError); ok {
@@ -41,9 +46,12 @@ func parse(data string) (p *parser, err error) {
}()
// Read over BOM; do this here as the lexer calls utf8.DecodeRuneInString()
- // which mangles stuff.
- if strings.HasPrefix(data, "\xff\xfe") || strings.HasPrefix(data, "\xfe\xff") {
+ // which mangles stuff. UTF-16 BOM isn't strictly valid, but some tools add
+ // it anyway.
+ if strings.HasPrefix(data, "\xff\xfe") || strings.HasPrefix(data, "\xfe\xff") { // UTF-16
data = data[2:]
+ } else if strings.HasPrefix(data, "\xef\xbb\xbf") { // UTF-8
+ data = data[3:]
}
// Examine first few bytes for NULL bytes; this probably means it's a UTF-16
@@ -56,7 +64,7 @@ func parse(data string) (p *parser, err error) {
if i := strings.IndexRune(data[:ex], 0); i > -1 {
return nil, ParseError{
Message: "files cannot contain NULL bytes; probably using UTF-16; TOML files must be UTF-8",
- Position: Position{Line: 1, Start: i, Len: 1},
+ Position: Position{Line: 1, Col: 1, Start: i, Len: 1},
Line: 1,
input: data,
}
@@ -64,10 +72,11 @@ func parse(data string) (p *parser, err error) {
p = &parser{
keyInfo: make(map[string]keyInfo),
- mapping: make(map[string]interface{}),
- lx: lex(data),
+ mapping: make(map[string]any),
+ lx: lex(data, tomlNext),
ordered: make([]Key, 0),
implicits: make(map[string]struct{}),
+ tomlNext: tomlNext,
}
for {
item := p.next()
@@ -82,26 +91,27 @@ func parse(data string) (p *parser, err error) {
func (p *parser) panicErr(it item, err error) {
panic(ParseError{
+ Message: err.Error(),
err: err,
- Position: it.pos,
+ Position: it.pos.withCol(p.lx.input),
Line: it.pos.Len,
LastKey: p.current(),
})
}
-func (p *parser) panicItemf(it item, format string, v ...interface{}) {
+func (p *parser) panicItemf(it item, format string, v ...any) {
panic(ParseError{
Message: fmt.Sprintf(format, v...),
- Position: it.pos,
+ Position: it.pos.withCol(p.lx.input),
Line: it.pos.Len,
LastKey: p.current(),
})
}
-func (p *parser) panicf(format string, v ...interface{}) {
+func (p *parser) panicf(format string, v ...any) {
panic(ParseError{
Message: fmt.Sprintf(format, v...),
- Position: p.pos,
+ Position: p.pos.withCol(p.lx.input),
Line: p.pos.Line,
LastKey: p.current(),
})
@@ -113,10 +123,11 @@ func (p *parser) next() item {
if it.typ == itemError {
if it.err != nil {
panic(ParseError{
- Position: it.pos,
+ Message: it.err.Error(),
+ err: it.err,
+ Position: it.pos.withCol(p.lx.input),
Line: it.pos.Line,
LastKey: p.current(),
- err: it.err,
})
}
@@ -131,7 +142,7 @@ func (p *parser) nextPos() item {
return it
}
-func (p *parser) bug(format string, v ...interface{}) {
+func (p *parser) bug(format string, v ...any) {
panic(fmt.Sprintf("BUG: "+format+"\n\n", v...))
}
@@ -186,20 +197,21 @@ func (p *parser) topLevel(item item) {
p.assertEqual(itemKeyEnd, k.typ)
/// The current key is the last part.
- p.currentKey = key[len(key)-1]
+ p.currentKey = key.last()
/// All the other parts (if any) are the context; need to set each part
/// as implicit.
- context := key[:len(key)-1]
+ context := key.parent()
for i := range context {
p.addImplicitContext(append(p.context, context[i:i+1]...))
}
+ p.ordered = append(p.ordered, p.context.add(p.currentKey))
/// Set value.
vItem := p.next()
val, typ := p.value(vItem, false)
- p.set(p.currentKey, val, typ, vItem.pos)
- p.ordered = append(p.ordered, p.context.add(p.currentKey))
+ p.setValue(p.currentKey, val)
+ p.setType(p.currentKey, typ, vItem.pos)
/// Remove the context we added (preserving any context from [tbl] lines).
p.context = outerContext
@@ -214,7 +226,7 @@ func (p *parser) keyString(it item) string {
switch it.typ {
case itemText:
return it.val
- case itemString, itemMultilineString,
+ case itemString, itemStringEsc, itemMultilineString,
itemRawString, itemRawMultilineString:
s, _ := p.value(it, false)
return s.(string)
@@ -231,12 +243,14 @@ var datetimeRepl = strings.NewReplacer(
// value translates an expected value from the lexer into a Go value wrapped
// as an empty interface.
-func (p *parser) value(it item, parentIsArray bool) (interface{}, tomlType) {
+func (p *parser) value(it item, parentIsArray bool) (any, tomlType) {
switch it.typ {
case itemString:
+ return it.val, p.typeOfPrimitive(it)
+ case itemStringEsc:
return p.replaceEscapes(it, it.val), p.typeOfPrimitive(it)
case itemMultilineString:
- return p.replaceEscapes(it, stripFirstNewline(p.stripEscapedNewlines(it.val))), p.typeOfPrimitive(it)
+ return p.replaceEscapes(it, p.stripEscapedNewlines(stripFirstNewline(it.val))), p.typeOfPrimitive(it)
case itemRawString:
return it.val, p.typeOfPrimitive(it)
case itemRawMultilineString:
@@ -266,7 +280,7 @@ func (p *parser) value(it item, parentIsArray bool) (interface{}, tomlType) {
panic("unreachable")
}
-func (p *parser) valueInteger(it item) (interface{}, tomlType) {
+func (p *parser) valueInteger(it item) (any, tomlType) {
if !numUnderscoresOK(it.val) {
p.panicItemf(it, "Invalid integer %q: underscores must be surrounded by digits", it.val)
}
@@ -290,7 +304,7 @@ func (p *parser) valueInteger(it item) (interface{}, tomlType) {
return num, p.typeOfPrimitive(it)
}
-func (p *parser) valueFloat(it item) (interface{}, tomlType) {
+func (p *parser) valueFloat(it item) (any, tomlType) {
parts := strings.FieldsFunc(it.val, func(r rune) bool {
switch r {
case '.', 'e', 'E':
@@ -314,7 +328,9 @@ func (p *parser) valueFloat(it item) (interface{}, tomlType) {
p.panicItemf(it, "Invalid float %q: '.' must be followed by one or more digits", it.val)
}
val := strings.Replace(it.val, "_", "", -1)
- if val == "+nan" || val == "-nan" { // Go doesn't support this, but TOML spec does.
+ signbit := false
+ if val == "+nan" || val == "-nan" {
+ signbit = val == "-nan"
val = "nan"
}
num, err := strconv.ParseFloat(val, 64)
@@ -325,20 +341,29 @@ func (p *parser) valueFloat(it item) (interface{}, tomlType) {
p.panicItemf(it, "Invalid float value: %q", it.val)
}
}
+ if signbit {
+ num = math.Copysign(num, -1)
+ }
return num, p.typeOfPrimitive(it)
}
var dtTypes = []struct {
fmt string
zone *time.Location
+ next bool
}{
- {time.RFC3339Nano, time.Local},
- {"2006-01-02T15:04:05.999999999", internal.LocalDatetime},
- {"2006-01-02", internal.LocalDate},
- {"15:04:05.999999999", internal.LocalTime},
+ {time.RFC3339Nano, time.Local, false},
+ {"2006-01-02T15:04:05.999999999", internal.LocalDatetime, false},
+ {"2006-01-02", internal.LocalDate, false},
+ {"15:04:05.999999999", internal.LocalTime, false},
+
+ // tomlNext
+ {"2006-01-02T15:04Z07:00", time.Local, true},
+ {"2006-01-02T15:04", internal.LocalDatetime, true},
+ {"15:04", internal.LocalTime, true},
}
-func (p *parser) valueDatetime(it item) (interface{}, tomlType) {
+func (p *parser) valueDatetime(it item) (any, tomlType) {
it.val = datetimeRepl.Replace(it.val)
var (
t time.Time
@@ -346,28 +371,49 @@ func (p *parser) valueDatetime(it item) (interface{}, tomlType) {
err error
)
for _, dt := range dtTypes {
+ if dt.next && !p.tomlNext {
+ continue
+ }
t, err = time.ParseInLocation(dt.fmt, it.val, dt.zone)
if err == nil {
+ if missingLeadingZero(it.val, dt.fmt) {
+ p.panicErr(it, errParseDate{it.val})
+ }
ok = true
break
}
}
if !ok {
- p.panicItemf(it, "Invalid TOML Datetime: %q.", it.val)
+ p.panicErr(it, errParseDate{it.val})
}
return t, p.typeOfPrimitive(it)
}
-func (p *parser) valueArray(it item) (interface{}, tomlType) {
+// Go's time.Parse() will accept numbers without a leading zero; there isn't any
+// way to require it. https://github.com/golang/go/issues/29911
+//
+// Depend on the fact that the separators (- and :) should always be at the same
+// location.
+func missingLeadingZero(d, l string) bool {
+ for i, c := range []byte(l) {
+ if c == '.' || c == 'Z' {
+ return false
+ }
+ if (c < '0' || c > '9') && d[i] != c {
+ return true
+ }
+ }
+ return false
+}
+
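Re-implemented below for illustration: the separator positions in the layout must match byte-for-byte, which catches single-digit components that time.ParseInLocation would otherwise accept.

package main

import "fmt"

func missingLeadingZero(d, l string) bool {
	for i, c := range []byte(l) {
		if c == '.' || c == 'Z' {
			return false
		}
		if (c < '0' || c > '9') && d[i] != c {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(missingLeadingZero("2006-1-02", "2006-01-02"))  // true: the '-' shifted
	fmt.Println(missingLeadingZero("2006-01-02", "2006-01-02")) // false
}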
+func (p *parser) valueArray(it item) (any, tomlType) {
p.setType(p.currentKey, tomlArray, it.pos)
var (
- types []tomlType
-
- // Initialize to a non-nil empty slice. This makes it consistent with
- // how S = [] decodes into a non-nil slice inside something like struct
- // { S []string }. See #338
- array = []interface{}{}
+ // Initialize to a non-nil slice to make it consistent with how S = []
+ // decodes into a non-nil slice inside something like struct { S
+ // []string }. See #338
+ array = make([]any, 0, 2)
)
for it = p.next(); it.typ != itemArrayEnd; it = p.next() {
if it.typ == itemCommentStart {
@@ -377,20 +423,20 @@ func (p *parser) valueArray(it item) (interface{}, tomlType) {
val, typ := p.value(it, true)
array = append(array, val)
- types = append(types, typ)
- // XXX: types isn't used here, we need it to record the accurate type
+ // XXX: type isn't used here, we need it to record the accurate type
// information.
//
// Not entirely sure how to best store this; could use "key[0]",
// "key[1]" notation, or maybe store it on the Array type?
+ _ = typ
}
return array, tomlArray
}
-func (p *parser) valueInlineTable(it item, parentIsArray bool) (interface{}, tomlType) {
+func (p *parser) valueInlineTable(it item, parentIsArray bool) (any, tomlType) {
var (
- hash = make(map[string]interface{})
+ topHash = make(map[string]any)
outerContext = p.context
outerKey = p.currentKey
)
@@ -418,19 +464,33 @@ func (p *parser) valueInlineTable(it item, parentIsArray bool) (interface{}, tom
p.assertEqual(itemKeyEnd, k.typ)
/// The current key is the last part.
- p.currentKey = key[len(key)-1]
+ p.currentKey = key.last()
/// All the other parts (if any) are the context; need to set each part
/// as implicit.
- context := key[:len(key)-1]
+ context := key.parent()
for i := range context {
p.addImplicitContext(append(p.context, context[i:i+1]...))
}
+ p.ordered = append(p.ordered, p.context.add(p.currentKey))
/// Set the value.
val, typ := p.value(p.next(), false)
- p.set(p.currentKey, val, typ, it.pos)
- p.ordered = append(p.ordered, p.context.add(p.currentKey))
+ p.setValue(p.currentKey, val)
+ p.setType(p.currentKey, typ, it.pos)
+
+ hash := topHash
+ for _, c := range context {
+ h, ok := hash[c]
+ if !ok {
+ h = make(map[string]any)
+ hash[c] = h
+ }
+ hash, ok = h.(map[string]any)
+ if !ok {
+ p.panicf("%q is not a table", p.context)
+ }
+ }
hash[p.currentKey] = val
/// Restore context.
@@ -438,7 +498,7 @@ func (p *parser) valueInlineTable(it item, parentIsArray bool) (interface{}, tom
}
p.context = outerContext
p.currentKey = outerKey
- return hash, tomlHash
+ return topHash, tomlHash
}
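The topHash change fixes nesting for dotted keys inside inline tables, which previously ended up flat on the returned hash. A quick check:

package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	var v map[string]any
	if _, err := toml.Decode(`point = { xy.x = 1, xy.y = 2 }`, &v); err != nil {
		panic(err)
	}
	fmt.Println(v["point"].(map[string]any)["xy"]) // map[x:1 y:2]
}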
// numHasLeadingZero checks if this number has leading zeroes, allowing for '0',
@@ -468,9 +528,9 @@ func numUnderscoresOK(s string) bool {
}
}
- // isHexadecimal is a superset of all the permissable characters
- // surrounding an underscore.
- accept = isHexadecimal(r)
+ // isHex is a superset of all the permissible characters surrounding an
+ // underscore.
+ accept = isHex(r)
}
return accept
}
@@ -493,21 +553,19 @@ func numPeriodsOK(s string) bool {
// Establishing the context also makes sure that the key isn't a duplicate, and
// will create implicit hashes automatically.
func (p *parser) addContext(key Key, array bool) {
- var ok bool
-
- // Always start at the top level and drill down for our context.
+ /// Always start at the top level and drill down for our context.
hashContext := p.mapping
- keyContext := make(Key, 0)
+ keyContext := make(Key, 0, len(key)-1)
- // We only need implicit hashes for key[0:-1]
- for _, k := range key[0 : len(key)-1] {
- _, ok = hashContext[k]
+ /// We only need implicit hashes for the parents.
+ for _, k := range key.parent() {
+ _, ok := hashContext[k]
keyContext = append(keyContext, k)
// No key? Make an implicit hash and move on.
if !ok {
p.addImplicit(keyContext)
- hashContext[k] = make(map[string]interface{})
+ hashContext[k] = make(map[string]any)
}
// If the hash context is actually an array of tables, then set
@@ -516,9 +574,9 @@ func (p *parser) addContext(key Key, array bool) {
// Otherwise, it better be a table, since this MUST be a key group (by
// virtue of it not being the last element in a key).
switch t := hashContext[k].(type) {
- case []map[string]interface{}:
+ case []map[string]any:
hashContext = t[len(t)-1]
- case map[string]interface{}:
+ case map[string]any:
hashContext = t
default:
p.panicf("Key '%s' was already created as a hash.", keyContext)
@@ -529,40 +587,33 @@ func (p *parser) addContext(key Key, array bool) {
if array {
// If this is the first element for this array, then allocate a new
// list of tables for it.
- k := key[len(key)-1]
+ k := key.last()
if _, ok := hashContext[k]; !ok {
- hashContext[k] = make([]map[string]interface{}, 0, 4)
+ hashContext[k] = make([]map[string]any, 0, 4)
}
// Add a new table. But make sure the key hasn't already been used
// for something else.
- if hash, ok := hashContext[k].([]map[string]interface{}); ok {
- hashContext[k] = append(hash, make(map[string]interface{}))
+ if hash, ok := hashContext[k].([]map[string]any); ok {
+ hashContext[k] = append(hash, make(map[string]any))
} else {
p.panicf("Key '%s' was already created and cannot be used as an array.", key)
}
} else {
- p.setValue(key[len(key)-1], make(map[string]interface{}))
+ p.setValue(key.last(), make(map[string]any))
}
- p.context = append(p.context, key[len(key)-1])
-}
-
-// set calls setValue and setType.
-func (p *parser) set(key string, val interface{}, typ tomlType, pos Position) {
- p.setValue(key, val)
- p.setType(key, typ, pos)
-
+ p.context = append(p.context, key.last())
}
// setValue sets the given key to the given value in the current context.
// It will make sure that the key hasn't already been defined, account for
// implicit key groups.
-func (p *parser) setValue(key string, value interface{}) {
+func (p *parser) setValue(key string, value any) {
var (
- tmpHash interface{}
+ tmpHash any
ok bool
hash = p.mapping
- keyContext Key
+ keyContext = make(Key, 0, len(p.context)+1)
)
for _, k := range p.context {
keyContext = append(keyContext, k)
@@ -570,11 +621,11 @@ func (p *parser) setValue(key string, value interface{}) {
p.bug("Context for key '%s' has not been established.", keyContext)
}
switch t := tmpHash.(type) {
- case []map[string]interface{}:
+ case []map[string]any:
// The context is a table of hashes. Pick the most recent table
// defined as the current hash.
hash = t[len(t)-1]
- case map[string]interface{}:
+ case map[string]any:
hash = t
default:
p.panicf("Key '%s' has already been defined.", keyContext)
@@ -601,9 +652,8 @@ func (p *parser) setValue(key string, value interface{}) {
p.removeImplicit(keyContext)
return
}
-
- // Otherwise, we have a concrete key trying to override a previous
- // key, which is *always* wrong.
+ // Otherwise, we have a concrete key trying to override a previous key,
+ // which is *always* wrong.
p.panicf("Key '%s' has already been defined.", keyContext)
}
@@ -632,14 +682,11 @@ func (p *parser) setType(key string, typ tomlType, pos Position) {
// Implicit keys need to be created when tables are implied in "a.b.c.d = 1" and
// "[a.b.c]" (the "a", "b", and "c" hashes are never created explicitly).
-func (p *parser) addImplicit(key Key) { p.implicits[key.String()] = struct{}{} }
-func (p *parser) removeImplicit(key Key) { delete(p.implicits, key.String()) }
-func (p *parser) isImplicit(key Key) bool { _, ok := p.implicits[key.String()]; return ok }
-func (p *parser) isArray(key Key) bool { return p.keyInfo[key.String()].tomlType == tomlArray }
-func (p *parser) addImplicitContext(key Key) {
- p.addImplicit(key)
- p.addContext(key, false)
-}
+func (p *parser) addImplicit(key Key) { p.implicits[key.String()] = struct{}{} }
+func (p *parser) removeImplicit(key Key) { delete(p.implicits, key.String()) }
+func (p *parser) isImplicit(key Key) bool { _, ok := p.implicits[key.String()]; return ok }
+func (p *parser) isArray(key Key) bool { return p.keyInfo[key.String()].tomlType == tomlArray }
+func (p *parser) addImplicitContext(key Key) { p.addImplicit(key); p.addContext(key, false) }
// current returns the full key name of the current context.
func (p *parser) current() string {
@@ -662,114 +709,131 @@ func stripFirstNewline(s string) string {
return s
}
-// Remove newlines inside triple-quoted strings if a line ends with "\".
+// stripEscapedNewlines removes whitespace after line-ending backslashes in
+// multiline strings.
+//
+// A line-ending backslash is an unescaped \ followed only by whitespace until
+// the next newline. After a line-ending backslash, all whitespace is removed
+// until the next non-whitespace character.
func (p *parser) stripEscapedNewlines(s string) string {
- split := strings.Split(s, "\n")
- if len(split) < 1 {
- return s
- }
+ var (
+ b strings.Builder
+ i int
+ )
+ b.Grow(len(s))
+ for {
+ ix := strings.Index(s[i:], `\`)
+ if ix < 0 {
+ b.WriteString(s)
+ return b.String()
+ }
+ i += ix
- escNL := false // Keep track of whether the last non-blank line was escaped.
- for i, line := range split {
- line = strings.TrimRight(line, " \t\r")
-
- if len(line) == 0 || line[len(line)-1] != '\\' {
- split[i] = strings.TrimRight(split[i], "\r")
- if !escNL && i != len(split)-1 {
- split[i] += "\n"
+ if len(s) > i+1 && s[i+1] == '\\' {
+ // Escaped backslash.
+ i += 2
+ continue
+ }
+ // Scan until the next non-whitespace.
+ j := i + 1
+ whitespaceLoop:
+ for ; j < len(s); j++ {
+ switch s[j] {
+ case ' ', '\t', '\r', '\n':
+ default:
+ break whitespaceLoop
}
+ }
+ if j == i+1 {
+ // Not a whitespace escape.
+ i++
continue
}
-
- escBS := true
- for j := len(line) - 1; j >= 0 && line[j] == '\\'; j-- {
- escBS = !escBS
- }
- if escNL {
- line = strings.TrimLeft(line, " \t\r")
- }
- escNL = !escBS
-
- if escBS {
- split[i] += "\n"
+ if !strings.Contains(s[i:j], "\n") {
+ // This is not a line-ending backslash. (It's a bad escape sequence,
+ // but we can let replaceEscapes catch it.)
+ i++
continue
}
-
- if i == len(split)-1 {
- p.panicf("invalid escape: '\\ '")
- }
-
- split[i] = line[:len(line)-1] // Remove \
- if len(split)-1 > i {
- split[i+1] = strings.TrimLeft(split[i+1], " \t\r")
- }
+ b.WriteString(s[:i])
+ s = s[j:]
+ i = 0
}
- return strings.Join(split, "")
}
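+
+// Illustrative sketch of the behavior documented above (not vendored code):
+// for the lexed body "foo \\\n\t bar" the function returns "foo bar", since
+// the line-ending backslash swallows all whitespace up to "bar", while
+// "foo \\\\\nbar" is returned unchanged because that backslash is escaped.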
func (p *parser) replaceEscapes(it item, str string) string {
- replaced := make([]rune, 0, len(str))
- s := []byte(str)
- r := 0
- for r < len(s) {
- if s[r] != '\\' {
- c, size := utf8.DecodeRune(s[r:])
- r += size
- replaced = append(replaced, c)
+ var (
+ b strings.Builder
+ skip = 0
+ )
+ b.Grow(len(str))
+ for i, c := range str {
+ if skip > 0 {
+ skip--
continue
}
- r += 1
- if r >= len(s) {
+ if c != '\\' {
+ b.WriteRune(c)
+ continue
+ }
+
+ if i+1 >= len(str) {
p.bug("Escape sequence at end of string.")
return ""
}
- switch s[r] {
+ switch str[i+1] {
default:
- p.bug("Expected valid escape code after \\, but got %q.", s[r])
+ p.bug("Expected valid escape code after \\, but got %q.", str[i+1])
case ' ', '\t':
- p.panicItemf(it, "invalid escape: '\\%c'", s[r])
+ p.panicItemf(it, "invalid escape: '\\%c'", str[i+1])
case 'b':
- replaced = append(replaced, rune(0x0008))
- r += 1
+ b.WriteByte(0x08)
+ skip = 1
case 't':
- replaced = append(replaced, rune(0x0009))
- r += 1
+ b.WriteByte(0x09)
+ skip = 1
case 'n':
- replaced = append(replaced, rune(0x000A))
- r += 1
+ b.WriteByte(0x0a)
+ skip = 1
case 'f':
- replaced = append(replaced, rune(0x000C))
- r += 1
+ b.WriteByte(0x0c)
+ skip = 1
case 'r':
- replaced = append(replaced, rune(0x000D))
- r += 1
+ b.WriteByte(0x0d)
+ skip = 1
+ case 'e':
+ if p.tomlNext {
+ b.WriteByte(0x1b)
+ skip = 1
+ }
case '"':
- replaced = append(replaced, rune(0x0022))
- r += 1
+ b.WriteByte(0x22)
+ skip = 1
case '\\':
- replaced = append(replaced, rune(0x005C))
- r += 1
+ b.WriteByte(0x5c)
+ skip = 1
+ // The lexer guarantees the correct number of characters are present,
+ // so we don't need to check here.
+ case 'x':
+ if p.tomlNext {
+ escaped := p.asciiEscapeToUnicode(it, str[i+2:i+4])
+ b.WriteRune(escaped)
+ skip = 3
+ }
case 'u':
- // At this point, we know we have a Unicode escape of the form
- // `uXXXX` at [r, r+5). (Because the lexer guarantees this
- // for us.)
- escaped := p.asciiEscapeToUnicode(it, s[r+1:r+5])
- replaced = append(replaced, escaped)
- r += 5
+ escaped := p.asciiEscapeToUnicode(it, str[i+2:i+6])
+ b.WriteRune(escaped)
+ skip = 5
case 'U':
- // At this point, we know we have a Unicode escape of the form
- // `UXXXXXXXX` at [r, r+9). (Because the lexer guarantees this
- // for us.)
- escaped := p.asciiEscapeToUnicode(it, s[r+1:r+9])
- replaced = append(replaced, escaped)
- r += 9
+ escaped := p.asciiEscapeToUnicode(it, str[i+2:i+10])
+ b.WriteRune(escaped)
+ skip = 9
}
}
- return string(replaced)
+ return b.String()
}
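+
+// Illustrative examples (not in the vendored source): for the lexed body
+// `a\tb\u0041`, replaceEscapes returns "a\tbA". The `\e` (0x1b) and `\xHH`
+// escapes are only honored when p.tomlNext is set, per the TOML 1.1 draft.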
-func (p *parser) asciiEscapeToUnicode(it item, bs []byte) rune {
- s := string(bs)
+func (p *parser) asciiEscapeToUnicode(it item, s string) rune {
hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32)
if err != nil {
p.bug("Could not parse '%s' as a hexadecimal number, but the lexer claims it's OK: %s", s, err)
diff --git a/vendor/github.com/BurntSushi/toml/type_fields.go b/vendor/github.com/BurntSushi/toml/type_fields.go
index 254ca82e..10c51f7e 100644
--- a/vendor/github.com/BurntSushi/toml/type_fields.go
+++ b/vendor/github.com/BurntSushi/toml/type_fields.go
@@ -25,10 +25,8 @@ type field struct {
// breaking ties with index sequence.
type byName []field
-func (x byName) Len() int { return len(x) }
-
+func (x byName) Len() int { return len(x) }
func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
-
func (x byName) Less(i, j int) bool {
if x[i].name != x[j].name {
return x[i].name < x[j].name
@@ -45,10 +43,8 @@ func (x byName) Less(i, j int) bool {
// byIndex sorts field by index sequence.
type byIndex []field
-func (x byIndex) Len() int { return len(x) }
-
+func (x byIndex) Len() int { return len(x) }
func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
-
func (x byIndex) Less(i, j int) bool {
for k, xik := range x[i].index {
if k >= len(x[j].index) {
diff --git a/vendor/github.com/BurntSushi/toml/type_toml.go b/vendor/github.com/BurntSushi/toml/type_toml.go
index 4e90d773..1c090d33 100644
--- a/vendor/github.com/BurntSushi/toml/type_toml.go
+++ b/vendor/github.com/BurntSushi/toml/type_toml.go
@@ -22,13 +22,8 @@ func typeIsTable(t tomlType) bool {
type tomlBaseType string
-func (btype tomlBaseType) typeString() string {
- return string(btype)
-}
-
-func (btype tomlBaseType) String() string {
- return btype.typeString()
-}
+func (btype tomlBaseType) typeString() string { return string(btype) }
+func (btype tomlBaseType) String() string { return btype.typeString() }
var (
tomlInteger tomlBaseType = "Integer"
@@ -54,7 +49,7 @@ func (p *parser) typeOfPrimitive(lexItem item) tomlType {
return tomlFloat
case itemDatetime:
return tomlDatetime
- case itemString:
+ case itemString, itemStringEsc:
return tomlString
case itemMultilineString:
return tomlString
diff --git a/vendor/github.com/ProtonMail/go-crypto/AUTHORS b/vendor/github.com/ProtonMail/go-crypto/AUTHORS
deleted file mode 100644
index 2b00ddba..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/AUTHORS
+++ /dev/null
@@ -1,3 +0,0 @@
-# This source code refers to The Go Authors for copyright purposes.
-# The master list of authors is in the main Go distribution,
-# visible at https://tip.golang.org/AUTHORS.
diff --git a/vendor/github.com/ProtonMail/go-crypto/CONTRIBUTORS b/vendor/github.com/ProtonMail/go-crypto/CONTRIBUTORS
deleted file mode 100644
index 1fbd3e97..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/CONTRIBUTORS
+++ /dev/null
@@ -1,3 +0,0 @@
-# This source code was written by the Go contributors.
-# The master list of contributors is in the main Go distribution,
-# visible at https://tip.golang.org/CONTRIBUTORS.
diff --git a/vendor/github.com/ProtonMail/go-crypto/bitcurves/bitcurve.go b/vendor/github.com/ProtonMail/go-crypto/bitcurves/bitcurve.go
deleted file mode 100644
index 3ed3f435..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/bitcurves/bitcurve.go
+++ /dev/null
@@ -1,381 +0,0 @@
-package bitcurves
-
-// Copyright 2010 The Go Authors. All rights reserved.
-// Copyright 2011 ThePiachu. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package bitcurves implements several Koblitz elliptic curves over prime
-// fields.
-
-// This package operates, internally, on Jacobian coordinates. For a given
-// (x, y) position on the curve, the Jacobian coordinates are (x1, y1, z1)
-// where x = x1/z1² and y = y1/z1³. The greatest speedups come when the whole
-// calculation can be performed within the transform (as in ScalarMult and
-// ScalarBaseMult). But even for Add and Double, it's faster to apply and
-// reverse the transform than to operate in affine coordinates.
-
-import (
- "crypto/elliptic"
- "io"
- "math/big"
- "sync"
-)
-
-// A BitCurve represents a Koblitz Curve with a=0.
-// See http://www.hyperelliptic.org/EFD/g1p/auto-shortw.html
-type BitCurve struct {
- Name string
- P *big.Int // the order of the underlying field
- N *big.Int // the order of the base point
- B *big.Int // the constant of the BitCurve equation
- Gx, Gy *big.Int // (x,y) of the base point
- BitSize int // the size of the underlying field
-}
-
-// Params returns the parameters of the given BitCurve (see BitCurve struct)
-func (bitCurve *BitCurve) Params() (cp *elliptic.CurveParams) {
- cp = new(elliptic.CurveParams)
- cp.Name = bitCurve.Name
- cp.P = bitCurve.P
- cp.N = bitCurve.N
- cp.Gx = bitCurve.Gx
- cp.Gy = bitCurve.Gy
- cp.BitSize = bitCurve.BitSize
- return cp
-}
-
-// IsOnCurve returns true if the given (x,y) lies on the BitCurve.
-func (bitCurve *BitCurve) IsOnCurve(x, y *big.Int) bool {
- // y² = x³ + b
- y2 := new(big.Int).Mul(y, y) //y²
- y2.Mod(y2, bitCurve.P) //y²%P
-
- x3 := new(big.Int).Mul(x, x) //x²
- x3.Mul(x3, x) //x³
-
- x3.Add(x3, bitCurve.B) //x³+B
- x3.Mod(x3, bitCurve.P) //(x³+B)%P
-
- return x3.Cmp(y2) == 0
-}
-
-// affineFromJacobian reverses the Jacobian transform. See the comment at the
-// top of the file.
-func (bitCurve *BitCurve) affineFromJacobian(x, y, z *big.Int) (xOut, yOut *big.Int) {
- if z.Cmp(big.NewInt(0)) == 0 {
- panic("bitcurve: Can't convert to affine with Jacobian Z = 0")
- }
- // x = X / Z² mod P
- zinv := new(big.Int).ModInverse(z, bitCurve.P)
- zinvsq := new(big.Int).Mul(zinv, zinv)
-
- xOut = new(big.Int).Mul(x, zinvsq)
- xOut.Mod(xOut, bitCurve.P)
- // y = Y / Z³ mod P
- zinvsq.Mul(zinvsq, zinv)
- yOut = new(big.Int).Mul(y, zinvsq)
- yOut.Mod(yOut, bitCurve.P)
- return xOut, yOut
-}
-
-// Add returns the sum of (x1,y1) and (x2,y2)
-func (bitCurve *BitCurve) Add(x1, y1, x2, y2 *big.Int) (*big.Int, *big.Int) {
- z := new(big.Int).SetInt64(1)
- x, y, z := bitCurve.addJacobian(x1, y1, z, x2, y2, z)
- return bitCurve.affineFromJacobian(x, y, z)
-}
-
-// addJacobian takes two points in Jacobian coordinates, (x1, y1, z1) and
-// (x2, y2, z2) and returns their sum, also in Jacobian form.
-func (bitCurve *BitCurve) addJacobian(x1, y1, z1, x2, y2, z2 *big.Int) (*big.Int, *big.Int, *big.Int) {
- // See http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-add-2007-bl
- z1z1 := new(big.Int).Mul(z1, z1)
- z1z1.Mod(z1z1, bitCurve.P)
- z2z2 := new(big.Int).Mul(z2, z2)
- z2z2.Mod(z2z2, bitCurve.P)
-
- u1 := new(big.Int).Mul(x1, z2z2)
- u1.Mod(u1, bitCurve.P)
- u2 := new(big.Int).Mul(x2, z1z1)
- u2.Mod(u2, bitCurve.P)
- h := new(big.Int).Sub(u2, u1)
- if h.Sign() == -1 {
- h.Add(h, bitCurve.P)
- }
- i := new(big.Int).Lsh(h, 1)
- i.Mul(i, i)
- j := new(big.Int).Mul(h, i)
-
- s1 := new(big.Int).Mul(y1, z2)
- s1.Mul(s1, z2z2)
- s1.Mod(s1, bitCurve.P)
- s2 := new(big.Int).Mul(y2, z1)
- s2.Mul(s2, z1z1)
- s2.Mod(s2, bitCurve.P)
- r := new(big.Int).Sub(s2, s1)
- if r.Sign() == -1 {
- r.Add(r, bitCurve.P)
- }
- r.Lsh(r, 1)
- v := new(big.Int).Mul(u1, i)
-
- x3 := new(big.Int).Set(r)
- x3.Mul(x3, x3)
- x3.Sub(x3, j)
- x3.Sub(x3, v)
- x3.Sub(x3, v)
- x3.Mod(x3, bitCurve.P)
-
- y3 := new(big.Int).Set(r)
- v.Sub(v, x3)
- y3.Mul(y3, v)
- s1.Mul(s1, j)
- s1.Lsh(s1, 1)
- y3.Sub(y3, s1)
- y3.Mod(y3, bitCurve.P)
-
- z3 := new(big.Int).Add(z1, z2)
- z3.Mul(z3, z3)
- z3.Sub(z3, z1z1)
- if z3.Sign() == -1 {
- z3.Add(z3, bitCurve.P)
- }
- z3.Sub(z3, z2z2)
- if z3.Sign() == -1 {
- z3.Add(z3, bitCurve.P)
- }
- z3.Mul(z3, h)
- z3.Mod(z3, bitCurve.P)
-
- return x3, y3, z3
-}
-
-// Double returns 2*(x,y)
-func (bitCurve *BitCurve) Double(x1, y1 *big.Int) (*big.Int, *big.Int) {
- z1 := new(big.Int).SetInt64(1)
- return bitCurve.affineFromJacobian(bitCurve.doubleJacobian(x1, y1, z1))
-}
-
-// doubleJacobian takes a point in Jacobian coordinates, (x, y, z), and
-// returns its double, also in Jacobian form.
-func (bitCurve *BitCurve) doubleJacobian(x, y, z *big.Int) (*big.Int, *big.Int, *big.Int) {
- // See http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#doubling-dbl-2009-l
-
- a := new(big.Int).Mul(x, x) //X1²
- b := new(big.Int).Mul(y, y) //Y1²
- c := new(big.Int).Mul(b, b) //B²
-
- d := new(big.Int).Add(x, b) //X1+B
- d.Mul(d, d) //(X1+B)²
- d.Sub(d, a) //(X1+B)²-A
- d.Sub(d, c) //(X1+B)²-A-C
- d.Mul(d, big.NewInt(2)) //2*((X1+B)²-A-C)
-
- e := new(big.Int).Mul(big.NewInt(3), a) //3*A
- f := new(big.Int).Mul(e, e) //E²
-
- x3 := new(big.Int).Mul(big.NewInt(2), d) //2*D
- x3.Sub(f, x3) //F-2*D
- x3.Mod(x3, bitCurve.P)
-
- y3 := new(big.Int).Sub(d, x3) //D-X3
- y3.Mul(e, y3) //E*(D-X3)
- y3.Sub(y3, new(big.Int).Mul(big.NewInt(8), c)) //E*(D-X3)-8*C
- y3.Mod(y3, bitCurve.P)
-
- z3 := new(big.Int).Mul(y, z) //Y1*Z1
- z3.Mul(big.NewInt(2), z3) //2*Y1*Z1
- z3.Mod(z3, bitCurve.P)
-
- return x3, y3, z3
-}
-
-//TODO: double check if it is okay
-// ScalarMult returns k*(Bx,By) where k is a number in big-endian form.
-func (bitCurve *BitCurve) ScalarMult(Bx, By *big.Int, k []byte) (*big.Int, *big.Int) {
- // We have a slight problem in that the identity of the group (the
- // point at infinity) cannot be represented in (x, y) form on a finite
- // machine. Thus the standard add/double algorithm has to be tweaked
- // slightly: our initial state is not the identity, but x, and we
- // ignore the first true bit in |k|. If we don't find any true bits in
- // |k|, then we return nil, nil, because we cannot return the identity
- // element.
-
- Bz := new(big.Int).SetInt64(1)
- x := Bx
- y := By
- z := Bz
-
- seenFirstTrue := false
- for _, byte := range k {
- for bitNum := 0; bitNum < 8; bitNum++ {
- if seenFirstTrue {
- x, y, z = bitCurve.doubleJacobian(x, y, z)
- }
- if byte&0x80 == 0x80 {
- if !seenFirstTrue {
- seenFirstTrue = true
- } else {
- x, y, z = bitCurve.addJacobian(Bx, By, Bz, x, y, z)
- }
- }
- byte <<= 1
- }
- }
-
- if !seenFirstTrue {
- return nil, nil
- }
-
- return bitCurve.affineFromJacobian(x, y, z)
-}
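-
-// Usage sketch (illustrative, not part of the original file):
-//
-//	x, y := curve.ScalarMult(curve.Gx, curve.Gy, k) // k in big-endian bytes
-//
-// x and y are nil when k contains no set bits, since the group identity
-// cannot be represented in affine coordinates.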
-
-// ScalarBaseMult returns k*G, where G is the base point of the group and k is
-// an integer in big-endian form.
-func (bitCurve *BitCurve) ScalarBaseMult(k []byte) (*big.Int, *big.Int) {
- return bitCurve.ScalarMult(bitCurve.Gx, bitCurve.Gy, k)
-}
-
-var mask = []byte{0xff, 0x1, 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f}
-
-//TODO: double check if it is okay
-// GenerateKey returns a public/private key pair. The private key is generated
-// using the given reader, which must return random data.
-func (bitCurve *BitCurve) GenerateKey(rand io.Reader) (priv []byte, x, y *big.Int, err error) {
- byteLen := (bitCurve.BitSize + 7) >> 3
- priv = make([]byte, byteLen)
-
- for x == nil {
- _, err = io.ReadFull(rand, priv)
- if err != nil {
- return
- }
- // We have to mask off any excess bits in the case that the size of the
- // underlying field is not a whole number of bytes.
- priv[0] &= mask[bitCurve.BitSize%8]
- // This is because, in tests, rand will return all zeros and we don't
- // want to get the point at infinity and loop forever.
- priv[1] ^= 0x42
- x, y = bitCurve.ScalarBaseMult(priv)
- }
- return
-}
-
-// Marshal converts a point into the form specified in section 4.3.6 of ANSI
-// X9.62.
-func (bitCurve *BitCurve) Marshal(x, y *big.Int) []byte {
- byteLen := (bitCurve.BitSize + 7) >> 3
-
- ret := make([]byte, 1+2*byteLen)
- ret[0] = 4 // uncompressed point
-
- xBytes := x.Bytes()
- copy(ret[1+byteLen-len(xBytes):], xBytes)
- yBytes := y.Bytes()
- copy(ret[1+2*byteLen-len(yBytes):], yBytes)
- return ret
-}
-
-// Unmarshal converts a point, serialised by Marshal, into an x, y pair. On
-// error, x = nil.
-func (bitCurve *BitCurve) Unmarshal(data []byte) (x, y *big.Int) {
- byteLen := (bitCurve.BitSize + 7) >> 3
- if len(data) != 1+2*byteLen {
- return
- }
- if data[0] != 4 { // uncompressed form
- return
- }
- x = new(big.Int).SetBytes(data[1 : 1+byteLen])
- y = new(big.Int).SetBytes(data[1+byteLen:])
- return
-}
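-
-// Round-trip sketch (illustrative): Marshal emits 0x04 || X || Y with
-// fixed-width big-endian coordinates, so for any point on the curve:
-//
-//	data := curve.Marshal(x, y)
-//	x2, y2 := curve.Unmarshal(data) // x2.Cmp(x) == 0 && y2.Cmp(y) == 0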
-
-//curve parameters taken from:
-//http://www.secg.org/collateral/sec2_final.pdf
-
-var initonce sync.Once
-var secp160k1 *BitCurve
-var secp192k1 *BitCurve
-var secp224k1 *BitCurve
-var secp256k1 *BitCurve
-
-func initAll() {
- initS160()
- initS192()
- initS224()
- initS256()
-}
-
-func initS160() {
- // See SEC 2 section 2.4.1
- secp160k1 = new(BitCurve)
- secp160k1.Name = "secp160k1"
- secp160k1.P, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFAC73", 16)
- secp160k1.N, _ = new(big.Int).SetString("0100000000000000000001B8FA16DFAB9ACA16B6B3", 16)
- secp160k1.B, _ = new(big.Int).SetString("0000000000000000000000000000000000000007", 16)
- secp160k1.Gx, _ = new(big.Int).SetString("3B4C382CE37AA192A4019E763036F4F5DD4D7EBB", 16)
- secp160k1.Gy, _ = new(big.Int).SetString("938CF935318FDCED6BC28286531733C3F03C4FEE", 16)
- secp160k1.BitSize = 160
-}
-
-func initS192() {
- // See SEC 2 section 2.5.1
- secp192k1 = new(BitCurve)
- secp192k1.Name = "secp192k1"
- secp192k1.P, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFEE37", 16)
- secp192k1.N, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFE26F2FC170F69466A74DEFD8D", 16)
- secp192k1.B, _ = new(big.Int).SetString("000000000000000000000000000000000000000000000003", 16)
- secp192k1.Gx, _ = new(big.Int).SetString("DB4FF10EC057E9AE26B07D0280B7F4341DA5D1B1EAE06C7D", 16)
- secp192k1.Gy, _ = new(big.Int).SetString("9B2F2F6D9C5628A7844163D015BE86344082AA88D95E2F9D", 16)
- secp192k1.BitSize = 192
-}
-
-func initS224() {
- // See SEC 2 section 2.6.1
- secp224k1 = new(BitCurve)
- secp224k1.Name = "secp224k1"
- secp224k1.P, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFE56D", 16)
- secp224k1.N, _ = new(big.Int).SetString("010000000000000000000000000001DCE8D2EC6184CAF0A971769FB1F7", 16)
- secp224k1.B, _ = new(big.Int).SetString("00000000000000000000000000000000000000000000000000000005", 16)
- secp224k1.Gx, _ = new(big.Int).SetString("A1455B334DF099DF30FC28A169A467E9E47075A90F7E650EB6B7A45C", 16)
- secp224k1.Gy, _ = new(big.Int).SetString("7E089FED7FBA344282CAFBD6F7E319F7C0B0BD59E2CA4BDB556D61A5", 16)
- secp224k1.BitSize = 224
-}
-
-func initS256() {
- // See SEC 2 section 2.7.1
- secp256k1 = new(BitCurve)
- secp256k1.Name = "secp256k1"
- secp256k1.P, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F", 16)
- secp256k1.N, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141", 16)
- secp256k1.B, _ = new(big.Int).SetString("0000000000000000000000000000000000000000000000000000000000000007", 16)
- secp256k1.Gx, _ = new(big.Int).SetString("79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798", 16)
- secp256k1.Gy, _ = new(big.Int).SetString("483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8", 16)
- secp256k1.BitSize = 256
-}
-
-// S160 returns a BitCurve which implements secp160k1 (see SEC 2 section 2.4.1)
-func S160() *BitCurve {
- initonce.Do(initAll)
- return secp160k1
-}
-
-// S192 returns a BitCurve which implements secp192k1 (see SEC 2 section 2.5.1)
-func S192() *BitCurve {
- initonce.Do(initAll)
- return secp192k1
-}
-
-// S224 returns a BitCurve which implements secp224k1 (see SEC 2 section 2.6.1)
-func S224() *BitCurve {
- initonce.Do(initAll)
- return secp224k1
-}
-
-// S256 returns a BitCurve which implements secp256k1 (see SEC 2 section 2.7.1)
-func S256() *BitCurve {
- initonce.Do(initAll)
- return secp256k1
-}
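-
-// Usage sketch (illustrative, not part of the deleted file):
-//
-//	curve := S256() // secp256k1
-//	priv, x, y, err := curve.GenerateKey(rand.Reader) // crypto/rand assumed
-//	_ = curve.IsOnCurve(x, y)                         // true on success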
diff --git a/vendor/github.com/ProtonMail/go-crypto/brainpool/brainpool.go b/vendor/github.com/ProtonMail/go-crypto/brainpool/brainpool.go
deleted file mode 100644
index cb6676de..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/brainpool/brainpool.go
+++ /dev/null
@@ -1,134 +0,0 @@
-// Package brainpool implements Brainpool elliptic curves.
-// Implementation of rcurves is from github.com/ebfe/brainpool
-// Note that these curves are implemented with naive, non-constant time operations
-// and are likely not suitable for environments where timing attacks are a concern.
-package brainpool
-
-import (
- "crypto/elliptic"
- "math/big"
- "sync"
-)
-
-var (
- once sync.Once
- p256t1, p384t1, p512t1 *elliptic.CurveParams
- p256r1, p384r1, p512r1 *rcurve
-)
-
-func initAll() {
- initP256t1()
- initP384t1()
- initP512t1()
- initP256r1()
- initP384r1()
- initP512r1()
-}
-
-func initP256t1() {
- p256t1 = &elliptic.CurveParams{Name: "brainpoolP256t1"}
- p256t1.P, _ = new(big.Int).SetString("A9FB57DBA1EEA9BC3E660A909D838D726E3BF623D52620282013481D1F6E5377", 16)
- p256t1.N, _ = new(big.Int).SetString("A9FB57DBA1EEA9BC3E660A909D838D718C397AA3B561A6F7901E0E82974856A7", 16)
- p256t1.B, _ = new(big.Int).SetString("662C61C430D84EA4FE66A7733D0B76B7BF93EBC4AF2F49256AE58101FEE92B04", 16)
- p256t1.Gx, _ = new(big.Int).SetString("A3E8EB3CC1CFE7B7732213B23A656149AFA142C47AAFBC2B79A191562E1305F4", 16)
- p256t1.Gy, _ = new(big.Int).SetString("2D996C823439C56D7F7B22E14644417E69BCB6DE39D027001DABE8F35B25C9BE", 16)
- p256t1.BitSize = 256
-}
-
-func initP256r1() {
- twisted := p256t1
- params := &elliptic.CurveParams{
- Name: "brainpoolP256r1",
- P: twisted.P,
- N: twisted.N,
- BitSize: twisted.BitSize,
- }
- params.Gx, _ = new(big.Int).SetString("8BD2AEB9CB7E57CB2C4B482FFC81B7AFB9DE27E1E3BD23C23A4453BD9ACE3262", 16)
- params.Gy, _ = new(big.Int).SetString("547EF835C3DAC4FD97F8461A14611DC9C27745132DED8E545C1D54C72F046997", 16)
- z, _ := new(big.Int).SetString("3E2D4BD9597B58639AE7AA669CAB9837CF5CF20A2C852D10F655668DFC150EF0", 16)
- p256r1 = newrcurve(twisted, params, z)
-}
-
-func initP384t1() {
- p384t1 = &elliptic.CurveParams{Name: "brainpoolP384t1"}
- p384t1.P, _ = new(big.Int).SetString("8CB91E82A3386D280F5D6F7E50E641DF152F7109ED5456B412B1DA197FB71123ACD3A729901D1A71874700133107EC53", 16)
- p384t1.N, _ = new(big.Int).SetString("8CB91E82A3386D280F5D6F7E50E641DF152F7109ED5456B31F166E6CAC0425A7CF3AB6AF6B7FC3103B883202E9046565", 16)
- p384t1.B, _ = new(big.Int).SetString("7F519EADA7BDA81BD826DBA647910F8C4B9346ED8CCDC64E4B1ABD11756DCE1D2074AA263B88805CED70355A33B471EE", 16)
- p384t1.Gx, _ = new(big.Int).SetString("18DE98B02DB9A306F2AFCD7235F72A819B80AB12EBD653172476FECD462AABFFC4FF191B946A5F54D8D0AA2F418808CC", 16)
- p384t1.Gy, _ = new(big.Int).SetString("25AB056962D30651A114AFD2755AD336747F93475B7A1FCA3B88F2B6A208CCFE469408584DC2B2912675BF5B9E582928", 16)
- p384t1.BitSize = 384
-}
-
-func initP384r1() {
- twisted := p384t1
- params := &elliptic.CurveParams{
- Name: "brainpoolP384r1",
- P: twisted.P,
- N: twisted.N,
- BitSize: twisted.BitSize,
- }
- params.Gx, _ = new(big.Int).SetString("1D1C64F068CF45FFA2A63A81B7C13F6B8847A3E77EF14FE3DB7FCAFE0CBD10E8E826E03436D646AAEF87B2E247D4AF1E", 16)
- params.Gy, _ = new(big.Int).SetString("8ABE1D7520F9C2A45CB1EB8E95CFD55262B70B29FEEC5864E19C054FF99129280E4646217791811142820341263C5315", 16)
- z, _ := new(big.Int).SetString("41DFE8DD399331F7166A66076734A89CD0D2BCDB7D068E44E1F378F41ECBAE97D2D63DBC87BCCDDCCC5DA39E8589291C", 16)
- p384r1 = newrcurve(twisted, params, z)
-}
-
-func initP512t1() {
- p512t1 = &elliptic.CurveParams{Name: "brainpoolP512t1"}
- p512t1.P, _ = new(big.Int).SetString("AADD9DB8DBE9C48B3FD4E6AE33C9FC07CB308DB3B3C9D20ED6639CCA703308717D4D9B009BC66842AECDA12AE6A380E62881FF2F2D82C68528AA6056583A48F3", 16)
- p512t1.N, _ = new(big.Int).SetString("AADD9DB8DBE9C48B3FD4E6AE33C9FC07CB308DB3B3C9D20ED6639CCA70330870553E5C414CA92619418661197FAC10471DB1D381085DDADDB58796829CA90069", 16)
- p512t1.B, _ = new(big.Int).SetString("7CBBBCF9441CFAB76E1890E46884EAE321F70C0BCB4981527897504BEC3E36A62BCDFA2304976540F6450085F2DAE145C22553B465763689180EA2571867423E", 16)
- p512t1.Gx, _ = new(big.Int).SetString("640ECE5C12788717B9C1BA06CBC2A6FEBA85842458C56DDE9DB1758D39C0313D82BA51735CDB3EA499AA77A7D6943A64F7A3F25FE26F06B51BAA2696FA9035DA", 16)
- p512t1.Gy, _ = new(big.Int).SetString("5B534BD595F5AF0FA2C892376C84ACE1BB4E3019B71634C01131159CAE03CEE9D9932184BEEF216BD71DF2DADF86A627306ECFF96DBB8BACE198B61E00F8B332", 16)
- p512t1.BitSize = 512
-}
-
-func initP512r1() {
- twisted := p512t1
- params := &elliptic.CurveParams{
- Name: "brainpoolP512r1",
- P: twisted.P,
- N: twisted.N,
- BitSize: twisted.BitSize,
- }
- params.Gx, _ = new(big.Int).SetString("81AEE4BDD82ED9645A21322E9C4C6A9385ED9F70B5D916C1B43B62EEF4D0098EFF3B1F78E2D0D48D50D1687B93B97D5F7C6D5047406A5E688B352209BCB9F822", 16)
- params.Gy, _ = new(big.Int).SetString("7DDE385D566332ECC0EABFA9CF7822FDF209F70024A57B1AA000C55B881F8111B2DCDE494A5F485E5BCA4BD88A2763AED1CA2B2FA8F0540678CD1E0F3AD80892", 16)
- z, _ := new(big.Int).SetString("12EE58E6764838B69782136F0F2D3BA06E27695716054092E60A80BEDB212B64E585D90BCE13761F85C3F1D2A64E3BE8FEA2220F01EBA5EEB0F35DBD29D922AB", 16)
- p512r1 = newrcurve(twisted, params, z)
-}
-
-// P256t1 returns a Curve which implements Brainpool P256t1 (see RFC 5639, section 3.4)
-func P256t1() elliptic.Curve {
- once.Do(initAll)
- return p256t1
-}
-
-// P256r1 returns a Curve which implements Brainpool P256r1 (see RFC 5639, section 3.4)
-func P256r1() elliptic.Curve {
- once.Do(initAll)
- return p256r1
-}
-
-// P384t1 returns a Curve which implements Brainpool P384t1 (see RFC 5639, section 3.6)
-func P384t1() elliptic.Curve {
- once.Do(initAll)
- return p384t1
-}
-
-// P384r1 returns a Curve which implements Brainpool P384r1 (see RFC 5639, section 3.6)
-func P384r1() elliptic.Curve {
- once.Do(initAll)
- return p384r1
-}
-
-// P512t1 returns a Curve which implements Brainpool P512t1 (see RFC 5639, section 3.7)
-func P512t1() elliptic.Curve {
- once.Do(initAll)
- return p512t1
-}
-
-// P512r1 returns a Curve which implements Brainpool P512r1 (see RFC 5639, section 3.7)
-func P512r1() elliptic.Curve {
- once.Do(initAll)
- return p512r1
-}
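-
-// Usage sketch (illustrative): the r-curves satisfy crypto/elliptic.Curve,
-// so they work anywhere a standard curve is accepted:
-//
-//	priv, x, y, err := elliptic.GenerateKey(P256r1(), rand.Reader)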
diff --git a/vendor/github.com/ProtonMail/go-crypto/brainpool/rcurve.go b/vendor/github.com/ProtonMail/go-crypto/brainpool/rcurve.go
deleted file mode 100644
index 2d535508..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/brainpool/rcurve.go
+++ /dev/null
@@ -1,83 +0,0 @@
-package brainpool
-
-import (
- "crypto/elliptic"
- "math/big"
-)
-
-var _ elliptic.Curve = (*rcurve)(nil)
-
-type rcurve struct {
- twisted elliptic.Curve
- params *elliptic.CurveParams
- z *big.Int
- zinv *big.Int
- z2 *big.Int
- z3 *big.Int
- zinv2 *big.Int
- zinv3 *big.Int
-}
-
-var (
- two = big.NewInt(2)
- three = big.NewInt(3)
-)
-
-func newrcurve(twisted elliptic.Curve, params *elliptic.CurveParams, z *big.Int) *rcurve {
- zinv := new(big.Int).ModInverse(z, params.P)
- return &rcurve{
- twisted: twisted,
- params: params,
- z: z,
- zinv: zinv,
- z2: new(big.Int).Exp(z, two, params.P),
- z3: new(big.Int).Exp(z, three, params.P),
- zinv2: new(big.Int).Exp(zinv, two, params.P),
- zinv3: new(big.Int).Exp(zinv, three, params.P),
- }
-}
-
-func (curve *rcurve) toTwisted(x, y *big.Int) (*big.Int, *big.Int) {
- var tx, ty big.Int
- tx.Mul(x, curve.z2)
- tx.Mod(&tx, curve.params.P)
- ty.Mul(y, curve.z3)
- ty.Mod(&ty, curve.params.P)
- return &tx, &ty
-}
-
-func (curve *rcurve) fromTwisted(tx, ty *big.Int) (*big.Int, *big.Int) {
- var x, y big.Int
- x.Mul(tx, curve.zinv2)
- x.Mod(&x, curve.params.P)
- y.Mul(ty, curve.zinv3)
- y.Mod(&y, curve.params.P)
- return &x, &y
-}
-
-func (curve *rcurve) Params() *elliptic.CurveParams {
- return curve.params
-}
-
-func (curve *rcurve) IsOnCurve(x, y *big.Int) bool {
- return curve.twisted.IsOnCurve(curve.toTwisted(x, y))
-}
-
-func (curve *rcurve) Add(x1, y1, x2, y2 *big.Int) (x, y *big.Int) {
- tx1, ty1 := curve.toTwisted(x1, y1)
- tx2, ty2 := curve.toTwisted(x2, y2)
- return curve.fromTwisted(curve.twisted.Add(tx1, ty1, tx2, ty2))
-}
-
-func (curve *rcurve) Double(x1, y1 *big.Int) (x, y *big.Int) {
- return curve.fromTwisted(curve.twisted.Double(curve.toTwisted(x1, y1)))
-}
-
-func (curve *rcurve) ScalarMult(x1, y1 *big.Int, scalar []byte) (x, y *big.Int) {
- tx1, ty1 := curve.toTwisted(x1, y1)
- return curve.fromTwisted(curve.twisted.ScalarMult(tx1, ty1, scalar))
-}
-
-func (curve *rcurve) ScalarBaseMult(scalar []byte) (x, y *big.Int) {
- return curve.fromTwisted(curve.twisted.ScalarBaseMult(scalar))
-}
\ No newline at end of file
diff --git a/vendor/github.com/ProtonMail/go-crypto/eax/eax.go b/vendor/github.com/ProtonMail/go-crypto/eax/eax.go
deleted file mode 100644
index 6b6bc7ae..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/eax/eax.go
+++ /dev/null
@@ -1,162 +0,0 @@
-// Copyright (C) 2019 ProtonTech AG
-
-// Package eax provides an implementation of the EAX
-// (encrypt-authenticate-translate) mode of operation, as described in
-// Bellare, Rogaway, and Wagner "THE EAX MODE OF OPERATION: A TWO-PASS
-// AUTHENTICATED-ENCRYPTION SCHEME OPTIMIZED FOR SIMPLICITY AND EFFICIENCY."
-// In FSE'04, volume 3017 of LNCS, 2004
-package eax
-
-import (
- "crypto/cipher"
- "crypto/subtle"
- "errors"
- "github.com/ProtonMail/go-crypto/internal/byteutil"
-)
-
-const (
- defaultTagSize = 16
- defaultNonceSize = 16
-)
-
-type eax struct {
- block cipher.Block // Only AES-{128, 192, 256} supported
- tagSize int // At least 12 bytes recommended
- nonceSize int
-}
-
-func (e *eax) NonceSize() int {
- return e.nonceSize
-}
-
-func (e *eax) Overhead() int {
- return e.tagSize
-}
-
-// NewEAX returns an EAX instance with AES-{KEYLENGTH} and default nonce and
-// tag lengths. Supports {128, 192, 256}-bit key lengths.
-func NewEAX(block cipher.Block) (cipher.AEAD, error) {
- return NewEAXWithNonceAndTagSize(block, defaultNonceSize, defaultTagSize)
-}
-
-// NewEAXWithNonceAndTagSize returns an EAX instance with AES-{keyLength} and
-// given nonce and tag lengths in bytes. Returns an error on zero nonceSize
-// and overlong tags.
-//
-// It is recommended to use at least 12 bytes as tag length (see, for instance,
-// NIST SP 800-38D).
-//
-// Only to be used for compatibility with existing cryptosystems with
-// non-standard parameters. For all other cases, prefer NewEAX.
-func NewEAXWithNonceAndTagSize(
- block cipher.Block, nonceSize, tagSize int) (cipher.AEAD, error) {
- if nonceSize < 1 {
- return nil, eaxError("Cannot initialize EAX with nonceSize = 0")
- }
- if tagSize > block.BlockSize() {
- return nil, eaxError("Custom tag length exceeds blocksize")
- }
- return &eax{
- block: block,
- tagSize: tagSize,
- nonceSize: nonceSize,
- }, nil
-}
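-
-// Usage sketch (illustrative, not part of the original file):
-//
-//	block, _ := aes.NewCipher(key) // 16-, 24-, or 32-byte key
-//	aead, _ := NewEAX(block)
-//	ct := aead.Seal(nil, nonce, plaintext, adata) // len(nonce) <= 16
-//	pt, err := aead.Open(nil, nonce, ct, adata)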
-
-func (e *eax) Seal(dst, nonce, plaintext, adata []byte) []byte {
- if len(nonce) > e.nonceSize {
- panic("crypto/eax: Nonce too long for this instance")
- }
- ret, out := byteutil.SliceForAppend(dst, len(plaintext)+e.tagSize)
- omacNonce := e.omacT(0, nonce)
- omacAdata := e.omacT(1, adata)
-
- // Encrypt message using CTR mode and omacNonce as IV
- ctr := cipher.NewCTR(e.block, omacNonce)
- ciphertextData := out[:len(plaintext)]
- ctr.XORKeyStream(ciphertextData, plaintext)
-
- omacCiphertext := e.omacT(2, ciphertextData)
-
- tag := out[len(plaintext):]
- for i := 0; i < e.tagSize; i++ {
- tag[i] = omacCiphertext[i] ^ omacNonce[i] ^ omacAdata[i]
- }
- return ret
-}
-
-func (e *eax) Open(dst, nonce, ciphertext, adata []byte) ([]byte, error) {
- if len(nonce) > e.nonceSize {
- panic("crypto/eax: Nonce too long for this instance")
- }
- if len(ciphertext) < e.tagSize {
- return nil, eaxError("Ciphertext shorter than tag length")
- }
- sep := len(ciphertext) - e.tagSize
-
- // Compute tag
- omacNonce := e.omacT(0, nonce)
- omacAdata := e.omacT(1, adata)
- omacCiphertext := e.omacT(2, ciphertext[:sep])
-
- tag := make([]byte, e.tagSize)
- for i := 0; i < e.tagSize; i++ {
- tag[i] = omacCiphertext[i] ^ omacNonce[i] ^ omacAdata[i]
- }
-
- // Compare tags
- if subtle.ConstantTimeCompare(ciphertext[sep:], tag) != 1 {
- return nil, eaxError("Tag authentication failed")
- }
-
- // Decrypt ciphertext
- ret, out := byteutil.SliceForAppend(dst, len(ciphertext))
- ctr := cipher.NewCTR(e.block, omacNonce)
- ctr.XORKeyStream(out, ciphertext[:sep])
-
- return ret[:sep], nil
-}
-
-// Tweakable OMAC - Calls OMAC_K([t]_n || plaintext)
-func (e *eax) omacT(t byte, plaintext []byte) []byte {
- blockSize := e.block.BlockSize()
- byteT := make([]byte, blockSize)
- byteT[blockSize-1] = t
- concat := append(byteT, plaintext...)
- return e.omac(concat)
-}
-
-func (e *eax) omac(plaintext []byte) []byte {
- blockSize := e.block.BlockSize()
- // L ← E_K(0^n); B ← 2L; P ← 4L
- L := make([]byte, blockSize)
- e.block.Encrypt(L, L)
- B := byteutil.GfnDouble(L)
- P := byteutil.GfnDouble(B)
-
- // CBC with IV = 0
- cbc := cipher.NewCBCEncrypter(e.block, make([]byte, blockSize))
- padded := e.pad(plaintext, B, P)
- cbcCiphertext := make([]byte, len(padded))
- cbc.CryptBlocks(cbcCiphertext, padded)
-
- return cbcCiphertext[len(cbcCiphertext)-blockSize:]
-}
-
-func (e *eax) pad(plaintext, B, P []byte) []byte {
- // if |M| in {n, 2n, 3n, ...}
- blockSize := e.block.BlockSize()
- if len(plaintext) != 0 && len(plaintext)%blockSize == 0 {
- return byteutil.RightXor(plaintext, B)
- }
-
- // else return (M || 1 || 0^(n−1−(|M| % n))) xor→ P
- ending := make([]byte, blockSize-len(plaintext)%blockSize)
- ending[0] = 0x80
- padded := append(plaintext, ending...)
- return byteutil.RightXor(padded, P)
-}
-
-func eaxError(err string) error {
- return errors.New("crypto/eax: " + err)
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/eax/eax_test_vectors.go b/vendor/github.com/ProtonMail/go-crypto/eax/eax_test_vectors.go
deleted file mode 100644
index ddb53d07..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/eax/eax_test_vectors.go
+++ /dev/null
@@ -1,58 +0,0 @@
-package eax
-
-// Test vectors from
-// https://web.cs.ucdavis.edu/~rogaway/papers/eax.pdf
-var testVectors = []struct {
- msg, key, nonce, header, ciphertext string
-}{
- {"",
- "233952DEE4D5ED5F9B9C6D6FF80FF478",
- "62EC67F9C3A4A407FCB2A8C49031A8B3",
- "6BFB914FD07EAE6B",
- "E037830E8389F27B025A2D6527E79D01"},
- {"F7FB",
- "91945D3F4DCBEE0BF45EF52255F095A4",
- "BECAF043B0A23D843194BA972C66DEBD",
- "FA3BFD4806EB53FA",
- "19DD5C4C9331049D0BDAB0277408F67967E5"},
- {"1A47CB4933",
- "01F74AD64077F2E704C0F60ADA3DD523",
- "70C3DB4F0D26368400A10ED05D2BFF5E",
- "234A3463C1264AC6",
- "D851D5BAE03A59F238A23E39199DC9266626C40F80"},
- {"481C9E39B1",
- "D07CF6CBB7F313BDDE66B727AFD3C5E8",
- "8408DFFF3C1A2B1292DC199E46B7D617",
- "33CCE2EABFF5A79D",
- "632A9D131AD4C168A4225D8E1FF755939974A7BEDE"},
- {"40D0C07DA5E4",
- "35B6D0580005BBC12B0587124557D2C2",
- "FDB6B06676EEDC5C61D74276E1F8E816",
- "AEB96EAEBE2970E9",
- "071DFE16C675CB0677E536F73AFE6A14B74EE49844DD"},
- {"4DE3B35C3FC039245BD1FB7D",
- "BD8E6E11475E60B268784C38C62FEB22",
- "6EAC5C93072D8E8513F750935E46DA1B",
- "D4482D1CA78DCE0F",
- "835BB4F15D743E350E728414ABB8644FD6CCB86947C5E10590210A4F"},
- {"8B0A79306C9CE7ED99DAE4F87F8DD61636",
- "7C77D6E813BED5AC98BAA417477A2E7D",
- "1A8C98DCD73D38393B2BF1569DEEFC19",
- "65D2017990D62528",
- "02083E3979DA014812F59F11D52630DA30137327D10649B0AA6E1C181DB617D7F2"},
- {"1BDA122BCE8A8DBAF1877D962B8592DD2D56",
- "5FFF20CAFAB119CA2FC73549E20F5B0D",
- "DDE59B97D722156D4D9AFF2BC7559826",
- "54B9F04E6A09189A",
- "2EC47B2C4954A489AFC7BA4897EDCDAE8CC33B60450599BD02C96382902AEF7F832A"},
- {"6CF36720872B8513F6EAB1A8A44438D5EF11",
- "A4A4782BCFFD3EC5E7EF6D8C34A56123",
- "B781FCF2F75FA5A8DE97A9CA48E522EC",
- "899A175897561D7E",
- "0DE18FD0FDD91E7AF19F1D8EE8733938B1E8E7F6D2231618102FDB7FE55FF1991700"},
- {"CA40D7446E545FFAED3BD12A740A659FFBBB3CEAB7",
- "8395FCF1E95BEBD697BD010BC766AAC3",
- "22E7ADD93CFC6393C57EC0B3C17D6B44",
- "126735FCC320D25A",
- "CB8920F87A6C75CFF39627B56E3ED197C552D295A7CFC46AFC253B4652B1AF3795B124AB6E"},
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/eax/random_vectors.go b/vendor/github.com/ProtonMail/go-crypto/eax/random_vectors.go
deleted file mode 100644
index 4eb19f28..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/eax/random_vectors.go
+++ /dev/null
@@ -1,131 +0,0 @@
-// These vectors include key length in {128, 192, 256}, tag size 128, and
-// random nonce, header, and plaintext lengths.
-
-// This file was automatically generated.
-
-package eax
-
-var randomVectors = []struct {
- key, nonce, header, plaintext, ciphertext string
-}{
- {"DFDE093F36B0356E5A81F609786982E3",
- "1D8AC604419001816905BA72B14CED7E",
- "152A1517A998D7A24163FCDD146DE81AC347C8B97088F502093C1ABB8F6E33D9A219C34D7603A18B1F5ABE02E56661B7D7F67E81EC08C1302EF38D80A859486D450E94A4F26AD9E68EEBBC0C857A0FC5CF9E641D63D565A7E361BC8908F5A8DC8FD6",
- "1C8EAAB71077FE18B39730A3156ADE29C5EE824C7EE86ED2A253B775603FB237116E654F6FEC588DD27F523A0E01246FE73FE348491F2A8E9ABC6CA58D663F71CDBCF4AD798BE46C42AE6EE8B599DB44A1A48D7BBBBA0F7D2750181E1C5E66967F7D57CBD30AFBDA5727",
- "79E7E150934BBEBF7013F61C60462A14D8B15AF7A248AFB8A344EF021C1500E16666891D6E973D8BB56B71A371F12CA34660C4410C016982B20F547E3762A58B7BF4F20236CADCF559E2BE7D783B13723B2741FC7CDC8997D839E39A3DDD2BADB96743DD7049F1BDB0516A262869915B3F70498AFB7B191BF960"},
- {"F10619EF02E5D94D7550EB84ED364A21",
- "8DC0D4F2F745BBAE835CC5574B942D20",
- "FE561358F2E8DF7E1024FF1AE9A8D36EBD01352214505CB99D644777A8A1F6027FA2BDBFC529A9B91136D5F2416CFC5F0F4EC3A1AFD32BDDA23CA504C5A5CB451785FABF4DFE4CD50D817491991A60615B30286361C100A95D1712F2A45F8E374461F4CA2B",
- "D7B5A971FC219631D30EFC3664AE3127D9CF3097DAD9C24AC7905D15E8D9B25B026B31D68CAE00975CDB81EB1FD96FD5E1A12E2BB83FA25F1B1D91363457657FC03875C27F2946C5",
- "2F336ED42D3CC38FC61660C4CD60BA4BD438B05F5965D8B7B399D2E7167F5D34F792D318F94DB15D67463AC449E13D568CC09BFCE32A35EE3EE96A041927680AE329811811E27F2D1E8E657707AF99BA96D13A478D695D59"},
- {"429F514EFC64D98A698A9247274CFF45",
- "976AA5EB072F912D126ACEBC954FEC38",
- "A71D89DC5B6CEDBB7451A27C3C2CAE09126DB4C421",
- "5632FE62AB1DC549D54D3BC3FC868ACCEDEFD9ECF5E9F8",
- "848AE4306CA8C7F416F8707625B7F55881C0AB430353A5C967CDA2DA787F581A70E34DBEBB2385"},
- {"398138F309085F47F8457CDF53895A63",
- "F8A8A7F2D28E5FFF7BBC2F24353F7A36",
- "5D633C21BA7764B8855CAB586F3746E236AD486039C83C6B56EFA9C651D38A41D6B20DAEE3418BFEA44B8BD6",
- "A3BBAA91920AF5E10659818B1B3B300AC79BFC129C8329E75251F73A66D3AE0128EB91D5031E0A65C329DB7D1E9C0493E268",
- "D078097267606E5FB07CFB7E2B4B718172A82C6A4CEE65D549A4DFB9838003BD2FBF64A7A66988AC1A632FD88F9E9FBB57C5A78AD2E086EACBA3DB68511D81C2970A"},
- {"7A4151EBD3901B42CBA45DAFB2E931BA",
- "0FC88ACEE74DD538040321C330974EB8",
- "250464FB04733BAB934C59E6AD2D6AE8D662CBCFEFBE61E5A308D4211E58C4C25935B72C69107722E946BFCBF416796600542D76AEB73F2B25BF53BAF97BDEB36ED3A7A51C31E7F170EB897457E7C17571D1BA0A908954E9",
- "88C41F3EBEC23FAB8A362D969CAC810FAD4F7CA6A7F7D0D44F060F92E37E1183768DD4A8C733F71C96058D362A39876D183B86C103DE",
- "74A25B2182C51096D48A870D80F18E1CE15867778E34FCBA6BD7BFB3739FDCD42AD0F2D9F4EBA29085285C6048C15BCE5E5166F1F962D3337AA88E6062F05523029D0A7F0BF9"},
- {"BFB147E1CD5459424F8C0271FC0E0DC5",
- "EABCC126442BF373969EA3015988CC45",
- "4C0880E1D71AA2C7",
- "BE1B5EC78FBF73E7A6682B21BA7E0E5D2D1C7ABE",
- "5660D7C1380E2F306895B1402CB2D6C37876504276B414D120F4CF92FDDDBB293A238EA0"},
- {"595DD6F52D18BC2CA8EB4EDAA18D9FA3",
- "0F84B5D36CF4BC3B863313AF3B4D2E97",
- "30AE6CC5F99580F12A779D98BD379A60948020C0B6FBD5746B30BA3A15C6CD33DAF376C70A9F15B6C0EB410A93161F7958AE23",
- "8EF3687A1642B070970B0B91462229D1D76ABC154D18211F7152AA9FF368",
- "317C1DDB11417E5A9CC4DDE7FDFF6659A5AC4B31DE025212580A05CDAC6024D3E4AE7C2966E52B9129E9ECDBED86"},
- {"44E6F2DC8FDC778AD007137D11410F50",
- "270A237AD977F7187AA6C158A0BAB24F",
- "509B0F0EB12E2AA5C5BA2DE553C07FAF4CE0C9E926531AA709A3D6224FCB783ACCF1559E10B1123EBB7D52E8AB54E6B5352A9ED0D04124BF0E9D9BACFD7E32B817B2E625F5EE94A64EDE9E470DE7FE6886C19B294F9F828209FE257A78",
- "8B3D7815DF25618A5D0C55A601711881483878F113A12EC36CF64900549A3199555528559DC118F789788A55FAFD944E6E99A9CA3F72F238CD3F4D88223F7A745992B3FAED1848",
- "1CC00D79F7AD82FDA71B58D286E5F34D0CC4CEF30704E771CC1E50746BDF83E182B078DB27149A42BAE619DF0F85B0B1090AD55D3B4471B0D6F6ECCD09C8F876B30081F0E7537A9624F8AAF29DA85E324122EFB4D68A56"},
- {"BB7BC352A03044B4428D8DBB4B0701FDEC4649FD17B81452",
- "8B4BBE26CCD9859DCD84884159D6B0A4",
- "2212BEB0E78E0F044A86944CF33C8D5C80D9DBE1034BF3BCF73611835C7D3A52F5BD2D81B68FD681B68540A496EE5DA16FD8AC8824E60E1EC2042BE28FB0BFAD4E4B03596446BDD8C37D936D9B3D5295BE19F19CF5ACE1D33A46C952CE4DE5C12F92C1DD051E04AEED",
- "9037234CC44FFF828FABED3A7084AF40FA7ABFF8E0C0EFB57A1CC361E18FC4FAC1AB54F3ABFE9FF77263ACE16C3A",
- "A9391B805CCD956081E0B63D282BEA46E7025126F1C1631239C33E92AA6F92CD56E5A4C56F00FF9658E93D48AF4EF0EF81628E34AD4DB0CDAEDCD2A17EE7"},
- {"99C0AD703196D2F60A74E6B378B838B31F82EA861F06FC4E",
- "92745C018AA708ECFEB1667E9F3F1B01",
- "828C69F376C0C0EC651C67749C69577D589EE39E51404D80EBF70C8660A8F5FD375473F4A7C611D59CB546A605D67446CE2AA844135FCD78BB5FBC90222A00D42920BB1D7EEDFB0C4672554F583EF23184F89063CDECBE482367B5F9AF3ACBC3AF61392BD94CBCD9B64677",
- "A879214658FD0A5B0E09836639BF82E05EC7A5EF71D4701934BDA228435C68AC3D5CEB54997878B06A655EEACEFB1345C15867E7FE6C6423660C8B88DF128EBD6BCD85118DBAE16E9252FFB204324E5C8F38CA97759BDBF3CB0083",
- "51FE87996F194A2585E438B023B345439EA60D1AEBED4650CDAF48A4D4EEC4FC77DC71CC4B09D3BEEF8B7B7AF716CE2B4EFFB3AC9E6323C18AC35E0AA6E2BBBC8889490EB6226C896B0D105EAB42BFE7053CCF00ED66BA94C1BA09A792AA873F0C3B26C5C5F9A936E57B25"},
- {"7086816D00D648FB8304AA8C9E552E1B69A9955FB59B25D1",
- "0F45CF7F0BF31CCEB85D9DA10F4D749F",
- "93F27C60A417D9F0669E86ACC784FC8917B502DAF30A6338F11B30B94D74FEFE2F8BE1BBE2EAD10FAB7EED3C6F72B7C3ECEE1937C32ED4970A6404E139209C05",
- "877F046601F3CBE4FB1491943FA29487E738F94B99AF206262A1D6FF856C9AA0B8D4D08A54370C98F8E88FA3DCC2B14C1F76D71B2A4C7963AEE8AF960464C5BEC8357AD00DC8",
- "FE96906B895CE6A8E72BC72344E2C8BB3C63113D70EAFA26C299BAFE77A8A6568172EB447FB3E86648A0AF3512DEB1AAC0819F3EC553903BF28A9FB0F43411237A774BF9EE03E445D280FBB9CD12B9BAAB6EF5E52691"},
- {"062F65A896D5BF1401BADFF70E91B458E1F9BD4888CB2E4D",
- "5B11EA1D6008EBB41CF892FCA5B943D1",
- "BAF4FF5C8242",
- "A8870E091238355984EB2F7D61A865B9170F440BFF999A5993DD41A10F4440D21FF948DDA2BF663B2E03AC3324492DC5E40262ECC6A65C07672353BE23E7FB3A9D79FF6AA38D97960905A38DECC312CB6A59E5467ECF06C311CD43ADC0B543EDF34FE8BE611F176460D5627CA51F8F8D9FED71F55C",
- "B10E127A632172CF8AA7539B140D2C9C2590E6F28C3CB892FC498FCE56A34F732FBFF32E79C7B9747D9094E8635A0C084D6F0247F9768FB5FF83493799A9BEC6C39572120C40E9292C8C947AE8573462A9108C36D9D7112E6995AE5867E6C8BB387D1C5D4BEF524F391B9FD9F0A3B4BFA079E915BCD920185CFD38D114C558928BD7D47877"},
- {"38A8E45D6D705A11AF58AED5A1344896998EACF359F2E26A",
- "FD82B5B31804FF47D44199B533D0CF84",
- "DE454D4E62FE879F2050EE3E25853623D3E9AC52EEC1A1779A48CFAF5ECA0BFDE44749391866D1",
- "B804",
- "164BB965C05EBE0931A1A63293EDF9C38C27"},
- {"34C33C97C6D7A0850DA94D78A58DC61EC717CD7574833068",
- "343BE00DA9483F05C14F2E9EB8EA6AE8",
- "78312A43EFDE3CAE34A65796FF059A3FE15304EEA5CF1D9306949FE5BF3349D4977D4EBE76C040FE894C5949E4E4D6681153DA87FB9AC5062063CA2EA183566343362370944CE0362D25FC195E124FD60E8682E665D13F2229DDA3E4B2CB1DCA",
- "CC11BB284B1153578E4A5ED9D937B869DAF00F5B1960C23455CA9CC43F486A3BE0B66254F1041F04FDF459C8640465B6E1D2CF899A381451E8E7FCB50CF87823BE77E24B132BBEEDC72E53369B275E1D8F49ECE59F4F215230AC4FE133FC80E4F634EE80BA4682B62C86",
- "E7F703DC31A95E3A4919FF957836CB76C063D81702AEA4703E1C2BF30831E58C4609D626EC6810E12EAA5B930F049FF9EFC22C3E3F1EBD4A1FB285CB02A1AC5AD46B425199FC0A85670A5C4E3DAA9636C8F64C199F42F18AAC8EA7457FD377F322DD7752D7D01B946C8F0A97E6113F0D50106F319AFD291AAACE"},
- {"C6ECF7F053573E403E61B83052A343D93CBCC179D1E835BE",
- "E280E13D7367042E3AA09A80111B6184",
- "21486C9D7A9647",
- "5F2639AFA6F17931853791CD8C92382BBB677FD72D0AB1A080D0E49BFAA21810E963E4FACD422E92F65CBFAD5884A60CD94740DF31AF02F95AA57DA0C4401B0ED906",
- "5C51DB20755302070C45F52E50128A67C8B2E4ED0EACB7E29998CCE2E8C289DD5655913EC1A51CC3AABE5CDC2402B2BE7D6D4BF6945F266FBD70BA9F37109067157AE7530678B45F64475D4EBFCB5FFF46A5"},
- {"5EC6CF7401BC57B18EF154E8C38ACCA8959E57D2F3975FF5",
- "656B41CB3F9CF8C08BAD7EBFC80BD225",
- "6B817C2906E2AF425861A7EF59BA5801F143EE2A139EE72697CDE168B4",
- "2C0E1DDC9B1E5389BA63845B18B1F8A1DB062037151BCC56EF7C21C0BB4DAE366636BBA975685D7CC5A94AFBE89C769016388C56FB7B57CE750A12B718A8BDCF70E80E8659A8330EFC8F86640F21735E8C80E23FE43ABF23507CE3F964AE4EC99D",
- "ED780CF911E6D1AA8C979B889B0B9DC1ABE261832980BDBFB576901D9EF5AB8048998E31A15BE54B3E5845A4D136AD24D0BDA1C3006168DF2F8AC06729CB0818867398150020131D8F04EDF1923758C9EABB5F735DE5EA1758D4BC0ACFCA98AFD202E9839B8720253693B874C65586C6F0"},
- {"C92F678EB2208662F5BCF3403EC05F5961E957908A3E79421E1D25FC19054153",
- "DA0F3A40983D92F2D4C01FED33C7A192",
- "2B6E9D26DB406A0FAB47608657AA10EFC2B4AA5F459B29FF85AC9A40BFFE7AEB04F77E9A11FAAA116D7F6D4DA417671A9AB02C588E0EF59CB1BFB4B1CC931B63A3B3A159FCEC97A04D1E6F0C7E6A9CEF6B0ABB04758A69F1FE754DF4C2610E8C46B6CF413BDB31351D55BEDCB7B4A13A1C98E10984475E0F2F957853",
- "F37326A80E08",
- "83519E53E321D334F7C10B568183775C0E9AAE55F806"},
- {"6847E0491BE57E72995D186D50094B0B3593957A5146798FCE68B287B2FB37B5",
- "3EE1182AEBB19A02B128F28E1D5F7F99",
- "D9F35ABB16D776CE",
- "DB7566ED8EA95BDF837F23DB277BAFBC5E70D1105ADFD0D9EF15475051B1EF94709C67DCA9F8D5",
- "2CDCED0C9EBD6E2A508822A685F7DCD1CDD99E7A5FCA786C234E7F7F1D27EC49751AD5DCFA30C5EDA87C43CAE3B919B6BBCFE34C8EDA59"},
- {"82B019673642C08388D3E42075A4D5D587558C229E4AB8F660E37650C4C41A0A",
- "336F5D681E0410FAE7B607246092C6DC",
- "D430CBD8FE435B64214E9E9CDC5DE99D31CFCFB8C10AA0587A49DF276611",
- "998404153AD77003E1737EDE93ED79859EE6DCCA93CB40C4363AA817ABF2DBBD46E42A14A7183B6CC01E12A577888141363D0AE011EB6E8D28C0B235",
- "9BEF69EEB60BD3D6065707B7557F25292A8872857CFBD24F2F3C088E4450995333088DA50FD9121221C504DF1D0CD5EFE6A12666C5D5BB12282CF4C19906E9CFAB97E9BDF7F49DC17CFC384B"},
- {"747B2E269B1859F0622C15C8BAD6A725028B1F94B8DB7326948D1E6ED663A8BC",
- "AB91F7245DDCE3F1C747872D47BE0A8A",
- "3B03F786EF1DDD76E1D42646DA4CD2A5165DC5383CE86D1A0B5F13F910DC278A4E451EE0192CBA178E13B3BA27FDC7840DF73D2E104B",
- "6B803F4701114F3E5FE21718845F8416F70F626303F545BE197189E0A2BA396F37CE06D389EB2658BC7D56D67868708F6D0D32",
- "1570DDB0BCE75AA25D1957A287A2C36B1A5F2270186DA81BA6112B7F43B0F3D1D0ED072591DCF1F1C99BBB25621FC39B896FF9BD9413A2845363A9DCD310C32CF98E57"},
- {"02E59853FB29AEDA0FE1C5F19180AD99A12FF2F144670BB2B8BADF09AD812E0A",
- "C691294EF67CD04D1B9242AF83DD1421",
- "879334DAE3",
- "1E17F46A98FEF5CBB40759D95354",
- "FED8C3FF27DDF6313AED444A2985B36CBA268AAD6AAC563C0BA28F6DB5DB"},
- {"F6C1FB9B4188F2288FF03BD716023198C3582CF2A037FC2F29760916C2B7FCDB",
- "4228DA0678CA3534588859E77DFF014C",
- "D8153CAF35539A61DD8D05B3C9B44F01E564FB9348BCD09A1C23B84195171308861058F0A3CD2A55B912A3AAEE06FF4D356C77275828F2157C2FC7C115DA39E443210CCC56BEDB0CC99BBFB227ABD5CC454F4E7F547C7378A659EEB6A7E809101A84F866503CB18D4484E1FA09B3EC7FC75EB2E35270800AA7",
- "23B660A779AD285704B12EC1C580387A47BEC7B00D452C6570",
- "5AA642BBABA8E49849002A2FAF31DB8FC7773EFDD656E469CEC19B3206D4174C9A263D0A05484261F6"},
- {"8FF6086F1FADB9A3FBE245EAC52640C43B39D43F89526BB5A6EBA47710931446",
- "943188480C99437495958B0AE4831AA9",
- "AD5CD0BDA426F6EBA23C8EB23DC73FF9FEC173355EDBD6C9344C4C4383F211888F7CE6B29899A6801DF6B38651A7C77150941A",
- "80CD5EA8D7F81DDF5070B934937912E8F541A5301877528EB41AB60C020968D459960ED8FB73083329841A",
- "ABAE8EB7F36FCA2362551E72DAC890BA1BB6794797E0FC3B67426EC9372726ED4725D379EA0AC9147E48DCD0005C502863C2C5358A38817C8264B5"},
- {"A083B54E6B1FE01B65D42FCD248F97BB477A41462BBFE6FD591006C022C8FD84",
- "B0490F5BD68A52459556B3749ACDF40E",
- "8892E047DA5CFBBDF7F3CFCBD1BD21C6D4C80774B1826999234394BD3E513CC7C222BB40E1E3140A152F19B3802F0D036C24A590512AD0E8",
- "D7B15752789DC94ED0F36778A5C7BBB207BEC32BAC66E702B39966F06E381E090C6757653C3D26A81EC6AD6C364D66867A334C91BB0B8A8A4B6EACDF0783D09010AEBA2DD2062308FE99CC1F",
- "C071280A732ADC93DF272BF1E613B2BB7D46FC6665EF2DC1671F3E211D6BDE1D6ADDD28DF3AA2E47053FC8BB8AE9271EC8BC8B2CFFA320D225B451685B6D23ACEFDD241FE284F8ADC8DB07F456985B14330BBB66E0FB212213E05B3E"},
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/internal/byteutil/byteutil.go b/vendor/github.com/ProtonMail/go-crypto/internal/byteutil/byteutil.go
deleted file mode 100644
index a6bdf512..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/internal/byteutil/byteutil.go
+++ /dev/null
@@ -1,92 +0,0 @@
-// Copyright (C) 2019 ProtonTech AG
-// This file contains necessary tools for the eax and ocb packages.
-//
-// These functions SHOULD NOT be used elsewhere, since they are optimized for
-// specific input nature in the EAX and OCB modes of operation.
-
-package byteutil
-
-// GfnDouble computes 2 * input in the field of 2^n elements.
-// The irreducible polynomial in the finite field for n=128 is
-// x^128 + x^7 + x^2 + x + 1 (equals 0x87)
-// Constant-time execution in order to avoid side-channel attacks
-func GfnDouble(input []byte) []byte {
- if len(input) != 16 {
- panic("Doubling in GFn only implemented for n = 128")
- }
- // If the first bit is zero, return 2L = L << 1
- // Else return (L << 1) xor 0^120 10000111
- shifted := ShiftBytesLeft(input)
- shifted[15] ^= ((input[0] >> 7) * 0x87)
- return shifted
-}
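-
-// Example (illustrative): doubling is a left shift with conditional
-// reduction. If the top bit of input[0] is clear the result is input << 1;
-// otherwise 0x87 is additionally XORed into the last byte after the shift.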
-
-// ShiftBytesLeft outputs the byte array corresponding to x << 1 in binary.
-func ShiftBytesLeft(x []byte) []byte {
- l := len(x)
- dst := make([]byte, l)
- for i := 0; i < l-1; i++ {
- dst[i] = (x[i] << 1) | (x[i+1] >> 7)
- }
- dst[l-1] = x[l-1] << 1
- return dst
-}
-
-// ShiftNBytesLeft puts in dst the byte array corresponding to x << n in binary.
-func ShiftNBytesLeft(dst, x []byte, n int) {
- // Erase first n / 8 bytes
- copy(dst, x[n/8:])
-
- // Shift the remaining n % 8 bits
- bits := uint(n % 8)
- l := len(dst)
- for i := 0; i < l-1; i++ {
- dst[i] = (dst[i] << bits) | (dst[i+1] >> uint(8-bits))
- }
- dst[l-1] = dst[l-1] << bits
-
- // Append trailing zeroes
- dst = append(dst, make([]byte, n/8)...)
-}
-
-// XorBytesMut assumes equal input length, replaces X with X XOR Y
-func XorBytesMut(X, Y []byte) {
- for i := 0; i < len(X); i++ {
- X[i] ^= Y[i]
- }
-}
-
-// XorBytes assumes equal input length, puts X XOR Y into Z
-func XorBytes(Z, X, Y []byte) {
- for i := 0; i < len(X); i++ {
- Z[i] = X[i] ^ Y[i]
- }
-}
-
-// RightXor XORs smaller input (assumed Y) at the right of the larger input (assumed X)
-func RightXor(X, Y []byte) []byte {
- offset := len(X) - len(Y)
- xored := make([]byte, len(X))
- copy(xored, X)
- for i := 0; i < len(Y); i++ {
- xored[offset+i] ^= Y[i]
- }
- return xored
-}
-
-// SliceForAppend takes a slice and a requested number of bytes. It returns a
-// slice with the contents of the given slice followed by that many bytes and a
-// second slice that aliases into it and contains only the extra bytes. If the
-// original slice has sufficient capacity then no allocation is performed.
-func SliceForAppend(in []byte, n int) (head, tail []byte) {
- if total := len(in) + n; cap(in) >= total {
- head = in[:total]
- } else {
- head = make([]byte, total)
- copy(head, in)
- }
- tail = head[len(in):]
- return
-}
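-
-// Usage sketch (illustrative): append n writable bytes to a slice without
-// an extra allocation when capacity suffices:
-//
-//	ret, out := SliceForAppend(dst, 16)
-//	copy(out, tag) // out aliases the final 16 bytes of ret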
-
diff --git a/vendor/github.com/ProtonMail/go-crypto/ocb/ocb.go b/vendor/github.com/ProtonMail/go-crypto/ocb/ocb.go
deleted file mode 100644
index 7f78cfa7..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/ocb/ocb.go
+++ /dev/null
@@ -1,317 +0,0 @@
-// Copyright (C) 2019 ProtonTech AG
-
-// Package ocb provides an implementation of the OCB (offset codebook) mode of
-// operation, as described in RFC-7253 of the IRTF and in Rogaway, Bellare,
-// Black and Krovetz - OCB: A BLOCK-CIPHER MODE OF OPERATION FOR EFFICIENT
-// AUTHENTICATED ENCRYPTION (2003).
-// Security considerations (from RFC-7253): A private key MUST NOT be used to
-// encrypt more than 2^48 blocks. Tag length should be at least 12 bytes (a
-// brute-force forging adversary succeeds after 2^{tag length} attempts). A
-// single key SHOULD NOT be used to decrypt ciphertext with different tag
-// lengths. Nonces need not be secret, but MUST NOT be reused.
-// This package only supports underlying block ciphers with 128-bit blocks,
-// such as AES-{128, 192, 256}, but may be extended to other sizes.
-package ocb
-
-import (
- "bytes"
- "crypto/cipher"
- "crypto/subtle"
- "errors"
- "github.com/ProtonMail/go-crypto/internal/byteutil"
- "math/bits"
-)
-
-type ocb struct {
- block cipher.Block
- tagSize int
- nonceSize int
- mask mask
- // Optimized en/decrypt: For each nonce N used to en/decrypt, the 'Ktop'
- // internal variable can be reused for en/decrypting with nonces sharing
- // all but the last 6 bits with N. The prefix of the first nonce used to
- // compute the new Ktop, and the Ktop value itself, are stored in
- // reusableKtop. If using incremental nonces, this saves one block cipher
- // call every 63 out of 64 OCB encryptions, and stores one nonce and one
- // output of the block cipher in memory only.
- reusableKtop reusableKtop
-}
-
-type mask struct {
- // L_*, L_$, (L_i)_{i ∈ N}
- lAst []byte
- lDol []byte
- L [][]byte
-}
-
-type reusableKtop struct {
- noncePrefix []byte
- Ktop []byte
-}
-
-const (
- defaultTagSize = 16
- defaultNonceSize = 15
-)
-
-const (
- enc = iota
- dec
-)
-
-func (o *ocb) NonceSize() int {
- return o.nonceSize
-}
-
-func (o *ocb) Overhead() int {
- return o.tagSize
-}
-
-// NewOCB returns an OCB instance with the given block cipher and default
-// tag and nonce sizes.
-func NewOCB(block cipher.Block) (cipher.AEAD, error) {
- return NewOCBWithNonceAndTagSize(block, defaultNonceSize, defaultTagSize)
-}
-
-// NewOCBWithNonceAndTagSize returns an OCB instance with the given block
-// cipher, nonce length, and tag length. Returns an error on zero nonceSize
-// and overlong tag sizes.
-//
-// It is recommended to use at least 12 bytes as tag length.
-func NewOCBWithNonceAndTagSize(
- block cipher.Block, nonceSize, tagSize int) (cipher.AEAD, error) {
- if block.BlockSize() != 16 {
- return nil, ocbError("Block cipher must have 128-bit blocks")
- }
- if nonceSize < 1 {
- return nil, ocbError("Incorrect nonce length")
- }
- if nonceSize >= block.BlockSize() {
- return nil, ocbError("Nonce length exceeds blocksize - 1")
- }
- if tagSize > block.BlockSize() {
- return nil, ocbError("Custom tag length exceeds blocksize")
- }
- return &ocb{
- block: block,
- tagSize: tagSize,
- nonceSize: nonceSize,
- mask: initializeMaskTable(block),
- reusableKtop: reusableKtop{
- noncePrefix: nil,
- Ktop: nil,
- },
- }, nil
-}
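-
-// Usage sketch (illustrative, mirroring the EAX example above):
-//
-//	block, _ := aes.NewCipher(key) // AES only: 128-bit blocks required
-//	aead, _ := NewOCB(block)
-//	ct := aead.Seal(nil, nonce, plaintext, adata) // len(nonce) <= 15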
-
-func (o *ocb) Seal(dst, nonce, plaintext, adata []byte) []byte {
- if len(nonce) > o.nonceSize {
- panic("crypto/ocb: Incorrect nonce length given to OCB")
- }
- ret, out := byteutil.SliceForAppend(dst, len(plaintext)+o.tagSize)
- o.crypt(enc, out, nonce, adata, plaintext)
- return ret
-}
-
-func (o *ocb) Open(dst, nonce, ciphertext, adata []byte) ([]byte, error) {
- if len(nonce) > o.nonceSize {
- panic("Nonce too long for this instance")
- }
- if len(ciphertext) < o.tagSize {
- return nil, ocbError("Ciphertext shorter than tag length")
- }
- sep := len(ciphertext) - o.tagSize
- ret, out := byteutil.SliceForAppend(dst, len(ciphertext))
- ciphertextData := ciphertext[:sep]
- tag := ciphertext[sep:]
- o.crypt(dec, out, nonce, adata, ciphertextData)
- if subtle.ConstantTimeCompare(ret[sep:], tag) == 1 {
- ret = ret[:sep]
- return ret, nil
- }
- for i := range out {
- out[i] = 0
- }
- return nil, ocbError("Tag authentication failed")
-}
-
-// On instruction enc (resp. dec), crypt is the encrypt (resp. decrypt)
-// function. It returns the resulting plain/ciphertext with the tag appended.
-func (o *ocb) crypt(instruction int, Y, nonce, adata, X []byte) []byte {
- //
- // Consider X as a sequence of 128-bit blocks
- //
- // Note: For encryption (resp. decryption), X is the plaintext (resp., the
- // ciphertext without the tag).
- blockSize := o.block.BlockSize()
-
- //
- // Nonce-dependent and per-encryption variables
- //
- // Zero out the last 6 bits of the nonce into truncatedNonce to see if Ktop
- // is already computed.
- truncatedNonce := make([]byte, len(nonce))
- copy(truncatedNonce, nonce)
- truncatedNonce[len(truncatedNonce)-1] &= 192
- Ktop := make([]byte, blockSize)
- if bytes.Equal(truncatedNonce, o.reusableKtop.noncePrefix) {
- Ktop = o.reusableKtop.Ktop
- } else {
- // Nonce = num2str(TAGLEN mod 128, 7) || zeros(120 - bitlen(N)) || 1 || N
- paddedNonce := append(make([]byte, blockSize-1-len(nonce)), 1)
- paddedNonce = append(paddedNonce, truncatedNonce...)
- paddedNonce[0] |= byte(((8 * o.tagSize) % (8 * blockSize)) << 1)
- // Last 6 bits of paddedNonce are already zero. Encrypt into Ktop
- paddedNonce[blockSize-1] &= 192
- Ktop = paddedNonce
- o.block.Encrypt(Ktop, Ktop)
- o.reusableKtop.noncePrefix = truncatedNonce
- o.reusableKtop.Ktop = Ktop
- }
-
- // Stretch = Ktop || ((lower half of Ktop) XOR (lower half of Ktop << 8))
- xorHalves := make([]byte, blockSize/2)
- byteutil.XorBytes(xorHalves, Ktop[:blockSize/2], Ktop[1:1+blockSize/2])
- stretch := append(Ktop, xorHalves...)
- bottom := int(nonce[len(nonce)-1] & 63)
- offset := make([]byte, len(stretch))
- byteutil.ShiftNBytesLeft(offset, stretch, bottom)
- offset = offset[:blockSize]
-
- //
- // Process any whole blocks
- //
- // Note: For encryption Y is ciphertext || tag, for decryption Y is
- // plaintext || tag.
- checksum := make([]byte, blockSize)
- m := len(X) / blockSize
- for i := 0; i < m; i++ {
- index := bits.TrailingZeros(uint(i + 1))
- if len(o.mask.L)-1 < index {
- o.mask.extendTable(index)
- }
-		byteutil.XorBytesMut(offset, o.mask.L[index])
- blockX := X[i*blockSize : (i+1)*blockSize]
- blockY := Y[i*blockSize : (i+1)*blockSize]
- byteutil.XorBytes(blockY, blockX, offset)
- switch instruction {
- case enc:
- o.block.Encrypt(blockY, blockY)
- byteutil.XorBytesMut(blockY, offset)
- byteutil.XorBytesMut(checksum, blockX)
- case dec:
- o.block.Decrypt(blockY, blockY)
- byteutil.XorBytesMut(blockY, offset)
- byteutil.XorBytesMut(checksum, blockY)
- }
- }
- //
- // Process any final partial block and compute raw tag
- //
- tag := make([]byte, blockSize)
- if len(X)%blockSize != 0 {
- byteutil.XorBytesMut(offset, o.mask.lAst)
- pad := make([]byte, blockSize)
- o.block.Encrypt(pad, offset)
- chunkX := X[blockSize*m:]
- chunkY := Y[blockSize*m : len(X)]
- byteutil.XorBytes(chunkY, chunkX, pad[:len(chunkX)])
- // P_* || bit(1) || zeroes(127) - len(P_*)
- switch instruction {
- case enc:
- paddedY := append(chunkX, byte(128))
- paddedY = append(paddedY, make([]byte, blockSize-len(chunkX)-1)...)
- byteutil.XorBytesMut(checksum, paddedY)
- case dec:
- paddedX := append(chunkY, byte(128))
- paddedX = append(paddedX, make([]byte, blockSize-len(chunkY)-1)...)
- byteutil.XorBytesMut(checksum, paddedX)
- }
- byteutil.XorBytes(tag, checksum, offset)
- byteutil.XorBytesMut(tag, o.mask.lDol)
- o.block.Encrypt(tag, tag)
- byteutil.XorBytesMut(tag, o.hash(adata))
- copy(Y[blockSize*m+len(chunkY):], tag[:o.tagSize])
- } else {
- byteutil.XorBytes(tag, checksum, offset)
- byteutil.XorBytesMut(tag, o.mask.lDol)
- o.block.Encrypt(tag, tag)
- byteutil.XorBytesMut(tag, o.hash(adata))
- copy(Y[blockSize*m:], tag[:o.tagSize])
- }
- return Y
-}
-
-// This hash function is used to compute the tag. Per design, on empty input it
-// returns a slice of zeros with the same length as the underlying block
-// cipher's block size.
-func (o *ocb) hash(adata []byte) []byte {
- //
- // Consider A as a sequence of 128-bit blocks
- //
- A := make([]byte, len(adata))
- copy(A, adata)
- blockSize := o.block.BlockSize()
-
- //
- // Process any whole blocks
- //
- sum := make([]byte, blockSize)
- offset := make([]byte, blockSize)
- m := len(A) / blockSize
- for i := 0; i < m; i++ {
- chunk := A[blockSize*i : blockSize*(i+1)]
- index := bits.TrailingZeros(uint(i + 1))
- // If the mask table is too short
- if len(o.mask.L)-1 < index {
- o.mask.extendTable(index)
- }
- byteutil.XorBytesMut(offset, o.mask.L[index])
- byteutil.XorBytesMut(chunk, offset)
- o.block.Encrypt(chunk, chunk)
- byteutil.XorBytesMut(sum, chunk)
- }
-
- //
- // Process any final partial block; compute final hash value
- //
- if len(A)%blockSize != 0 {
- byteutil.XorBytesMut(offset, o.mask.lAst)
-		// Pad the final block: A_* || 1 || zeros(127 - bitlen(A_*))
- ending := make([]byte, blockSize-len(A)%blockSize)
- ending[0] = 0x80
- encrypted := append(A[blockSize*m:], ending...)
- byteutil.XorBytesMut(encrypted, offset)
- o.block.Encrypt(encrypted, encrypted)
- byteutil.XorBytesMut(sum, encrypted)
- }
- return sum
-}
-
-func initializeMaskTable(block cipher.Block) mask {
- //
- // Key-dependent variables
- //
- lAst := make([]byte, block.BlockSize())
- block.Encrypt(lAst, lAst)
- lDol := byteutil.GfnDouble(lAst)
- L := make([][]byte, 1)
- L[0] = byteutil.GfnDouble(lDol)
-
- return mask{
- lAst: lAst,
- lDol: lDol,
- L: L,
- }
-}
-
-// Extends the L array of mask m up to L[limit], with L[i] = GfnDouble(L[i-1])
-func (m *mask) extendTable(limit int) {
- for i := len(m.L); i <= limit; i++ {
- m.L = append(m.L, byteutil.GfnDouble(m.L[i-1]))
- }
-}
-
-func ocbError(err string) error {
- return errors.New("crypto/ocb: " + err)
-}
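For context on the API this deleted file provided, here is a minimal round-trip sketch. The demo key and zero nonce are for illustration only; `NewOCB` is the package's default constructor, also visible in the AEAD mode switch later in this diff.

```go
package main

import (
	"crypto/aes"
	"fmt"

	"github.com/ProtonMail/go-crypto/ocb"
)

func main() {
	block, _ := aes.NewCipher(make([]byte, 16)) // demo key; use a random key in practice
	aead, err := ocb.NewOCB(block)              // default 15-byte nonce, 16-byte tag
	if err != nil {
		panic(err)
	}
	nonce := make([]byte, aead.NonceSize())
	ct := aead.Seal(nil, nonce, []byte("hello"), []byte("adata"))
	pt, err := aead.Open(nil, nonce, ct, []byte("adata"))
	fmt.Println(string(pt), err) // hello <nil>

	ct[0] ^= 1 // any tampering trips the constant-time tag check in Open
	_, err = aead.Open(nil, nonce, ct, []byte("adata"))
	fmt.Println(err) // crypto/ocb: Tag authentication failed
}
```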
diff --git a/vendor/github.com/ProtonMail/go-crypto/ocb/random_vectors.go b/vendor/github.com/ProtonMail/go-crypto/ocb/random_vectors.go
deleted file mode 100644
index 0efaf344..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/ocb/random_vectors.go
+++ /dev/null
@@ -1,136 +0,0 @@
-// In the test vectors provided by RFC 7253, the "bottom"
-// internal variable, which defines "offset" for the first time, does not
-// exceed 15. However, it can attain values up to 63.
-
-// These vectors include key length in {128, 192, 256}, tag size 128, and
-// random nonce, header, and plaintext lengths.
-
-// This file was automatically generated.
-
-package ocb
-
-var randomVectors = []struct {
- key, nonce, header, plaintext, ciphertext string
-}{
-
- {"9438C5D599308EAF13F800D2D31EA7F0",
- "C38EE4801BEBFFA1CD8635BE",
- "0E507B7DADD8A98CDFE272D3CB6B3E8332B56AE583FB049C0874D4200BED16BD1A044182434E9DA0E841F182DFD5B3016B34641CED0784F1745F63AB3D0DA22D3351C9EF9A658B8081E24498EBF61FCE40DA6D8E184536",
- "962D227786FB8913A8BAD5DC3250",
- "EEDEF5FFA5986D1E3BF86DDD33EF9ADC79DCA06E215FA772CCBA814F63AD"},
- {"BA7DE631C7D6712167C6724F5B9A2B1D",
- "35263EBDA05765DC0E71F1F5",
- "0103257B4224507C0242FEFE821EA7FA42E0A82863E5F8B68F7D881B4B44FA428A2B6B21D2F591260802D8AB6D83",
- "9D6D1FC93AE8A64E7889B7B2E3521EFA9B920A8DDB692E6F833DDC4A38AFA535E5E2A3ED82CB7E26404AB86C54D01C4668F28398C2DF33D5D561CBA1C8DCFA7A912F5048E545B59483C0E3221F54B14DAA2E4EB657B3BEF9554F34CAD69B2724AE962D3D8A",
- "E93852D1985C5E775655E937FA79CE5BF28A585F2AF53A5018853B9634BE3C84499AC0081918FDCE0624494D60E25F76ACD6853AC7576E3C350F332249BFCABD4E73CEABC36BE4EDDA40914E598AE74174A0D7442149B26990899491BDDFE8FC54D6C18E83AE9E9A6FFBF5D376565633862EEAD88D"},
- {"2E74B25289F6FD3E578C24866E9C72A5",
- "FD912F15025AF8414642BA1D1D",
- "FB5FB8C26F365EEDAB5FE260C6E3CCD27806729C8335F146063A7F9EA93290E56CF84576EB446350D22AD730547C267B1F0BBB97EB34E1E2C41A",
- "6C092EBF78F76EE8C1C6E592277D9545BA16EDB67BC7D8480B9827702DC2F8A129E2B08A2CE710CA7E1DA45CE162BB6CD4B512E632116E2211D3C90871EFB06B8D4B902681C7FB",
- "6AC0A77F26531BF4F354A1737F99E49BE32ECD909A7A71AD69352906F54B08A9CE9B8CA5D724CBFFC5673437F23F630697F3B84117A1431D6FA8CC13A974FB4AD360300522E09511B99E71065D5AC4BBCB1D791E864EF4"},
- {"E7EC507C802528F790AFF5303A017B17",
- "4B97A7A568940A9E3CE7A99E93031E",
- "28349BDC5A09390C480F9B8AA3EDEA3DDB8B9D64BCA322C570B8225DF0E31190DAB25A4014BA39519E02ABFB12B89AA28BBFD29E486E7FB28734258C817B63CED9912DBAFEBB93E2798AB2890DE3B0ACFCFF906AB15563EF7823CE83D27CDB251195E22BD1337BCBDE65E7C2C427321C463C2777BFE5AEAA",
- "9455B3EA706B74",
- "7F33BA3EA848D48A96B9530E26888F43EBD4463C9399B6"},
- {"6C928AA3224736F28EE7378DE0090191",
- "8936138E2E4C6A13280017A1622D",
- "6202717F2631565BDCDC57C6584543E72A7C8BD444D0D108ED35069819633C",
- "DA0691439E5F035F3E455269D14FE5C201C8C9B0A3FE2D3F86BCC59387C868FE65733D388360B31E3CE28B4BF6A8BE636706B536D5720DB66B47CF1C7A5AFD6F61E0EF90F1726D6B0E169F9A768B2B7AE4EE00A17F630AC905FCAAA1B707FFF25B3A1AAE83B504837C64A5639B2A34002B300EC035C9B43654DA55",
- "B8804D182AB0F0EEB464FA7BD1329AD6154F982013F3765FEDFE09E26DAC078C9C1439BFC1159D6C02A25E3FF83EF852570117B315852AD5EE20E0FA3AA0A626B0E43BC0CEA38B44579DD36803455FB46989B90E6D229F513FD727AF8372517E9488384C515D6067704119C931299A0982EDDFB9C2E86A90C450C077EB222511EC9CCABC9FCFDB19F70088"},
- {"ECEA315CA4B3F425B0C9957A17805EA4",
- "664CDAE18403F4F9BA13015A44FC",
- "642AFB090D6C6DB46783F08B01A3EF2A8FEB5736B531EAC226E7888FCC8505F396818F83105065FACB3267485B9E5E4A0261F621041C08FCCB2A809A49AB5252A91D0971BCC620B9D614BD77E57A0EED2FA5",
- "6852C31F8083E20E364CEA21BB7854D67CEE812FE1C9ED2425C0932A90D3780728D1BB",
- "2ECEF962A9695A463ADABB275BDA9FF8B2BA57AEC2F52EFFB700CD9271A74D2A011C24AEA946051BD6291776429B7E681BA33E"},
- {"4EE616C4A58AAA380878F71A373461F6",
- "91B8C9C176D9C385E9C47E52",
- "CDA440B7F9762C572A718AC754EDEECC119E5EE0CCB9FEA4FFB22EEE75087C032EBF3DA9CDD8A28CC010B99ED45143B41A4BA50EA2A005473F89639237838867A57F23B0F0ED3BF22490E4501DAC9C658A9B9F",
- "D6E645FA9AE410D15B8123FD757FA356A8DBE9258DDB5BE88832E615910993F497EC",
- "B70ED7BF959FB2AAED4F36174A2A99BFB16992C8CDF369C782C4DB9C73DE78C5DB8E0615F647243B97ACDB24503BC9CADC48"},
- {"DCD475773136C830D5E3D0C5FE05B7FF",
- "BB8E1FBB483BE7616A922C4A",
- "36FEF2E1CB29E76A6EA663FC3AF66ECD7404F466382F7B040AABED62293302B56E8783EF7EBC21B4A16C3E78A7483A0A403F253A2CDC5BBF79DC3DAE6C73F39A961D8FBBE8D41B",
- "441E886EA38322B2437ECA7DEB5282518865A66780A454E510878E61BFEC3106A3CD93D2A02052E6F9E1832F9791053E3B76BF4C07EFDD6D4106E3027FABB752E60C1AA425416A87D53938163817A1051EBA1D1DEEB4B9B25C7E97368B52E5911A31810B0EC5AF547559B6142D9F4C4A6EF24A4CF75271BF9D48F62B",
- "1BE4DD2F4E25A6512C2CC71D24BBB07368589A94C2714962CD0ACE5605688F06342587521E75F0ACAFFD86212FB5C34327D238DB36CF2B787794B9A4412E7CD1410EA5DDD2450C265F29CF96013CD213FD2880657694D718558964BC189B4A84AFCF47EB012935483052399DBA5B088B0A0477F20DFE0E85DCB735E21F22A439FB837DD365A93116D063E607"},
- {"3FBA2B3D30177FFE15C1C59ED2148BB2C091F5615FBA7C07",
- "FACF804A4BEBF998505FF9DE",
- "8213B9263B2971A5BDA18DBD02208EE1",
- "15B323926993B326EA19F892D704439FC478828322AF72118748284A1FD8A6D814E641F70512FD706980337379F31DC63355974738D7FEA87AD2858C0C2EBBFBE74371C21450072373C7B651B334D7C4D43260B9D7CCD3AF9EDB",
- "6D35DC1469B26E6AAB26272A41B46916397C24C485B61162E640A062D9275BC33DDCFD3D9E1A53B6C8F51AC89B66A41D59B3574197A40D9B6DCF8A4E2A001409C8112F16B9C389E0096179DB914E05D6D11ED0005AD17E1CE105A2F0BAB8F6B1540DEB968B7A5428FF44"},
- {"53B52B8D4D748BCDF1DDE68857832FA46227FA6E2F32EFA1",
- "0B0EF53D4606B28D1398355F",
- "F23882436349094AF98BCACA8218E81581A043B19009E28EFBF2DE37883E04864148CC01D240552CA8844EC1456F42034653067DA67E80F87105FD06E14FF771246C9612867BE4D215F6D761",
- "F15030679BD4088D42CAC9BF2E9606EAD4798782FA3ED8C57EBE7F84A53236F51B25967C6489D0CD20C9EEA752F9BC",
- "67B96E2D67C3729C96DAEAEDF821D61C17E648643A2134C5621FEC621186915AD80864BFD1EB5B238BF526A679385E012A457F583AFA78134242E9D9C1B4E4"},
- {"0272DD80F23399F49BFC320381A5CD8225867245A49A7D41",
- "5C83F4896D0738E1366B1836",
- "69B0337289B19F73A12BAEEA857CCAF396C11113715D9500CCCF48BA08CFF12BC8B4BADB3084E63B85719DB5058FA7C2C11DEB096D7943CFA7CAF5",
- "C01AD10FC8B562CD17C7BC2FAB3E26CBDFF8D7F4DEA816794BBCC12336991712972F52816AABAB244EB43B0137E2BAC1DD413CE79531E78BEF782E6B439612BB3AEF154DE3502784F287958EBC159419F9EBA27916A28D6307324129F506B1DE80C1755A929F87",
- "FEFE52DD7159C8DD6E8EC2D3D3C0F37AB6CB471A75A071D17EC4ACDD8F3AA4D7D4F7BB559F3C09099E3D9003E5E8AA1F556B79CECDE66F85B08FA5955E6976BF2695EA076388A62D2AD5BAB7CBF1A7F3F4C8D5CDF37CDE99BD3E30B685D9E5EEE48C7C89118EF4878EB89747F28271FA2CC45F8E9E7601"},
- {"3EEAED04A455D6E5E5AB53CFD5AFD2F2BC625C7BF4BE49A5",
- "36B88F63ADBB5668588181D774",
- "D367E3CB3703E762D23C6533188EF7028EFF9D935A3977150361997EC9DEAF1E4794BDE26AA8B53C124980B1362EC86FCDDFC7A90073171C1BAEE351A53234B86C66E8AB92FAE99EC6967A6D3428892D80",
- "573454C719A9A55E04437BF7CBAAF27563CCCD92ADD5E515CD63305DFF0687E5EEF790C5DCA5C0033E9AB129505E2775438D92B38F08F3B0356BA142C6F694",
- "E9F79A5B432D9E682C9AAA5661CFC2E49A0FCB81A431E54B42EB73DD3BED3F377FEC556ABA81624BA64A5D739AD41467460088F8D4F442180A9382CA635745473794C382FCDDC49BA4EB6D8A44AE3C"},
- {"B695C691538F8CBD60F039D0E28894E3693CC7C36D92D79D",
- "BC099AEB637361BAC536B57618",
- "BFFF1A65AE38D1DC142C71637319F5F6508E2CB33C9DCB94202B359ED5A5ED8042E7F4F09231D32A7242976677E6F4C549BF65FADC99E5AF43F7A46FD95E16C2",
- "081DF3FD85B415D803F0BE5AC58CFF0023FDDED99788296C3731D8",
- "E50C64E3614D94FE69C47092E46ACC9957C6FEA2CCBF96BC62FBABE7424753C75F9C147C42AE26FE171531"},
- {"C9ACBD2718F0689A1BE9802A551B6B8D9CF5614DAF5E65ED",
- "B1B0AAF373B8B026EB80422051D8",
- "6648C0E61AC733C76119D23FB24548D637751387AA2EAE9D80E912B7BD486CAAD9EAF4D7A5FE2B54AAD481E8EC94BB4D558000896E2010462B70C9FED1E7273080D1",
- "189F591F6CB6D59AFEDD14C341741A8F1037DC0DF00FC57CE65C30F49E860255CEA5DC6019380CC0FE8880BC1A9E685F41C239C38F36E3F2A1388865C5C311059C0A",
- "922A5E949B61D03BE34AB5F4E58607D4504EA14017BB363DAE3C873059EA7A1C77A746FB78981671D26C2CF6D9F24952D510044CE02A10177E9DB42D0145211DFE6E84369C5E3BC2669EAB4147B2822895F9"},
- {"7A832BD2CF5BF4919F353CE2A8C86A5E406DA2D52BE16A72",
- "2F2F17CECF7E5A756D10785A3CB9DB",
- "61DA05E3788CC2D8405DBA70C7A28E5AF699863C9F72E6C6770126929F5D6FA267F005EBCF49495CB46400958A3AE80D1289D1C671",
- "44E91121195A41AF14E8CFDBD39A4B517BE0DF1A72977ED8A3EEF8EEDA1166B2EB6DB2C4AE2E74FA0F0C74537F659BFBD141E5DDEC67E64EDA85AABD3F52C85A785B9FB3CECD70E7DF",
- "BEDF596EA21288D2B84901E188F6EE1468B14D5161D3802DBFE00D60203A24E2AB62714BF272A45551489838C3A7FEAADC177B591836E73684867CCF4E12901DCF2064058726BBA554E84ADC5136F507E961188D4AF06943D3"},
- {"1508E8AE9079AA15F1CEC4F776B4D11BCCB061B58AA56C18",
- "BCA625674F41D1E3AB47672DC0C3",
- "8B12CF84F16360F0EAD2A41BC021530FFCEC7F3579CAE658E10E2D3D81870F65AFCED0C77C6C4C6E6BA424FF23088C796BA6195ABA35094BF1829E089662E7A95FC90750AE16D0C8AFA55DAC789D7735B970B58D4BE7CEC7341DA82A0179A01929C27A59C5063215B859EA43",
- "E525422519ECE070E82C",
- "B47BC07C3ED1C0A43BA52C43CBACBCDBB29CAF1001E09FDF7107"},
- {"7550C2761644E911FE9ADD119BAC07376BEA442845FEAD876D7E7AC1B713E464",
- "36D2EC25ADD33CDEDF495205BBC923",
- "7FCFE81A3790DE97FFC3DE160C470847EA7E841177C2F759571CBD837EA004A6CA8C6F4AEBFF2E9FD552D73EB8A30705D58D70C0B67AEEA280CBBF0A477358ACEF1E7508F2735CD9A0E4F9AC92B8C008F575D3B6278F1C18BD01227E3502E5255F3AB1893632AD00C717C588EF652A51A43209E7EE90",
- "2B1A62F8FDFAA3C16470A21AD307C9A7D03ADE8EF72C69B06F8D738CDE578D7AEFD0D40BD9C022FB9F580DF5394C998ACCCEFC5471A3996FB8F1045A81FDC6F32D13502EA65A211390C8D882B8E0BEFD8DD8CBEF51D1597B124E9F7F",
- "C873E02A22DB89EB0787DB6A60B99F7E4A0A085D5C4232A81ADCE2D60AA36F92DDC33F93DD8640AC0E08416B187FB382B3EC3EE85A64B0E6EE41C1366A5AD2A282F66605E87031CCBA2FA7B2DA201D975994AADE3DD1EE122AE09604AD489B84BF0C1AB7129EE16C6934850E"},
- {"A51300285E554FDBDE7F771A9A9A80955639DD87129FAEF74987C91FB9687C71",
- "81691D5D20EC818FCFF24B33DECC",
- "C948093218AA9EB2A8E44A87EEA73FC8B6B75A196819A14BD83709EA323E8DF8B491045220E1D88729A38DBCFFB60D3056DAD4564498FD6574F74512945DEB34B69329ACED9FFC05D5D59DFCD5B973E2ACAFE6AD1EF8BBBC49351A2DD12508ED89ED",
- "EB861165DAF7625F827C6B574ED703F03215",
- "C6CD1CE76D2B3679C1B5AA1CFD67CCB55444B6BFD3E22C81CBC9BB738796B83E54E3"},
- {"8CE0156D26FAEB7E0B9B800BBB2E9D4075B5EAC5C62358B0E7F6FCE610223282",
- "D2A7B94DD12CDACA909D3AD7",
- "E021A78F374FC271389AB9A3E97077D755",
- "7C26000B58929F5095E1CEE154F76C2A299248E299F9B5ADE6C403AA1FD4A67FD4E0232F214CE7B919EE7A1027D2B76C57475715CD078461",
- "C556FB38DF069B56F337B5FF5775CE6EAA16824DFA754F20B78819028EA635C3BB7AA731DE8776B2DCB67DCA2D33EEDF3C7E52EA450013722A41755A0752433ED17BDD5991AAE77A"},
- {"1E8000A2CE00A561C9920A30BF0D7B983FEF8A1014C8F04C35CA6970E6BA02BD",
- "65ED3D63F79F90BBFD19775E",
- "336A8C0B7243582A46B221AA677647FCAE91",
- "134A8B34824A290E7B",
- "914FBEF80D0E6E17F8BDBB6097EBF5FBB0554952DC2B9E5151"},
- {"53D5607BBE690B6E8D8F6D97F3DF2BA853B682597A214B8AA0EA6E598650AF15",
- "C391A856B9FE234E14BA1AC7BB40FF",
- "479682BC21349C4BE1641D5E78FE2C79EC1B9CF5470936DCAD9967A4DCD7C4EFADA593BC9EDE71E6A08829B8580901B61E274227E9D918502DE3",
- "EAD154DC09C5E26C5D26FF33ED148B27120C7F2C23225CC0D0631B03E1F6C6D96FEB88C1A4052ACB4CE746B884B6502931F407021126C6AAB8C514C077A5A38438AE88EE",
- "938821286EBB671D999B87C032E1D6055392EB564E57970D55E545FC5E8BAB90E6E3E3C0913F6320995FC636D72CD9919657CC38BD51552F4A502D8D1FE56DB33EBAC5092630E69EBB986F0E15CEE9FC8C052501"},
- {"294362FCC984F440CEA3E9F7D2C06AF20C53AAC1B3738CA2186C914A6E193ABB",
- "B15B61C8BB39261A8F55AB178EC3",
- "D0729B6B75BB",
- "2BD089ADCE9F334BAE3B065996C7D616DD0C27DF4218DCEEA0FBCA0F968837CE26B0876083327E25681FDDD620A32EC0DA12F73FAE826CC94BFF2B90A54D2651",
- "AC94B25E4E21DE2437B806966CCD5D9385EF0CD4A51AB9FA6DE675C7B8952D67802E9FEC1FDE9F5D1EAB06057498BC0EEA454804FC9D2068982A3E24182D9AC2E7AB9994DDC899A604264583F63D066B"},
- {"959DBFEB039B1A5B8CE6A44649B602AAA5F98A906DB96143D202CD2024F749D9",
- "01D7BDB1133E9C347486C1EFA6",
- "F3843955BD741F379DD750585EDC55E2CDA05CCBA8C1F4622AC2FE35214BC3A019B8BD12C4CC42D9213D1E1556941E8D8450830287FFB3B763A13722DD4140ED9846FB5FFF745D7B0B967D810A068222E10B259AF1D392035B0D83DC1498A6830B11B2418A840212599171E0258A1C203B05362978",
- "A21811232C950FA8B12237C2EBD6A7CD2C3A155905E9E0C7C120",
- "63C1CE397B22F1A03F1FA549B43178BC405B152D3C95E977426D519B3DFCA28498823240592B6EEE7A14"},
- {"096AE499F5294173F34FF2B375F0E5D5AB79D0D03B33B1A74D7D576826345DF4",
- "0C52B3D11D636E5910A4DD76D32C",
- "229E9ECA3053789E937447BC719467075B6138A142DA528DA8F0CF8DDF022FD9AF8E74779BA3AC306609",
- "8B7A00038783E8BAF6EDEAE0C4EAB48FC8FD501A588C7E4A4DB71E3604F2155A97687D3D2FFF8569261375A513CF4398CE0F87CA1658A1050F6EF6C4EA3E25",
- "C20B6CF8D3C8241825FD90B2EDAC7593600646E579A8D8DAAE9E2E40C3835FE801B2BE4379131452BC5182C90307B176DFBE2049544222FE7783147B690774F6D9D7CEF52A91E61E298E9AA15464AC"},
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/ocb/rfc7253_test_vectors_suite_a.go b/vendor/github.com/ProtonMail/go-crypto/ocb/rfc7253_test_vectors_suite_a.go
deleted file mode 100644
index 330309ff..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/ocb/rfc7253_test_vectors_suite_a.go
+++ /dev/null
@@ -1,78 +0,0 @@
-package ocb
-
-import (
- "encoding/hex"
-)
-
-// Test vectors from https://tools.ietf.org/html/rfc7253. Note that key is
-// shared across tests.
-var testKey, _ = hex.DecodeString("000102030405060708090A0B0C0D0E0F")
-
-var rfc7253testVectors = []struct {
- nonce, header, plaintext, ciphertext string
-}{
- {"BBAA99887766554433221100",
- "",
- "",
- "785407BFFFC8AD9EDCC5520AC9111EE6"},
- {"BBAA99887766554433221101",
- "0001020304050607",
- "0001020304050607",
- "6820B3657B6F615A5725BDA0D3B4EB3A257C9AF1F8F03009"},
- {"BBAA99887766554433221102",
- "0001020304050607",
- "",
- "81017F8203F081277152FADE694A0A00"},
- {"BBAA99887766554433221103",
- "",
- "0001020304050607",
- "45DD69F8F5AAE72414054CD1F35D82760B2CD00D2F99BFA9"},
- {"BBAA99887766554433221104",
- "000102030405060708090A0B0C0D0E0F",
- "000102030405060708090A0B0C0D0E0F",
- "571D535B60B277188BE5147170A9A22C3AD7A4FF3835B8C5701C1CCEC8FC3358"},
- {"BBAA99887766554433221105",
- "000102030405060708090A0B0C0D0E0F",
- "",
- "8CF761B6902EF764462AD86498CA6B97"},
- {"BBAA99887766554433221106",
- "",
- "000102030405060708090A0B0C0D0E0F",
- "5CE88EC2E0692706A915C00AEB8B2396F40E1C743F52436BDF06D8FA1ECA343D"},
- {"BBAA99887766554433221107",
- "000102030405060708090A0B0C0D0E0F1011121314151617",
- "000102030405060708090A0B0C0D0E0F1011121314151617",
- "1CA2207308C87C010756104D8840CE1952F09673A448A122C92C62241051F57356D7F3C90BB0E07F"},
- {"BBAA99887766554433221108",
- "000102030405060708090A0B0C0D0E0F1011121314151617",
- "",
- "6DC225A071FC1B9F7C69F93B0F1E10DE"},
- {"BBAA99887766554433221109",
- "",
- "000102030405060708090A0B0C0D0E0F1011121314151617",
- "221BD0DE7FA6FE993ECCD769460A0AF2D6CDED0C395B1C3CE725F32494B9F914D85C0B1EB38357FF"},
- {"BBAA9988776655443322110A",
- "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F",
- "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F",
- "BD6F6C496201C69296C11EFD138A467ABD3C707924B964DEAFFC40319AF5A48540FBBA186C5553C68AD9F592A79A4240"},
- {"BBAA9988776655443322110B",
- "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F",
- "",
- "FE80690BEE8A485D11F32965BC9D2A32"},
- {"BBAA9988776655443322110C",
- "",
- "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F",
- "2942BFC773BDA23CABC6ACFD9BFD5835BD300F0973792EF46040C53F1432BCDFB5E1DDE3BC18A5F840B52E653444D5DF"},
- {"BBAA9988776655443322110D",
- "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F2021222324252627",
- "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F2021222324252627",
- "D5CA91748410C1751FF8A2F618255B68A0A12E093FF454606E59F9C1D0DDC54B65E8628E568BAD7AED07BA06A4A69483A7035490C5769E60"},
- {"BBAA9988776655443322110E",
- "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F2021222324252627",
- "",
- "C5CD9D1850C141E358649994EE701B68"},
- {"BBAA9988776655443322110F",
- "",
- "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F2021222324252627",
- "4412923493C57D5DE0D700F753CCE0D1D2D95060122E9F15A5DDBFC5787E50B5CC55EE507BCB084E479AD363AC366B95A98CA5F3000B1479"},
-}
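As a sketch of how these vectors exercise the implementation above: every suite-A vector uses a 128-bit tag, and the default `NewOCB` parameters (15-byte maximum nonce) accept the 12-byte nonces used here.

```go
package main

import (
	"crypto/aes"
	"encoding/hex"
	"fmt"
	"strings"

	"github.com/ProtonMail/go-crypto/ocb"
)

func main() {
	// Second suite-A entry: 8-byte header, 8-byte plaintext, 128-bit tag.
	key, _ := hex.DecodeString("000102030405060708090A0B0C0D0E0F")
	nonce, _ := hex.DecodeString("BBAA99887766554433221101")
	adata, _ := hex.DecodeString("0001020304050607")
	plain, _ := hex.DecodeString("0001020304050607")

	block, _ := aes.NewCipher(key)
	aead, _ := ocb.NewOCB(block)
	got := hex.EncodeToString(aead.Seal(nil, nonce, plain, adata))
	want := "6820B3657B6F615A5725BDA0D3B4EB3A257C9AF1F8F03009"
	fmt.Println(strings.EqualFold(got, want)) // true
}
```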
diff --git a/vendor/github.com/ProtonMail/go-crypto/ocb/rfc7253_test_vectors_suite_b.go b/vendor/github.com/ProtonMail/go-crypto/ocb/rfc7253_test_vectors_suite_b.go
deleted file mode 100644
index 5dc158f0..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/ocb/rfc7253_test_vectors_suite_b.go
+++ /dev/null
@@ -1,24 +0,0 @@
-package ocb
-
-// Second set of test vectors from https://tools.ietf.org/html/rfc7253
-var rfc7253TestVectorTaglen96 = struct {
- key, nonce, header, plaintext, ciphertext string
-}{"0F0E0D0C0B0A09080706050403020100",
- "BBAA9988776655443322110D",
- "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F2021222324252627",
- "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F2021222324252627",
- "1792A4E31E0755FB03E31B22116E6C2DDF9EFD6E33D536F1A0124B0A55BAE884ED93481529C76B6AD0C515F4D1CDD4FDAC4F02AA"}
-
-var rfc7253AlgorithmTest = []struct {
-	KEYLEN, TAGLEN int
-	OUTPUT         string
-}{
- {128, 128, "67E944D23256C5E0B6C61FA22FDF1EA2"},
- {192, 128, "F673F2C3E7174AAE7BAE986CA9F29E17"},
- {256, 128, "D90EB8E9C977C88B79DD793D7FFA161C"},
- {128, 96, "77A3D8E73589158D25D01209"},
- {192, 96, "05D56EAD2752C86BE6932C5E"},
- {256, 96, "5458359AC23B0CBA9E6330DD"},
- {128, 64, "192C9B7BD90BA06A"},
- {192, 64, "0066BC6E0EF34E24"},
- {256, 64, "7D4EA5D445501CBE"},
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/aes/keywrap/keywrap.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/aes/keywrap/keywrap.go
deleted file mode 100644
index 3c6251d1..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/aes/keywrap/keywrap.go
+++ /dev/null
@@ -1,153 +0,0 @@
-// Copyright 2014 Matthew Endsley
-// All rights reserved
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted providing that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-// ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
-// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
-// OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
-// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
-// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
-// IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-// POSSIBILITY OF SUCH DAMAGE.
-
-// Package keywrap is an implementation of the RFC 3394 AES key wrapping
-// algorithm. This is used in OpenPGP with elliptic curve keys.
-package keywrap
-
-import (
- "crypto/aes"
- "encoding/binary"
- "errors"
-)
-
-var (
- // ErrWrapPlaintext is returned if the plaintext is not a multiple
- // of 64 bits.
- ErrWrapPlaintext = errors.New("keywrap: plainText must be a multiple of 64 bits")
-
- // ErrUnwrapCiphertext is returned if the ciphertext is not a
- // multiple of 64 bits.
-	ErrUnwrapCiphertext = errors.New("keywrap: cipherText must be a multiple of 64 bits")
-
- // ErrUnwrapFailed is returned if unwrapping a key fails.
- ErrUnwrapFailed = errors.New("keywrap: failed to unwrap key")
-
- // NB: the AES NewCipher call only fails if the key is an invalid length.
-
- // ErrInvalidKey is returned when the AES key is invalid.
- ErrInvalidKey = errors.New("keywrap: invalid AES key")
-)
-
-// Wrap a key using the RFC 3394 AES Key Wrap Algorithm.
-func Wrap(key, plainText []byte) ([]byte, error) {
- if len(plainText)%8 != 0 {
- return nil, ErrWrapPlaintext
- }
-
- c, err := aes.NewCipher(key)
- if err != nil {
- return nil, ErrInvalidKey
- }
-
- nblocks := len(plainText) / 8
-
- // 1) Initialize variables.
- var block [aes.BlockSize]byte
- // - Set A = IV, an initial value (see 2.2.3)
- for ii := 0; ii < 8; ii++ {
- block[ii] = 0xA6
- }
-
- // - For i = 1 to n
- // - Set R[i] = P[i]
- intermediate := make([]byte, len(plainText))
- copy(intermediate, plainText)
-
- // 2) Calculate intermediate values.
- for ii := 0; ii < 6; ii++ {
- for jj := 0; jj < nblocks; jj++ {
- // - B = AES(K, A | R[i])
- copy(block[8:], intermediate[jj*8:jj*8+8])
- c.Encrypt(block[:], block[:])
-
-			// - A = MSB(64, B) ^ t where t = (n*j)+i
- t := uint64(ii*nblocks + jj + 1)
- val := binary.BigEndian.Uint64(block[:8]) ^ t
- binary.BigEndian.PutUint64(block[:8], val)
-
- // - R[i] = LSB(64, B)
- copy(intermediate[jj*8:jj*8+8], block[8:])
- }
- }
-
- // 3) Output results.
- // - Set C[0] = A
- // - For i = 1 to n
- // - C[i] = R[i]
- return append(block[:8], intermediate...), nil
-}
-
-// Unwrap a key using the RFC 3394 AES Key Wrap Algorithm.
-func Unwrap(key, cipherText []byte) ([]byte, error) {
- if len(cipherText)%8 != 0 {
- return nil, ErrUnwrapCiphertext
- }
-
- c, err := aes.NewCipher(key)
- if err != nil {
- return nil, ErrInvalidKey
- }
-
- nblocks := len(cipherText)/8 - 1
-
- // 1) Initialize variables.
- var block [aes.BlockSize]byte
- // - Set A = C[0]
- copy(block[:8], cipherText[:8])
-
- // - For i = 1 to n
- // - Set R[i] = C[i]
- intermediate := make([]byte, len(cipherText)-8)
- copy(intermediate, cipherText[8:])
-
- // 2) Compute intermediate values.
- for jj := 5; jj >= 0; jj-- {
- for ii := nblocks - 1; ii >= 0; ii-- {
-			// - B = AES-1(K, (A ^ t) | R[i]) where t = (n*j)+i
- // - A = MSB(64, B)
- t := uint64(jj*nblocks + ii + 1)
- val := binary.BigEndian.Uint64(block[:8]) ^ t
- binary.BigEndian.PutUint64(block[:8], val)
-
- copy(block[8:], intermediate[ii*8:ii*8+8])
- c.Decrypt(block[:], block[:])
-
-			// - R[i] = LSB(64, B)
- copy(intermediate[ii*8:ii*8+8], block[8:])
- }
- }
-
- // 3) Output results.
- // - If A is an appropriate initial value (see 2.2.3),
- for ii := 0; ii < 8; ii++ {
- if block[ii] != 0xA6 {
- return nil, ErrUnwrapFailed
- }
- }
-
- // - For i = 1 to n
- // - P[i] = R[i]
- return intermediate, nil
-}
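A short usage sketch of the two functions this deleted file exported (demo all-zero key-encryption key, for illustration only):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/ProtonMail/go-crypto/openpgp/aes/keywrap"
)

func main() {
	kek := make([]byte, 16) // key-encryption key (demo value)
	cek := make([]byte, 24) // key material; must be a multiple of 8 bytes
	wrapped, err := keywrap.Wrap(kek, cek)
	if err != nil {
		panic(err)
	}
	// RFC 3394 output is always 8 bytes longer than the input.
	fmt.Println(len(wrapped)) // 32
	unwrapped, err := keywrap.Unwrap(kek, wrapped)
	fmt.Println(bytes.Equal(cek, unwrapped), err) // true <nil>
}
```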
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/armor.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/armor.go
deleted file mode 100644
index 3b357e58..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/armor.go
+++ /dev/null
@@ -1,224 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package armor implements OpenPGP ASCII Armor, see RFC 4880. OpenPGP Armor is
-// very similar to PEM except that it has an additional CRC checksum.
-package armor // import "github.com/ProtonMail/go-crypto/openpgp/armor"
-
-import (
- "bufio"
- "bytes"
- "encoding/base64"
- "github.com/ProtonMail/go-crypto/openpgp/errors"
- "io"
-)
-
-// A Block represents an OpenPGP armored structure.
-//
-// The encoded form is:
-// -----BEGIN Type-----
-// Headers
-//
-// base64-encoded Bytes
-// '=' base64 encoded checksum
-// -----END Type-----
-// where Headers is a possibly empty sequence of Key: Value lines.
-//
-// Since the armored data can be very large, this package presents a streaming
-// interface.
-type Block struct {
- Type string // The type, taken from the preamble (i.e. "PGP SIGNATURE").
- Header map[string]string // Optional headers.
- Body io.Reader // A Reader from which the contents can be read
- lReader lineReader
- oReader openpgpReader
-}
-
-var ArmorCorrupt error = errors.StructuralError("armor invalid")
-
-const crc24Init = 0xb704ce
-const crc24Poly = 0x1864cfb
-const crc24Mask = 0xffffff
-
-// crc24 calculates the OpenPGP checksum as specified in RFC 4880, section 6.1
-func crc24(crc uint32, d []byte) uint32 {
- for _, b := range d {
- crc ^= uint32(b) << 16
- for i := 0; i < 8; i++ {
- crc <<= 1
- if crc&0x1000000 != 0 {
- crc ^= crc24Poly
- }
- }
- }
- return crc
-}
-
-var armorStart = []byte("-----BEGIN ")
-var armorEnd = []byte("-----END ")
-var armorEndOfLine = []byte("-----")
-
-// lineReader wraps a line-based reader. It watches for the end of an armor
-// block and records the expected CRC value.
-type lineReader struct {
- in *bufio.Reader
- buf []byte
- eof bool
- crc uint32
- crcSet bool
-}
-
-func (l *lineReader) Read(p []byte) (n int, err error) {
- if l.eof {
- return 0, io.EOF
- }
-
- if len(l.buf) > 0 {
- n = copy(p, l.buf)
- l.buf = l.buf[n:]
- return
- }
-
- line, isPrefix, err := l.in.ReadLine()
- if err != nil {
- return
- }
- if isPrefix {
- return 0, ArmorCorrupt
- }
-
- if bytes.HasPrefix(line, armorEnd) {
- l.eof = true
- return 0, io.EOF
- }
-
- if len(line) == 5 && line[0] == '=' {
- // This is the checksum line
- var expectedBytes [3]byte
- var m int
- m, err = base64.StdEncoding.Decode(expectedBytes[0:], line[1:])
- if m != 3 || err != nil {
- return
- }
- l.crc = uint32(expectedBytes[0])<<16 |
- uint32(expectedBytes[1])<<8 |
- uint32(expectedBytes[2])
-
- line, _, err = l.in.ReadLine()
- if err != nil && err != io.EOF {
- return
- }
- if !bytes.HasPrefix(line, armorEnd) {
- return 0, ArmorCorrupt
- }
-
- l.eof = true
- l.crcSet = true
- return 0, io.EOF
- }
-
- if len(line) > 96 {
- return 0, ArmorCorrupt
- }
-
- n = copy(p, line)
- bytesToSave := len(line) - n
- if bytesToSave > 0 {
- if cap(l.buf) < bytesToSave {
- l.buf = make([]byte, 0, bytesToSave)
- }
- l.buf = l.buf[0:bytesToSave]
- copy(l.buf, line[n:])
- }
-
- return
-}
-
-// openpgpReader passes Read calls to the underlying base64 decoder, but keeps
-// a running CRC of the resulting data and checks the CRC against the value
-// found by the lineReader at EOF.
-type openpgpReader struct {
- lReader *lineReader
- b64Reader io.Reader
- currentCRC uint32
-}
-
-func (r *openpgpReader) Read(p []byte) (n int, err error) {
- n, err = r.b64Reader.Read(p)
- r.currentCRC = crc24(r.currentCRC, p[:n])
-
- if err == io.EOF && r.lReader.crcSet && r.lReader.crc != uint32(r.currentCRC&crc24Mask) {
- return 0, ArmorCorrupt
- }
-
- return
-}
-
-// Decode reads a PGP armored block from the given Reader. It will ignore
-// leading garbage. If it doesn't find a block, it will return nil, io.EOF. The
-// given Reader is not usable after calling this function: an arbitrary amount
-// of data may have been read past the end of the block.
-func Decode(in io.Reader) (p *Block, err error) {
- r := bufio.NewReaderSize(in, 100)
- var line []byte
- ignoreNext := false
-
-TryNextBlock:
- p = nil
-
- // Skip leading garbage
- for {
- ignoreThis := ignoreNext
- line, ignoreNext, err = r.ReadLine()
- if err != nil {
- return
- }
- if ignoreNext || ignoreThis {
- continue
- }
- line = bytes.TrimSpace(line)
- if len(line) > len(armorStart)+len(armorEndOfLine) && bytes.HasPrefix(line, armorStart) {
- break
- }
- }
-
- p = new(Block)
- p.Type = string(line[len(armorStart) : len(line)-len(armorEndOfLine)])
- p.Header = make(map[string]string)
- nextIsContinuation := false
- var lastKey string
-
- // Read headers
- for {
- isContinuation := nextIsContinuation
- line, nextIsContinuation, err = r.ReadLine()
- if err != nil {
- p = nil
- return
- }
- if isContinuation {
- p.Header[lastKey] += string(line)
- continue
- }
- line = bytes.TrimSpace(line)
- if len(line) == 0 {
- break
- }
-
- i := bytes.Index(line, []byte(": "))
- if i == -1 {
- goto TryNextBlock
- }
- lastKey = string(line[:i])
- p.Header[lastKey] = string(line[i+2:])
- }
-
- p.lReader.in = r
- p.oReader.currentCRC = crc24Init
- p.oReader.lReader = &p.lReader
- p.oReader.b64Reader = base64.NewDecoder(base64.StdEncoding, &p.lReader)
- p.Body = &p.oReader
-
- return
-}
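A decoding sketch against this reader. Note that the `lineReader` above only verifies the CRC when a checksum line is present, so the hand-written block below may omit it:

```go
package main

import (
	"fmt"
	"io"
	"strings"

	"github.com/ProtonMail/go-crypto/openpgp/armor"
)

func main() {
	const armored = `-----BEGIN PGP MESSAGE-----
Comment: example

aGVsbG8=
-----END PGP MESSAGE-----`

	block, err := armor.Decode(strings.NewReader(armored))
	if err != nil {
		panic(err)
	}
	body, _ := io.ReadAll(block.Body)
	fmt.Println(block.Type, block.Header["Comment"], string(body))
	// PGP MESSAGE example hello
}
```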
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/encode.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/encode.go
deleted file mode 100644
index 6f07582c..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/encode.go
+++ /dev/null
@@ -1,160 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package armor
-
-import (
- "encoding/base64"
- "io"
-)
-
-var armorHeaderSep = []byte(": ")
-var blockEnd = []byte("\n=")
-var newline = []byte("\n")
-var armorEndOfLineOut = []byte("-----\n")
-
-// writeSlices writes its arguments to the given Writer.
-func writeSlices(out io.Writer, slices ...[]byte) (err error) {
- for _, s := range slices {
- _, err = out.Write(s)
- if err != nil {
- return err
- }
- }
- return
-}
-
-// lineBreaker breaks data across several lines, all of the same byte length
-// (except possibly the last). Lines are broken with a single '\n'.
-type lineBreaker struct {
- lineLength int
- line []byte
- used int
- out io.Writer
- haveWritten bool
-}
-
-func newLineBreaker(out io.Writer, lineLength int) *lineBreaker {
- return &lineBreaker{
- lineLength: lineLength,
- line: make([]byte, lineLength),
- used: 0,
- out: out,
- }
-}
-
-func (l *lineBreaker) Write(b []byte) (n int, err error) {
- n = len(b)
-
- if n == 0 {
- return
- }
-
- if l.used == 0 && l.haveWritten {
- _, err = l.out.Write([]byte{'\n'})
- if err != nil {
- return
- }
- }
-
- if l.used+len(b) < l.lineLength {
- l.used += copy(l.line[l.used:], b)
- return
- }
-
- l.haveWritten = true
- _, err = l.out.Write(l.line[0:l.used])
- if err != nil {
- return
- }
- excess := l.lineLength - l.used
- l.used = 0
-
- _, err = l.out.Write(b[0:excess])
- if err != nil {
- return
- }
-
- _, err = l.Write(b[excess:])
- return
-}
-
-func (l *lineBreaker) Close() (err error) {
- if l.used > 0 {
- _, err = l.out.Write(l.line[0:l.used])
- if err != nil {
- return
- }
- }
-
- return
-}
-
-// encoding keeps track of a running CRC24 over the data which has been written
-// to it and outputs an OpenPGP checksum when closed, followed by an armor
-// trailer.
-//
-// It's built into a stack of io.Writers:
-// encoding -> base64 encoder -> lineBreaker -> out
-type encoding struct {
- out io.Writer
- breaker *lineBreaker
- b64 io.WriteCloser
- crc uint32
- blockType []byte
-}
-
-func (e *encoding) Write(data []byte) (n int, err error) {
- e.crc = crc24(e.crc, data)
- return e.b64.Write(data)
-}
-
-func (e *encoding) Close() (err error) {
- err = e.b64.Close()
- if err != nil {
- return
- }
- e.breaker.Close()
-
- var checksumBytes [3]byte
- checksumBytes[0] = byte(e.crc >> 16)
- checksumBytes[1] = byte(e.crc >> 8)
- checksumBytes[2] = byte(e.crc)
-
- var b64ChecksumBytes [4]byte
- base64.StdEncoding.Encode(b64ChecksumBytes[:], checksumBytes[:])
-
- return writeSlices(e.out, blockEnd, b64ChecksumBytes[:], newline, armorEnd, e.blockType, armorEndOfLine)
-}
-
-// Encode returns a WriteCloser which will encode the data written to it in
-// OpenPGP armor.
-func Encode(out io.Writer, blockType string, headers map[string]string) (w io.WriteCloser, err error) {
- bType := []byte(blockType)
- err = writeSlices(out, armorStart, bType, armorEndOfLineOut)
- if err != nil {
- return
- }
-
- for k, v := range headers {
- err = writeSlices(out, []byte(k), armorHeaderSep, []byte(v), newline)
- if err != nil {
- return
- }
- }
-
- _, err = out.Write(newline)
- if err != nil {
- return
- }
-
- e := &encoding{
- out: out,
- breaker: newLineBreaker(out, 64),
- crc: crc24Init,
- blockType: bType,
- }
- e.b64 = base64.NewEncoder(base64.StdEncoding, e.breaker)
- return e, nil
-}
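And the matching encode-side sketch, round-tripping through `Decode` from the file above:

```go
package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/ProtonMail/go-crypto/openpgp/armor"
)

func main() {
	var buf bytes.Buffer
	w, err := armor.Encode(&buf, "PGP MESSAGE", map[string]string{"Version": "demo"})
	if err != nil {
		panic(err)
	}
	io.WriteString(w, "hello armor")
	w.Close() // flushes the base64 body, the CRC24 line and the END trailer

	block, _ := armor.Decode(bytes.NewReader(buf.Bytes()))
	body, _ := io.ReadAll(block.Body)
	fmt.Printf("%s: %s\n", block.Type, body) // PGP MESSAGE: hello armor
}
```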
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/canonical_text.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/canonical_text.go
deleted file mode 100644
index a94f6150..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/canonical_text.go
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package openpgp
-
-import (
- "hash"
- "io"
-)
-
-// NewCanonicalTextHash reformats text written to it into the canonical
-// form and then applies the hash h. See RFC 4880, section 5.2.1.
-func NewCanonicalTextHash(h hash.Hash) hash.Hash {
- return &canonicalTextHash{h, 0}
-}
-
-type canonicalTextHash struct {
- h hash.Hash
- s int
-}
-
-var newline = []byte{'\r', '\n'}
-
-func writeCanonical(cw io.Writer, buf []byte, s *int) (int, error) {
- start := 0
- for i, c := range buf {
- switch *s {
- case 0:
- if c == '\r' {
- *s = 1
- } else if c == '\n' {
- cw.Write(buf[start:i])
- cw.Write(newline)
- start = i + 1
- }
- case 1:
- *s = 0
- }
- }
-
- cw.Write(buf[start:])
- return len(buf), nil
-}
-
-func (cth *canonicalTextHash) Write(buf []byte) (int, error) {
- return writeCanonical(cth.h, buf, &cth.s)
-}
-
-func (cth *canonicalTextHash) Sum(in []byte) []byte {
- return cth.h.Sum(in)
-}
-
-func (cth *canonicalTextHash) Reset() {
- cth.h.Reset()
- cth.s = 0
-}
-
-func (cth *canonicalTextHash) Size() int {
- return cth.h.Size()
-}
-
-func (cth *canonicalTextHash) BlockSize() int {
- return cth.h.BlockSize()
-}
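A sketch showing the effect: LF and CRLF inputs canonicalize to the same digest, since the state machine above rewrites bare `\n` as `\r\n` and passes existing `\r\n` pairs through unchanged.

```go
package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"

	"github.com/ProtonMail/go-crypto/openpgp"
)

func main() {
	h1 := openpgp.NewCanonicalTextHash(sha256.New())
	h1.Write([]byte("line one\nline two"))
	h2 := openpgp.NewCanonicalTextHash(sha256.New())
	h2.Write([]byte("line one\r\nline two"))
	fmt.Println(bytes.Equal(h1.Sum(nil), h2.Sum(nil))) // true
}
```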
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdh/ecdh.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdh/ecdh.go
deleted file mode 100644
index b09e2a73..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdh/ecdh.go
+++ /dev/null
@@ -1,206 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package ecdh implements ECDH encryption, suitable for OpenPGP,
-// as specified in RFC 6637, section 8.
-package ecdh
-
-import (
- "bytes"
- "errors"
- "io"
-
- "github.com/ProtonMail/go-crypto/openpgp/aes/keywrap"
- "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm"
- "github.com/ProtonMail/go-crypto/openpgp/internal/ecc"
-)
-
-type KDF struct {
- Hash algorithm.Hash
- Cipher algorithm.Cipher
-}
-
-type PublicKey struct {
- curve ecc.ECDHCurve
- Point []byte
- KDF
-}
-
-type PrivateKey struct {
- PublicKey
- D []byte
-}
-
-func NewPublicKey(curve ecc.ECDHCurve, kdfHash algorithm.Hash, kdfCipher algorithm.Cipher) *PublicKey {
- return &PublicKey{
- curve: curve,
- KDF: KDF{
- Hash: kdfHash,
- Cipher: kdfCipher,
- },
- }
-}
-
-func NewPrivateKey(key PublicKey) *PrivateKey {
- return &PrivateKey{
- PublicKey: key,
- }
-}
-
-func (pk *PublicKey) GetCurve() ecc.ECDHCurve {
- return pk.curve
-}
-
-func (pk *PublicKey) MarshalPoint() []byte {
- return pk.curve.MarshalBytePoint(pk.Point)
-}
-
-func (pk *PublicKey) UnmarshalPoint(p []byte) error {
- pk.Point = pk.curve.UnmarshalBytePoint(p)
- if pk.Point == nil {
- return errors.New("ecdh: failed to parse EC point")
- }
- return nil
-}
-
-func (sk *PrivateKey) MarshalByteSecret() []byte {
- return sk.curve.MarshalByteSecret(sk.D)
-}
-
-func (sk *PrivateKey) UnmarshalByteSecret(d []byte) error {
- sk.D = sk.curve.UnmarshalByteSecret(d)
-
- if sk.D == nil {
- return errors.New("ecdh: failed to parse scalar")
- }
- return nil
-}
-
-func GenerateKey(rand io.Reader, c ecc.ECDHCurve, kdf KDF) (priv *PrivateKey, err error) {
- priv = new(PrivateKey)
- priv.PublicKey.curve = c
- priv.PublicKey.KDF = kdf
- priv.PublicKey.Point, priv.D, err = c.GenerateECDH(rand)
- return
-}
-
-func Encrypt(random io.Reader, pub *PublicKey, msg, curveOID, fingerprint []byte) (vsG, c []byte, err error) {
- if len(msg) > 40 {
- return nil, nil, errors.New("ecdh: message too long")
- }
- // the sender MAY use 21, 13, and 5 bytes of padding for AES-128,
- // AES-192, and AES-256, respectively, to provide the same number of
- // octets, 40 total, as an input to the key wrapping method.
- padding := make([]byte, 40-len(msg))
- for i := range padding {
- padding[i] = byte(40 - len(msg))
- }
- m := append(msg, padding...)
-
- ephemeral, zb, err := pub.curve.Encaps(random, pub.Point)
- if err != nil {
- return nil, nil, err
- }
-
- vsG = pub.curve.MarshalBytePoint(ephemeral)
-
- z, err := buildKey(pub, zb, curveOID, fingerprint, false, false)
- if err != nil {
- return nil, nil, err
- }
-
- if c, err = keywrap.Wrap(z, m); err != nil {
- return nil, nil, err
- }
-
- return vsG, c, nil
-
-}
-
-func Decrypt(priv *PrivateKey, vsG, c, curveOID, fingerprint []byte) (msg []byte, err error) {
- var m []byte
- zb, err := priv.PublicKey.curve.Decaps(priv.curve.UnmarshalBytePoint(vsG), priv.D)
-
-	// Try buildKey three times to work around an old bug; see comments in buildKey.
- for i := 0; i < 3; i++ {
- var z []byte
- // RFC6637 §8: "Compute Z = KDF( S, Z_len, Param );"
- z, err = buildKey(&priv.PublicKey, zb, curveOID, fingerprint, i == 1, i == 2)
- if err != nil {
- return nil, err
- }
-
- // RFC6637 §8: "Compute C = AESKeyWrap( Z, c ) as per [RFC3394]"
- m, err = keywrap.Unwrap(z, c)
- if err == nil {
- break
- }
- }
-
- // Only return an error after we've tried all (required) variants of buildKey.
- if err != nil {
- return nil, err
- }
-
- // RFC6637 §8: "m = symm_alg_ID || session key || checksum || pkcs5_padding"
- // The last byte should be the length of the padding, as per PKCS5; strip it off.
- return m[:len(m)-int(m[len(m)-1])], nil
-}
-
-func buildKey(pub *PublicKey, zb []byte, curveOID, fingerprint []byte, stripLeading, stripTrailing bool) ([]byte, error) {
- // Param = curve_OID_len || curve_OID || public_key_alg_ID || 03
- // || 01 || KDF_hash_ID || KEK_alg_ID for AESKeyWrap
- // || "Anonymous Sender " || recipient_fingerprint;
- param := new(bytes.Buffer)
- if _, err := param.Write(curveOID); err != nil {
- return nil, err
- }
- algKDF := []byte{18, 3, 1, pub.KDF.Hash.Id(), pub.KDF.Cipher.Id()}
- if _, err := param.Write(algKDF); err != nil {
- return nil, err
- }
- if _, err := param.Write([]byte("Anonymous Sender ")); err != nil {
- return nil, err
- }
- // For v5 keys, the 20 leftmost octets of the fingerprint are used.
- if _, err := param.Write(fingerprint[:20]); err != nil {
- return nil, err
- }
- if param.Len() - len(curveOID) != 45 {
- return nil, errors.New("ecdh: malformed KDF Param")
- }
-
- // MB = Hash ( 00 || 00 || 00 || 01 || ZB || Param );
- h := pub.KDF.Hash.New()
- if _, err := h.Write([]byte{0x0, 0x0, 0x0, 0x1}); err != nil {
- return nil, err
- }
- zbLen := len(zb)
- i := 0
- j := zbLen - 1
- if stripLeading {
- // Work around old go crypto bug where the leading zeros are missing.
- for ; i < zbLen && zb[i] == 0; i++ {}
- }
- if stripTrailing {
- // Work around old OpenPGP.js bug where insignificant trailing zeros in
- // this little-endian number are missing.
- // (See https://github.com/openpgpjs/openpgpjs/pull/853.)
- for ; j >= 0 && zb[j] == 0; j-- {}
- }
- if _, err := h.Write(zb[i:j+1]); err != nil {
- return nil, err
- }
- if _, err := h.Write(param.Bytes()); err != nil {
- return nil, err
- }
- mb := h.Sum(nil)
-
- return mb[:pub.KDF.Cipher.KeySize()], nil // return oBits leftmost bits of MB.
-
-}
-
-func Validate(priv *PrivateKey) error {
- return priv.curve.ValidateECDH(priv.Point, priv.D)
-}
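The fixed 40-octet padding used by `Encrypt` and `Decrypt` above is plain PKCS5-style: every pad byte carries the pad length. A standalone sketch (`pad40` and `unpad40` are hypothetical names, not part of this package):

```go
package main

import "fmt"

// pad40 extends msg to 40 octets, each padding byte holding the pad length,
// mirroring the scheme in Encrypt above.
func pad40(msg []byte) []byte {
	n := byte(40 - len(msg))
	out := append([]byte{}, msg...)
	for i := byte(0); i < n; i++ {
		out = append(out, n)
	}
	return out
}

// unpad40 strips the padding the way Decrypt does: the last byte is the
// pad length.
func unpad40(m []byte) []byte {
	return m[:len(m)-int(m[len(m)-1])]
}

func main() {
	msg := make([]byte, 19) // e.g. alg ID || 16-byte session key || 2-byte checksum
	padded := pad40(msg)
	fmt.Println(len(padded), padded[39], len(unpad40(padded))) // 40 21 19
}
```

The 21-byte pad for a 19-byte AES-128 payload matches the RFC quote cited in `Encrypt` above.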
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdsa/ecdsa.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdsa/ecdsa.go
deleted file mode 100644
index 6682a21a..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdsa/ecdsa.go
+++ /dev/null
@@ -1,80 +0,0 @@
-// Package ecdsa implements ECDSA signature, suitable for OpenPGP,
-// as specified in RFC 6637, section 5.
-package ecdsa
-
-import (
- "errors"
- "github.com/ProtonMail/go-crypto/openpgp/internal/ecc"
- "io"
- "math/big"
-)
-
-type PublicKey struct {
- X, Y *big.Int
- curve ecc.ECDSACurve
-}
-
-type PrivateKey struct {
- PublicKey
- D *big.Int
-}
-
-func NewPublicKey(curve ecc.ECDSACurve) *PublicKey {
- return &PublicKey{
- curve: curve,
- }
-}
-
-func NewPrivateKey(key PublicKey) *PrivateKey {
- return &PrivateKey{
- PublicKey: key,
- }
-}
-
-func (pk *PublicKey) GetCurve() ecc.ECDSACurve {
- return pk.curve
-}
-
-func (pk *PublicKey) MarshalPoint() []byte {
- return pk.curve.MarshalIntegerPoint(pk.X, pk.Y)
-}
-
-func (pk *PublicKey) UnmarshalPoint(p []byte) error {
- pk.X, pk.Y = pk.curve.UnmarshalIntegerPoint(p)
- if pk.X == nil {
- return errors.New("ecdsa: failed to parse EC point")
- }
- return nil
-}
-
-func (sk *PrivateKey) MarshalIntegerSecret() []byte {
- return sk.curve.MarshalIntegerSecret(sk.D)
-}
-
-func (sk *PrivateKey) UnmarshalIntegerSecret(d []byte) error {
- sk.D = sk.curve.UnmarshalIntegerSecret(d)
-
- if sk.D == nil {
- return errors.New("ecdsa: failed to parse scalar")
- }
- return nil
-}
-
-func GenerateKey(rand io.Reader, c ecc.ECDSACurve) (priv *PrivateKey, err error) {
- priv = new(PrivateKey)
- priv.PublicKey.curve = c
- priv.PublicKey.X, priv.PublicKey.Y, priv.D, err = c.GenerateECDSA(rand)
- return
-}
-
-func Sign(rand io.Reader, priv *PrivateKey, hash []byte) (r, s *big.Int, err error) {
- return priv.PublicKey.curve.Sign(rand, priv.X, priv.Y, priv.D, hash)
-}
-
-func Verify(pub *PublicKey, hash []byte, r, s *big.Int) bool {
- return pub.curve.Verify(pub.X, pub.Y, hash, r, s)
-}
-
-func Validate(priv *PrivateKey) error {
- return priv.curve.ValidateECDSA(priv.X, priv.Y, priv.D.Bytes())
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/eddsa/eddsa.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/eddsa/eddsa.go
deleted file mode 100644
index 12866c12..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/eddsa/eddsa.go
+++ /dev/null
@@ -1,91 +0,0 @@
-// Package eddsa implements EdDSA signature, suitable for OpenPGP, as specified in
-// https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-13.7
-package eddsa
-
-import (
- "errors"
- "github.com/ProtonMail/go-crypto/openpgp/internal/ecc"
- "io"
-)
-
-type PublicKey struct {
- X []byte
- curve ecc.EdDSACurve
-}
-
-type PrivateKey struct {
- PublicKey
- D []byte
-}
-
-func NewPublicKey(curve ecc.EdDSACurve) *PublicKey {
- return &PublicKey{
- curve: curve,
- }
-}
-
-func NewPrivateKey(key PublicKey) *PrivateKey {
- return &PrivateKey{
- PublicKey: key,
- }
-}
-
-func (pk *PublicKey) GetCurve() ecc.EdDSACurve {
- return pk.curve
-}
-
-func (pk *PublicKey) MarshalPoint() []byte {
- return pk.curve.MarshalBytePoint(pk.X)
-}
-
-func (pk *PublicKey) UnmarshalPoint(x []byte) error {
- pk.X = pk.curve.UnmarshalBytePoint(x)
-
- if pk.X == nil {
- return errors.New("eddsa: failed to parse EC point")
- }
- return nil
-}
-
-func (sk *PrivateKey) MarshalByteSecret() []byte {
- return sk.curve.MarshalByteSecret(sk.D)
-}
-
-func (sk *PrivateKey) UnmarshalByteSecret(d []byte) error {
- sk.D = sk.curve.UnmarshalByteSecret(d)
-
- if sk.D == nil {
- return errors.New("eddsa: failed to parse scalar")
- }
- return nil
-}
-
-func GenerateKey(rand io.Reader, c ecc.EdDSACurve) (priv *PrivateKey, err error) {
- priv = new(PrivateKey)
- priv.PublicKey.curve = c
- priv.PublicKey.X, priv.D, err = c.GenerateEdDSA(rand)
- return
-}
-
-func Sign(priv *PrivateKey, message []byte) (r, s []byte, err error) {
- sig, err := priv.PublicKey.curve.Sign(priv.PublicKey.X, priv.D, message)
- if err != nil {
- return nil, nil, err
- }
-
- r, s = priv.PublicKey.curve.MarshalSignature(sig)
- return
-}
-
-func Verify(pub *PublicKey, message, r, s []byte) bool {
- sig := pub.curve.UnmarshalSignature(r, s)
- if sig == nil {
- return false
- }
-
- return pub.curve.Verify(pub.X, message, sig)
-}
-
-func Validate(priv *PrivateKey) error {
- return priv.curve.ValidateEdDSA(priv.PublicKey.X, priv.D)
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/elgamal/elgamal.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/elgamal/elgamal.go
deleted file mode 100644
index 6a07d8ff..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/elgamal/elgamal.go
+++ /dev/null
@@ -1,124 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package elgamal implements ElGamal encryption, suitable for OpenPGP,
-// as specified in "A Public-Key Cryptosystem and a Signature Scheme Based on
-// Discrete Logarithms," IEEE Transactions on Information Theory, v. IT-31,
-// n. 4, 1985, pp. 469-472.
-//
-// This form of ElGamal embeds PKCS#1 v1.5 padding, which may make it
-// unsuitable for other protocols. RSA should be used in preference in any
-// case.
-package elgamal // import "github.com/ProtonMail/go-crypto/openpgp/elgamal"
-
-import (
- "crypto/rand"
- "crypto/subtle"
- "errors"
- "io"
- "math/big"
-)
-
-// PublicKey represents an ElGamal public key.
-type PublicKey struct {
- G, P, Y *big.Int
-}
-
-// PrivateKey represents an ElGamal private key.
-type PrivateKey struct {
- PublicKey
- X *big.Int
-}
-
-// Encrypt encrypts the given message to the given public key. The result is a
-// pair of integers. Errors can result from reading random, or because msg is
-// too large to be encrypted to the public key.
-func Encrypt(random io.Reader, pub *PublicKey, msg []byte) (c1, c2 *big.Int, err error) {
- pLen := (pub.P.BitLen() + 7) / 8
- if len(msg) > pLen-11 {
- err = errors.New("elgamal: message too long")
- return
- }
-
- // EM = 0x02 || PS || 0x00 || M
- em := make([]byte, pLen-1)
- em[0] = 2
- ps, mm := em[1:len(em)-len(msg)-1], em[len(em)-len(msg):]
- err = nonZeroRandomBytes(ps, random)
- if err != nil {
- return
- }
- em[len(em)-len(msg)-1] = 0
- copy(mm, msg)
-
- m := new(big.Int).SetBytes(em)
-
- k, err := rand.Int(random, pub.P)
- if err != nil {
- return
- }
-
- c1 = new(big.Int).Exp(pub.G, k, pub.P)
- s := new(big.Int).Exp(pub.Y, k, pub.P)
- c2 = s.Mul(s, m)
- c2.Mod(c2, pub.P)
-
- return
-}
-
-// Decrypt takes two integers, resulting from an ElGamal encryption, and
-// returns the plaintext of the message. An error can result only if the
-// ciphertext is invalid. Users should keep in mind that this is a padding
-// oracle and thus, if exposed to an adaptive chosen ciphertext attack, can
-// be used to break the cryptosystem. See ``Chosen Ciphertext Attacks
-// Against Protocols Based on the RSA Encryption Standard PKCS #1'', Daniel
-// Bleichenbacher, Advances in Cryptology (Crypto '98),
-func Decrypt(priv *PrivateKey, c1, c2 *big.Int) (msg []byte, err error) {
- s := new(big.Int).Exp(c1, priv.X, priv.P)
- if s.ModInverse(s, priv.P) == nil {
- return nil, errors.New("elgamal: invalid private key")
- }
- s.Mul(s, c2)
- s.Mod(s, priv.P)
- em := s.Bytes()
-
- firstByteIsTwo := subtle.ConstantTimeByteEq(em[0], 2)
-
- // The remainder of the plaintext must be a string of non-zero random
- // octets, followed by a 0, followed by the message.
- // lookingForIndex: 1 iff we are still looking for the zero.
- // index: the offset of the first zero byte.
- var lookingForIndex, index int
- lookingForIndex = 1
-
- for i := 1; i < len(em); i++ {
- equals0 := subtle.ConstantTimeByteEq(em[i], 0)
- index = subtle.ConstantTimeSelect(lookingForIndex&equals0, i, index)
- lookingForIndex = subtle.ConstantTimeSelect(equals0, 0, lookingForIndex)
- }
-
- if firstByteIsTwo != 1 || lookingForIndex != 0 || index < 9 {
- return nil, errors.New("elgamal: decryption error")
- }
- return em[index+1:], nil
-}
-
-// nonZeroRandomBytes fills the given slice with non-zero random octets.
-func nonZeroRandomBytes(s []byte, rand io.Reader) (err error) {
- _, err = io.ReadFull(rand, s)
- if err != nil {
- return
- }
-
- for i := 0; i < len(s); i++ {
- for s[i] == 0 {
- _, err = io.ReadFull(rand, s[i:i+1])
- if err != nil {
- return
- }
- }
- }
-
- return
-}
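A round-trip sketch with toy parameters: a random 256-bit prime and g = 2 suffice to demonstrate the arithmetic, but are not a safe group for real use.

```go
package main

import (
	"crypto/rand"
	"fmt"
	"math/big"

	"github.com/ProtonMail/go-crypto/openpgp/elgamal"
)

func main() {
	// Toy group, for illustration only.
	p, _ := rand.Prime(rand.Reader, 256)
	g := big.NewInt(2)
	x, _ := rand.Int(rand.Reader, p)
	priv := &elgamal.PrivateKey{
		PublicKey: elgamal.PublicKey{G: g, P: p, Y: new(big.Int).Exp(g, x, p)},
		X:         x,
	}
	c1, c2, err := elgamal.Encrypt(rand.Reader, &priv.PublicKey, []byte("hi"))
	if err != nil {
		panic(err)
	}
	msg, err := elgamal.Decrypt(priv, c1, c2)
	fmt.Println(string(msg), err) // hi <nil>
}
```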
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/errors/errors.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/errors/errors.go
deleted file mode 100644
index 17e2bcfe..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/errors/errors.go
+++ /dev/null
@@ -1,116 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package errors contains common error types for the OpenPGP packages.
-package errors // import "github.com/ProtonMail/go-crypto/openpgp/errors"
-
-import (
- "strconv"
-)
-
-// A StructuralError is returned when OpenPGP data is found to be syntactically
-// invalid.
-type StructuralError string
-
-func (s StructuralError) Error() string {
- return "openpgp: invalid data: " + string(s)
-}
-
-// UnsupportedError indicates that, although the OpenPGP data is valid, it
-// makes use of currently unimplemented features.
-type UnsupportedError string
-
-func (s UnsupportedError) Error() string {
- return "openpgp: unsupported feature: " + string(s)
-}
-
-// InvalidArgumentError indicates that the caller is in error and passed an
-// incorrect value.
-type InvalidArgumentError string
-
-func (i InvalidArgumentError) Error() string {
- return "openpgp: invalid argument: " + string(i)
-}
-
-// SignatureError indicates that a syntactically valid signature failed to
-// validate.
-type SignatureError string
-
-func (b SignatureError) Error() string {
- return "openpgp: invalid signature: " + string(b)
-}
-
-var ErrMDCHashMismatch error = SignatureError("MDC hash mismatch")
-var ErrMDCMissing error = SignatureError("MDC packet not found")
-
-type signatureExpiredError int
-
-func (se signatureExpiredError) Error() string {
- return "openpgp: signature expired"
-}
-
-var ErrSignatureExpired error = signatureExpiredError(0)
-
-type keyExpiredError int
-
-func (ke keyExpiredError) Error() string {
- return "openpgp: key expired"
-}
-
-var ErrKeyExpired error = keyExpiredError(0)
-
-type keyIncorrectError int
-
-func (ki keyIncorrectError) Error() string {
- return "openpgp: incorrect key"
-}
-
-var ErrKeyIncorrect error = keyIncorrectError(0)
-
-// KeyInvalidError indicates that the public key parameters are invalid
-// as they do not match the private ones
-type KeyInvalidError string
-
-func (e KeyInvalidError) Error() string {
- return "openpgp: invalid key: " + string(e)
-}
-
-type unknownIssuerError int
-
-func (unknownIssuerError) Error() string {
- return "openpgp: signature made by unknown entity"
-}
-
-var ErrUnknownIssuer error = unknownIssuerError(0)
-
-type keyRevokedError int
-
-func (keyRevokedError) Error() string {
- return "openpgp: signature made by revoked key"
-}
-
-var ErrKeyRevoked error = keyRevokedError(0)
-
-type UnknownPacketTypeError uint8
-
-func (upte UnknownPacketTypeError) Error() string {
- return "openpgp: unknown packet type: " + strconv.Itoa(int(upte))
-}
-
-// AEADError indicates that there is a problem when initializing or using an
-// AEAD instance, configuration struct, nonces or index values.
-type AEADError string
-
-func (ae AEADError) Error() string {
- return "openpgp: aead error: " + string(ae)
-}
-
-// ErrDummyPrivateKey results when operations are attempted on a private key
-// that is just a dummy key. See
-// https://git.gnupg.org/cgi-bin/gitweb.cgi?p=gnupg.git;a=blob;f=doc/DETAILS;h=fe55ae16ab4e26d8356dc574c9e8bc935e71aef1;hb=23191d7851eae2217ecdac6484349849a24fd94a#l1109
-type ErrDummyPrivateKey string
-
-func (dke ErrDummyPrivateKey) Error() string {
- return "openpgp: s2k GNU dummy key: " + string(dke)
-}
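A sketch of how callers discriminate these kinds: the sentinel values compare with `errors.Is`, while the string-typed errors match via `errors.As`.

```go
package main

import (
	"errors"
	"fmt"

	pgperrors "github.com/ProtonMail/go-crypto/openpgp/errors"
)

func main() {
	err := error(pgperrors.UnsupportedError("aead mode 42"))

	fmt.Println(errors.Is(err, pgperrors.ErrMDCMissing)) // false
	var unsupported pgperrors.UnsupportedError
	if errors.As(err, &unsupported) {
		fmt.Println("unsupported feature:", string(unsupported))
	}
}
```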
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/aead.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/aead.go
deleted file mode 100644
index d0670651..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/aead.go
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright (C) 2019 ProtonTech AG
-
-package algorithm
-
-import (
- "crypto/cipher"
- "github.com/ProtonMail/go-crypto/eax"
- "github.com/ProtonMail/go-crypto/ocb"
-)
-
-// AEADMode defines the Authenticated Encryption with Associated Data mode of
-// operation.
-type AEADMode uint8
-
-// Supported modes of operation (see RFC4880bis [EAX] and RFC7253)
-const (
- AEADModeEAX = AEADMode(1)
- AEADModeOCB = AEADMode(2)
- AEADModeGCM = AEADMode(3)
-)
-
-// TagLength returns the length in bytes of authentication tags.
-func (mode AEADMode) TagLength() int {
- switch mode {
- case AEADModeEAX:
- return 16
- case AEADModeOCB:
- return 16
- case AEADModeGCM:
- return 16
- default:
- return 0
- }
-}
-
-// NonceLength returns the length in bytes of nonces.
-func (mode AEADMode) NonceLength() int {
- switch mode {
- case AEADModeEAX:
- return 16
- case AEADModeOCB:
- return 15
- case AEADModeGCM:
- return 12
- default:
- return 0
- }
-}
-
-// New returns a fresh instance of the given mode
-func (mode AEADMode) New(block cipher.Block) (alg cipher.AEAD) {
- var err error
- switch mode {
- case AEADModeEAX:
- alg, err = eax.NewEAX(block)
- case AEADModeOCB:
- alg, err = ocb.NewOCB(block)
- case AEADModeGCM:
- alg, err = cipher.NewGCM(block)
- }
- if err != nil {
- panic(err.Error())
- }
- return alg
-}
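A usage sketch; since `internal/algorithm` cannot be imported from outside the module, this would live in the package's own tests:

```go
package algorithm

import (
	"crypto/aes"
	"fmt"
)

// Would sit in an in-package _test.go file.
func ExampleAEADMode() {
	block, _ := aes.NewCipher(make([]byte, 16))
	mode := AEADModeOCB
	aead := mode.New(block)
	// The mode's advertised lengths agree with the constructed cipher.AEAD.
	fmt.Println(mode.NonceLength(), aead.NonceSize(), mode.TagLength(), aead.Overhead())
	// Output: 15 15 16 16
}
```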
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/cipher.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/cipher.go
deleted file mode 100644
index 5760cff8..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/cipher.go
+++ /dev/null
@@ -1,107 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package algorithm
-
-import (
- "crypto/aes"
- "crypto/cipher"
- "crypto/des"
-
- "golang.org/x/crypto/cast5"
-)
-
-// Cipher is an official symmetric key cipher algorithm. See RFC 4880,
-// section 9.2.
-type Cipher interface {
- // Id returns the algorithm ID, as a byte, of the cipher.
- Id() uint8
- // KeySize returns the key size, in bytes, of the cipher.
- KeySize() int
- // BlockSize returns the block size, in bytes, of the cipher.
- BlockSize() int
- // New returns a fresh instance of the given cipher.
- New(key []byte) cipher.Block
-}
-
-// The following constants mirror the OpenPGP standard (RFC 4880).
-const (
- TripleDES = CipherFunction(2)
- CAST5 = CipherFunction(3)
- AES128 = CipherFunction(7)
- AES192 = CipherFunction(8)
- AES256 = CipherFunction(9)
-)
-
-// CipherById represents the different block ciphers specified for OpenPGP. See
-// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-13
-var CipherById = map[uint8]Cipher{
- TripleDES.Id(): TripleDES,
- CAST5.Id(): CAST5,
- AES128.Id(): AES128,
- AES192.Id(): AES192,
- AES256.Id(): AES256,
-}
-
-type CipherFunction uint8
-
-// Id returns the algorithm ID, as a byte, of cipher.
-func (sk CipherFunction) Id() uint8 {
- return uint8(sk)
-}
-
-var keySizeByID = map[uint8]int{
- TripleDES.Id(): 24,
- CAST5.Id(): cast5.KeySize,
- AES128.Id(): 16,
- AES192.Id(): 24,
- AES256.Id(): 32,
-}
-
-// KeySize returns the key size, in bytes, of cipher.
-func (cipher CipherFunction) KeySize() int {
- switch cipher {
- case TripleDES:
- return 24
- case CAST5:
- return cast5.KeySize
- case AES128:
- return 16
- case AES192:
- return 24
- case AES256:
- return 32
- }
- return 0
-}
-
-// BlockSize returns the block size, in bytes, of cipher.
-func (cipher CipherFunction) BlockSize() int {
- switch cipher {
- case TripleDES:
- return des.BlockSize
- case CAST5:
- return 8
- case AES128, AES192, AES256:
- return 16
- }
- return 0
-}
-
-// New returns a fresh instance of the given cipher.
-func (cipher CipherFunction) New(key []byte) (block cipher.Block) {
- var err error
- switch cipher {
- case TripleDES:
- block, err = des.NewTripleDESCipher(key)
- case CAST5:
- block, err = cast5.NewCipher(key)
- case AES128, AES192, AES256:
- block, err = aes.NewCipher(key)
- }
- if err != nil {
- panic(err.Error())
- }
- return
-}
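
CipherFunction.New panics when the key length is wrong, so callers are expected to size keys with KeySize first. A stdlib-only sketch of the AES256 row of the table (32-byte key, 16-byte block):

```go
package main

import (
	"crypto/aes"
	"fmt"
)

func main() {
	key := make([]byte, 32) // AES256.KeySize() == 32
	block, err := aes.NewCipher(key)
	if err != nil {
		panic(err) // mirrors the panic-on-error behavior of CipherFunction.New
	}
	fmt.Println(block.BlockSize()) // 16, as in CipherFunction.BlockSize
}
```
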
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/hash.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/hash.go
deleted file mode 100644
index 82e43d67..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/hash.go
+++ /dev/null
@@ -1,143 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package algorithm
-
-import (
- "crypto"
- "fmt"
- "hash"
-)
-
-// Hash is an official hash function algorithm. See RFC 4880, section 9.4.
-type Hash interface {
- // Id returns the algorithm ID, as a byte, of Hash.
- Id() uint8
- // Available reports whether the given hash function is linked into the binary.
- Available() bool
- // HashFunc simply returns the value of h so that Hash implements SignerOpts.
- HashFunc() crypto.Hash
- // New returns a new hash.Hash calculating the given hash function. New
- // panics if the hash function is not linked into the binary.
- New() hash.Hash
- // Size returns the length, in bytes, of a digest resulting from the given
- // hash function. It doesn't require that the hash function in question be
- // linked into the program.
- Size() int
- // String is the name of the hash function corresponding to the given
- // OpenPGP hash id.
- String() string
-}
-
-// The following vars mirror the crypto/Hash supported hash functions.
-var (
- SHA1 Hash = cryptoHash{2, crypto.SHA1}
- SHA256 Hash = cryptoHash{8, crypto.SHA256}
- SHA384 Hash = cryptoHash{9, crypto.SHA384}
- SHA512 Hash = cryptoHash{10, crypto.SHA512}
- SHA224 Hash = cryptoHash{11, crypto.SHA224}
- SHA3_256 Hash = cryptoHash{12, crypto.SHA3_256}
- SHA3_512 Hash = cryptoHash{14, crypto.SHA3_512}
-)
-
-// HashById represents the different hash functions specified for OpenPGP. See
-// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-14
-var (
- HashById = map[uint8]Hash{
- SHA256.Id(): SHA256,
- SHA384.Id(): SHA384,
- SHA512.Id(): SHA512,
- SHA224.Id(): SHA224,
- SHA3_256.Id(): SHA3_256,
- SHA3_512.Id(): SHA3_512,
- }
-)
-
-// cryptoHash contains pairs relating OpenPGP's hash identifier with
-// Go's crypto.Hash type. See RFC 4880, section 9.4.
-type cryptoHash struct {
- id uint8
- crypto.Hash
-}
-
-// Id returns the algorithm ID, as a byte, of cryptoHash.
-func (h cryptoHash) Id() uint8 {
- return h.id
-}
-
-var hashNames = map[uint8]string{
- SHA256.Id(): "SHA256",
- SHA384.Id(): "SHA384",
- SHA512.Id(): "SHA512",
- SHA224.Id(): "SHA224",
- SHA3_256.Id(): "SHA3-256",
- SHA3_512.Id(): "SHA3-512",
-}
-
-func (h cryptoHash) String() string {
- s, ok := hashNames[h.id]
- if !ok {
- panic(fmt.Sprintf("Unsupported hash function %d", h.id))
- }
- return s
-}
-
-// HashIdToHash returns a crypto.Hash which corresponds to the given OpenPGP
-// hash id.
-func HashIdToHash(id byte) (h crypto.Hash, ok bool) {
- if hash, ok := HashById[id]; ok {
- return hash.HashFunc(), true
- }
- return 0, false
-}
-
-// HashIdToHashWithSha1 returns a crypto.Hash which corresponds to the given OpenPGP
-// hash id, allowing sha1.
-func HashIdToHashWithSha1(id byte) (h crypto.Hash, ok bool) {
- if hash, ok := HashById[id]; ok {
- return hash.HashFunc(), true
- }
-
- if id == SHA1.Id() {
- return SHA1.HashFunc(), true
- }
-
- return 0, false
-}
-
-// HashIdToString returns the name of the hash function corresponding to the
-// given OpenPGP hash id.
-func HashIdToString(id byte) (name string, ok bool) {
- if hash, ok := HashById[id]; ok {
- return hash.String(), true
- }
- return "", false
-}
-
-// HashToHashId returns an OpenPGP hash id which corresponds the given Hash.
-func HashToHashId(h crypto.Hash) (id byte, ok bool) {
- for id, hash := range HashById {
- if hash.HashFunc() == h {
- return id, true
- }
- }
-
- return 0, false
-}
-
-// HashToHashIdWithSha1 returns an OpenPGP hash id which corresponds the given Hash,
-// allowing instances of SHA1
-func HashToHashIdWithSha1(h crypto.Hash) (id byte, ok bool) {
- for id, hash := range HashById {
- if hash.HashFunc() == h {
- return id, true
- }
- }
-
- if h == SHA1.HashFunc() {
- return SHA1.Id(), true
- }
-
- return 0, false
-}
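
Note that HashById deliberately omits SHA1 (id 2); it is reachable only through the *WithSha1 variants. A standalone sketch of the same id-to-crypto.Hash lookup, using a trimmed copy of the table:

```go
package main

import (
	"crypto"
	_ "crypto/sha256" // links SHA-256 into the binary so New() is available
	"fmt"
)

// hashByID is a trimmed copy of the OpenPGP id -> crypto.Hash table above.
var hashByID = map[uint8]crypto.Hash{
	8:  crypto.SHA256,
	9:  crypto.SHA384,
	10: crypto.SHA512,
	11: crypto.SHA224,
}

func main() {
	if h, ok := hashByID[8]; ok && h.Available() {
		d := h.New()
		d.Write([]byte("abc"))
		fmt.Printf("%x\n", d.Sum(nil))
	}
}
```
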
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519.go
deleted file mode 100644
index 266635ec..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519.go
+++ /dev/null
@@ -1,171 +0,0 @@
-// Package ecc implements a generic interface for ECDH, ECDSA, and EdDSA.
-package ecc
-
-import (
- "crypto/subtle"
- "io"
-
- "github.com/ProtonMail/go-crypto/openpgp/errors"
- x25519lib "github.com/cloudflare/circl/dh/x25519"
-)
-
-type curve25519 struct {}
-
-func NewCurve25519() *curve25519 {
- return &curve25519{}
-}
-
-func (c *curve25519) GetCurveName() string {
- return "curve25519"
-}
-
-// MarshalBytePoint encodes the public point from native format, adding the prefix.
-// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.6
-func (c *curve25519) MarshalBytePoint(point [] byte) []byte {
- return append([]byte{0x40}, point...)
-}
-
-// UnmarshalBytePoint decodes the public point to native format, removing the prefix.
-// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.6
-func (c *curve25519) UnmarshalBytePoint(point []byte) []byte {
- if len(point) != x25519lib.Size + 1 {
- return nil
- }
-
- // Remove prefix
- return point[1:]
-}
-
-// MarshalByteSecret encodes the secret scalar from native format.
-// Note that the EC secret scalar differs from the definition of public keys in
-// [Curve25519] in two ways: (1) the byte-ordering is big-endian, which is
-// more uniform with how big integers are represented in OpenPGP, and (2) the
-// leading zeros are truncated.
-// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.6.1.1
-// Note that leading zero bytes are stripped later when encoding as an MPI.
-func (c *curve25519) MarshalByteSecret(secret []byte) []byte {
- d := make([]byte, x25519lib.Size)
- copyReversed(d, secret)
-
- // The following ensures that the private key is a number of the form
- // 2^{254} + 8 * [0, 2^{251}), in order to avoid the small subgroup of
- // the curve.
- //
- // This masking is done internally in the underlying lib and so is unnecessary
- // for security, but OpenPGP implementations require that private keys be
- // pre-masked.
- d[0] &= 127
- d[0] |= 64
- d[31] &= 248
-
- return d
-}
-
-// UnmarshalByteSecret decodes the secret scalar from native format.
-// Note that the EC secret scalar differs from the definition of public keys in
-// [Curve25519] in two ways: (1) the byte-ordering is big-endian, which is
-// more uniform with how big integers are represented in OpenPGP, and (2) the
-// leading zeros are truncated.
-// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.6.1.1
-func (c *curve25519) UnmarshalByteSecret(d []byte) []byte {
- if len(d) > x25519lib.Size {
- return nil
- }
-
- // Ensure truncated leading bytes are re-added
- secret := make([]byte, x25519lib.Size)
- copyReversed(secret, d)
-
- return secret
-}
-
-// generateKeyPairBytes generates a private-public key pair.
-// 'priv' is a private key; a little-endian scalar belonging to the set
-// 2^{254} + 8 * [0, 2^{251}), in order to avoid the small subgroup of the
-// curve. 'pub' is simply 'priv' * G where G is the base point.
-// See https://cr.yp.to/ecdh.html and RFC7748, sec 5.
-func (c *curve25519) generateKeyPairBytes(rand io.Reader) (priv, pub x25519lib.Key, err error) {
- _, err = io.ReadFull(rand, priv[:])
- if err != nil {
- return
- }
-
- x25519lib.KeyGen(&pub, &priv)
- return
-}
-
-func (c *curve25519) GenerateECDH(rand io.Reader) (point []byte, secret []byte, err error) {
- priv, pub, err := c.generateKeyPairBytes(rand)
- if err != nil {
- return
- }
-
- return pub[:], priv[:], nil
-}
-
-func (c *genericCurve) MaskSecret(secret []byte) []byte {
- return secret
-}
-
-func (c *curve25519) Encaps(rand io.Reader, point []byte) (ephemeral, sharedSecret []byte, err error) {
- // RFC6637 §8: "Generate an ephemeral key pair {v, V=vG}"
- // ephemeralPrivate corresponds to `v`.
- // ephemeralPublic corresponds to `V`.
- ephemeralPrivate, ephemeralPublic, err := c.generateKeyPairBytes(rand)
- if err != nil {
- return nil, nil, err
- }
-
- // RFC6637 §8: "Obtain the authenticated recipient public key R"
- // pubKey corresponds to `R`.
- var pubKey x25519lib.Key
- copy(pubKey[:], point)
-
- // RFC6637 §8: "Compute the shared point S = vR"
- // "VB = convert point V to the octet string"
- // sharedPoint corresponds to `VB`.
- var sharedPoint x25519lib.Key
- x25519lib.Shared(&sharedPoint, &ephemeralPrivate, &pubKey)
-
- return ephemeralPublic[:], sharedPoint[:], nil
-}
-
-func (c *curve25519) Decaps(vsG, secret []byte) (sharedSecret []byte, err error) {
- var ephemeralPublic, decodedPrivate, sharedPoint x25519lib.Key
- // RFC6637 §8: "The decryption is the inverse of the method given."
- // All quoted descriptions in comments below describe encryption, and
- // the reverse is performed.
- // vsG corresponds to `VB` in RFC6637 §8 .
-
- // RFC6637 §8: "VB = convert point V to the octet string"
- copy(ephemeralPublic[:], vsG)
-
- // decodedPrivate corresponds to `r` in RFC6637 §8 .
- copy(decodedPrivate[:], secret)
-
- // RFC6637 §8: "Note that the recipient obtains the shared secret by calculating
- // S = rV = rvG, where (r,R) is the recipient's key pair."
- // sharedPoint corresponds to `S`.
- x25519lib.Shared(&sharedPoint, &decodedPrivate, &ephemeralPublic)
-
- return sharedPoint[:], nil
-}
-
-func (c *curve25519) ValidateECDH(point []byte, secret []byte) (err error) {
- var pk, sk x25519lib.Key
- copy(sk[:], secret)
- x25519lib.KeyGen(&pk, &sk)
-
- if subtle.ConstantTimeCompare(point, pk[:]) == 0 {
- return errors.KeyInvalidError("ecc: invalid curve25519 public point")
- }
-
- return nil
-}
-
-func copyReversed(out []byte, in []byte) {
- l := len(in)
- for i := 0; i < l; i++ {
- out[i] = in[l-i-1]
- }
-}
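
MarshalByteSecret both byte-reverses the scalar into big-endian and applies the X25519 clamp in mirrored positions. A self-contained sketch of that transform; clampBigEndian is an illustrative name, not part of the removed package:

```go
package main

import "fmt"

// clampBigEndian mirrors MarshalByteSecret above: reverse a little-endian
// X25519 scalar into a big-endian, 32-byte buffer, then pre-mask it so it
// has the form 2^254 + 8*[0, 2^251).
func clampBigEndian(secret []byte) []byte {
	d := make([]byte, 32)
	for i := range secret {
		d[i] = secret[len(secret)-i-1]
	}
	d[0] &= 127  // clear bit 255
	d[0] |= 64   // set bit 254
	d[31] &= 248 // clear the low 3 bits (force a multiple of the cofactor 8)
	return d
}

func main() {
	in := make([]byte, 32)
	for i := range in {
		in[i] = byte(i)
	}
	fmt.Printf("%x\n", clampBigEndian(in))
}
```
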
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve_info.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve_info.go
deleted file mode 100644
index df2878c9..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve_info.go
+++ /dev/null
@@ -1,140 +0,0 @@
-// Package ecc implements a generic interface for ECDH, ECDSA, and EdDSA.
-package ecc
-
-import (
- "bytes"
- "crypto/elliptic"
- "github.com/ProtonMail/go-crypto/bitcurves"
- "github.com/ProtonMail/go-crypto/brainpool"
- "github.com/ProtonMail/go-crypto/openpgp/internal/encoding"
-)
-
-type CurveInfo struct {
- GenName string
- Oid *encoding.OID
- Curve Curve
-}
-
-var Curves = []CurveInfo{
- {
- // NIST P-256
- GenName: "P256",
- Oid: encoding.NewOID([]byte{0x2A, 0x86, 0x48, 0xCE, 0x3D, 0x03, 0x01, 0x07}),
- Curve: NewGenericCurve(elliptic.P256()),
- },
- {
- // NIST P-384
- GenName: "P384",
- Oid: encoding.NewOID([]byte{0x2B, 0x81, 0x04, 0x00, 0x22}),
- Curve: NewGenericCurve(elliptic.P384()),
- },
- {
- // NIST P-521
- GenName: "P521",
- Oid: encoding.NewOID([]byte{0x2B, 0x81, 0x04, 0x00, 0x23}),
- Curve: NewGenericCurve(elliptic.P521()),
- },
- {
- // SecP256k1
- GenName: "SecP256k1",
- Oid: encoding.NewOID([]byte{0x2B, 0x81, 0x04, 0x00, 0x0A}),
- Curve: NewGenericCurve(bitcurves.S256()),
- },
- {
- // Curve25519
- GenName: "Curve25519",
- Oid: encoding.NewOID([]byte{0x2B, 0x06, 0x01, 0x04, 0x01, 0x97, 0x55, 0x01, 0x05, 0x01}),
- Curve: NewCurve25519(),
- },
- {
- // X448
- GenName: "Curve448",
- Oid: encoding.NewOID([]byte{0x2B, 0x65, 0x6F}),
- Curve: NewX448(),
- },
- {
- // Ed25519
- GenName: "Curve25519",
- Oid: encoding.NewOID([]byte{0x2B, 0x06, 0x01, 0x04, 0x01, 0xDA, 0x47, 0x0F, 0x01}),
- Curve: NewEd25519(),
- },
- {
- // Ed448
- GenName: "Curve448",
- Oid: encoding.NewOID([]byte{0x2B, 0x65, 0x71}),
- Curve: NewEd448(),
- },
- {
- // BrainpoolP256r1
- GenName: "BrainpoolP256",
- Oid: encoding.NewOID([]byte{0x2B, 0x24, 0x03, 0x03, 0x02, 0x08, 0x01, 0x01, 0x07}),
- Curve: NewGenericCurve(brainpool.P256r1()),
- },
- {
- // BrainpoolP384r1
- GenName: "BrainpoolP384",
- Oid: encoding.NewOID([]byte{0x2B, 0x24, 0x03, 0x03, 0x02, 0x08, 0x01, 0x01, 0x0B}),
- Curve: NewGenericCurve(brainpool.P384r1()),
- },
- {
- // BrainpoolP512r1
- GenName: "BrainpoolP512",
- Oid: encoding.NewOID([]byte{0x2B, 0x24, 0x03, 0x03, 0x02, 0x08, 0x01, 0x01, 0x0D}),
- Curve: NewGenericCurve(brainpool.P512r1()),
- },
-}
-
-func FindByCurve(curve Curve) *CurveInfo {
- for _, curveInfo := range Curves {
- if curveInfo.Curve.GetCurveName() == curve.GetCurveName() {
- return &curveInfo
- }
- }
- return nil
-}
-
-func FindByOid(oid encoding.Field) *CurveInfo {
- var rawBytes = oid.Bytes()
- for _, curveInfo := range Curves {
- if bytes.Equal(curveInfo.Oid.Bytes(), rawBytes) {
- return &curveInfo
- }
- }
- return nil
-}
-
-func FindEdDSAByGenName(curveGenName string) EdDSACurve {
- for _, curveInfo := range Curves {
- if curveInfo.GenName == curveGenName {
- curve, ok := curveInfo.Curve.(EdDSACurve)
- if ok {
- return curve
- }
- }
- }
- return nil
-}
-
-func FindECDSAByGenName(curveGenName string) ECDSACurve {
- for _, curveInfo := range Curves {
- if curveInfo.GenName == curveGenName {
- curve, ok := curveInfo.Curve.(ECDSACurve)
- if ok {
- return curve
- }
- }
- }
- return nil
-}
-
-func FindECDHByGenName(curveGenName string) ECDHCurve {
- for _, curveInfo := range Curves {
- if curveInfo.GenName == curveGenName {
- curve, ok := curveInfo.Curve.(ECDHCurve)
- if ok {
- return curve
- }
- }
- }
- return nil
-}
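
FindByOid matches on raw OID bytes rather than names. A trimmed, self-contained sketch of the same lookup over two rows of the table:

```go
package main

import (
	"bytes"
	"fmt"
)

// curveOIDs is a two-row stand-in for the Curves table above.
var curveOIDs = map[string][]byte{
	"Curve25519": {0x2B, 0x06, 0x01, 0x04, 0x01, 0x97, 0x55, 0x01, 0x05, 0x01},
	"Ed25519":    {0x2B, 0x06, 0x01, 0x04, 0x01, 0xDA, 0x47, 0x0F, 0x01},
}

// findByOID compares raw OID bytes, as FindByOid does.
func findByOID(oid []byte) (string, bool) {
	for name, known := range curveOIDs {
		if bytes.Equal(known, oid) {
			return name, true
		}
	}
	return "", false
}

func main() {
	name, ok := findByOID([]byte{0x2B, 0x06, 0x01, 0x04, 0x01, 0xDA, 0x47, 0x0F, 0x01})
	fmt.Println(name, ok) // Ed25519 true
}
```
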
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curves.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curves.go
deleted file mode 100644
index c47072b4..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curves.go
+++ /dev/null
@@ -1,48 +0,0 @@
-// Package ecc implements a generic interface for ECDH, ECDSA, and EdDSA.
-package ecc
-
-import (
- "io"
- "math/big"
-)
-
-type Curve interface {
- GetCurveName() string
-}
-
-type ECDSACurve interface {
- Curve
- MarshalIntegerPoint(x, y *big.Int) []byte
- UnmarshalIntegerPoint([]byte) (x, y *big.Int)
- MarshalIntegerSecret(d *big.Int) []byte
- UnmarshalIntegerSecret(d []byte) *big.Int
- GenerateECDSA(rand io.Reader) (x, y, secret *big.Int, err error)
- Sign(rand io.Reader, x, y, d *big.Int, hash []byte) (r, s *big.Int, err error)
- Verify(x, y *big.Int, hash []byte, r, s *big.Int) bool
- ValidateECDSA(x, y *big.Int, secret []byte) error
-}
-
-type EdDSACurve interface {
- Curve
- MarshalBytePoint(x []byte) []byte
- UnmarshalBytePoint([]byte) (x []byte)
- MarshalByteSecret(d []byte) []byte
- UnmarshalByteSecret(d []byte) []byte
- MarshalSignature(sig []byte) (r, s []byte)
- UnmarshalSignature(r, s []byte) (sig []byte)
- GenerateEdDSA(rand io.Reader) (pub, priv []byte, err error)
- Sign(publicKey, privateKey, message []byte) (sig []byte, err error)
- Verify(publicKey, message, sig []byte) bool
- ValidateEdDSA(publicKey, privateKey []byte) (err error)
-}
-type ECDHCurve interface {
- Curve
- MarshalBytePoint([]byte) (encoded []byte)
- UnmarshalBytePoint(encoded []byte) ([]byte)
- MarshalByteSecret(d []byte) []byte
- UnmarshalByteSecret(d []byte) []byte
- GenerateECDH(rand io.Reader) (point []byte, secret []byte, err error)
- Encaps(rand io.Reader, point []byte) (ephemeral, sharedSecret []byte, err error)
- Decaps(ephemeral, secret []byte) (sharedSecret []byte, err error)
- ValidateECDH(public []byte, secret []byte) error
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/ed25519.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/ed25519.go
deleted file mode 100644
index 29f6cba9..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/ed25519.go
+++ /dev/null
@@ -1,111 +0,0 @@
-// Package ecc implements a generic interface for ECDH, ECDSA, and EdDSA.
-package ecc
-
-import (
- "crypto/subtle"
- "io"
-
- "github.com/ProtonMail/go-crypto/openpgp/errors"
- ed25519lib "github.com/cloudflare/circl/sign/ed25519"
-)
-
-const ed25519Size = 32
-type ed25519 struct {}
-
-func NewEd25519() *ed25519 {
- return &ed25519{}
-}
-
-func (c *ed25519) GetCurveName() string {
- return "ed25519"
-}
-
-// MarshalBytePoint encodes the public point from native format, adding the prefix.
-// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.5
-func (c *ed25519) MarshalBytePoint(x []byte) []byte {
- return append([]byte{0x40}, x...)
-}
-
-// UnmarshalBytePoint decodes a point from prefixed format to native.
-// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.5
-func (c *ed25519) UnmarshalBytePoint(point []byte) (x []byte) {
- if len(point) != ed25519lib.PublicKeySize + 1 {
- return nil
- }
-
- // Return unprefixed
- return point[1:]
-}
-
-// MarshalByteSecret encodes a scalar in native format.
-// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.5
-func (c *ed25519) MarshalByteSecret(d []byte) []byte {
- return d
-}
-
-// UnmarshalByteSecret decodes a scalar in native format and re-adds the stripped leading zeroes
-// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.5
-func (c *ed25519) UnmarshalByteSecret(s []byte) (d []byte) {
- if len(s) > ed25519lib.SeedSize {
- return nil
- }
-
- // Handle stripped leading zeroes
- d = make([]byte, ed25519lib.SeedSize)
- copy(d[ed25519lib.SeedSize - len(s):], s)
- return
-}
-
-// MarshalSignature splits a signature into R and S.
-// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.2.3.3.1
-func (c *ed25519) MarshalSignature(sig []byte) (r, s []byte) {
- return sig[:ed25519Size], sig[ed25519Size:]
-}
-
-// UnmarshalSignature decodes R and S in the native format, re-adding the stripped leading zeroes
-// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.2.3.3.1
-func (c *ed25519) UnmarshalSignature(r, s []byte) (sig []byte) {
- // Check size
- if len(r) > 32 || len(s) > 32 {
- return nil
- }
-
- sig = make([]byte, ed25519lib.SignatureSize)
-
- // Handle stripped leading zeroes
- copy(sig[ed25519Size-len(r):ed25519Size], r)
- copy(sig[ed25519lib.SignatureSize-len(s):], s)
- return sig
-}
-
-func (c *ed25519) GenerateEdDSA(rand io.Reader) (pub, priv []byte, err error) {
- pk, sk, err := ed25519lib.GenerateKey(rand)
-
- if err != nil {
- return nil, nil, err
- }
-
- return pk, sk[:ed25519lib.SeedSize], nil
-}
-
-func getEd25519Sk(publicKey, privateKey []byte) ed25519lib.PrivateKey {
- return append(privateKey, publicKey...)
-}
-
-func (c *ed25519) Sign(publicKey, privateKey, message []byte) (sig []byte, err error) {
- sig = ed25519lib.Sign(getEd25519Sk(publicKey, privateKey), message)
- return sig, nil
-}
-
-func (c *ed25519) Verify(publicKey, message, sig []byte) bool {
- return ed25519lib.Verify(publicKey, message, sig)
-}
-
-func (c *ed25519) ValidateEdDSA(publicKey, privateKey []byte) (err error) {
- priv := getEd25519Sk(publicKey, privateKey)
- expectedPriv := ed25519lib.NewKeyFromSeed(priv.Seed())
- if subtle.ConstantTimeCompare(priv, expectedPriv) == 0 {
- return errors.KeyInvalidError("ecc: invalid ed25519 secret")
- }
- return nil
-}
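
getEd25519Sk relies on the convention, shared by circl and crypto/ed25519, that a private key is the 32-byte seed followed by the 32-byte public key. The ValidateEdDSA re-derivation can be reproduced with the standard library alone:

```go
package main

import (
	"crypto/ed25519"
	"crypto/rand"
	"crypto/subtle"
	"fmt"
)

func main() {
	pub, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}
	// priv is seed||pub, exactly the layout getEd25519Sk reassembles above.
	rebuilt := ed25519.NewKeyFromSeed(priv.Seed())

	// Mirrors ValidateEdDSA: re-derive, then compare in constant time.
	fmt.Println(subtle.ConstantTimeCompare(priv, rebuilt) == 1) // true

	sig := ed25519.Sign(priv, []byte("msg"))
	fmt.Println(ed25519.Verify(pub, []byte("msg"), sig)) // true
}
```
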
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/ed448.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/ed448.go
deleted file mode 100644
index a2df3dab..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/ed448.go
+++ /dev/null
@@ -1,111 +0,0 @@
-// Package ecc implements a generic interface for ECDH, ECDSA, and EdDSA.
-package ecc
-
-import (
- "crypto/subtle"
- "io"
-
- "github.com/ProtonMail/go-crypto/openpgp/errors"
- ed448lib "github.com/cloudflare/circl/sign/ed448"
-)
-
-type ed448 struct {}
-
-func NewEd448() *ed448 {
- return &ed448{}
-}
-
-func (c *ed448) GetCurveName() string {
- return "ed448"
-}
-
-// MarshalBytePoint encodes the public point from native format, adding the prefix.
-// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.5
-func (c *ed448) MarshalBytePoint(x []byte) []byte {
- // Return prefixed
- return append([]byte{0x40}, x...)
-}
-
-// UnmarshalBytePoint decodes a point from prefixed format to native.
-// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.5
-func (c *ed448) UnmarshalBytePoint(point []byte) (x []byte) {
- if len(point) != ed448lib.PublicKeySize + 1 {
- return nil
- }
-
- // Strip prefix
- return point[1:]
-}
-
-// MarshalByteSecret encodes a scalar from native format to prefixed.
-// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.5
-func (c *ed448) MarshalByteSecret(d []byte) []byte {
- // Return prefixed
- return append([]byte{0x40}, d...)
-}
-
-// UnmarshalByteSecret decodes a scalar from prefixed format to native.
-// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.5
-func (c *ed448) UnmarshalByteSecret(s []byte) (d []byte) {
- // Check prefixed size
- if len(s) != ed448lib.SeedSize + 1 {
- return nil
- }
-
- // Strip prefix
- return s[1:]
-}
-
-// MarshalSignature splits a signature into R and S, where R is in prefixed native format and
-// S is an MPI with value zero.
-// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.2.3.3.2
-func (c *ed448) MarshalSignature(sig []byte) (r, s []byte) {
- return append([]byte{0x40}, sig...), []byte{}
-}
-
-// UnmarshalSignature decodes R and S in the native format. Only R is used, in prefixed native format.
-// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.2.3.3.2
-func (c *ed448) UnmarshalSignature(r, s []byte) (sig []byte) {
- if len(r) != ed448lib.SignatureSize + 1 {
- return nil
- }
-
- return r[1:]
-}
-
-func (c *ed448) GenerateEdDSA(rand io.Reader) (pub, priv []byte, err error) {
- pk, sk, err := ed448lib.GenerateKey(rand)
-
- if err != nil {
- return nil, nil, err
- }
-
- return pk, sk[:ed448lib.SeedSize], nil
-}
-
-func getEd448Sk(publicKey, privateKey []byte) ed448lib.PrivateKey {
- return append(privateKey, publicKey...)
-}
-
-func (c *ed448) Sign(publicKey, privateKey, message []byte) (sig []byte, err error) {
- // Ed448 is used with the empty string as a context string.
- // See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-13.7
- sig = ed448lib.Sign(getEd448Sk(publicKey, privateKey), message, "")
-
- return sig, nil
-}
-
-func (c *ed448) Verify(publicKey, message, sig []byte) bool {
- // Ed448 is used with the empty string as a context string.
- // See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-13.7
- return ed448lib.Verify(publicKey, message, sig, "")
-}
-
-func (c *ed448) ValidateEdDSA(publicKey, privateKey []byte) (err error) {
- priv := getEd448Sk(publicKey, privateKey)
- expectedPriv := ed448lib.NewKeyFromSeed(priv.Seed())
- if subtle.ConstantTimeCompare(priv, expectedPriv) == 0 {
- return errors.KeyInvalidError("ecc: invalid ed448 secret")
- }
- return nil
-}
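
The circl Ed448 API takes an explicit context string, which the removed wrappers fix to the empty string per the crypto-refresh draft. A round-trip sketch, assuming github.com/cloudflare/circl is available on the module path:

```go
package main

import (
	"crypto/rand"
	"fmt"

	ed448lib "github.com/cloudflare/circl/sign/ed448"
)

func main() {
	pub, priv, err := ed448lib.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}
	msg := []byte("hello")
	// Empty context string, as in the removed Sign/Verify wrappers.
	sig := ed448lib.Sign(priv, msg, "")
	fmt.Println(ed448lib.Verify(pub, msg, sig, "")) // true
}
```
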
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/generic.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/generic.go
deleted file mode 100644
index e28d7c71..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/generic.go
+++ /dev/null
@@ -1,149 +0,0 @@
-// Package ecc implements a generic interface for ECDH, ECDSA, and EdDSA.
-package ecc
-
-import (
- "crypto/ecdsa"
- "crypto/elliptic"
- "fmt"
- "github.com/ProtonMail/go-crypto/openpgp/errors"
- "io"
- "math/big"
-)
-
-type genericCurve struct {
- Curve elliptic.Curve
-}
-
-func NewGenericCurve(c elliptic.Curve) *genericCurve {
- return &genericCurve{
- Curve: c,
- }
-}
-
-func (c *genericCurve) GetCurveName() string {
- return c.Curve.Params().Name
-}
-
-func (c *genericCurve) MarshalBytePoint(point []byte) []byte {
- return point
-}
-
-func (c *genericCurve) UnmarshalBytePoint(point []byte) []byte {
- return point
-}
-
-func (c *genericCurve) MarshalIntegerPoint(x, y *big.Int) []byte {
- return elliptic.Marshal(c.Curve, x, y)
-}
-
-func (c *genericCurve) UnmarshalIntegerPoint(point []byte) (x, y *big.Int) {
- return elliptic.Unmarshal(c.Curve, point)
-}
-
-func (c *genericCurve) MarshalByteSecret(d []byte) []byte {
- return d
-}
-
-func (c *genericCurve) UnmarshalByteSecret(d []byte) []byte {
- return d
-}
-
-func (c *genericCurve) MarshalIntegerSecret(d *big.Int) []byte {
- return d.Bytes()
-}
-
-func (c *genericCurve) UnmarshalIntegerSecret(d []byte) *big.Int {
- return new(big.Int).SetBytes(d)
-}
-
-func (c *genericCurve) GenerateECDH(rand io.Reader) (point, secret []byte, err error) {
- secret, x, y, err := elliptic.GenerateKey(c.Curve, rand)
- if err != nil {
- return nil, nil, err
- }
-
- point = elliptic.Marshal(c.Curve, x, y)
- return point, secret, nil
-}
-
-func (c *genericCurve) GenerateECDSA(rand io.Reader) (x, y, secret *big.Int, err error) {
- priv, err := ecdsa.GenerateKey(c.Curve, rand)
- if err != nil {
- return
- }
-
- return priv.X, priv.Y, priv.D, nil
-}
-
-func (c *genericCurve) Encaps(rand io.Reader, point []byte) (ephemeral, sharedSecret []byte, err error) {
- xP, yP := elliptic.Unmarshal(c.Curve, point)
- if xP == nil {
- panic("invalid point")
- }
-
- d, x, y, err := elliptic.GenerateKey(c.Curve, rand)
- if err != nil {
- return nil, nil, err
- }
-
- vsG := elliptic.Marshal(c.Curve, x, y)
- zbBig, _ := c.Curve.ScalarMult(xP, yP, d)
-
- byteLen := (c.Curve.Params().BitSize + 7) >> 3
- zb := make([]byte, byteLen)
- zbBytes := zbBig.Bytes()
- copy(zb[byteLen-len(zbBytes):], zbBytes)
-
- return vsG, zb, nil
-}
-
-func (c *genericCurve) Decaps(ephemeral, secret []byte) (sharedSecret []byte, err error) {
- x, y := elliptic.Unmarshal(c.Curve, ephemeral)
- zbBig, _ := c.Curve.ScalarMult(x, y, secret)
- byteLen := (c.Curve.Params().BitSize + 7) >> 3
- zb := make([]byte, byteLen)
- zbBytes := zbBig.Bytes()
- copy(zb[byteLen-len(zbBytes):], zbBytes)
-
- return zb, nil
-}
-
-func (c *genericCurve) Sign(rand io.Reader, x, y, d *big.Int, hash []byte) (r, s *big.Int, err error) {
- priv := &ecdsa.PrivateKey{D: d, PublicKey: ecdsa.PublicKey{X: x, Y: y, Curve: c.Curve}}
- return ecdsa.Sign(rand, priv, hash)
-}
-
-func (c *genericCurve) Verify(x, y *big.Int, hash []byte, r, s *big.Int) bool {
- pub := &ecdsa.PublicKey{X: x, Y: y, Curve: c.Curve}
- return ecdsa.Verify(pub, hash, r, s)
-}
-
-func (c *genericCurve) validate(xP, yP *big.Int, secret []byte) error {
- // the public point should not be at infinity (0,0)
- zero := new(big.Int)
- if xP.Cmp(zero) == 0 && yP.Cmp(zero) == 0 {
- return errors.KeyInvalidError(fmt.Sprintf("ecc (%s): infinity point", c.Curve.Params().Name))
- }
-
- // re-derive the public point Q' = (X,Y) = dG
- // to compare to declared Q in public key
- expectedX, expectedY := c.Curve.ScalarBaseMult(secret)
- if xP.Cmp(expectedX) != 0 || yP.Cmp(expectedY) != 0 {
- return errors.KeyInvalidError(fmt.Sprintf("ecc (%s): invalid point", c.Curve.Params().Name))
- }
-
- return nil
-}
-
-func (c *genericCurve) ValidateECDSA(xP, yP *big.Int, secret []byte) error {
- return c.validate(xP, yP, secret)
-}
-
-func (c *genericCurve) ValidateECDH(point []byte, secret []byte) error {
- xP, yP := elliptic.Unmarshal(c.Curve, point)
- if xP == nil {
- return errors.KeyInvalidError(fmt.Sprintf("ecc (%s): invalid point", c.Curve.Params().Name))
- }
-
- return c.validate(xP, yP, secret)
-}
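
Encaps/Decaps above is plain unauthenticated ECDH: both sides compute the x-coordinate of the shared point and left-pad it to the curve's byte length. A stdlib sketch of the same round trip on P-256:

```go
package main

import (
	"bytes"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"
)

func main() {
	curve := elliptic.P256()

	// Recipient key pair (d, Q = dG), as in GenerateECDH.
	d, xQ, yQ, err := elliptic.GenerateKey(curve, rand.Reader)
	if err != nil {
		panic(err)
	}

	// Encaps: ephemeral (v, V = vG), shared point S = vQ.
	v, xV, yV, err := elliptic.GenerateKey(curve, rand.Reader)
	if err != nil {
		panic(err)
	}
	xS1, _ := curve.ScalarMult(xQ, yQ, v)

	// Decaps: S = dV.
	xS2, _ := curve.ScalarMult(xV, yV, d)

	// Both sides left-pad the x-coordinate to the curve's byte length.
	byteLen := (curve.Params().BitSize + 7) / 8
	zb1 := make([]byte, byteLen)
	xS1.FillBytes(zb1)
	zb2 := make([]byte, byteLen)
	xS2.FillBytes(zb2)
	fmt.Println(bytes.Equal(zb1, zb2)) // true
}
```
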
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/x448.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/x448.go
deleted file mode 100644
index 4a940b4f..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/x448.go
+++ /dev/null
@@ -1,105 +0,0 @@
-// Package ecc implements a generic interface for ECDH, ECDSA, and EdDSA.
-package ecc
-
-import (
- "crypto/subtle"
- "io"
-
- "github.com/ProtonMail/go-crypto/openpgp/errors"
- x448lib "github.com/cloudflare/circl/dh/x448"
-)
-
-type x448 struct {}
-
-func NewX448() *x448 {
- return &x448{}
-}
-
-func (c *x448) GetCurveName() string {
- return "x448"
-}
-
-// MarshalBytePoint encodes the public point from native format, adding the prefix.
-// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.6
-func (c *x448) MarshalBytePoint(point []byte) []byte {
- return append([]byte{0x40}, point...)
-}
-
-// UnmarshalBytePoint decodes a point from prefixed format to native.
-// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.6
-func (c *x448) UnmarshalBytePoint(point []byte) []byte {
- if len(point) != x448lib.Size + 1 {
- return nil
- }
-
- return point[1:]
-}
-
-// MarshalByteSecret encodes a scalar from native format to prefixed.
-// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.6.1.2
-func (c *x448) MarshalByteSecret(d []byte) []byte {
- return append([]byte{0x40}, d...)
-}
-
-// UnmarshalByteSecret decodes a scalar from prefixed format to native.
-// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.6.1.2
-func (c *x448) UnmarshalByteSecret(d []byte) []byte {
- if len(d) != x448lib.Size + 1 {
- return nil
- }
-
- // Store without prefix
- return d[1:]
-}
-
-func (c *x448) generateKeyPairBytes(rand io.Reader) (sk, pk x448lib.Key, err error) {
- if _, err = rand.Read(sk[:]); err != nil {
- return
- }
-
- x448lib.KeyGen(&pk, &sk)
- return
-}
-
-func (c *x448) GenerateECDH(rand io.Reader) (point []byte, secret []byte, err error) {
- priv, pub, err := c.generateKeyPairBytes(rand)
- if err != nil {
- return
- }
-
- return pub[:], priv[:], nil
-}
-
-func (c *x448) Encaps(rand io.Reader, point []byte) (ephemeral, sharedSecret []byte, err error) {
- var pk, ss x448lib.Key
- seed, e, err := c.generateKeyPairBytes(rand)
-
- copy(pk[:], point)
- x448lib.Shared(&ss, &seed, &pk)
-
- return e[:], ss[:], nil
-}
-
-func (c *x448) Decaps(ephemeral, secret []byte) (sharedSecret []byte, err error) {
- var ss, sk, e x448lib.Key
-
- copy(sk[:], secret)
- copy(e[:], ephemeral)
- x448lib.Shared(&ss, &sk, &e)
-
- return ss[:], nil
-}
-
-func (c *x448) ValidateECDH(point []byte, secret []byte) error {
- var sk, pk, expectedPk x448lib.Key
-
- copy(pk[:], point)
- copy(sk[:], secret)
- x448lib.KeyGen(&expectedPk, &sk)
-
- if subtle.ConstantTimeCompare(expectedPk[:], pk[:]) == 0 {
- return errors.KeyInvalidError("ecc: invalid curve25519 public point")
- }
-
- return nil
-}
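
x448 follows the same KEM shape as curve25519, via circl's KeyGen/Shared. A shared-secret round-trip sketch, again assuming the circl module is available:

```go
package main

import (
	"crypto/rand"
	"crypto/subtle"
	"fmt"

	x448lib "github.com/cloudflare/circl/dh/x448"
)

func main() {
	var skA, pkA, skB, pkB, ssA, ssB x448lib.Key

	if _, err := rand.Read(skA[:]); err != nil {
		panic(err)
	}
	if _, err := rand.Read(skB[:]); err != nil {
		panic(err)
	}
	x448lib.KeyGen(&pkA, &skA) // pk = sk * G
	x448lib.KeyGen(&pkB, &skB)

	// Each side mixes its own secret with the peer's public key.
	x448lib.Shared(&ssA, &skA, &pkB)
	x448lib.Shared(&ssB, &skB, &pkA)
	fmt.Println(subtle.ConstantTimeCompare(ssA[:], ssB[:]) == 1) // true
}
```
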
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/encoding.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/encoding.go
deleted file mode 100644
index 6c921481..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/encoding.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package encoding implements openpgp packet field encodings as specified in
-// RFC 4880 and 6637.
-package encoding
-
-import "io"
-
-// Field is an encoded field of an openpgp packet.
-type Field interface {
- // Bytes returns the decoded data.
- Bytes() []byte
-
- // BitLength is the size in bits of the decoded data.
- BitLength() uint16
-
- // EncodedBytes returns the encoded data.
- EncodedBytes() []byte
-
- // EncodedLength is the size in bytes of the encoded data.
- EncodedLength() uint16
-
- // ReadFrom reads the next Field from r.
- ReadFrom(r io.Reader) (int64, error)
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/mpi.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/mpi.go
deleted file mode 100644
index 02e5e695..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/mpi.go
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package encoding
-
-import (
- "io"
- "math/big"
- "math/bits"
-)
-
-// An MPI is used to store the contents of a big integer, along with the bit
-// length that was specified in the original input. This allows the MPI to be
-// reserialized exactly.
-type MPI struct {
- bytes []byte
- bitLength uint16
-}
-
-// NewMPI returns an MPI initialized with bytes.
-func NewMPI(bytes []byte) *MPI {
- for len(bytes) != 0 && bytes[0] == 0 {
- bytes = bytes[1:]
- }
- if len(bytes) == 0 {
- bitLength := uint16(0)
- return &MPI{bytes, bitLength}
- }
- bitLength := 8*uint16(len(bytes)-1) + uint16(bits.Len8(bytes[0]))
- return &MPI{bytes, bitLength}
-}
-
-// Bytes returns the decoded data.
-func (m *MPI) Bytes() []byte {
- return m.bytes
-}
-
-// BitLength is the size in bits of the decoded data.
-func (m *MPI) BitLength() uint16 {
- return m.bitLength
-}
-
-// EncodedBytes returns the encoded data.
-func (m *MPI) EncodedBytes() []byte {
- return append([]byte{byte(m.bitLength >> 8), byte(m.bitLength)}, m.bytes...)
-}
-
-// EncodedLength is the size in bytes of the encoded data.
-func (m *MPI) EncodedLength() uint16 {
- return uint16(2 + len(m.bytes))
-}
-
-// ReadFrom reads into m the next MPI from r.
-func (m *MPI) ReadFrom(r io.Reader) (int64, error) {
- var buf [2]byte
- n, err := io.ReadFull(r, buf[0:])
- if err != nil {
- if err == io.EOF {
- err = io.ErrUnexpectedEOF
- }
- return int64(n), err
- }
-
- m.bitLength = uint16(buf[0])<<8 | uint16(buf[1])
- m.bytes = make([]byte, (int(m.bitLength)+7)/8)
-
- nn, err := io.ReadFull(r, m.bytes)
- if err == io.EOF {
- err = io.ErrUnexpectedEOF
- }
-
- // remove leading zero bytes from malformed GnuPG encoded MPIs:
- // https://bugs.gnupg.org/gnupg/issue1853
- // for _, b := range m.bytes {
- // if b != 0 {
- // break
- // }
- // m.bytes = m.bytes[1:]
- // m.bitLength -= 8
- // }
-
- return int64(n) + int64(nn), err
-}
-
-// SetBig initializes m with the bits from n.
-func (m *MPI) SetBig(n *big.Int) *MPI {
- m.bytes = n.Bytes()
- m.bitLength = uint16(n.BitLen())
- return m
-}
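
An MPI is a two-octet big-endian bit count followed by the magnitude with leading zero octets stripped. A standalone encoder that mirrors NewMPI plus EncodedBytes (encodeMPI is an illustrative name):

```go
package main

import (
	"fmt"
	"math/bits"
)

// encodeMPI mirrors NewMPI followed by EncodedBytes: strip leading zero
// octets, compute the exact bit length, and prepend it as two octets.
func encodeMPI(b []byte) []byte {
	for len(b) != 0 && b[0] == 0 {
		b = b[1:]
	}
	var bitLen uint16
	if len(b) > 0 {
		bitLen = 8*uint16(len(b)-1) + uint16(bits.Len8(b[0]))
	}
	return append([]byte{byte(bitLen >> 8), byte(bitLen)}, b...)
}

func main() {
	fmt.Printf("% x\n", encodeMPI([]byte{0x00, 0x01, 0xFF})) // 00 09 01 ff
}
```
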
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/oid.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/oid.go
deleted file mode 100644
index c9df9fe2..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/oid.go
+++ /dev/null
@@ -1,88 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package encoding
-
-import (
- "io"
-
- "github.com/ProtonMail/go-crypto/openpgp/errors"
-)
-
-// OID is used to store a variable-length field with a one-octet size
-// prefix. See https://tools.ietf.org/html/rfc6637#section-9.
-type OID struct {
- bytes []byte
-}
-
-const (
-	// maxOID is the maximum number of bytes in an OID.
- maxOID = 254
- // reservedOIDLength1 and reservedOIDLength2 are OID lengths that the RFC
- // specifies are reserved.
- reservedOIDLength1 = 0
- reservedOIDLength2 = 0xff
-)
-
-// NewOID returns an OID initialized with bytes.
-func NewOID(bytes []byte) *OID {
- switch len(bytes) {
- case reservedOIDLength1, reservedOIDLength2:
- panic("encoding: NewOID argument length is reserved")
- default:
- if len(bytes) > maxOID {
- panic("encoding: NewOID argument too large")
- }
- }
-
- return &OID{
- bytes: bytes,
- }
-}
-
-// Bytes returns the decoded data.
-func (o *OID) Bytes() []byte {
- return o.bytes
-}
-
-// BitLength is the size in bits of the decoded data.
-func (o *OID) BitLength() uint16 {
- return uint16(len(o.bytes) * 8)
-}
-
-// EncodedBytes returns the encoded data.
-func (o *OID) EncodedBytes() []byte {
- return append([]byte{byte(len(o.bytes))}, o.bytes...)
-}
-
-// EncodedLength is the size in bytes of the encoded data.
-func (o *OID) EncodedLength() uint16 {
- return uint16(1 + len(o.bytes))
-}
-
-// ReadFrom reads into o the next OID from r.
-func (o *OID) ReadFrom(r io.Reader) (int64, error) {
- var buf [1]byte
- n, err := io.ReadFull(r, buf[:])
- if err != nil {
- if err == io.EOF {
- err = io.ErrUnexpectedEOF
- }
- return int64(n), err
- }
-
- switch buf[0] {
- case reservedOIDLength1, reservedOIDLength2:
- return int64(n), errors.UnsupportedError("reserved for future extensions")
- }
-
- o.bytes = make([]byte, buf[0])
-
- nn, err := io.ReadFull(r, o.bytes)
- if err == io.EOF {
- err = io.ErrUnexpectedEOF
- }
-
- return int64(n) + int64(nn), err
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/key_generation.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/key_generation.go
deleted file mode 100644
index 0e71934c..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/key_generation.go
+++ /dev/null
@@ -1,389 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package openpgp
-
-import (
- "crypto"
- "crypto/rand"
- "crypto/rsa"
- goerrors "errors"
- "io"
- "math/big"
- "time"
-
- "github.com/ProtonMail/go-crypto/openpgp/ecdh"
- "github.com/ProtonMail/go-crypto/openpgp/ecdsa"
- "github.com/ProtonMail/go-crypto/openpgp/eddsa"
- "github.com/ProtonMail/go-crypto/openpgp/errors"
- "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm"
- "github.com/ProtonMail/go-crypto/openpgp/internal/ecc"
- "github.com/ProtonMail/go-crypto/openpgp/packet"
-)
-
-// NewEntity returns an Entity that contains a fresh RSA/RSA keypair with a
-// single identity composed of the given full name, comment and email, any of
-// which may be empty but must not contain any of "()<>\x00".
-// If config is nil, sensible defaults will be used.
-func NewEntity(name, comment, email string, config *packet.Config) (*Entity, error) {
- creationTime := config.Now()
- keyLifetimeSecs := config.KeyLifetime()
-
- // Generate a primary signing key
- primaryPrivRaw, err := newSigner(config)
- if err != nil {
- return nil, err
- }
- primary := packet.NewSignerPrivateKey(creationTime, primaryPrivRaw)
- if config != nil && config.V5Keys {
- primary.UpgradeToV5()
- }
-
- e := &Entity{
- PrimaryKey: &primary.PublicKey,
- PrivateKey: primary,
- Identities: make(map[string]*Identity),
- Subkeys: []Subkey{},
- }
-
- err = e.addUserId(name, comment, email, config, creationTime, keyLifetimeSecs)
- if err != nil {
- return nil, err
- }
-
- // NOTE: No key expiry here, but we will not return this subkey in EncryptionKey()
- // if the primary/master key has expired.
- err = e.addEncryptionSubkey(config, creationTime, 0)
- if err != nil {
- return nil, err
- }
-
- return e, nil
-}
-
-func (t *Entity) AddUserId(name, comment, email string, config *packet.Config) error {
- creationTime := config.Now()
- keyLifetimeSecs := config.KeyLifetime()
- return t.addUserId(name, comment, email, config, creationTime, keyLifetimeSecs)
-}
-
-func (t *Entity) addUserId(name, comment, email string, config *packet.Config, creationTime time.Time, keyLifetimeSecs uint32) error {
- uid := packet.NewUserId(name, comment, email)
- if uid == nil {
- return errors.InvalidArgumentError("user id field contained invalid characters")
- }
-
- if _, ok := t.Identities[uid.Id]; ok {
-		return errors.InvalidArgumentError("user id already exists")
- }
-
- primary := t.PrivateKey
-
- isPrimaryId := len(t.Identities) == 0
-
- selfSignature := createSignaturePacket(&primary.PublicKey, packet.SigTypePositiveCert, config)
- selfSignature.CreationTime = creationTime
- selfSignature.KeyLifetimeSecs = &keyLifetimeSecs
- selfSignature.IsPrimaryId = &isPrimaryId
- selfSignature.FlagsValid = true
- selfSignature.FlagSign = true
- selfSignature.FlagCertify = true
- selfSignature.SEIPDv1 = true // true by default, see 5.8 vs. 5.14
- selfSignature.SEIPDv2 = config.AEAD() != nil
-
- // Set the PreferredHash for the SelfSignature from the packet.Config.
- // If it is not the must-implement algorithm from rfc4880bis, append that.
- hash, ok := algorithm.HashToHashId(config.Hash())
- if !ok {
- return errors.UnsupportedError("unsupported preferred hash function")
- }
-
- selfSignature.PreferredHash = []uint8{hash}
- if config.Hash() != crypto.SHA256 {
- selfSignature.PreferredHash = append(selfSignature.PreferredHash, hashToHashId(crypto.SHA256))
- }
-
- // Likewise for DefaultCipher.
- selfSignature.PreferredSymmetric = []uint8{uint8(config.Cipher())}
- if config.Cipher() != packet.CipherAES128 {
- selfSignature.PreferredSymmetric = append(selfSignature.PreferredSymmetric, uint8(packet.CipherAES128))
- }
-
- // We set CompressionNone as the preferred compression algorithm because
- // of compression side channel attacks, then append the configured
- // DefaultCompressionAlgo if any is set (to signal support for cases
- // where the application knows that using compression is safe).
- selfSignature.PreferredCompression = []uint8{uint8(packet.CompressionNone)}
- if config.Compression() != packet.CompressionNone {
- selfSignature.PreferredCompression = append(selfSignature.PreferredCompression, uint8(config.Compression()))
- }
-
- // And for DefaultMode.
- modes := []uint8{uint8(config.AEAD().Mode())}
- if config.AEAD().Mode() != packet.AEADModeOCB {
- modes = append(modes, uint8(packet.AEADModeOCB))
- }
-
- // For preferred (AES256, GCM), we'll generate (AES256, GCM), (AES256, OCB), (AES128, GCM), (AES128, OCB)
- for _, cipher := range selfSignature.PreferredSymmetric {
- for _, mode := range modes {
- selfSignature.PreferredCipherSuites = append(selfSignature.PreferredCipherSuites, [2]uint8{cipher, mode})
- }
- }
-
- // User ID binding signature
- err := selfSignature.SignUserId(uid.Id, &primary.PublicKey, primary, config)
- if err != nil {
- return err
- }
- t.Identities[uid.Id] = &Identity{
- Name: uid.Id,
- UserId: uid,
- SelfSignature: selfSignature,
- Signatures: []*packet.Signature{selfSignature},
- }
- return nil
-}
-
-// AddSigningSubkey adds a signing keypair as a subkey to the Entity.
-// If config is nil, sensible defaults will be used.
-func (e *Entity) AddSigningSubkey(config *packet.Config) error {
- creationTime := config.Now()
- keyLifetimeSecs := config.KeyLifetime()
-
- subPrivRaw, err := newSigner(config)
- if err != nil {
- return err
- }
- sub := packet.NewSignerPrivateKey(creationTime, subPrivRaw)
- sub.IsSubkey = true
- if config != nil && config.V5Keys {
- sub.UpgradeToV5()
- }
-
- subkey := Subkey{
- PublicKey: &sub.PublicKey,
- PrivateKey: sub,
- }
- subkey.Sig = createSignaturePacket(e.PrimaryKey, packet.SigTypeSubkeyBinding, config)
- subkey.Sig.CreationTime = creationTime
- subkey.Sig.KeyLifetimeSecs = &keyLifetimeSecs
- subkey.Sig.FlagsValid = true
- subkey.Sig.FlagSign = true
- subkey.Sig.EmbeddedSignature = createSignaturePacket(subkey.PublicKey, packet.SigTypePrimaryKeyBinding, config)
- subkey.Sig.EmbeddedSignature.CreationTime = creationTime
-
- err = subkey.Sig.EmbeddedSignature.CrossSignKey(subkey.PublicKey, e.PrimaryKey, subkey.PrivateKey, config)
- if err != nil {
- return err
- }
-
- err = subkey.Sig.SignKey(subkey.PublicKey, e.PrivateKey, config)
- if err != nil {
- return err
- }
-
- e.Subkeys = append(e.Subkeys, subkey)
- return nil
-}
-
-// AddEncryptionSubkey adds an encryption keypair as a subkey to the Entity.
-// If config is nil, sensible defaults will be used.
-func (e *Entity) AddEncryptionSubkey(config *packet.Config) error {
- creationTime := config.Now()
- keyLifetimeSecs := config.KeyLifetime()
- return e.addEncryptionSubkey(config, creationTime, keyLifetimeSecs)
-}
-
-func (e *Entity) addEncryptionSubkey(config *packet.Config, creationTime time.Time, keyLifetimeSecs uint32) error {
- subPrivRaw, err := newDecrypter(config)
- if err != nil {
- return err
- }
- sub := packet.NewDecrypterPrivateKey(creationTime, subPrivRaw)
- sub.IsSubkey = true
- if config != nil && config.V5Keys {
- sub.UpgradeToV5()
- }
-
- subkey := Subkey{
- PublicKey: &sub.PublicKey,
- PrivateKey: sub,
- }
- subkey.Sig = createSignaturePacket(e.PrimaryKey, packet.SigTypeSubkeyBinding, config)
- subkey.Sig.CreationTime = creationTime
- subkey.Sig.KeyLifetimeSecs = &keyLifetimeSecs
- subkey.Sig.FlagsValid = true
- subkey.Sig.FlagEncryptStorage = true
- subkey.Sig.FlagEncryptCommunications = true
-
- err = subkey.Sig.SignKey(subkey.PublicKey, e.PrivateKey, config)
- if err != nil {
- return err
- }
-
- e.Subkeys = append(e.Subkeys, subkey)
- return nil
-}
-
-// Generates a signing key
-func newSigner(config *packet.Config) (signer interface{}, err error) {
- switch config.PublicKeyAlgorithm() {
- case packet.PubKeyAlgoRSA:
- bits := config.RSAModulusBits()
- if bits < 1024 {
- return nil, errors.InvalidArgumentError("bits must be >= 1024")
- }
- if config != nil && len(config.RSAPrimes) >= 2 {
- primes := config.RSAPrimes[0:2]
- config.RSAPrimes = config.RSAPrimes[2:]
- return generateRSAKeyWithPrimes(config.Random(), 2, bits, primes)
- }
- return rsa.GenerateKey(config.Random(), bits)
- case packet.PubKeyAlgoEdDSA:
- curve := ecc.FindEdDSAByGenName(string(config.CurveName()))
- if curve == nil {
- return nil, errors.InvalidArgumentError("unsupported curve")
- }
-
- priv, err := eddsa.GenerateKey(config.Random(), curve)
- if err != nil {
- return nil, err
- }
- return priv, nil
- case packet.PubKeyAlgoECDSA:
- curve := ecc.FindECDSAByGenName(string(config.CurveName()))
- if curve == nil {
- return nil, errors.InvalidArgumentError("unsupported curve")
- }
-
- priv, err := ecdsa.GenerateKey(config.Random(), curve)
- if err != nil {
- return nil, err
- }
- return priv, nil
- default:
- return nil, errors.InvalidArgumentError("unsupported public key algorithm")
- }
-}
-
-// Generates an encryption/decryption key
-func newDecrypter(config *packet.Config) (decrypter interface{}, err error) {
- switch config.PublicKeyAlgorithm() {
- case packet.PubKeyAlgoRSA:
- bits := config.RSAModulusBits()
- if bits < 1024 {
- return nil, errors.InvalidArgumentError("bits must be >= 1024")
- }
- if config != nil && len(config.RSAPrimes) >= 2 {
- primes := config.RSAPrimes[0:2]
- config.RSAPrimes = config.RSAPrimes[2:]
- return generateRSAKeyWithPrimes(config.Random(), 2, bits, primes)
- }
- return rsa.GenerateKey(config.Random(), bits)
- case packet.PubKeyAlgoEdDSA, packet.PubKeyAlgoECDSA:
- fallthrough // When passing EdDSA or ECDSA, we generate an ECDH subkey
- case packet.PubKeyAlgoECDH:
- var kdf = ecdh.KDF{
- Hash: algorithm.SHA512,
- Cipher: algorithm.AES256,
- }
- curve := ecc.FindECDHByGenName(string(config.CurveName()))
- if curve == nil {
- return nil, errors.InvalidArgumentError("unsupported curve")
- }
- return ecdh.GenerateKey(config.Random(), curve, kdf)
- default:
- return nil, errors.InvalidArgumentError("unsupported public key algorithm")
- }
-}
-
-var bigOne = big.NewInt(1)
-
-// generateRSAKeyWithPrimes generates a multi-prime RSA keypair of the
-// given bit size, using the given random source and prepopulated primes.
-func generateRSAKeyWithPrimes(random io.Reader, nprimes int, bits int, prepopulatedPrimes []*big.Int) (*rsa.PrivateKey, error) {
- priv := new(rsa.PrivateKey)
- priv.E = 65537
-
- if nprimes < 2 {
- return nil, goerrors.New("generateRSAKeyWithPrimes: nprimes must be >= 2")
- }
-
- if bits < 1024 {
- return nil, goerrors.New("generateRSAKeyWithPrimes: bits must be >= 1024")
- }
-
- primes := make([]*big.Int, nprimes)
-
-NextSetOfPrimes:
- for {
- todo := bits
- // crypto/rand should set the top two bits in each prime.
- // Thus each prime has the form
- // p_i = 2^bitlen(p_i) × 0.11... (in base 2).
- // And the product is:
- // P = 2^todo × α
- // where α is the product of nprimes numbers of the form 0.11...
- //
- // If α < 1/2 (which can happen for nprimes > 2), we need to
- // shift todo to compensate for lost bits: the mean value of 0.11...
- // is 7/8, so todo + shift - nprimes * log2(7/8) ~= bits - 1/2
- // will give good results.
- if nprimes >= 7 {
- todo += (nprimes - 2) / 5
- }
- for i := 0; i < nprimes; i++ {
- var err error
- if len(prepopulatedPrimes) == 0 {
- primes[i], err = rand.Prime(random, todo/(nprimes-i))
- if err != nil {
- return nil, err
- }
- } else {
- primes[i] = prepopulatedPrimes[0]
- prepopulatedPrimes = prepopulatedPrimes[1:]
- }
-
- todo -= primes[i].BitLen()
- }
-
- // Make sure that primes is pairwise unequal.
- for i, prime := range primes {
- for j := 0; j < i; j++ {
- if prime.Cmp(primes[j]) == 0 {
- continue NextSetOfPrimes
- }
- }
- }
-
- n := new(big.Int).Set(bigOne)
- totient := new(big.Int).Set(bigOne)
- pminus1 := new(big.Int)
- for _, prime := range primes {
- n.Mul(n, prime)
- pminus1.Sub(prime, bigOne)
- totient.Mul(totient, pminus1)
- }
- if n.BitLen() != bits {
- // This should never happen for nprimes == 2 because
- // crypto/rand should set the top two bits in each prime.
- // For nprimes > 2 we hope it does not happen often.
- continue NextSetOfPrimes
- }
-
- priv.D = new(big.Int)
- e := big.NewInt(int64(priv.E))
- ok := priv.D.ModInverse(e, totient)
-
- if ok != nil {
- priv.Primes = primes
- priv.N = n
- break
- }
- }
-
- priv.Precompute()
- return priv, nil
-}
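
NewEntity is the public entry point for the removed file: it builds the primary signing key, one user ID with its self-signature and preference subpackets, and one encryption subkey. A minimal usage sketch; a nil config selects the documented defaults:

```go
package main

import (
	"fmt"

	"github.com/ProtonMail/go-crypto/openpgp"
)

func main() {
	// A nil config selects the library defaults, per the NewEntity contract.
	entity, err := openpgp.NewEntity("Alice Example", "test", "alice@example.com", nil)
	if err != nil {
		panic(err)
	}
	fmt.Printf("primary key id %X, %d subkey(s)\n",
		entity.PrimaryKey.KeyId, len(entity.Subkeys))
}
```
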
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/keys.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/keys.go
deleted file mode 100644
index 120f081a..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/keys.go
+++ /dev/null
@@ -1,804 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package openpgp
-
-import (
- goerrors "errors"
- "io"
- "time"
-
- "github.com/ProtonMail/go-crypto/openpgp/armor"
- "github.com/ProtonMail/go-crypto/openpgp/errors"
- "github.com/ProtonMail/go-crypto/openpgp/packet"
-)
-
-// PublicKeyType is the armor type for a PGP public key.
-var PublicKeyType = "PGP PUBLIC KEY BLOCK"
-
-// PrivateKeyType is the armor type for a PGP private key.
-var PrivateKeyType = "PGP PRIVATE KEY BLOCK"
-
-// An Entity represents the components of an OpenPGP key: a primary public key
-// (which must be a signing key), one or more identities claimed by that key,
-// and zero or more subkeys, which may be encryption keys.
-type Entity struct {
- PrimaryKey *packet.PublicKey
- PrivateKey *packet.PrivateKey
- Identities map[string]*Identity // indexed by Identity.Name
- Revocations []*packet.Signature
- Subkeys []Subkey
-}
-
-// An Identity represents an identity claimed by an Entity and zero or more
-// assertions by other entities about that claim.
-type Identity struct {
-	Name          string // by convention, has the form "Full Name (comment) <email@example.com>"
- UserId *packet.UserId
- SelfSignature *packet.Signature
- Revocations []*packet.Signature
- Signatures []*packet.Signature // all (potentially unverified) self-signatures, revocations, and third-party signatures
-}
-
-// A Subkey is an additional public key in an Entity. Subkeys can be used for
-// encryption.
-type Subkey struct {
- PublicKey *packet.PublicKey
- PrivateKey *packet.PrivateKey
- Sig *packet.Signature
- Revocations []*packet.Signature
-}
-
-// A Key identifies a specific public key in an Entity. This is either the
-// Entity's primary key or a subkey.
-type Key struct {
- Entity *Entity
- PublicKey *packet.PublicKey
- PrivateKey *packet.PrivateKey
- SelfSignature *packet.Signature
- Revocations []*packet.Signature
-}
-
-// A KeyRing provides access to public and private keys.
-type KeyRing interface {
- // KeysById returns the set of keys that have the given key id.
- KeysById(id uint64) []Key
-	// KeysByIdUsage returns the set of keys with the given id
- // that also meet the key usage given by requiredUsage.
- // The requiredUsage is expressed as the bitwise-OR of
- // packet.KeyFlag* values.
- KeysByIdUsage(id uint64, requiredUsage byte) []Key
- // DecryptionKeys returns all private keys that are valid for
- // decryption.
- DecryptionKeys() []Key
-}
-
-// PrimaryIdentity returns an Identity, preferring non-revoked identities,
-// identities marked as primary, or the latest-created identity, in that order.
-func (e *Entity) PrimaryIdentity() *Identity {
- var primaryIdentity *Identity
- for _, ident := range e.Identities {
- if shouldPreferIdentity(primaryIdentity, ident) {
- primaryIdentity = ident
- }
- }
- return primaryIdentity
-}
-
-func shouldPreferIdentity(existingId, potentialNewId *Identity) bool {
- if existingId == nil {
- return true
- }
-
- if len(existingId.Revocations) > len(potentialNewId.Revocations) {
- return true
- }
-
- if len(existingId.Revocations) < len(potentialNewId.Revocations) {
- return false
- }
-
- if existingId.SelfSignature == nil {
- return true
- }
-
- if existingId.SelfSignature.IsPrimaryId != nil && *existingId.SelfSignature.IsPrimaryId &&
- !(potentialNewId.SelfSignature.IsPrimaryId != nil && *potentialNewId.SelfSignature.IsPrimaryId) {
- return false
- }
-
- if !(existingId.SelfSignature.IsPrimaryId != nil && *existingId.SelfSignature.IsPrimaryId) &&
- potentialNewId.SelfSignature.IsPrimaryId != nil && *potentialNewId.SelfSignature.IsPrimaryId {
- return true
- }
-
- return potentialNewId.SelfSignature.CreationTime.After(existingId.SelfSignature.CreationTime)
-}
-
-// EncryptionKey returns the best candidate Key for encrypting a message to the
-// given Entity.
-func (e *Entity) EncryptionKey(now time.Time) (Key, bool) {
- // Fail to find any encryption key if the...
- i := e.PrimaryIdentity()
- if e.PrimaryKey.KeyExpired(i.SelfSignature, now) || // primary key has expired
- i.SelfSignature == nil || // user ID has no self-signature
- i.SelfSignature.SigExpired(now) || // user ID self-signature has expired
- e.Revoked(now) || // primary key has been revoked
- i.Revoked(now) { // user ID has been revoked
- return Key{}, false
- }
-
- // Iterate the keys to find the newest, unexpired one
- candidateSubkey := -1
- var maxTime time.Time
- for i, subkey := range e.Subkeys {
- if subkey.Sig.FlagsValid &&
- subkey.Sig.FlagEncryptCommunications &&
- subkey.PublicKey.PubKeyAlgo.CanEncrypt() &&
- !subkey.PublicKey.KeyExpired(subkey.Sig, now) &&
- !subkey.Sig.SigExpired(now) &&
- !subkey.Revoked(now) &&
- (maxTime.IsZero() || subkey.Sig.CreationTime.After(maxTime)) {
- candidateSubkey = i
- maxTime = subkey.Sig.CreationTime
- }
- }
-
- if candidateSubkey != -1 {
- subkey := e.Subkeys[candidateSubkey]
- return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig, subkey.Revocations}, true
- }
-
- // If we don't have any candidate subkeys for encryption and
- // the primary key doesn't have any usage metadata then we
- // assume that the primary key is ok. Or, if the primary key is
- // marked as ok to encrypt with, then we can obviously use it.
-	if (!i.SelfSignature.FlagsValid || i.SelfSignature.FlagEncryptCommunications) &&
-		e.PrimaryKey.PubKeyAlgo.CanEncrypt() {
- return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature, e.Revocations}, true
- }
-
- return Key{}, false
-}
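
A hedged usage sketch (editorial; the helper name is hypothetical) showing how a caller resolves an encryption key, and how the lookup fails when the primary key or primary user ID is expired or revoked:

```go
import (
	"fmt"
	"time"
)

// pickEncryptionKey resolves the entity's current encryption key.
func pickEncryptionKey(e *Entity) (Key, error) {
	key, ok := e.EncryptionKey(time.Now())
	if !ok {
		return Key{}, fmt.Errorf("no valid encryption key for %X", e.PrimaryKey.KeyId)
	}
	return key, nil
}
```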
-
-// CertificationKey returns the best candidate Key for certifying a key with this
-// Entity.
-func (e *Entity) CertificationKey(now time.Time) (Key, bool) {
- return e.CertificationKeyById(now, 0)
-}
-
-// CertificationKeyById returns the Key for key certification with this
-// Entity and keyID.
-func (e *Entity) CertificationKeyById(now time.Time, id uint64) (Key, bool) {
- return e.signingKeyByIdUsage(now, id, packet.KeyFlagCertify)
-}
-
-// SigningKey returns the best candidate Key for signing a message with this
-// Entity.
-func (e *Entity) SigningKey(now time.Time) (Key, bool) {
- return e.SigningKeyById(now, 0)
-}
-
-// SigningKeyById returns the Key for signing a message with this
-// Entity and keyID.
-func (e *Entity) SigningKeyById(now time.Time, id uint64) (Key, bool) {
- return e.signingKeyByIdUsage(now, id, packet.KeyFlagSign)
-}
-
-func (e *Entity) signingKeyByIdUsage(now time.Time, id uint64, flags int) (Key, bool) {
- // Fail to find any signing key if the...
- i := e.PrimaryIdentity()
-	if i.SelfSignature == nil || // user ID has no self-signature
-		e.PrimaryKey.KeyExpired(i.SelfSignature, now) || // primary key has expired
-		i.SelfSignature.SigExpired(now) || // user ID self-signature has expired
- e.Revoked(now) || // primary key has been revoked
- i.Revoked(now) { // user ID has been revoked
- return Key{}, false
- }
-
- // Iterate the keys to find the newest, unexpired one
- candidateSubkey := -1
- var maxTime time.Time
- for idx, subkey := range e.Subkeys {
- if subkey.Sig.FlagsValid &&
- (flags & packet.KeyFlagCertify == 0 || subkey.Sig.FlagCertify) &&
- (flags & packet.KeyFlagSign == 0 || subkey.Sig.FlagSign) &&
- subkey.PublicKey.PubKeyAlgo.CanSign() &&
- !subkey.PublicKey.KeyExpired(subkey.Sig, now) &&
- !subkey.Sig.SigExpired(now) &&
- !subkey.Revoked(now) &&
- (maxTime.IsZero() || subkey.Sig.CreationTime.After(maxTime)) &&
- (id == 0 || subkey.PublicKey.KeyId == id) {
- candidateSubkey = idx
- maxTime = subkey.Sig.CreationTime
- }
- }
-
- if candidateSubkey != -1 {
- subkey := e.Subkeys[candidateSubkey]
- return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig, subkey.Revocations}, true
- }
-
- // If we have no candidate subkey then we assume that it's ok to sign
- // with the primary key. Or, if the primary key is marked as ok to
- // sign with, then we can use it.
-	if (!i.SelfSignature.FlagsValid ||
-		((flags & packet.KeyFlagCertify == 0 || i.SelfSignature.FlagCertify) &&
-			(flags & packet.KeyFlagSign == 0 || i.SelfSignature.FlagSign))) &&
-		e.PrimaryKey.PubKeyAlgo.CanSign() &&
-		(id == 0 || e.PrimaryKey.KeyId == id) {
- return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature, e.Revocations}, true
- }
-
-	// No keys with a valid signing flag, or no keys matched the given id
- return Key{}, false
-}
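
The selection logic above backs SigningKey, SigningKeyById, and the certification variants. A short sketch (editorial; the helper name is hypothetical) that additionally checks the private half is present and decrypted before signing:

```go
import "time"

// signingKeyForUse resolves a signing key whose private half is usable.
func signingKeyForUse(e *Entity) (Key, bool) {
	key, ok := e.SigningKey(time.Now())
	if !ok || key.PrivateKey == nil || key.PrivateKey.Encrypted {
		return Key{}, false
	}
	return key, true
}
```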
-
-func revoked(revocations []*packet.Signature, now time.Time) bool {
- for _, revocation := range revocations {
- if revocation.RevocationReason != nil && *revocation.RevocationReason == packet.KeyCompromised {
- // If the key is compromised, the key is considered revoked even before the revocation date.
- return true
- }
- if !revocation.SigExpired(now) {
- return true
- }
- }
- return false
-}
-
-// Revoked returns whether the entity has any direct key revocation signatures.
-// Note that third-party revocation signatures are not supported.
-// Note also that Identity and Subkey revocation should be checked separately.
-func (e *Entity) Revoked(now time.Time) bool {
- return revoked(e.Revocations, now)
-}
-
-// Revoked returns whether the identity has been revoked by a self-signature.
-// Note that third-party revocation signatures are not supported.
-func (i *Identity) Revoked(now time.Time) bool {
- return revoked(i.Revocations, now)
-}
-
-// Revoked returns whether the subkey has been revoked by a self-signature.
-// Note that third-party revocation signatures are not supported.
-func (s *Subkey) Revoked(now time.Time) bool {
- return revoked(s.Revocations, now)
-}
-
-// Revoked returns whether the key or subkey has been revoked by a self-signature.
-// Note that third-party revocation signatures are not supported.
-// Note also that Identity revocation should be checked separately.
-// Normally, it's not necessary to call this function, except on keys returned by
-// KeysById or KeysByIdUsage.
-func (key *Key) Revoked(now time.Time) bool {
- return revoked(key.Revocations, now)
-}
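
Since KeysById and KeysByIdUsage do not check revocations themselves, callers are expected to filter their results through Key.Revoked. A minimal sketch (editorial; the wrapper name is hypothetical):

```go
import "time"

// usableKeysById returns the keys matching id that are not revoked at now.
func usableKeysById(kr KeyRing, id uint64, now time.Time) []Key {
	var usable []Key
	for _, k := range kr.KeysById(id) {
		if !k.Revoked(now) {
			usable = append(usable, k)
		}
	}
	return usable
}
```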
-
-// An EntityList contains one or more Entities.
-type EntityList []*Entity
-
-// KeysById returns the set of keys that have the given key id.
-func (el EntityList) KeysById(id uint64) (keys []Key) {
- for _, e := range el {
- if e.PrimaryKey.KeyId == id {
- ident := e.PrimaryIdentity()
- selfSig := ident.SelfSignature
- keys = append(keys, Key{e, e.PrimaryKey, e.PrivateKey, selfSig, e.Revocations})
- }
-
- for _, subKey := range e.Subkeys {
- if subKey.PublicKey.KeyId == id {
- keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig, subKey.Revocations})
- }
- }
- }
- return
-}
-
-// KeysByIdUsage returns the set of keys with the given id that also meet
-// the key usage given by requiredUsage. The requiredUsage is expressed as
-// the bitwise-OR of packet.KeyFlag* values.
-func (el EntityList) KeysByIdUsage(id uint64, requiredUsage byte) (keys []Key) {
- for _, key := range el.KeysById(id) {
- if key.SelfSignature != nil && key.SelfSignature.FlagsValid && requiredUsage != 0 {
- var usage byte
- if key.SelfSignature.FlagCertify {
- usage |= packet.KeyFlagCertify
- }
- if key.SelfSignature.FlagSign {
- usage |= packet.KeyFlagSign
- }
- if key.SelfSignature.FlagEncryptCommunications {
- usage |= packet.KeyFlagEncryptCommunications
- }
- if key.SelfSignature.FlagEncryptStorage {
- usage |= packet.KeyFlagEncryptStorage
- }
- if usage&requiredUsage != requiredUsage {
- continue
- }
- }
-
- keys = append(keys, key)
- }
- return
-}
-
-// DecryptionKeys returns all private keys that are valid for decryption.
-func (el EntityList) DecryptionKeys() (keys []Key) {
- for _, e := range el {
- for _, subKey := range e.Subkeys {
- if subKey.PrivateKey != nil && (!subKey.Sig.FlagsValid || subKey.Sig.FlagEncryptStorage || subKey.Sig.FlagEncryptCommunications) {
- keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig, subKey.Revocations})
- }
- }
- }
- return
-}
-
-// ReadArmoredKeyRing reads one or more public/private keys from an armored keyring.
-func ReadArmoredKeyRing(r io.Reader) (EntityList, error) {
- block, err := armor.Decode(r)
- if err == io.EOF {
- return nil, errors.InvalidArgumentError("no armored data found")
- }
- if err != nil {
- return nil, err
- }
- if block.Type != PublicKeyType && block.Type != PrivateKeyType {
- return nil, errors.InvalidArgumentError("expected public or private key block, got: " + block.Type)
- }
-
- return ReadKeyRing(block.Body)
-}
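
A brief usage sketch (editorial; the helper name is hypothetical) parsing armored text such as the test constants removed later in this diff:

```go
import "strings"

// parseArmored reads a keyring from an ASCII-armored string.
func parseArmored(armored string) (EntityList, error) {
	return ReadArmoredKeyRing(strings.NewReader(armored))
}
```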
-
-// ReadKeyRing reads one or more public/private keys. Unsupported keys are
-// ignored as long as at least a single valid key is found.
-func ReadKeyRing(r io.Reader) (el EntityList, err error) {
- packets := packet.NewReader(r)
- var lastUnsupportedError error
-
- for {
- var e *Entity
- e, err = ReadEntity(packets)
- if err != nil {
- // TODO: warn about skipped unsupported/unreadable keys
- if _, ok := err.(errors.UnsupportedError); ok {
- lastUnsupportedError = err
- err = readToNextPublicKey(packets)
- } else if _, ok := err.(errors.StructuralError); ok {
- // Skip unreadable, badly-formatted keys
- lastUnsupportedError = err
- err = readToNextPublicKey(packets)
- }
- if err == io.EOF {
- err = nil
- break
- }
- if err != nil {
- el = nil
- break
- }
- } else {
- el = append(el, e)
- }
- }
-
- if len(el) == 0 && err == nil {
- err = lastUnsupportedError
- }
- return
-}
-
-// readToNextPublicKey reads packets until the start of the next entity, leaving
-// the first packet of that entity in the Reader.
-func readToNextPublicKey(packets *packet.Reader) (err error) {
- var p packet.Packet
- for {
- p, err = packets.Next()
- if err == io.EOF {
- return
- } else if err != nil {
- if _, ok := err.(errors.UnsupportedError); ok {
- err = nil
- continue
- }
- return
- }
-
- if pk, ok := p.(*packet.PublicKey); ok && !pk.IsSubkey {
- packets.Unread(p)
- return
- }
- }
-}
-
-// ReadEntity reads an entity (public key, identities, subkeys etc) from the
-// given Reader.
-func ReadEntity(packets *packet.Reader) (*Entity, error) {
- e := new(Entity)
- e.Identities = make(map[string]*Identity)
-
- p, err := packets.Next()
- if err != nil {
- return nil, err
- }
-
- var ok bool
- if e.PrimaryKey, ok = p.(*packet.PublicKey); !ok {
- if e.PrivateKey, ok = p.(*packet.PrivateKey); !ok {
- packets.Unread(p)
- return nil, errors.StructuralError("first packet was not a public/private key")
- }
- e.PrimaryKey = &e.PrivateKey.PublicKey
- }
-
- if !e.PrimaryKey.PubKeyAlgo.CanSign() {
- return nil, errors.StructuralError("primary key cannot be used for signatures")
- }
-
- var revocations []*packet.Signature
-EachPacket:
- for {
- p, err := packets.Next()
- if err == io.EOF {
- break
- } else if err != nil {
- return nil, err
- }
-
- switch pkt := p.(type) {
- case *packet.UserId:
- if err := addUserID(e, packets, pkt); err != nil {
- return nil, err
- }
- case *packet.Signature:
- if pkt.SigType == packet.SigTypeKeyRevocation {
- revocations = append(revocations, pkt)
- } else if pkt.SigType == packet.SigTypeDirectSignature {
- // TODO: RFC4880 5.2.1 permits signatures
- // directly on keys (eg. to bind additional
- // revocation keys).
- }
-			// Otherwise, ignore the signature, as it does not follow
-			// anything we could attach it to.
- case *packet.PrivateKey:
-			if !pkt.IsSubkey {
- packets.Unread(p)
- break EachPacket
- }
- err = addSubkey(e, packets, &pkt.PublicKey, pkt)
- if err != nil {
- return nil, err
- }
- case *packet.PublicKey:
-			if !pkt.IsSubkey {
- packets.Unread(p)
- break EachPacket
- }
- err = addSubkey(e, packets, pkt, nil)
- if err != nil {
- return nil, err
- }
- default:
- // we ignore unknown packets
- }
- }
-
- if len(e.Identities) == 0 {
- return nil, errors.StructuralError("entity without any identities")
- }
-
- for _, revocation := range revocations {
- err = e.PrimaryKey.VerifyRevocationSignature(revocation)
- if err == nil {
- e.Revocations = append(e.Revocations, revocation)
- } else {
- // TODO: RFC 4880 5.2.3.15 defines revocation keys.
- return nil, errors.StructuralError("revocation signature signed by alternate key")
- }
- }
-
- return e, nil
-}
-
-func addUserID(e *Entity, packets *packet.Reader, pkt *packet.UserId) error {
-	// Make a new Identity object that we might wind up throwing away.
- // We'll only add it if we get a valid self-signature over this
- // userID.
- identity := new(Identity)
- identity.Name = pkt.Id
- identity.UserId = pkt
-
- for {
- p, err := packets.Next()
- if err == io.EOF {
- break
- } else if err != nil {
- return err
- }
-
- sig, ok := p.(*packet.Signature)
- if !ok {
- packets.Unread(p)
- break
- }
-
- if sig.SigType != packet.SigTypeGenericCert &&
- sig.SigType != packet.SigTypePersonaCert &&
- sig.SigType != packet.SigTypeCasualCert &&
- sig.SigType != packet.SigTypePositiveCert &&
- sig.SigType != packet.SigTypeCertificationRevocation {
- return errors.StructuralError("user ID signature with wrong type")
- }
-
- if sig.CheckKeyIdOrFingerprint(e.PrimaryKey) {
- if err = e.PrimaryKey.VerifyUserIdSignature(pkt.Id, e.PrimaryKey, sig); err != nil {
- return errors.StructuralError("user ID self-signature invalid: " + err.Error())
- }
- if sig.SigType == packet.SigTypeCertificationRevocation {
- identity.Revocations = append(identity.Revocations, sig)
- } else if identity.SelfSignature == nil || sig.CreationTime.After(identity.SelfSignature.CreationTime) {
- identity.SelfSignature = sig
- }
- identity.Signatures = append(identity.Signatures, sig)
- e.Identities[pkt.Id] = identity
- } else {
- identity.Signatures = append(identity.Signatures, sig)
- }
- }
-
- return nil
-}
-
-func addSubkey(e *Entity, packets *packet.Reader, pub *packet.PublicKey, priv *packet.PrivateKey) error {
- var subKey Subkey
- subKey.PublicKey = pub
- subKey.PrivateKey = priv
-
- for {
- p, err := packets.Next()
- if err == io.EOF {
- break
- } else if err != nil {
- return errors.StructuralError("subkey signature invalid: " + err.Error())
- }
-
- sig, ok := p.(*packet.Signature)
- if !ok {
- packets.Unread(p)
- break
- }
-
- if sig.SigType != packet.SigTypeSubkeyBinding && sig.SigType != packet.SigTypeSubkeyRevocation {
- return errors.StructuralError("subkey signature with wrong type")
- }
-
- if err := e.PrimaryKey.VerifyKeySignature(subKey.PublicKey, sig); err != nil {
- return errors.StructuralError("subkey signature invalid: " + err.Error())
- }
-
- switch sig.SigType {
- case packet.SigTypeSubkeyRevocation:
- subKey.Revocations = append(subKey.Revocations, sig)
- case packet.SigTypeSubkeyBinding:
- if subKey.Sig == nil || sig.CreationTime.After(subKey.Sig.CreationTime) {
- subKey.Sig = sig
- }
- }
- }
-
- if subKey.Sig == nil {
- return errors.StructuralError("subkey packet not followed by signature")
- }
-
- e.Subkeys = append(e.Subkeys, subKey)
-
- return nil
-}
-
-// SerializePrivate serializes an Entity, including private key material, but
-// excluding signatures from other entities, to the given Writer.
-// Identities and subkeys are re-signed in case they changed since NewEntity.
-// If config is nil, sensible defaults will be used.
-func (e *Entity) SerializePrivate(w io.Writer, config *packet.Config) (err error) {
- if e.PrivateKey.Dummy() {
- return errors.ErrDummyPrivateKey("dummy private key cannot re-sign identities")
- }
- return e.serializePrivate(w, config, true)
-}
-
-// SerializePrivateWithoutSigning serializes an Entity, including private key
-// material, but excluding signatures from other entities, to the given Writer.
-// Self-signatures of identities and subkeys are not re-signed. This is useful
-// when serializing GNU dummy keys, among other things.
-// If config is nil, sensible defaults will be used.
-func (e *Entity) SerializePrivateWithoutSigning(w io.Writer, config *packet.Config) (err error) {
- return e.serializePrivate(w, config, false)
-}
-
-func (e *Entity) serializePrivate(w io.Writer, config *packet.Config, reSign bool) (err error) {
- if e.PrivateKey == nil {
- return goerrors.New("openpgp: private key is missing")
- }
- err = e.PrivateKey.Serialize(w)
- if err != nil {
- return
- }
- for _, revocation := range e.Revocations {
- err := revocation.Serialize(w)
- if err != nil {
- return err
- }
- }
- for _, ident := range e.Identities {
- err = ident.UserId.Serialize(w)
- if err != nil {
- return
- }
- if reSign {
- if ident.SelfSignature == nil {
- return goerrors.New("openpgp: can't re-sign identity without valid self-signature")
- }
- err = ident.SelfSignature.SignUserId(ident.UserId.Id, e.PrimaryKey, e.PrivateKey, config)
- if err != nil {
- return
- }
- }
- for _, sig := range ident.Signatures {
- err = sig.Serialize(w)
- if err != nil {
- return err
- }
- }
- }
- for _, subkey := range e.Subkeys {
- err = subkey.PrivateKey.Serialize(w)
- if err != nil {
- return
- }
- if reSign {
- err = subkey.Sig.SignKey(subkey.PublicKey, e.PrivateKey, config)
- if err != nil {
- return
- }
- if subkey.Sig.EmbeddedSignature != nil {
- err = subkey.Sig.EmbeddedSignature.CrossSignKey(subkey.PublicKey, e.PrimaryKey,
- subkey.PrivateKey, config)
- if err != nil {
- return
- }
- }
- }
- for _, revocation := range subkey.Revocations {
- err := revocation.Serialize(w)
- if err != nil {
- return err
- }
- }
- err = subkey.Sig.Serialize(w)
- if err != nil {
- return
- }
- }
- return nil
-}
-
-// Serialize writes the public part of the given Entity to w, including
-// signatures from other entities. No private key material will be output.
-func (e *Entity) Serialize(w io.Writer) error {
- err := e.PrimaryKey.Serialize(w)
- if err != nil {
- return err
- }
- for _, revocation := range e.Revocations {
- err := revocation.Serialize(w)
- if err != nil {
- return err
- }
- }
- for _, ident := range e.Identities {
- err = ident.UserId.Serialize(w)
- if err != nil {
- return err
- }
- for _, sig := range ident.Signatures {
- err = sig.Serialize(w)
- if err != nil {
- return err
- }
- }
- }
- for _, subkey := range e.Subkeys {
- err = subkey.PublicKey.Serialize(w)
- if err != nil {
- return err
- }
- for _, revocation := range subkey.Revocations {
- err := revocation.Serialize(w)
- if err != nil {
- return err
- }
- }
- err = subkey.Sig.Serialize(w)
- if err != nil {
- return err
- }
- }
- return nil
-}
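
Serialize emits raw binary packets; pairing it with the armor package produces the "-----BEGIN PGP PUBLIC KEY BLOCK-----" form that ReadArmoredKeyRing accepts. A hedged sketch (editorial; the helper name is hypothetical):

```go
import (
	"bytes"

	"github.com/ProtonMail/go-crypto/openpgp/armor"
)

// armorPublic writes the public part of an entity as an armored key block.
func armorPublic(e *Entity) (string, error) {
	var buf bytes.Buffer
	w, err := armor.Encode(&buf, PublicKeyType, nil)
	if err != nil {
		return "", err
	}
	if err := e.Serialize(w); err != nil {
		return "", err
	}
	if err := w.Close(); err != nil {
		return "", err
	}
	return buf.String(), nil
}
```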
-
-// SignIdentity adds a signature to e, from signer, attesting that identity is
-// associated with e. The provided identity must already be an element of
-// e.Identities and the private key of signer must have been decrypted if
-// necessary.
-// If config is nil, sensible defaults will be used.
-func (e *Entity) SignIdentity(identity string, signer *Entity, config *packet.Config) error {
- certificationKey, ok := signer.CertificationKey(config.Now())
- if !ok {
- return errors.InvalidArgumentError("no valid certification key found")
- }
-
- if certificationKey.PrivateKey.Encrypted {
- return errors.InvalidArgumentError("signing Entity's private key must be decrypted")
- }
-
- ident, ok := e.Identities[identity]
- if !ok {
- return errors.InvalidArgumentError("given identity string not found in Entity")
- }
-
- sig := createSignaturePacket(certificationKey.PublicKey, packet.SigTypeGenericCert, config)
-
- signingUserID := config.SigningUserId()
- if signingUserID != "" {
- if _, ok := signer.Identities[signingUserID]; !ok {
- return errors.InvalidArgumentError("signer identity string not found in signer Entity")
- }
- sig.SignerUserId = &signingUserID
- }
-
- if err := sig.SignUserId(identity, e.PrimaryKey, certificationKey.PrivateKey, config); err != nil {
- return err
- }
- ident.Signatures = append(ident.Signatures, sig)
- return nil
-}
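
A minimal call sketch (editorial; the helper name is hypothetical, both entities assumed parsed and the signer's key decrypted), relying on the documented nil-config defaults:

```go
// certifyUserID has signer attest to one of target's user IDs.
func certifyUserID(target, signer *Entity, userID string) error {
	return target.SignIdentity(userID, signer, nil)
}
```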
-
-// RevokeKey generates a key revocation signature (packet.SigTypeKeyRevocation) with the
-// specified reason code and text (RFC4880 section-5.2.3.23).
-// If config is nil, sensible defaults will be used.
-func (e *Entity) RevokeKey(reason packet.ReasonForRevocation, reasonText string, config *packet.Config) error {
- revSig := createSignaturePacket(e.PrimaryKey, packet.SigTypeKeyRevocation, config)
- revSig.RevocationReason = &reason
- revSig.RevocationReasonText = reasonText
-
- if err := revSig.RevokeKey(e.PrimaryKey, e.PrivateKey, config); err != nil {
- return err
- }
- e.Revocations = append(e.Revocations, revSig)
- return nil
-}
-
-// RevokeSubkey generates a subkey revocation signature (packet.SigTypeSubkeyRevocation) for
-// a subkey with the specified reason code and text (RFC4880 section-5.2.3.23).
-// If config is nil, sensible defaults will be used.
-func (e *Entity) RevokeSubkey(sk *Subkey, reason packet.ReasonForRevocation, reasonText string, config *packet.Config) error {
- if err := e.PrimaryKey.VerifyKeySignature(sk.PublicKey, sk.Sig); err != nil {
- return errors.InvalidArgumentError("given subkey is not associated with this key")
- }
-
- revSig := createSignaturePacket(e.PrimaryKey, packet.SigTypeSubkeyRevocation, config)
- revSig.RevocationReason = &reason
- revSig.RevocationReasonText = reasonText
-
- if err := revSig.RevokeSubkey(sk.PublicKey, e.PrivateKey, config); err != nil {
- return err
- }
-
- sk.Revocations = append(sk.Revocations, revSig)
- return nil
-}
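
A hedged sketch (editorial; the helper name and reason text are hypothetical) tying this back to the revoked() helper above: a KeyCompromised reason makes the key count as revoked at any time, not only after the revocation signature's creation:

```go
import "github.com/ProtonMail/go-crypto/openpgp/packet"

// revokeCompromised marks the entity's primary key as compromised.
func revokeCompromised(e *Entity) error {
	return e.RevokeKey(packet.KeyCompromised, "key material disclosed", nil)
}
```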
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/keys_test_data.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/keys_test_data.go
deleted file mode 100644
index 108fd096..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/keys_test_data.go
+++ /dev/null
@@ -1,538 +0,0 @@
-package openpgp
-
-const expiringKeyHex = "c6c04d0451d0c680010800abbb021fd03ffc4e96618901180c3fdcb060ee69eeead97b91256d11420d80b5f1b51930248044130bd300605cf8a05b7a40d3d8cfb0a910be2e3db50dcd50a9c54064c2a5550801daa834ff4480b33d3d3ca495ff8a4e84a886977d17d998f881241a874083d8b995beab555b6d22b8a4817ab17ac3e7304f7d4d2c05c495fb2218348d3bc13651db1d92732e368a9dd7dcefa6eddff30b94706a9aaee47e9d39321460b740c59c6fc3c2fd8ab6c0fb868cb87c0051f0321301fe0f0e1820b15e7fb7063395769b525005c7e30a7ce85984f5cac00504e7b4fdc45d74958de8388436fd5c7ba9ea121f1c851b5911dd1b47a14d81a09e92ef37721e2325b6790011010001cd00c2c07b041001080025050251d0c680050900278d00060b09070803020415080a0203160201021901021b03021e01000a0910e7b484133a890a35ae4b0800a1beb82e7f28eaf5273d6af9d3391314f6280b2b624eaca2851f89a9ebcaf80ac589ebd509f168bc4322106ca2e2ce77a76e071a3c7444787d65216b5f05e82c77928860b92aace3b7d0327db59492f422eb9dfab7249266d37429870b091a98aba8724c2259ebf8f85093f21255eafa75aa841e31d94f2ac891b9755fed455e539044ee69fc47950b80e003fc9f298d695660f28329eaa38037c367efde1727458e514faf990d439a21461b719edaddf9296d3d0647b43ca56cb8dbf63b4fcf8b9968e7928c463470fab3b98e44d0d95645062f94b2d04fe56bd52822b71934db8ce845622c40b92fcbe765a142e7f38b61a6aa9606c8e8858dcd3b6eb1894acec04d0451d1f06b01080088bea67444e1789390e7c0335c86775502d58ec783d99c8ef4e06de235ed3dd4b0467f6f358d818c7d8989d43ec6d69fcbc8c32632d5a1b605e3fa8e41d695fcdcaa535936cd0157f9040dce362519803b908eafe838bb13216c885c6f93e9e8d5745607f0d062322085d6bdc760969149a8ff8dd9f5c18d9bfe2e6f63a06e17694cf1f67587c6fb70e9aebf90ffc528ca3b615ac7c9d4a21ea4f7c06f2e98fbbd90a859b8608bf9ea638e3a54289ce44c283110d0c45fa458de6251cd6e7baf71f80f12c8978340490fd90c92b81736ae902ed958e478dceae2835953d189c45d182aff02ea2be61b81d8e94430f041d638647b43e2fcb45fd512fbf5068b810011010001c2c06504180108000f050251d1f06b050900081095021b0c000a0910e7b484133a890a35e63407fe2ec88d6d1e6c9ce7553ece0cb2524747217bad29f251d33df84599ffcc900141a355abd62126800744068a5e05dc167056aa9205273dc7765a2ed49db15c2a83b8d6e6429c902136f1e12229086c1c10c0053242c2a4ae1930db58163387a48cad64607ff2153c320e42843dec28e3fce90e7399d63ac0affa2fee1f0adc0953c89eb3f46ef1d6c04328ed13b491669d5120a3782e3ffb7c69575fb77eebd108794f4dda9d34be2bae57e8e59ec8ebfda2f6f06104b2321be408ea146e2db482b00c5055c8618de36ac9716f80da2617e225556d0fce61b01c8cea2d1e0ea982c31711060ca370f2739366e1e708f38405d784b49d16a26cf62d152eae734327cec04d0451d1f07b010800d5af91c5e7c2fd8951c8d254eab0c97cdcb66822f868b79b78c366255059a68fd74ebca9adb9b970cd9e586690e6e0756705432306878c897b10a4b4ca0005966f99ac8fa4e6f9caf54bf8e53844544beee9872a7ac64c119cf1393d96e674254b661f61ee975633d0e8a8672531edb6bb8e211204e7754a9efa802342118eee850beea742bac95a3f706cc2024cf6037a308bb68162b2f53b9a6346a96e6d31871a2456186e24a1c7a82b82ac04afdfd57cd7fb9ba77a9c760d40b76a170f7be525e5fb6a9848cc726e806187710d9b190387df28700f321f988a392899f93815cc937f309129eb94d5299c5547cb2c085898e6639496e70d746c9d3fb9881d0011010001c2c06504180108000f050251d1f07b050900266305021b0c000a0910e7b484133a890a35bff207fd10dfe8c4a6ea1dd30568012b6fd6891a763c87ad0f7a1d112aad9e8e3239378a3b85588c235865bac2e614348cb4f216d7217f53b3ef48c192e0a4d31d64d7bfa5faccf21155965fa156e887056db644a05ad08a85cc6152d1377d9e37b46f4ff462bbe68ace2dc586ef90070314576c985d8037c2ba63f0a7dc17a62e15bd77e88bc61d9d00858979709f12304264a4cf4225c5cf86f12c8e19486cb9cdcc69f18f027e5f16f4ca8b50e28b3115eaff3a345acd21f624aef81f6ede515c1b55b26b84c1e32264754eab672d5489b287e7277ea855e0a5ff2aa9e8b8c76d579a964ec225255f4d57bf66639ccb34b64798846943e162a41096a7002ca21c7f56"
-const subkeyUsageHex = "988d04533a52bc010400d26af43085558f65b9e7dbc90cb9238015259aed5e954637adcfa2181548b2d0b60c65f1f42ec5081cbf1bc0a8aa4900acfb77070837c58f26012fbce297d70afe96e759ad63531f0037538e70dbf8e384569b9720d99d8eb39d8d0a2947233ed242436cb6ac7dfe74123354b3d0119b5c235d3dd9c9d6c004f8ffaf67ad8583001101000188b7041f010200210502533b8552170c8001ce094aa433f7040bb2ddf0be3893cb843d0fe70c020700000a0910a42704b92866382aa98404009d63d916a27543da4221c60087c33f1c44bec9998c5438018ed370cca4962876c748e94b73eb39c58eb698063f3fd6346d58dd2a11c0247934c4a9d71f24754f7468f96fb24c3e791dd2392b62f626148ad724189498cbf993db2df7c0cdc2d677c35da0f16cb16c9ce7c33b4de65a4a91b1d21a130ae9cc26067718910ef8e2b417556d627261203c756d627261407379642e65642e61753e88b80413010200220502533a52bc021b03060b090807030206150802090a0b0416020301021e01021780000a0910a42704b92866382a47840400c0c2bd04f5fca586de408b395b3c280a278259c93eaaa8b79a53b97003f8ed502a8a00446dd9947fb462677e4fcac0dac2f0701847d15130aadb6cd9e0705ea0cf5f92f129136c7be21a718d46c8e641eb7f044f2adae573e11ae423a0a9ca51324f03a8a2f34b91fa40c3cc764bee4dccadedb54c768ba0469b683ea53f1c29b88d04533a52bc01040099c92a5d6f8b744224da27bc2369127c35269b58bec179de6bbc038f749344222f85a31933224f26b70243c4e4b2d242f0c4777eaef7b5502f9dad6d8bf3aaeb471210674b74de2d7078af497d55f5cdad97c7bedfbc1b41e8065a97c9c3d344b21fc81d27723af8e374bc595da26ea242dccb6ae497be26eea57e563ed517e90011010001889f0418010200090502533a52bc021b0c000a0910a42704b92866382afa1403ff70284c2de8a043ff51d8d29772602fa98009b7861c540535f874f2c230af8caf5638151a636b21f8255003997ccd29747fdd06777bb24f9593bd7d98a3e887689bf902f999915fcc94625ae487e5d13e6616f89090ebc4fdc7eb5cad8943e4056995bb61c6af37f8043016876a958ec7ebf39c43d20d53b7f546cfa83e8d2604b88d04533b8283010400c0b529316dbdf58b4c54461e7e669dc11c09eb7f73819f178ccd4177b9182b91d138605fcf1e463262fabefa73f94a52b5e15d1904635541c7ea540f07050ce0fb51b73e6f88644cec86e91107c957a114f69554548a85295d2b70bd0b203992f76eb5d493d86d9eabcaa7ef3fc7db7e458438db3fcdb0ca1cc97c638439a9170011010001889f0418010200090502533b8283021b0c000a0910a42704b92866382adc6d0400cfff6258485a21675adb7a811c3e19ebca18851533f75a7ba317950b9997fda8d1a4c8c76505c08c04b6c2cc31dc704d33da36a21273f2b388a1a706f7c3378b66d887197a525936ed9a69acb57fe7f718133da85ec742001c5d1864e9c6c8ea1b94f1c3759cebfd93b18606066c063a63be86085b7e37bdbc65f9a915bf084bb901a204533b85cd110400aed3d2c52af2b38b5b67904b0ef73d6dd7aef86adb770e2b153cd22489654dcc91730892087bb9856ae2d9f7ed1eb48f214243fe86bfe87b349ebd7c30e630e49c07b21fdabf78b7a95c8b7f969e97e3d33f2e074c63552ba64a2ded7badc05ce0ea2be6d53485f6900c7860c7aa76560376ce963d7271b9b54638a4028b573f00a0d8854bfcdb04986141568046202192263b9b67350400aaa1049dbc7943141ef590a70dcb028d730371d92ea4863de715f7f0f16d168bd3dc266c2450457d46dcbbf0b071547e5fbee7700a820c3750b236335d8d5848adb3c0da010e998908dfd93d961480084f3aea20b247034f8988eccb5546efaa35a92d0451df3aaf1aee5aa36a4c4d462c760ecd9cebcabfbe1412b1f21450f203fd126687cd486496e971a87fd9e1a8a765fe654baa219a6871ab97768596ab05c26c1aeea8f1a2c72395a58dbc12ef9640d2b95784e974a4d2d5a9b17c25fedacfe551bda52602de8f6d2e48443f5dd1a2a2a8e6a5e70ecdb88cd6e766ad9745c7ee91d78cc55c3d06536b49c3fee6c3d0b6ff0fb2bf13a314f57c953b8f4d93bf88e70418010200090502533b85cd021b0200520910a42704b92866382a47200419110200060502533b85cd000a091042ce2c64bc0ba99214b2009e26b26852c8b13b10c35768e40e78fbbb48bd084100a0c79d9ea0844fa5853dd3c85ff3ecae6f2c9dd6c557aa04008bbbc964cd65b9b8299d4ebf31f41cc7264b8cf33a00e82c5af022331fac79efc9563a822497ba012953cefe2629f1242fcdcb911dbb2315985bab060bfd58261ace3c654bdbbe2e8ed27a46e836490145c86dc7bae15c011f7e1ffc33730109b9338cd9f483e7cef3d2f396aab5bd80efb6646d7e778270ee99d934d187dd98"
-const revokedKeyHex = "988d045331ce82010400c4fdf7b40a5477f206e6ee278eaef888ca73bf9128a9eef9f2f1ddb8b7b71a4c07cfa241f028a04edb405e4d916c61d6beabc333813dc7b484d2b3c52ee233c6a79b1eea4e9cc51596ba9cd5ac5aeb9df62d86ea051055b79d03f8a4fa9f38386f5bd17529138f3325d46801514ea9047977e0829ed728e68636802796801be10011010001889f04200102000905025331d0e3021d03000a0910a401d9f09a34f7c042aa040086631196405b7e6af71026b88e98012eab44aa9849f6ef3fa930c7c9f23deaedba9db1538830f8652fb7648ec3fcade8dbcbf9eaf428e83c6cbcc272201bfe2fbb90d41963397a7c0637a1a9d9448ce695d9790db2dc95433ad7be19eb3de72dacf1d6db82c3644c13eae2a3d072b99bb341debba012c5ce4006a7d34a1f4b94b444526567205265766f6b657220283c52656727732022424d204261726973746122204b657920262530305c303e5c29203c72656740626d626172697374612e636f2e61753e88b704130102002205025331ce82021b03060b090807030206150802090a0b0416020301021e01021780000a0910a401d9f09a34f7c0019c03f75edfbeb6a73e7225ad3cc52724e2872e04260d7daf0d693c170d8c4b243b8767bc7785763533febc62ec2600c30603c433c095453ede59ff2fcabeb84ce32e0ed9d5cf15ffcbc816202b64370d4d77c1e9077d74e94a16fb4fa2e5bec23a56d7a73cf275f91691ae1801a976fcde09e981a2f6327ac27ea1fecf3185df0d56889c04100102000605025331cfb5000a0910fe9645554e8266b64b4303fc084075396674fb6f778d302ac07cef6bc0b5d07b66b2004c44aef711cbac79617ef06d836b4957522d8772dd94bf41a2f4ac8b1ee6d70c57503f837445a74765a076d07b829b8111fc2a918423ddb817ead7ca2a613ef0bfb9c6b3562aec6c3cf3c75ef3031d81d95f6563e4cdcc9960bcb386c5d757b104fcca5fe11fc709df884604101102000605025331cfe7000a09107b15a67f0b3ddc0317f6009e360beea58f29c1d963a22b962b80788c3fa6c84e009d148cfde6b351469b8eae91187eff07ad9d08fcaab88d045331ce820104009f25e20a42b904f3fa555530fe5c46737cf7bd076c35a2a0d22b11f7e0b61a69320b768f4a80fe13980ce380d1cfc4a0cd8fbe2d2e2ef85416668b77208baa65bf973fe8e500e78cc310d7c8705cdb34328bf80e24f0385fce5845c33bc7943cf6b11b02348a23da0bf6428e57c05135f2dc6bd7c1ce325d666d5a5fd2fd5e410011010001889f04180102000905025331ce82021b0c000a0910a401d9f09a34f7c0418003fe34feafcbeaef348a800a0d908a7a6809cc7304017d820f70f0474d5e23cb17e38b67dc6dca282c6ca00961f4ec9edf2738d0f087b1d81e4871ef08e1798010863afb4eac4c44a376cb343be929c5be66a78cfd4456ae9ec6a99d97f4e1c3ff3583351db2147a65c0acef5c003fb544ab3a2e2dc4d43646f58b811a6c3a369d1f"
-const revokedSubkeyHex = "988d04533121f6010400aefc803a3e4bb1a61c86e8a86d2726c6a43e0079e9f2713f1fa017e9854c83877f4aced8e331d675c67ea83ddab80aacbfa0b9040bb12d96f5a3d6be09455e2a76546cbd21677537db941cab710216b6d24ec277ee0bd65b910f416737ed120f6b93a9d3b306245c8cfd8394606fdb462e5cf43c551438d2864506c63367fc890011010001b41d416c696365203c616c69636540626d626172697374612e636f2e61753e88bb041301020025021b03060b090807030206150802090a0b0416020301021e01021780050253312798021901000a09104ef7e4beccde97f015a803ff5448437780f63263b0df8442a995e7f76c221351a51edd06f2063d8166cf3157aada4923dfc44aa0f2a6a4da5cf83b7fe722ba8ab416c976e77c6b5682e7f1069026673bd0de56ba06fd5d7a9f177607f277d9b55ff940a638c3e68525c67517e2b3d976899b93ca267f705b3e5efad7d61220e96b618a4497eab8d04403d23f8846041011020006050253312910000a09107b15a67f0b3ddc03d96e009f50b6365d86c4be5d5e9d0ea42d5e56f5794c617700a0ab274e19c2827780016d23417ce89e0a2c0d987d889c04100102000605025331cf7a000a0910a401d9f09a34f7c0ee970400aca292f213041c9f3b3fc49148cbda9d84afee6183c8dd6c5ff2600b29482db5fecd4303797be1ee6d544a20a858080fec43412061c9a71fae4039fd58013b4ae341273e6c66ad4c7cdd9e68245bedb260562e7b166f2461a1032f2b38c0e0e5715fb3d1656979e052b55ca827a76f872b78a9fdae64bc298170bfcebedc1271b41a416c696365203c616c696365407379646973702e6f722e61753e88b804130102002205025331278b021b03060b090807030206150802090a0b0416020301021e01021780000a09104ef7e4beccde97f06a7003fa03c3af68d272ebc1fa08aa72a03b02189c26496a2833d90450801c4e42c5b5f51ad96ce2d2c9cef4b7c02a6a2fcf1412d6a2d486098eb762f5010a201819c17fd2888aec8eda20c65a3b75744de7ee5cc8ac7bfc470cbe3cb982720405a27a3c6a8c229cfe36905f881b02ed5680f6a8f05866efb9d6c5844897e631deb949ca8846041011020006050253312910000a09107b15a67f0b3ddc0347bc009f7fa35db59147469eb6f2c5aaf6428accb138b22800a0caa2f5f0874bacc5909c652a57a31beda65eddd5889c04100102000605025331cf7a000a0910a401d9f09a34f7c0316403ff46f2a5c101256627f16384d34a38fb47a6c88ba60506843e532d91614339fccae5f884a5741e7582ffaf292ba38ee10a270a05f139bde3814b6a077e8cd2db0f105ebea2a83af70d385f13b507fac2ad93ff79d84950328bb86f3074745a8b7f9b64990fb142e2a12976e27e8d09a28dc5621f957ac49091116da410ac3cbde1b88d04533121f6010400cbd785b56905e4192e2fb62a720727d43c4fa487821203cf72138b884b78b701093243e1d8c92a0248a6c0203a5a88693da34af357499abacaf4b3309c640797d03093870a323b4b6f37865f6eaa2838148a67df4735d43a90ca87942554cdf1c4a751b1e75f9fd4ce4e97e278d6c1c7ed59d33441df7d084f3f02beb68896c70011010001889f0418010200090502533121f6021b0c000a09104ef7e4beccde97f0b98b03fc0a5ccf6a372995835a2f5da33b282a7d612c0ab2a97f59cf9fff73e9110981aac2858c41399afa29624a7fd8a0add11654e3d882c0fd199e161bdad65e5e2548f7b68a437ea64293db1246e3011cbb94dc1bcdeaf0f2539bd88ff16d95547144d97cead6a8c5927660a91e6db0d16eb36b7b49a3525b54d1644e65599b032b7eb901a204533127a0110400bd3edaa09eff9809c4edc2c2a0ebe52e53c50a19c1e49ab78e6167bf61473bb08f2050d78a5cbbc6ed66aff7b42cd503f16b4a0b99fa1609681fca9b7ce2bbb1a5b3864d6cdda4d7ef7849d156d534dea30fb0efb9e4cf8959a2b2ce623905882d5430b995a15c3b9fe92906086788b891002924f94abe139b42cbbfaaabe42f00a0b65dc1a1ad27d798adbcb5b5ad02d2688c89477b03ff4eebb6f7b15a73b96a96bed201c0e5e4ea27e4c6e2dd1005b94d4b90137a5b1cf5e01c6226c070c4cc999938101578877ee76d296b9aab8246d57049caacf489e80a3f40589cade790a020b1ac146d6f7a6241184b8c7fcde680eae3188f5dcbe846d7f7bdad34f6fcfca08413e19c1d5df83fc7c7c627d493492e009c2f52a80400a2fe82de87136fd2e8845888c4431b032ba29d9a29a804277e31002a8201fb8591a3e55c7a0d0881496caf8b9fb07544a5a4879291d0dc026a0ea9e5bd88eb4aa4947bbd694b25012e208a250d65ddc6f1eea59d3aed3b4ec15fcab85e2afaa23a40ab1ef9ce3e11e1bc1c34a0e758e7aa64deb8739276df0af7d4121f834a9b88e70418010200090502533127a0021b02005209104ef7e4beccde97f047200419110200060502533127a0000a0910dbce4ee19529437fe045009c0b32f5ead48ee8a7e98fac0dea3d3e6c0e2c552500a0ad71fadc5007cfaf842d9b7db3335a8cdad15d3d1a6404009b08e2c68fe8f3b45c1bb72a4b3278cdf3012aa0f229883ad74aa1f6000bb90b18301b2f85372ca5d6b9bf478d235b733b1b197d19ccca48e9daf8e890cb64546b4ce1b178faccfff07003c172a2d4f5ebaba9f57153955f3f61a9b80a4f5cb959908f8b211b03b7026a8a82fc612bfedd3794969bcf458c4ce92be215a1176ab88d045331d144010400a5063000c5aaf34953c1aa3bfc95045b3aab9882b9a8027fecfe2142dc6b47ba8aca667399990244d513dd0504716908c17d92c65e74219e004f7b83fc125e575dd58efec3ab6dd22e3580106998523dea42ec75bf9aa111734c82df54630bebdff20fe981cfc36c76f865eb1c2fb62c9e85bc3a6e5015a361a2eb1c8431578d0011010001889f04280102000905025331d433021d03000a09104ef7e4beccde97f02e5503ff5e0630d1b65291f4882b6d40a29da4616bb5088717d469fbcc3648b8276de04a04988b1f1b9f3e18f52265c1f8b6c85861691c1a6b8a3a25a1809a0b32ad330aec5667cb4262f4450649184e8113849b05e5ad06a316ea80c001e8e71838190339a6e48bbde30647bcf245134b9a97fa875c1d83a9862cae87ffd7e2c4ce3a1b89013d04180102000905025331d144021b0200a809104ef7e4beccde97f09d2004190102000605025331d144000a0910677815e371c2fd23522203fe22ab62b8e7a151383cea3edd3a12995693911426f8ccf125e1f6426388c0010f88d9ca7da2224aee8d1c12135998640c5e1813d55a93df472faae75bef858457248db41b4505827590aeccf6f9eb646da7f980655dd3050c6897feddddaca90676dee856d66db8923477d251712bb9b3186b4d0114daf7d6b59272b53218dd1da94a03ff64006fcbe71211e5daecd9961fba66cdb6de3f914882c58ba5beddeba7dcb950c1156d7fba18c19ea880dccc800eae335deec34e3b84ac75ffa24864f782f87815cda1c0f634b3dd2fa67cea30811d21723d21d9551fa12ccbcfa62b6d3a15d01307b99925707992556d50065505b090aadb8579083a20fe65bd2a270da9b011"
-
-const missingCrossSignatureKey = `-----BEGIN PGP PUBLIC KEY BLOCK-----
-Charset: UTF-8
-
-mQENBFMYynYBCACVOZ3/e8Bm2b9KH9QyIlHGo/i1bnkpqsgXj8tpJ2MIUOnXMMAY
-ztW7kKFLCmgVdLIC0vSoLA4yhaLcMojznh/2CcUglZeb6Ao8Gtelr//Rd5DRfPpG
-zqcfUo+m+eO1co2Orabw0tZDfGpg5p3AYl0hmxhUyYSc/xUq93xL1UJzBFgYXY54
-QsM8dgeQgFseSk/YvdP5SMx1ev+eraUyiiUtWzWrWC1TdyRa5p4UZg6Rkoppf+WJ
-QrW6BWrhAtqATHc8ozV7uJjeONjUEq24roRc/OFZdmQQGK6yrzKnnbA6MdHhqpdo
-9kWDcXYb7pSE63Lc+OBa5X2GUVvXJLS/3nrtABEBAAG0F2ludmFsaWQtc2lnbmlu
-Zy1zdWJrZXlziQEoBBMBAgASBQJTnKB5AhsBAgsHAhUIAh4BAAoJEO3UDQUIHpI/
-dN4H/idX4FQ1LIZCnpHS/oxoWQWfpRgdKAEM0qCqjMgiipJeEwSQbqjTCynuh5/R
-JlODDz85ABR06aoF4l5ebGLQWFCYifPnJZ/Yf5OYcMGtb7dIbqxWVFL9iLMO/oDL
-ioI3dotjPui5e+2hI9pVH1UHB/bZ/GvMGo6Zg0XxLPolKQODMVjpjLAQ0YJ3spew
-RAmOGre6tIvbDsMBnm8qREt7a07cBJ6XK7xjxYaZHQBiHVxyEWDa6gyANONx8duW
-/fhQ/zDTnyVM/ik6VO0Ty9BhPpcEYLFwh5c1ilFari1ta3e6qKo6ZGa9YMk/REhu
-yBHd9nTkI+0CiQUmbckUiVjDKKe5AQ0EUxjKdgEIAJcXQeP+NmuciE99YcJoffxv
-2gVLU4ZXBNHEaP0mgaJ1+tmMD089vUQAcyGRvw8jfsNsVZQIOAuRxY94aHQhIRHR
-bUzBN28ofo/AJJtfx62C15xt6fDKRV6HXYqAiygrHIpEoRLyiN69iScUsjIJeyFL
-C8wa72e8pSL6dkHoaV1N9ZH/xmrJ+k0vsgkQaAh9CzYufncDxcwkoP+aOlGtX1gP
-WwWoIbz0JwLEMPHBWvDDXQcQPQTYQyj+LGC9U6f9VZHN25E94subM1MjuT9OhN9Y
-MLfWaaIc5WyhLFyQKW2Upofn9wSFi8ubyBnv640Dfd0rVmaWv7LNTZpoZ/GbJAMA
-EQEAAYkBHwQYAQIACQUCU5ygeQIbAgAKCRDt1A0FCB6SP0zCB/sEzaVR38vpx+OQ
-MMynCBJrakiqDmUZv9xtplY7zsHSQjpd6xGflbU2n+iX99Q+nav0ETQZifNUEd4N
-1ljDGQejcTyKD6Pkg6wBL3x9/RJye7Zszazm4+toJXZ8xJ3800+BtaPoI39akYJm
-+ijzbskvN0v/j5GOFJwQO0pPRAFtdHqRs9Kf4YanxhedB4dIUblzlIJuKsxFit6N
-lgGRblagG3Vv2eBszbxzPbJjHCgVLR3RmrVezKOsZjr/2i7X+xLWIR0uD3IN1qOW
-CXQxLBizEEmSNVNxsp7KPGTLnqO3bPtqFirxS9PJLIMPTPLNBY7ZYuPNTMqVIUWF
-4artDmrG
-=7FfJ
------END PGP PUBLIC KEY BLOCK-----`
-
-const invalidCrossSignatureKey = `-----BEGIN PGP PUBLIC KEY BLOCK-----
-
-mQENBFMYynYBCACVOZ3/e8Bm2b9KH9QyIlHGo/i1bnkpqsgXj8tpJ2MIUOnXMMAY
-ztW7kKFLCmgVdLIC0vSoLA4yhaLcMojznh/2CcUglZeb6Ao8Gtelr//Rd5DRfPpG
-zqcfUo+m+eO1co2Orabw0tZDfGpg5p3AYl0hmxhUyYSc/xUq93xL1UJzBFgYXY54
-QsM8dgeQgFseSk/YvdP5SMx1ev+eraUyiiUtWzWrWC1TdyRa5p4UZg6Rkoppf+WJ
-QrW6BWrhAtqATHc8ozV7uJjeONjUEq24roRc/OFZdmQQGK6yrzKnnbA6MdHhqpdo
-9kWDcXYb7pSE63Lc+OBa5X2GUVvXJLS/3nrtABEBAAG0F2ludmFsaWQtc2lnbmlu
-Zy1zdWJrZXlziQEoBBMBAgASBQJTnKB5AhsBAgsHAhUIAh4BAAoJEO3UDQUIHpI/
-dN4H/idX4FQ1LIZCnpHS/oxoWQWfpRgdKAEM0qCqjMgiipJeEwSQbqjTCynuh5/R
-JlODDz85ABR06aoF4l5ebGLQWFCYifPnJZ/Yf5OYcMGtb7dIbqxWVFL9iLMO/oDL
-ioI3dotjPui5e+2hI9pVH1UHB/bZ/GvMGo6Zg0XxLPolKQODMVjpjLAQ0YJ3spew
-RAmOGre6tIvbDsMBnm8qREt7a07cBJ6XK7xjxYaZHQBiHVxyEWDa6gyANONx8duW
-/fhQ/zDTnyVM/ik6VO0Ty9BhPpcEYLFwh5c1ilFari1ta3e6qKo6ZGa9YMk/REhu
-yBHd9nTkI+0CiQUmbckUiVjDKKe5AQ0EUxjKdgEIAIINDqlj7X6jYKc6DjwrOkjQ
-UIRWbQQar0LwmNilehmt70g5DCL1SYm9q4LcgJJ2Nhxj0/5qqsYib50OSWMcKeEe
-iRXpXzv1ObpcQtI5ithp0gR53YPXBib80t3bUzomQ5UyZqAAHzMp3BKC54/vUrSK
-FeRaxDzNLrCeyI00+LHNUtwghAqHvdNcsIf8VRumK8oTm3RmDh0TyjASWYbrt9c8
-R1Um3zuoACOVy+mEIgIzsfHq0u7dwYwJB5+KeM7ZLx+HGIYdUYzHuUE1sLwVoELh
-+SHIGHI1HDicOjzqgajShuIjj5hZTyQySVprrsLKiXS6NEwHAP20+XjayJ/R3tEA
-EQEAAYkCPgQYAQIBKAUCU5ygeQIbAsBdIAQZAQIABgUCU5ygeQAKCRCpVlnFZmhO
-52RJB/9uD1MSa0wjY6tHOIgquZcP3bHBvHmrHNMw9HR2wRCMO91ZkhrpdS3ZHtgb
-u3/55etj0FdvDo1tb8P8FGSVtO5Vcwf5APM8sbbqoi8L951Q3i7qt847lfhu6sMl
-w0LWFvPTOLHrliZHItPRjOltS1WAWfr2jUYhsU9ytaDAJmvf9DujxEOsN5G1YJep
-54JCKVCkM/y585Zcnn+yxk/XwqoNQ0/iJUT9qRrZWvoeasxhl1PQcwihCwss44A+
-YXaAt3hbk+6LEQuZoYS73yR3WHj+42tfm7YxRGeubXfgCEz/brETEWXMh4pe0vCL
-bfWrmfSPq2rDegYcAybxRQz0lF8PAAoJEO3UDQUIHpI/exkH/0vQfdHA8g/N4T6E
-i6b1CUVBAkvtdJpCATZjWPhXmShOw62gkDw306vHPilL4SCvEEi4KzG72zkp6VsB
-DSRcpxCwT4mHue+duiy53/aRMtSJ+vDfiV1Vhq+3sWAck/yUtfDU9/u4eFaiNok1
-8/Gd7reyuZt5CiJnpdPpjCwelK21l2w7sHAnJF55ITXdOxI8oG3BRKufz0z5lyDY
-s2tXYmhhQIggdgelN8LbcMhWs/PBbtUr6uZlNJG2lW1yscD4aI529VjwJlCeo745
-U7pO4eF05VViUJ2mmfoivL3tkhoTUWhx8xs8xCUcCg8DoEoSIhxtOmoTPR22Z9BL
-6LCg2mg=
-=Dhm4
------END PGP PUBLIC KEY BLOCK-----`
-
-const goodCrossSignatureKey = `-----BEGIN PGP PUBLIC KEY BLOCK-----
-Version: GnuPG v1
-
-mI0EVUqeVwEEAMufHRrMPWK3gyvi0O0tABCs/oON9zV9KDZlr1a1M91ShCSFwCPo
-7r80PxdWVWcj0V5h50/CJYtpN3eE/mUIgW2z1uDYQF1OzrQ8ubrksfsJvpAhENom
-lTQEppv9mV8qhcM278teb7TX0pgrUHLYF5CfPdp1L957JLLXoQR/lwLVABEBAAG0
-E2dvb2Qtc2lnbmluZy1zdWJrZXmIuAQTAQIAIgUCVUqeVwIbAwYLCQgHAwIGFQgC
-CQoLBBYCAwECHgECF4AACgkQNRjL95IRWP69XQQAlH6+eyXJN4DZTLX78KGjHrsw
-6FCvxxClEPtPUjcJy/1KCRQmtLAt9PbbA78dvgzjDeZMZqRAwdjyJhjyg/fkU2OH
-7wq4ktjUu+dLcOBb+BFMEY+YjKZhf6EJuVfxoTVr5f82XNPbYHfTho9/OABKH6kv
-X70PaKZhbwnwij8Nts65AaIEVUqftREEAJ3WxZfqAX0bTDbQPf2CMT2IVMGDfhK7
-GyubOZgDFFjwUJQvHNvsrbeGLZ0xOBumLINyPO1amIfTgJNm1iiWFWfmnHReGcDl
-y5mpYG60Mb79Whdcer7CMm3AqYh/dW4g6IB02NwZMKoUHo3PXmFLxMKXnWyJ0clw
-R0LI/Qn509yXAKDh1SO20rqrBM+EAP2c5bfI98kyNwQAi3buu94qo3RR1ZbvfxgW
-CKXDVm6N99jdZGNK7FbRifXqzJJDLcXZKLnstnC4Sd3uyfyf1uFhmDLIQRryn5m+
-LBYHfDBPN3kdm7bsZDDq9GbTHiFZUfm/tChVKXWxkhpAmHhU/tH6GGzNSMXuIWSO
-aOz3Rqq0ED4NXyNKjdF9MiwD/i83S0ZBc0LmJYt4Z10jtH2B6tYdqnAK29uQaadx
-yZCX2scE09UIm32/w7pV77CKr1Cp/4OzAXS1tmFzQ+bX7DR+Gl8t4wxr57VeEMvl
-BGw4Vjh3X8//m3xynxycQU18Q1zJ6PkiMyPw2owZ/nss3hpSRKFJsxMLhW3fKmKr
-Ey2KiOcEGAECAAkFAlVKn7UCGwIAUgkQNRjL95IRWP5HIAQZEQIABgUCVUqftQAK
-CRD98VjDN10SqkWrAKDTpEY8D8HC02E/KVC5YUI01B30wgCgurpILm20kXEDCeHp
-C5pygfXw1DJrhAP+NyPJ4um/bU1I+rXaHHJYroYJs8YSweiNcwiHDQn0Engh/mVZ
-SqLHvbKh2dL/RXymC3+rjPvQf5cup9bPxNMa6WagdYBNAfzWGtkVISeaQW+cTEp/
-MtgVijRGXR/lGLGETPg2X3Afwn9N9bLMBkBprKgbBqU7lpaoPupxT61bL70=
-=vtbN
------END PGP PUBLIC KEY BLOCK-----`
-
-const revokedUserIDKey = `-----BEGIN PGP PUBLIC KEY BLOCK-----
-
-mQENBFsgO5EBCADhREPmcjsPkXe1z7ctvyWL0S7oa9JaoGZ9oPDHFDlQxd0qlX2e
-DZJZDg0qYvVixmaULIulApq1puEsaJCn3lHUbHlb4PYKwLEywYXM28JN91KtLsz/
-uaEX2KC5WqeP40utmzkNLq+oRX/xnRMgwbO7yUNVG2UlEa6eI+xOXO3YtLdmJMBW
-ClQ066ZnOIzEo1JxnIwha1CDBMWLLfOLrg6l8InUqaXbtEBbnaIYO6fXVXELUjkx
-nmk7t/QOk0tXCy8muH9UDqJkwDUESY2l79XwBAcx9riX8vY7vwC34pm22fAUVLCJ
-x1SJx0J8bkeNp38jKM2Zd9SUQqSbfBopQ4pPABEBAAG0I0dvbGFuZyBHb3BoZXIg
-PG5vLXJlcGx5QGdvbGFuZy5jb20+iQFUBBMBCgA+FiEE5Ik5JLcNx6l6rZfw1oFy
-9I6cUoMFAlsgO5ECGwMFCQPCZwAFCwkIBwMFFQoJCAsFFgIDAQACHgECF4AACgkQ
-1oFy9I6cUoMIkwf8DNPeD23i4jRwd/pylbvxwZintZl1fSwTJW1xcOa1emXaEtX2
-depuqhP04fjlRQGfsYAQh7X9jOJxAHjTmhqFBi5sD7QvKU00cPFYbJ/JTx0B41bl
-aXnSbGhRPh63QtEZL7ACAs+shwvvojJqysx7kyVRu0EW2wqjXdHwR/SJO6nhNBa2
-DXzSiOU/SUA42mmG+5kjF8Aabq9wPwT9wjraHShEweNerNMmOqJExBOy3yFeyDpa
-XwEZFzBfOKoxFNkIaVf5GSdIUGhFECkGvBMB935khftmgR8APxdU4BE7XrXexFJU
-8RCuPXonm4WQOwTWR0vQg64pb2WKAzZ8HhwTGbQiR29sYW5nIEdvcGhlciA8cmV2
-b2tlZEBnb2xhbmcuY29tPokBNgQwAQoAIBYhBOSJOSS3Dcepeq2X8NaBcvSOnFKD
-BQJbIDv3Ah0AAAoJENaBcvSOnFKDfWMIAKhI/Tvu3h8fSUxp/gSAcduT6bC1JttG
-0lYQ5ilKB/58lBUA5CO3ZrKDKlzW3M8VEcvohVaqeTMKeoQd5rCZq8KxHn/KvN6N
-s85REfXfniCKfAbnGgVXX3kDmZ1g63pkxrFu0fDZjVDXC6vy+I0sGyI/Inro0Pzb
-tvn0QCsxjapKK15BtmSrpgHgzVqVg0cUp8vqZeKFxarYbYB2idtGRci4b9tObOK0
-BSTVFy26+I/mrFGaPrySYiy2Kz5NMEcRhjmTxJ8jSwEr2O2sUR0yjbgUAXbTxDVE
-/jg5fQZ1ACvBRQnB7LvMHcInbzjyeTM3FazkkSYQD6b97+dkWwb1iWG5AQ0EWyA7
-kQEIALkg04REDZo1JgdYV4x8HJKFS4xAYWbIva1ZPqvDNmZRUbQZR2+gpJGEwn7z
-VofGvnOYiGW56AS5j31SFf5kro1+1bZQ5iOONBng08OOo58/l1hRseIIVGB5TGSa
-PCdChKKHreJI6hS3mShxH6hdfFtiZuB45rwoaArMMsYcjaezLwKeLc396cpUwwcZ
-snLUNd1Xu5EWEF2OdFkZ2a1qYdxBvAYdQf4+1Nr+NRIx1u1NS9c8jp3PuMOkrQEi
-bNtc1v6v0Jy52mKLG4y7mC/erIkvkQBYJdxPaP7LZVaPYc3/xskcyijrJ/5ufoD8
-K71/ShtsZUXSQn9jlRaYR0EbojMAEQEAAYkBPAQYAQoAJhYhBOSJOSS3Dcepeq2X
-8NaBcvSOnFKDBQJbIDuRAhsMBQkDwmcAAAoJENaBcvSOnFKDkFMIAIt64bVZ8x7+
-TitH1bR4pgcNkaKmgKoZz6FXu80+SnbuEt2NnDyf1cLOSimSTILpwLIuv9Uft5Pb
-OraQbYt3xi9yrqdKqGLv80bxqK0NuryNkvh9yyx5WoG1iKqMj9/FjGghuPrRaT4l
-QinNAghGVkEy1+aXGFrG2DsOC1FFI51CC2WVTzZ5RwR2GpiNRfESsU1rZAUqf/2V
-yJl9bD5R4SUNy8oQmhOxi+gbhD4Ao34e4W0ilibslI/uawvCiOwlu5NGd8zv5n+U
-heiQvzkApQup5c+BhH5zFDFdKJ2CBByxw9+7QjMFI/wgLixKuE0Ob2kAokXf7RlB
-7qTZOahrETw=
-=IKnw
------END PGP PUBLIC KEY BLOCK-----`
-
-const keyWithFirstUserIDRevoked = `-----BEGIN PGP PUBLIC KEY BLOCK-----
-Version: OpenPGP.js v4.10.10
-Comment: https://openpgpjs.org
-
-xsBNBFsgO5EBCADhREPmcjsPkXe1z7ctvyWL0S7oa9JaoGZ9oPDHFDlQxd0q
-lX2eDZJZDg0qYvVixmaULIulApq1puEsaJCn3lHUbHlb4PYKwLEywYXM28JN
-91KtLsz/uaEX2KC5WqeP40utmzkNLq+oRX/xnRMgwbO7yUNVG2UlEa6eI+xO
-XO3YtLdmJMBWClQ066ZnOIzEo1JxnIwha1CDBMWLLfOLrg6l8InUqaXbtEBb
-naIYO6fXVXELUjkxnmk7t/QOk0tXCy8muH9UDqJkwDUESY2l79XwBAcx9riX
-8vY7vwC34pm22fAUVLCJx1SJx0J8bkeNp38jKM2Zd9SUQqSbfBopQ4pPABEB
-AAHNIkdvbGFuZyBHb3BoZXIgPHJldm9rZWRAZ29sYW5nLmNvbT7CwI0EMAEK
-ACAWIQTkiTkktw3HqXqtl/DWgXL0jpxSgwUCWyA79wIdAAAhCRDWgXL0jpxS
-gxYhBOSJOSS3Dcepeq2X8NaBcvSOnFKDfWMIAKhI/Tvu3h8fSUxp/gSAcduT
-6bC1JttG0lYQ5ilKB/58lBUA5CO3ZrKDKlzW3M8VEcvohVaqeTMKeoQd5rCZ
-q8KxHn/KvN6Ns85REfXfniCKfAbnGgVXX3kDmZ1g63pkxrFu0fDZjVDXC6vy
-+I0sGyI/Inro0Pzbtvn0QCsxjapKK15BtmSrpgHgzVqVg0cUp8vqZeKFxarY
-bYB2idtGRci4b9tObOK0BSTVFy26+I/mrFGaPrySYiy2Kz5NMEcRhjmTxJ8j
-SwEr2O2sUR0yjbgUAXbTxDVE/jg5fQZ1ACvBRQnB7LvMHcInbzjyeTM3Fazk
-kSYQD6b97+dkWwb1iWHNI0dvbGFuZyBHb3BoZXIgPG5vLXJlcGx5QGdvbGFu
-Zy5jb20+wsCrBBMBCgA+FiEE5Ik5JLcNx6l6rZfw1oFy9I6cUoMFAlsgO5EC
-GwMFCQPCZwAFCwkIBwMFFQoJCAsFFgIDAQACHgECF4AAIQkQ1oFy9I6cUoMW
-IQTkiTkktw3HqXqtl/DWgXL0jpxSgwiTB/wM094PbeLiNHB3+nKVu/HBmKe1
-mXV9LBMlbXFw5rV6ZdoS1fZ16m6qE/Th+OVFAZ+xgBCHtf2M4nEAeNOaGoUG
-LmwPtC8pTTRw8Vhsn8lPHQHjVuVpedJsaFE+HrdC0RkvsAICz6yHC++iMmrK
-zHuTJVG7QRbbCqNd0fBH9Ik7qeE0FrYNfNKI5T9JQDjaaYb7mSMXwBpur3A/
-BP3COtodKETB416s0yY6okTEE7LfIV7IOlpfARkXMF84qjEU2QhpV/kZJ0hQ
-aEUQKQa8EwH3fmSF+2aBHwA/F1TgETtetd7EUlTxEK49eiebhZA7BNZHS9CD
-rilvZYoDNnweHBMZzsBNBFsgO5EBCAC5INOERA2aNSYHWFeMfByShUuMQGFm
-yL2tWT6rwzZmUVG0GUdvoKSRhMJ+81aHxr5zmIhluegEuY99UhX+ZK6NftW2
-UOYjjjQZ4NPDjqOfP5dYUbHiCFRgeUxkmjwnQoSih63iSOoUt5kocR+oXXxb
-YmbgeOa8KGgKzDLGHI2nsy8Cni3N/enKVMMHGbJy1DXdV7uRFhBdjnRZGdmt
-amHcQbwGHUH+PtTa/jUSMdbtTUvXPI6dz7jDpK0BImzbXNb+r9CcudpiixuM
-u5gv3qyJL5EAWCXcT2j+y2VWj2HN/8bJHMoo6yf+bn6A/Cu9f0obbGVF0kJ/
-Y5UWmEdBG6IzABEBAAHCwJMEGAEKACYWIQTkiTkktw3HqXqtl/DWgXL0jpxS
-gwUCWyA7kQIbDAUJA8JnAAAhCRDWgXL0jpxSgxYhBOSJOSS3Dcepeq2X8NaB
-cvSOnFKDkFMIAIt64bVZ8x7+TitH1bR4pgcNkaKmgKoZz6FXu80+SnbuEt2N
-nDyf1cLOSimSTILpwLIuv9Uft5PbOraQbYt3xi9yrqdKqGLv80bxqK0NuryN
-kvh9yyx5WoG1iKqMj9/FjGghuPrRaT4lQinNAghGVkEy1+aXGFrG2DsOC1FF
-I51CC2WVTzZ5RwR2GpiNRfESsU1rZAUqf/2VyJl9bD5R4SUNy8oQmhOxi+gb
-hD4Ao34e4W0ilibslI/uawvCiOwlu5NGd8zv5n+UheiQvzkApQup5c+BhH5z
-FDFdKJ2CBByxw9+7QjMFI/wgLixKuE0Ob2kAokXf7RlB7qTZOahrETw=
-=+2T8
------END PGP PUBLIC KEY BLOCK-----
-`
-
-const keyWithOnlyUserIDRevoked = `-----BEGIN PGP PUBLIC KEY BLOCK-----
-
-mDMEYYwB7RYJKwYBBAHaRw8BAQdARimqhPPzyGAXmfQJjcqM1QVPzLtURJSzNVll
-JV4tEaW0KVJldm9rZWQgUHJpbWFyeSBVc2VyIElEIDxyZXZva2VkQGtleS5jb20+
-iHgEMBYIACAWIQSpyJZAXYqVEFkjyKutFcS0yeB0LQUCYYwCtgIdAAAKCRCtFcS0
-yeB0LbSsAQD8OYMaaBjrdzzpwIkP1stgmPd4/kzN/ZG28Ywl6a5F5QEA5Xg7aq4e
-/t6Fsb4F5iqB956kSPe6YJrikobD/tBbMwSIkAQTFggAOBYhBKnIlkBdipUQWSPI
-q60VxLTJ4HQtBQJhjAHtAhsDBQsJCAcCBhUKCQgLAgQWAgMBAh4BAheAAAoJEK0V
-xLTJ4HQtBaoBAPZL7luTCji+Tqhn7XNfFE/0QIahCt8k9wfO1cGlB3inAQDf8Tzw
-ZGR5fNluUcNoVxQT7bUSFStbaGo3k0BaOYPbCLg4BGGMAe0SCisGAQQBl1UBBQEB
-B0DLwSpveSrbIO/IVZD13yrs1XuB3FURZUnafGrRq7+jUAMBCAeIeAQYFggAIBYh
-BKnIlkBdipUQWSPIq60VxLTJ4HQtBQJhjAHtAhsMAAoJEK0VxLTJ4HQtZ1oA/j9u
-8+p3xTNzsmabTL6BkNbMeB/RUKCrlm6woM6AV+vxAQCcXTn3JC2sNoNrLoXuVzaA
-mcG3/TwG5GSQUUPkrDsGDA==
-=mFWy
------END PGP PUBLIC KEY BLOCK-----
-`
-
-const keyWithSubKey = `-----BEGIN PGP PUBLIC KEY BLOCK-----
-
-mI0EWyKwKQEEALwXhKBnyaaNFeK3ljfc/qn9X/QFw+28EUfgZPHjRmHubuXLE2uR
-s3ZoSXY2z7Dkv+NyHYMt8p+X8q5fR7JvUjK2XbPyKoiJVnHINll83yl67DaWfKNL
-EjNoO0kIfbXfCkZ7EG6DL+iKtuxniGTcnGT47e+HJSqb/STpLMnWwXjBABEBAAG0
-I0dvbGFuZyBHb3BoZXIgPG5vLXJlcGx5QGdvbGFuZy5jb20+iM4EEwEKADgWIQQ/
-lRafP/p9PytHbwxMvYJsOQdOOAUCWyKwKQIbAwULCQgHAwUVCgkICwUWAgMBAAIe
-AQIXgAAKCRBMvYJsOQdOOOsFBAC62mXww8XuqvYLcVOvHkWLT6mhxrQOJXnlfpn7
-2uBV9CMhoG/Ycd43NONsJrB95Apr9TDIqWnVszNbqPCuBhZQSGLdbiDKjxnCWBk0
-69qv4RNtkpOhYB7jK4s8F5oQZqId6JasT/PmJTH92mhBYhhTQr0GYFuPX2UJdkw9
-Sn9C67iNBFsisDUBBAC3A+Yo9lgCnxi/pfskyLrweYif6kIXWLAtLTsM6g/6jt7b
-wTrknuCPyTv0QKGXsAEe/cK/Xq3HvX9WfXPGIHc/X56ZIsHQ+RLowbZV/Lhok1IW
-FAuQm8axr/by80cRwFnzhfPc/ukkAq2Qyj4hLsGblu6mxeAhzcp8aqmWOO2H9QAR
-AQABiLYEKAEKACAWIQQ/lRafP/p9PytHbwxMvYJsOQdOOAUCWyK16gIdAAAKCRBM
-vYJsOQdOOB1vA/4u4uLONsE+2GVOyBsHyy7uTdkuxaR9b54A/cz6jT/tzUbeIzgx
-22neWhgvIEghnUZd0vEyK9k1wy5vbDlEo6nKzHso32N1QExGr5upRERAxweDxGOj
-7luDwNypI7QcifE64lS/JmlnunwRCdRWMKc0Fp+7jtRc5mpwyHN/Suf5RokBagQY
-AQoAIBYhBD+VFp8/+n0/K0dvDEy9gmw5B044BQJbIrA1AhsCAL8JEEy9gmw5B044
-tCAEGQEKAB0WIQSNdnkaWY6t62iX336UXbGvYdhXJwUCWyKwNQAKCRCUXbGvYdhX
-JxJSA/9fCPHP6sUtGF1o3G1a3yvOUDGr1JWcct9U+QpbCt1mZoNopCNDDQAJvDWl
-mvDgHfuogmgNJRjOMznvahbF+wpTXmB7LS0SK412gJzl1fFIpK4bgnhu0TwxNsO1
-8UkCZWqxRMgcNUn9z6XWONK8dgt5JNvHSHrwF4CxxwjL23AAtK+FA/UUoi3U4kbC
-0XnSr1Sl+mrzQi1+H7xyMe7zjqe+gGANtskqexHzwWPUJCPZ5qpIa2l8ghiUim6b
-4ymJ+N8/T8Yva1FaPEqfMzzqJr8McYFm0URioXJPvOAlRxdHPteZ0qUopt/Jawxl
-Xt6B9h1YpeLoJwjwsvbi98UTRs0jXwoY
-=3fWu
------END PGP PUBLIC KEY BLOCK-----`
-
-const keyWithSubKeyAndBadSelfSigOrder = `-----BEGIN PGP PUBLIC KEY BLOCK-----
-
-mI0EWyLLDQEEAOqIOpJ/ha1OYAGduu9tS3rBz5vyjbNgJO4sFveEM0mgsHQ0X9/L
-plonW+d0gRoO1dhJ8QICjDAc6+cna1DE3tEb5m6JtQ30teLZuqrR398Cf6w7NNVz
-r3lrlmnH9JaKRuXl7tZciwyovneBfZVCdtsRZjaLI1uMQCz/BToiYe3DABEBAAG0
-I0dvbGFuZyBHb3BoZXIgPG5vLXJlcGx5QGdvbGFuZy5jb20+iM4EEwEKADgWIQRZ
-sixZOfQcZdW0wUqmgmdsv1O9xgUCWyLLDQIbAwULCQgHAwUVCgkICwUWAgMBAAIe
-AQIXgAAKCRCmgmdsv1O9xql2A/4pix98NxjhdsXtazA9agpAKeADf9tG4Za27Gj+
-3DCww/E4iP2X35jZimSm/30QRB6j08uGCqd9vXkkJxtOt63y/IpVOtWX6vMWSTUm
-k8xKkaYMP0/IzKNJ1qC/qYEUYpwERBKg9Z+k99E2Ql4kRHdxXUHq6OzY79H18Y+s
-GdeM/riNBFsiyxsBBAC54Pxg/8ZWaZX1phGdwfe5mek27SOYpC0AxIDCSOdMeQ6G
-HPk38pywl1d+S+KmF/F4Tdi+kWro62O4eG2uc/T8JQuRDUhSjX0Qa51gPzJrUOVT
-CFyUkiZ/3ZDhtXkgfuso8ua2ChBgR9Ngr4v43tSqa9y6AK7v0qjxD1x+xMrjXQAR
-AQABiQFxBBgBCgAmAhsCFiEEWbIsWTn0HGXVtMFKpoJnbL9TvcYFAlsizTIFCQAN
-MRcAv7QgBBkBCgAdFiEEJcoVUVJIk5RWj1c/o62jUpRPICQFAlsiyxsACgkQo62j
-UpRPICQq5gQApoWIigZxXFoM0uw4uJBS5JFZtirTANvirZV5RhndwHeMN6JttaBS
-YnjyA4+n1D+zB2VqliD2QrsX12KJN6rGOehCtEIClQ1Hodo9nC6kMzzAwW1O8bZs
-nRJmXV+bsvD4sidLZLjdwOVa3Cxh6pvq4Uur6a7/UYx121hEY0Qx0s8JEKaCZ2y/
-U73GGi0D/i20VW8AWYAPACm2zMlzExKTOAV01YTQH/3vW0WLrOse53WcIVZga6es
-HuO4So0SOEAvxKMe5HpRIu2dJxTvd99Bo9xk9xJU0AoFrO0vNCRnL+5y68xMlODK
-lEw5/kl0jeaTBp6xX0HDQOEVOpPGUwWV4Ij2EnvfNDXaE1vK1kffiQFrBBgBCgAg
-AhsCFiEEWbIsWTn0HGXVtMFKpoJnbL9TvcYFAlsi0AYAv7QgBBkBCgAdFiEEJcoV
-UVJIk5RWj1c/o62jUpRPICQFAlsiyxsACgkQo62jUpRPICQq5gQApoWIigZxXFoM
-0uw4uJBS5JFZtirTANvirZV5RhndwHeMN6JttaBSYnjyA4+n1D+zB2VqliD2QrsX
-12KJN6rGOehCtEIClQ1Hodo9nC6kMzzAwW1O8bZsnRJmXV+bsvD4sidLZLjdwOVa
-3Cxh6pvq4Uur6a7/UYx121hEY0Qx0s8JEKaCZ2y/U73GRl0EAJokkXmy4zKDHWWi
-wvK9gi2gQgRkVnu2AiONxJb5vjeLhM/07BRmH6K1o+w3fOeEQp4FjXj1eQ5fPSM6
-Hhwx2CTl9SDnPSBMiKXsEFRkmwQ2AAsQZLmQZvKBkLZYeBiwf+IY621eYDhZfo+G
-1dh1WoUCyREZsJQg2YoIpWIcvw+a
-=bNRo
------END PGP PUBLIC KEY BLOCK-----
-`
-
-const onlySubkeyNoPrivateKey = `-----BEGIN PGP PRIVATE KEY BLOCK-----
-Version: GnuPG v1
-
-lQCVBFggvocBBAC7vBsHn7MKmS6IiiZNTXdciplVgS9cqVd+RTdIAoyNTcsiV1H0
-GQ3QtodOPeDlQDNoqinqaobd7R9g3m3hS53Nor7yBZkCWQ5x9v9JxRtoAq0sklh1
-I1X2zEqZk2l6YrfBF/64zWrhjnW3j23szkrAIVu0faQXbQ4z56tmZrw11wARAQAB
-/gdlAkdOVQG0CUdOVSBEdW1teYi4BBMBAgAiBQJYIL6HAhsDBgsJCAcDAgYVCAIJ
-CgsEFgIDAQIeAQIXgAAKCRCd1xxWp1CYAnjGA/9synn6ZXJUKAXQzySgmCZvCIbl
-rqBfEpxwLG4Q/lONhm5vthAE0z49I8hj5Gc5e2tLYUtq0o0OCRdCrYHa/efOYWpJ
-6RsK99bePOisVzmOABLIgZkcr022kHoMCmkPgv9CUGKP1yqbGl+zzAwQfUjRUmvD
-ZIcWLHi2ge4GzPMPi50B2ARYIL6cAQQAxWHnicKejAFcFcF1/3gUSgSH7eiwuBPX
-M7vDdgGzlve1o1jbV4tzrjN9jsCl6r0nJPDMfBSzgLr1auNTRG6HpJ4abcOx86ED
-Ad+avDcQPZb7z3dPhH/gb2lQejZsHh7bbeOS8WMSzHV3RqCLd8J/xwWPNR5zKn1f
-yp4IGfopidMAEQEAAQAD+wQOelnR82+dxyM2IFmZdOB9wSXQeCVOvxSaNMh6Y3lk
-UOOkO8Nlic4x0ungQRvjoRs4wBmCuwFK/MII6jKui0B7dn/NDf51i7rGdNGuJXDH
-e676By1sEY/NGkc74jr74T+5GWNU64W0vkpfgVmjSAzsUtpmhJMXsc7beBhJdnVl
-AgDKCb8hZqj1alcdmLoNvb7ibA3K/V8J462CPD7bMySPBa/uayoFhNxibpoXml2r
-oOtHa5izF3b0/9JY97F6rqkdAgD6GdTJ+xmlCoz1Sewoif1I6krq6xoa7gOYpIXo
-UL1Afr+LiJeyAnF/M34j/kjIVmPanZJjry0kkjHE5ILjH3uvAf4/6n9np+Th8ujS
-YDCIzKwR7639+H+qccOaddCep8Y6KGUMVdD/vTKEx1rMtK+hK/CDkkkxnFslifMJ
-kqoqv3WUqCWJAT0EGAECAAkFAlggvpwCGwIAqAkQndccVqdQmAKdIAQZAQIABgUC
-WCC+nAAKCRDmGUholQPwvQk+A/9latnSsR5s5/1A9TFki11GzSEnfLbx46FYOdkW
-n3YBxZoPQGxNA1vIn8GmouxZInw9CF4jdOJxEdzLlYQJ9YLTLtN5tQEMl/19/bR8
-/qLacAZ9IOezYRWxxZsyn6//jfl7A0Y+FV59d4YajKkEfItcIIlgVBSW6T+TNQT3
-R+EH5HJ/A/4/AN0CmBhhE2vGzTnVU0VPrE4V64pjn1rufFdclgpixNZCuuqpKpoE
-VVHn6mnBf4njKjZrAGPs5kfQ+H4NsM7v3Zz4yV6deu9FZc4O6E+V1WJ38rO8eBix
-7G2jko106CC6vtxsCPVIzY7aaG3H5pjRtomw+pX7SzrQ7FUg2PGumg==
-=F/T0
------END PGP PRIVATE KEY BLOCK-----`
-
-const ecdsaPrivateKey = `-----BEGIN PGP PRIVATE KEY BLOCK-----
-
-xaUEX1KsSRMIKoZIzj0DAQcCAwTpYqJsnJiFhKKh+8TulWD+lVmerBFNS+Ii
-B+nlG3T0xQQ4Sy5eIjJ0CExIQQzi3EElF/Z2l4F3WC5taFA11NgA/gkDCHSS
-PThf1M2K4LN8F1MRcvR+sb7i0nH55ojkwuVB1DE6jqIT9m9i+mX1tzjSAS+6
-lPQiweCJvG7xTC7Hs3AzRapf/r1At4TB+v+5G2/CKynNFEJpbGwgPGJpbGxA
-aG9tZS5jb20+wncEEBMIAB8FAl9SrEkGCwkHCAMCBBUICgIDFgIBAhkBAhsD
-Ah4BAAoJEMpwT3+q3+xqw5UBAMebZN9isEZ1ML+R/jWAAWMwa/knMugrEZ1v
-Bl9+ZwM0AQCZdf80/wYY4Nve01qSRFv8OmKswLli3TvDv6FKc4cLz8epBF9S
-rEkSCCqGSM49AwEHAgMEAjKnT9b5wY2bf9TpAV3d7OUfPOxKj9c4VzeVzSrH
-AtQgo/MuI1cdYVURicV4i76DNjFhQHQFTk7BrC+C2u1yqQMBCAf+CQMIHImA
-iYfzQtjgQWSFZYUkCFpbbwhNF0ch+3HNaZkaHCnZRIsWsRnc6FCb6lRQyK9+
-Dq59kHlduE5QgY40894jfmP2JdJHU6nBdYrivbEdbMJhBBgTCAAJBQJfUqxJ
-AhsMAAoJEMpwT3+q3+xqUI0BAMykhV08kQ4Ip9Qlbss6Jdufv7YrU0Vd5hou
-b5TmiPd0APoDBh3qIic+aLLUcAuG3+Gt1P1AbUlmqV61ozn1WfHxfw==
-=KLN8
------END PGP PRIVATE KEY BLOCK-----`
-
-const dsaPrivateKeyWithElGamalSubkey = `-----BEGIN PGP PRIVATE KEY BLOCK-----
-
-lQOBBF9/MLsRCACeaF6BI0jTgDAs86t8/kXPfwlPvR2MCYzB0BCqAdcq1hV/GTYd
-oNmJRna/ZJfsI/vf+d8Nv+EYOQkPheFS1MJVBitkAXjQPgm8i1tQWen1FCWZxqGk
-/vwZYF4yo8GhZ+Wxi3w09W9Cp9QM/CTmyE1Xe7wpPBGe+oD+me8Zxjyt8JBS4Qx+
-gvWbfHxfHnggh4pz7U8QkItlLsBNQEdX4R5+zwRN66g2ZSX/shaa/EkVnihUhD7r
-njP9I51ORWucTQD6OvgooaNQZCkQ/Se9TzdakwWKS2XSIFXiY/e2E5ZgKI/pfKDU
-iA/KessxddPb7nP/05OIJqg9AoDrD4vmehLzAQD+zsUS3LDU1m9/cG4LMsQbT2VK
-Te4HqbGIAle+eu/asQf8DDJMrbZpiJZvADum9j0TJ0oep6VdMbzo9RSDKvlLKT9m
-kG63H8oDWnCZm1a+HmGq9YIX+JHWmsLXXsFLeEouLzHO+mZo0X28eji3V2T87hyR
-MmUM0wFo4k7jK8uVmkDXv3XwNp2uByWxUKZd7EnWmcEZWqIiexJ7XpCS0Pg3tRaI
-zxve0SRe/dxfUPnTk/9KQ9hS6DWroBKquL182zx1Fggh4LIWWE2zq+UYn8BI0E8A
-rmIDFJdF8ymFQGRrEy6g79NnkPmkrZWsgMRYY65P6v4zLVmqohJKkpm3/Uxa6QAP
-CCoPh/JTOvPeCP2bOJH8z4Z9Py3ouMIjofQW8sXqRgf/RIHbh0KsINHrwwZ4gVIr
-MK3RofpaYxw1ztPIWb4cMWoWZHH1Pxh7ggTGSBpAhKXkiWw2Rxat8QF5aA7e962c
-bLvVv8dqsPrD/RnVJHag89cbPTzjn7gY9elE8EM8ithV3oQkwHTr4avYlpDZsgNd
-hUW3YgRwGo31tdzxoG04AcpV2t+07P8XMPr9hsfWs4rHohXPi38Hseu1Ji+dBoWQ
-3+1w/HH3o55s+jy4Ruaz78AIrjbmAJq+6rA2mIcCgrhw3DnzuwQAKeBvSeqn9zfS
-ZC812osMBVmkycwelpaIh64WZ0vWL3GvdXDctV2kXM+qVpDTLEny0LuiXxrwCKQL
-Ev4HAwK9uQBcreDEEud7pfRb8EYP5lzO2ZA7RaIvje6EWAGBvJGMRT0QQE5SGqc7
-Fw5geigBdt+vVyRuNNhg3c2fdn/OBQaYu0J/8AiOogG8EaM8tCFlbGdhbWFsQGRz
-YS5jb20gPGVsZ2FtYWxAZHNhLmNvbT6IkAQTEQgAOBYhBI+gnfiHQxB35/Dp0XAQ
-aE/rsWC5BQJffzC7AhsDBQsJCAcCBhUKCQgLAgQWAgMBAh4BAheAAAoJEHAQaE/r
-sWC5A4EA/0GcJmyPtN+Klc7b9sVT3JgKTRnB/URxOJfYJofP0hZLAQCkqyMO+adV
-JvbgDH0zaITQWZSSXPqpgMpCA6juTrDsd50CawRffzC7EAgAxFFFSAAEQzWTgKU5
-EBtpxxoPzHqcChawTHRxHxjcELXzmUBS5PzfA1HXSPnNqK/x3Ut5ycC3CsW41Fnt
-Gm3706Wu9VFbFZVn55F9lPiplUo61n5pqMvOr1gmuQsdXiTa0t5FRa4TZ2VSiHFw
-vdAVSPTUsT4ZxJ1rPyFYRtq1n3pQcvdZowd07r0JnzTMjLLMFYCKhwIowoOC4zqJ
-iB8enjwOlpaqBATRm9xpVF7SJkroPF6/B1vdhj7E3c1aJyHlo0PYBAg756sSHWHg
-UuLyUQ4TA0hcCVenn/L/aSY2LnbdZB1EBhlYjA7dTCgwIqsQhfQmPkjz6g64A7+Y
-HbbrLwADBQgAk14QIEQ+J/VHetpQV/jt2pNsFK1kVK7mXK0spTExaC2yj2sXlHjL
-Ie3bO5T/KqmIaBEB5db5fA5xK9cZt79qrQHDKsEqUetUeMUWLBx77zBsus3grIgy
-bwDZKseRzQ715pwxquxQlScGoDIBKEh08HpwHkq140eIj3w+MAIfndaZaSCNaxaP
-Snky7BQmJ7Wc7qrIwoQP6yrnUqyW2yNi81nJYUhxjChqaFSlwzLs/iNGryBKo0ic
-BqVIRjikKHBlwBng6WyrltQo/Vt9GG8w+lqaAVXbJRlaBZJUR+2NKi/YhP3qQse3
-v8fi4kns0gh5LK+2C01RvdX4T49QSExuIf4HAwLJqYIGwadA2uem5v7/765ZtFWV
-oL0iZ0ueTJDby4wTFDpLVzzDi/uVcB0ZRFrGOp7w6OYcNYTtV8n3xmli2Q5Trw0c
-wZVzvg+ABKWiv7faBjMczIFF8y6WZKOIeAQYEQgAIBYhBI+gnfiHQxB35/Dp0XAQ
-aE/rsWC5BQJffzC7AhsMAAoJEHAQaE/rsWC5ZmIA/jhS4r4lClbvjuPWt0Yqdn7R
-fss2SPMYvMrrDh42aE0OAQD8xn4G6CN8UtW9xihXOY6FpxiJ/sMc2VaneeUd34oa
-4g==
-=XZm8
------END PGP PRIVATE KEY BLOCK-----`
-
-// https://tests.sequoia-pgp.org/#Certificate_expiration
-// P _ U p
-const expiringPrimaryUIDKey = `-----BEGIN PGP PUBLIC KEY BLOCK-----
-
-xsDNBF2lnPIBDAC5cL9PQoQLTMuhjbYvb4Ncuuo0bfmgPRFywX53jPhoFf4Zg6mv
-/seOXpgecTdOcVttfzC8ycIKrt3aQTiwOG/ctaR4Bk/t6ayNFfdUNxHWk4WCKzdz
-/56fW2O0F23qIRd8UUJp5IIlN4RDdRCtdhVQIAuzvp2oVy/LaS2kxQoKvph/5pQ/
-5whqsyroEWDJoSV0yOb25B/iwk/pLUFoyhDG9bj0kIzDxrEqW+7Ba8nocQlecMF3
-X5KMN5kp2zraLv9dlBBpWW43XktjcCZgMy20SouraVma8Je/ECwUWYUiAZxLIlMv
-9CurEOtxUw6N3RdOtLmYZS9uEnn5y1UkF88o8Nku890uk6BrewFzJyLAx5wRZ4F0
-qV/yq36UWQ0JB/AUGhHVPdFf6pl6eaxBwT5GXvbBUibtf8YI2og5RsgTWtXfU7eb
-SGXrl5ZMpbA6mbfhd0R8aPxWfmDWiIOhBufhMCvUHh1sApMKVZnvIff9/0Dca3wb
-vLIwa3T4CyshfT0AEQEAAc0hQm9iIEJhYmJhZ2UgPGJvYkBvcGVucGdwLmV4YW1w
-bGU+wsFcBBMBCgCQBYJhesp/BYkEWQPJBQsJCAcCCRD7/MgqAV5zMEcUAAAAAAAe
-ACBzYWx0QG5vdGF0aW9ucy5zZXF1b2lhLXBncC5vcmeEOQlNyTLFkc9I/elp+BpY
-495V7KatqtDmsyDr+zDAdwYVCgkICwIEFgIDAQIXgAIbAwIeARYhBNGmbhojsYLJ
-mA94jPv8yCoBXnMwAABSCQv/av8hKyynMtXVKFuWOGJw0mR8auDm84WdhMFRZg8t
-yTJ1L88+Ny4WUAFeqo2j7DU2yPGrm5rmuvzlEedFYFeOWt+A4adz+oumgRd0nsgG
-Lf3QYUWQhLWVlz+H7zubgKqSB2A2RqV65S7mTTVro42nb2Mng6rvGWiqeKG5nrXN
-/01p1mIBQGR/KnZSqYLzA2Pw2PiJoSkXT26PDz/kiEMXpjKMR6sicV4bKVlEdUvm
-pIImIPBHZq1EsKXEyWtWC41w/pc+FofGE+uSFs2aef1vvEHFkj3BHSK8gRcH3kfR
-eFroTET8C2q9V1AOELWm+Ys6PzGzF72URK1MKXlThuL4t4LjvXWGNA78IKW+/RQH
-DzK4U0jqSO0mL6qxqVS5Ij6jjL6OTrVEGdtDf5n0vI8tcUTBKtVqYAYk+t2YGT05
-ayxALtb7viVKo8f10WEcCuKshn0gdsEFMRZQzJ89uQIY3R3FbsdRCaE6OEaDgKMQ
-UTFROyfhthgzRKbRxfcplMUCzsDNBF2lnPIBDADWML9cbGMrp12CtF9b2P6z9TTT
-74S8iyBOzaSvdGDQY/sUtZXRg21HWamXnn9sSXvIDEINOQ6A9QxdxoqWdCHrOuW3
-ofneYXoG+zeKc4dC86wa1TR2q9vW+RMXSO4uImA+Uzula/6k1DogDf28qhCxMwG/
-i/m9g1c/0aApuDyKdQ1PXsHHNlgd/Dn6rrd5y2AObaifV7wIhEJnvqgFXDN2RXGj
-LeCOHV4Q2WTYPg/S4k1nMXVDwZXrvIsA0YwIMgIT86Rafp1qKlgPNbiIlC1g9RY/
-iFaGN2b4Ir6GDohBQSfZW2+LXoPZuVE/wGlQ01rh827KVZW4lXvqsge+wtnWlszc
-selGATyzqOK9LdHPdZGzROZYI2e8c+paLNDdVPL6vdRBUnkCaEkOtl1mr2JpQi5n
-TU+gTX4IeInC7E+1a9UDF/Y85ybUz8XV8rUnR76UqVC7KidNepdHbZjjXCt8/Zo+
-Tec9JNbYNQB/e9ExmDntmlHEsSEQzFwzj8sxH48AEQEAAcLA9gQYAQoAIBYhBNGm
-bhojsYLJmA94jPv8yCoBXnMwBQJdpZzyAhsMAAoJEPv8yCoBXnMw6f8L/26C34dk
-jBffTzMj5Bdzm8MtF67OYneJ4TQMw7+41IL4rVcSKhIhk/3Ud5knaRtP2ef1+5F6
-6h9/RPQOJ5+tvBwhBAcUWSupKnUrdVaZQanYmtSxcVV2PL9+QEiNN3tzluhaWO//
-rACxJ+K/ZXQlIzwQVTpNhfGzAaMVV9zpf3u0k14itcv6alKY8+rLZvO1wIIeRZLm
-U0tZDD5HtWDvUV7rIFI1WuoLb+KZgbYn3OWjCPHVdTrdZ2CqnZbG3SXw6awH9bzR
-LV9EXkbhIMez0deCVdeo+wFFklh8/5VK2b0vk/+wqMJxfpa1lHvJLobzOP9fvrsw
-sr92MA2+k901WeISR7qEzcI0Fdg8AyFAExaEK6VyjP7SXGLwvfisw34OxuZr3qmx
-1Sufu4toH3XrB7QJN8XyqqbsGxUCBqWif9RSK4xjzRTe56iPeiSJJOIciMP9i2ld
-I+KgLycyeDvGoBj0HCLO3gVaBe4ubVrj5KjhX2PVNEJd3XZRzaXZE2aAMQ==
-=AmgT
------END PGP PUBLIC KEY BLOCK-----`
-
-const rsa2048PrivateKey = `-----BEGIN PGP PRIVATE KEY BLOCK-----
-Comment: gpg (GnuPG) 2.2.27 with libgcrypt 1.9.4
-
-lQPGBGL07P0BCADL0etN8efyAXA6sL2WfQvHe5wEKYXPWeN2+jiqSppfeRZAOlzP
-kZ3U+cloeJriplYvVJwI3ID2aw52Z/TRn8iKRP5eOUFrEgcgl06lazLtOndK7o7p
-oBV5mLtHEirFHm6W61fNt10jzM0jx0PV6nseLhFB2J42F1cmU/aBgFo41wjLSZYr
-owR+v+O9S5sUXblQF6sEDcY01sBEu09zrIgT49VFwQ1Cvdh9XZEOTQBfdiugoj5a
-DS3fAqAka3r1VoQK4eR7/upnYSgSACGeaQ4pUelKku5rpm50gdWTY8ppq0k9e1eT
-y2x0OQcW3hWE+j4os1ca0ZEADMdqr/99MOxrABEBAAH+BwMCJWxU4VOZOJ7/I6vX
-FxdfBhIBEXlJ52FM3S/oYtXqLhkGyrtmZOeEazVvUtuCe3M3ScHI8xCthcmE8E0j
-bi+ZEHPS2NiBZtgHFF27BLn7zZuTc+oD5WKduZdK3463egnyThTqIIMl25WZBuab
-k5ycwYrWwBH0jfA4gwJ13ai4pufKC2RM8qIu6YAVPglYBKFLKGvvJHa5vI+LuA0E
-K+k35hIic7yVUcQneNnAF2598X5yWiieYnOZpmHlRw1zfbMwOJr3ZNj2v94u7b+L
-sTa/1Uv9887Vb6sJp0c2Sh4cwEccoPYkvMqFn3ZrJUr3UdDu1K2vWohPtswzhrYV
-+RdPZE5RLoCQufKvlPezk0Pzhzb3bBU7XjUbdGY1nH/EyQeBNp+Gw6qldKvzcBaB
-cyOK1c6hPSszpJX93m5UxCN55IeifmcNjmbDh8vGCCdajy6d56qV2n4F3k7vt1J1
-0UlxIGhqijJoaTCX66xjLMC6VXkSz6aHQ35rnXosm/cqPcQshsZTdlfSyWkorfdr
-4Hj8viBER26mjYurTMLBKDtUN724ZrR0Ev5jorX9uoKlgl87bDZHty2Ku2S+vR68
-VAvnj6Fi1BYNclnDoqxdRB2z5T9JbWE52HuG83/QsplhEqXxESDxriTyTHMbNxEe
-88soVCDh4tgflZFa2ucUr6gEKJKij7jgahARnyaXfPZlQBUAS1YUeILYmN+VR+M/
-sHENpwDWc7TInn8VN638nJV+ScZGMih3AwWZTIoiLju3MMt1K0YZ3NuiqwGH4Jwg
-/BbEdTWeCci9y3NEQHQ3uZZ5p6j2CwFVlK11idemCMvAiTVxF+gKdaLMkeCwKxru
-J3YzhKEo+iDVYbPYBYizx/EHBn2U5kITQ5SBXzjTaaFMNZJEf9JYsL1ybPB6HOFY
-VNVB2KT8CGVwtCJHb2xhbmcgR29waGVyIDxnb2xhbmdAZXhhbXBsZS5vcmc+iQFO
-BBMBCgA4FiEEC6K7U7f4qesybTnqSkra7gHusm0FAmL07P0CGwMFCwkIBwIGFQoJ
-CAsCBBYCAwECHgECF4AACgkQSkra7gHusm1MvwgAxpClWkeSqIhMQfbiuz0+lOkE
-89y1DCFw8bHjZoUf4/4K8hFA3dGkk+q72XFgiyaCpfXxMt6Gi+dN47t+tTv9NIqC
-sukbaoJBmJDhN6+djmJOgOYy+FWsW2LAk2LOwKYulpnBZdcA5rlMAhBg7gevQpF+
-ruSU69P7UUaFJl/DC7hDmaIcj+4cjBE/HO26SnVQjoTfjZT82rDh1Wsuf8LnkJUk
-b3wezBLpXKjDvdHikdv4gdlR4AputVM38aZntYYglh/EASo5TneyZ7ZscdLNRdcF
-r5O2fKqrOJLOdaoYRFZZWOvP5GtEVFDU7WGivOSVfiszBE0wZR3dgZRJipHCXJ0D
-xgRi9Oz9AQgAtMJcJqLLVANJHl90tWuoizDkm+Imcwq2ubQAjpclnNrODnDK+7o4
-pBsWmXbZSdkC4gY+LhOQA6bPDD0JEHM58DOnrm49BddxXAyK0HPsk4sGGt2SS86B
-OawWNdfJVyqw4bAiHWDmQg4PcjBbt3ocOIxAR6I5kBSiQVxuGQs9T+Zvg3G1r3Or
-fS6DzlgY3HFUML5YsGH4lOxNSOoKAP68GIH/WNdUZ+feiRg9knIib6I3Hgtf5eO8
-JRH7aWE/TD7eNu36bLLjT5TZPq5r6xaD2plbtPOyXbNPWs9qI1yG+VnErfaLY0w8
-Qo0aqzbgID+CTZVomXSOpOcQseaFKw8ZfQARAQAB/gcDArha6+/+d4OY/w9N32K9
-hFNYt4LufTETMQ+k/sBeaMuAVzmT47DlAXzkrZhGW4dZOtXMu1rXaUwHlqkhEyzL
-L4MYEWVXfD+LbZNEK3MEFss6RK+UAMeT/PTV9aA8cXQVPcSJYzfBXHQ1U1hnOgrO
-apn92MN8RmkhX8wJLyeWTMMuP4lXByJMmmGo8WvifeRD2kFY4y0WVBDAXJAV4Ljf
-Di/bBiwoc5a+gxHuZT2W9ZSxBQJNXdt4Un2IlyZuo58s5MLx2N0EaNJ8PwRUE6fM
-RZYO8aZCEPUtINE4njbvsWOMCtrblsMPwZ1B0SiIaWmLaNyGdCNKea+fCIW7kasC
-JYMhnLumpUTXg5HNexkCsl7ABWj0PYBflOE61h8EjWpnQ7JBBVKS2ua4lMjwHRX7
-5o5yxym9k5UZNFdGoXVL7xpizCcdGawxTJvwhs3vBqu1ZWYCegOAZWDrOkCyhUpq
-8uKMROZFbn+FwE+7tjt+v2ed62FVEvD6g4V3ThCA6mQqeOARfJWN8GZY8BDm8lht
-crOXriUkrx+FlrgGtm2CkwjW5/9Xd7AhFpHnQdFeozOHyq1asNSgJF9sNi9Lz94W
-skQSVRi0IExxSXYGI3Y0nnAZUe2BAQflYPJdEveSr3sKlUqXiETTA1VXsTPK3kOC
-92CbLzj/Hz199jZvywwyu53I+GKMpF42rMq7zxr2oa61YWY4YE/GDezwwys/wLx/
-QpCW4X3ppI7wJjCSSqEV0baYZSSli1ayheS6dxi8QnSpX1Bmpz6gU7m/M9Sns+hl
-J7ZvgpjCAiV7KJTjtclr5/S02zP78LTVkoTWoz/6MOTROwaP63VBUXX8pbJhf/vu
-DLmNnDk8joMJxoDXWeNU0EnNl4hP7Z/jExRBOEO4oAnUf/Sf6gCWQhL5qcajtg6w
-tGv7vx3f2IkBNgQYAQoAIBYhBAuiu1O3+KnrMm056kpK2u4B7rJtBQJi9Oz9AhsM
-AAoJEEpK2u4B7rJt6lgIAMBWqP4BCOGnQXBbgJ0+ACVghpkFUXZTb/tXJc8UUvTM
-8uov6k/RsqDGZrvhhufD7Wwt7j9v7dD7VPp7bPyjVWyimglQzWguTUUqLDGlstYH
-5uYv1pzma0ZsAGNqFeGlTLsKOSGKFMH4rB2KfN2n51L8POvtp1y7GKZQbWIWneaB
-cZr3BINU5GMvYYU7pAYcoR+mJPdJx5Up3Ocn+bn8Tu1sy9C/ArtCQucazGnoE9u1
-HhNLrh0CdzzX7TNH6TQ8LwPOvq0K5l/WqbN9lE0WBBhMv2HydxhluO8AhU+A5GqC
-C+wET7nVDnhoOm/fstIeb7/LN7OYejKPeHdFBJEL9GA=
-=u442
------END PGP PRIVATE KEY BLOCK-----`
-
-const curve25519PrivateKey = `-----BEGIN PGP PRIVATE KEY BLOCK-----
-Comment: gpg (GnuPG) 2.2.27 with libgcrypt 1.9.4
-
-lFgEYvTtQBYJKwYBBAHaRw8BAQdAxsNXLbrk5xOjpO24VhOMvQ0/F+JcyIkckMDH
-X3FIGxcAAQDFOlunZWYuPsCx5JLp78vKqUTfgef9TGG4oD6I/Sa0zBMstCJHb2xh
-bmcgR29waGVyIDxnb2xhbmdAZXhhbXBsZS5vcmc+iJAEExYIADgWIQSFQHEOazmo
-h1ldII4MvfnLQ4JBNwUCYvTtQAIbAwULCQgHAgYVCgkICwIEFgIDAQIeAQIXgAAK
-CRAMvfnLQ4JBN5yeAQCKdry8B5ScCPrev2+UByMCss7Sdu5RhomCFsHdNPLcKAEA
-8ugei+1owHsV+3cGwWWzKk6sLa8ZN87i3SKuOGp9DQycXQRi9O1AEgorBgEEAZdV
-AQUBAQdA5CubPp8l7lrVQ25h7Hx5XN2C8xanRnnpcjzEooCaEA0DAQgHAAD/Rpc+
-sOZUXrFk9HOWB1XU41LoWbDBoG8sP8RWAVYwD5AQRYh4BBgWCAAgFiEEhUBxDms5
-qIdZXSCODL35y0OCQTcFAmL07UACGwwACgkQDL35y0OCQTcvdwEA7lb5g/YisrEf
-iq660uwMGoepLUfvtqKzuQ6heYe83y0BAN65Ffg5HYOJzUEi0kZQRf7OhdtuL2kJ
-SRXn8DmCTfEB
-=cELM
------END PGP PRIVATE KEY BLOCK-----`
-
-const curve448PrivateKey = `-----BEGIN PGP PRIVATE KEY BLOCK-----
-Comment: C1DB 65D5 80D7 B922 7254 4B1E A699 9895 FABA CE52
-
-xYUEYV2UmRYDK2VxAc9AFyxgh5xnSbyt50TWl558mw9xdMN+/UBLr5+UMP8IsrvV
-MdXuTIE8CyaUQKSotHtH2RkYEXj5nsMAAAHPQIbTMSzjIWug8UFECzAex5FHgAgH
-gYF3RK+TS8D24wX8kOu2C/NoVxwGY+p+i0JHaB+7yljriSKAGxs6wsBEBB8WCgCD
-BYJhXZSZBYkFpI+9AwsJBwkQppmYlfq6zlJHFAAAAAAAHgAgc2FsdEBub3RhdGlv
-bnMuc2VxdW9pYS1wZ3Aub3Jn5wSpIutJ5HncJWk4ruUV8GzQF390rR5+qWEAnAoY
-akcDFQoIApsBAh4BFiEEwdtl1YDXuSJyVEseppmYlfq6zlIAALzdA5dA/fsgYg/J
-qaQriYKaPUkyHL7EB3BXhV2d1h/gk+qJLvXQuU2WEJ/XSs3GrsBRiiZwvPH4o+7b
-mleAxjy5wpS523vqrrBR2YZ5FwIku7WS4litSdn4AtVam/TlLdMNIf41CtFeZKBe
-c5R5VNdQy8y7qy8AAADNEUN1cnZlNDQ4IE9wdGlvbiA4wsBHBBMWCgCGBYJhXZSZ
-BYkFpI+9AwsJBwkQppmYlfq6zlJHFAAAAAAAHgAgc2FsdEBub3RhdGlvbnMuc2Vx
-dW9pYS1wZ3Aub3JnD55UsYMzE6OACP+mgw5zvT+BBgol8/uFQjHg4krjUCMDFQoI
-ApkBApsBAh4BFiEEwdtl1YDXuSJyVEseppmYlfq6zlIAAPQJA5dA0Xqwzn/0uwCq
-RlsOVCB3f5NOj1exKnlBvRw0xT1VBee1yxvlUt5eIAoCxWoRlWBJob3TTkhm9AEA
-8dyhwPmyGfWHzPw5NFG3xsXrZdNXNvit9WMVAPcmsyR7teXuDlJItxRAdJJc/qfJ
-YVbBFoaNrhYAAADHhQRhXZSZFgMrZXEBz0BL7THZ9MnCLfSPJ1FMLim9eGkQ3Bfn
-M3he5rOwO3t14QI1LjI96OjkeJipMgcFAmEP1Bq/ZHGO7oAAAc9AFnE8iNBaT3OU
-EFtxkmWHXtdaYMmGGRdopw9JPXr/UxuunDln5o9dxPxf7q7z26zXrZen+qed/Isa
-HsDCwSwEGBYKAWsFgmFdlJkFiQWkj70JEKaZmJX6us5SRxQAAAAAAB4AIHNhbHRA
-bm90YXRpb25zLnNlcXVvaWEtcGdwLm9yZxREUizdTcepBzgSMOv2VWQCWbl++3CZ
-EbgAWDryvSsyApsCwDGgBBkWCgBvBYJhXZSZCRBKo3SL4S5djkcUAAAAAAAeACBz
-YWx0QG5vdGF0aW9ucy5zZXF1b2lhLXBncC5vcmemoGTDjmNQiIzw6HOEddvS0OB7
-UZ/P07jM/EVmnYxTlBYhBAxsnkGpx1UCiH6gUUqjdIvhLl2OAAALYQOXQAMB1oKq
-OWxSFmvmgCKNcbAAyA3piF5ERIqs4z07oJvqDYrOWt75UsEIH/04gU/vHc4EmfG2
-JDLJgOLlyTUPkL/08f0ydGZPofFQBhn8HkuFFjnNtJ5oz3GIP4cdWMQFaUw0uvjb
-PM9Tm3ptENGd6Ts1AAAAFiEEwdtl1YDXuSJyVEseppmYlfq6zlIAAGpTA5dATR6i
-U2GrpUcQgpG+JqfAsGmF4yAOhgFxc1UfidFk3nTup3fLgjipkYY170WLRNbyKkVO
-Sodx93GAs58rizO1acDAWiLq3cyEPBFXbyFThbcNPcLl+/77Uk/mgkYrPQFAQWdK
-1kSRm4SizDBK37K8ChAAAADHhwRhXZSZEgMrZW8Bx0DMhzvhQo+OsXeqQ6QVw4sF
-CaexHh6rLohh7TzL3hQSjoJ27fV6JBkIWdn0LfrMlJIDbSv2SLdlgQMBCgkAAcdA
-MO7Dc1myF6Co1fAH+EuP+OxhxP/7V6ljuSCZENDfA49tQkzTta+PniG+pOVB2LHb
-huyaKBkqiaogo8LAOQQYFgoAeAWCYV2UmQWJBaSPvQkQppmYlfq6zlJHFAAAAAAA
-HgAgc2FsdEBub3RhdGlvbnMuc2VxdW9pYS1wZ3Aub3JnEjBMQAmc/2u45u5FQGmB
-QAytjSG2LM3JQN+PPVl5vEkCmwwWIQTB22XVgNe5InJUSx6mmZiV+rrOUgAASdYD
-l0DXEHQ9ykNP2rZP35ET1dmiFagFtTj/hLQcWlg16LqvJNGqOgYXuqTerbiOOt02
-XLCBln+wdewpU4ChEffMUDRBfqfQco/YsMqWV7bHJHAO0eC/DMKCjyU90xdH7R/d
-QgqsfguR1PqPuJxpXV4bSr6CGAAAAA==
-=MSvh
------END PGP PRIVATE KEY BLOCK-----`
-
-const keyWithNotation = `-----BEGIN PGP PRIVATE KEY BLOCK-----
-
-xVgEY9gIshYJKwYBBAHaRw8BAQdAF25fSM8OpFlXZhop4Qpqo5ywGZ4jgWlR
-ppjhIKDthREAAQC+LFpzFcMJYcjxGKzBGHN0Px2jU4d04YSRnFAik+lVVQ6u
-zRdUZXN0IDx0ZXN0QGV4YW1wbGUuY29tPsLACgQQFgoAfAUCY9gIsgQLCQcI
-CRD/utJOCym8pR0UgAAAAAAQAAR0ZXh0QGV4YW1wbGUuY29tdGVzdB8UAAAA
-AAASAARiaW5hcnlAZXhhbXBsZS5jb20AAQIDAxUICgQWAAIBAhkBAhsDAh4B
-FiEEEMCQTUVGKgCX5rDQ/7rSTgspvKUAAPl5AP9Npz90LxzrB97Qr2DrGwfG
-wuYn4FSYwtuPfZHHeoIabwD/QEbvpQJ/NBb9EAZuow4Rirlt1yv19mmnF+j5
-8yUzhQjHXQRj2AiyEgorBgEEAZdVAQUBAQdARXAo30DmKcyUg6co7OUm0RNT
-z9iqFbDBzA8A47JEt1MDAQgHAAD/XKK3lBm0SqMR558HLWdBrNG6NqKuqb5X
-joCML987ZNgRD8J4BBgWCAAqBQJj2AiyCRD/utJOCym8pQIbDBYhBBDAkE1F
-RioAl+aw0P+60k4LKbylAADRxgEAg7UfBDiDPp5LHcW9D+SgFHk6+GyEU4ev
-VppQxdtxPvAA/34snHBX7Twnip1nMt7P4e2hDiw/hwQ7oqioOvc6jMkP
-=Z8YJ
------END PGP PRIVATE KEY BLOCK-----
-`
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_config.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_config.go
deleted file mode 100644
index fec41a0e..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_config.go
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright (C) 2019 ProtonTech AG
-
-package packet
-
-import "math/bits"
-
-// CipherSuite contains a combination of Cipher and Mode
-type CipherSuite struct {
- // The cipher function
- Cipher CipherFunction
- // The AEAD mode of operation.
- Mode AEADMode
-}
-
-// AEADConfig collects a number of AEAD parameters along with sensible defaults.
-// A nil AEADConfig is valid and results in all default values.
-type AEADConfig struct {
- // The AEAD mode of operation.
- DefaultMode AEADMode
- // Amount of octets in each chunk of data
- ChunkSize uint64
-}
-
-// Mode returns the AEAD mode of operation.
-func (conf *AEADConfig) Mode() AEADMode {
- // If no preference is specified, OCB is used (which is mandatory to implement).
- if conf == nil || conf.DefaultMode == 0 {
- return AEADModeOCB
- }
-
- mode := conf.DefaultMode
- if mode != AEADModeEAX && mode != AEADModeOCB && mode != AEADModeGCM {
- panic("AEAD mode unsupported")
- }
- return mode
-}
-
-// ChunkSizeByte returns the byte indicating the chunk size. The effective
-// chunk size is computed with the formula uint64(1) << (chunkSizeByte + 6)
-// limit to 16 = 4 MiB
-// https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#section-5.13.2
-func (conf *AEADConfig) ChunkSizeByte() byte {
- if conf == nil || conf.ChunkSize == 0 {
- return 12 // 1 << (12 + 6) == 262144 bytes
- }
-
- chunkSize := conf.ChunkSize
- exponent := bits.Len64(chunkSize) - 1
- switch {
- case exponent < 6:
- exponent = 6
- case exponent > 16:
- exponent = 16
- }
-
- return byte(exponent - 6)
-}
-
-// decodeAEADChunkSize returns the effective chunk size. On 32-bit systems, the
-// maximum returned value is 1 << 30.
-func decodeAEADChunkSize(c byte) int {
- size := uint64(1 << (c + 6))
- if size != uint64(int(size)) {
- return 1 << 30
- }
- return int(size)
-}
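
The deleted `ChunkSizeByte`/`decodeAEADChunkSize` pair above is a small log2 encoding: the wire byte `c` selects an effective chunk size of `1 << (c + 6)`. A standalone sketch of that round trip, using only the formula from the deleted comments (the function name `encodeChunkSize` is mine, not the package's):

```go
package main

import (
	"fmt"
	"math/bits"
)

// encodeChunkSize mirrors AEADConfig.ChunkSizeByte above: the wire byte is
// floor(log2(size)) - 6, clamped to the supported range; a size of 0
// selects the 262144-byte default.
func encodeChunkSize(size uint64) byte {
	if size == 0 {
		return 12 // 1 << (12 + 6) == 262144 bytes
	}
	exponent := bits.Len64(size) - 1
	if exponent < 6 {
		exponent = 6
	} else if exponent > 16 {
		exponent = 16
	}
	return byte(exponent - 6)
}

func main() {
	for _, size := range []uint64{0, 64, 1024, 65536} {
		b := encodeChunkSize(size)
		fmt.Printf("size=%-6d byte=%-2d effective=%d\n", size, b, uint64(1)<<(b+6))
	}
}
```
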
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_crypter.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_crypter.go
deleted file mode 100644
index a82b040b..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_crypter.go
+++ /dev/null
@@ -1,265 +0,0 @@
-// Copyright (C) 2019 ProtonTech AG
-
-package packet
-
-import (
- "bytes"
- "crypto/cipher"
- "encoding/binary"
- "io"
-
- "github.com/ProtonMail/go-crypto/openpgp/errors"
-)
-
-// aeadCrypter is an AEAD opener/sealer, its configuration, and data for en/decryption.
-type aeadCrypter struct {
- aead cipher.AEAD
- chunkSize int
- initialNonce []byte
- associatedData []byte // Chunk-independent associated data
- chunkIndex []byte // Chunk counter
- packetTag packetType
- bytesProcessed int // Amount of plaintext bytes encrypted/decrypted
- buffer bytes.Buffer // Buffered bytes across chunks
-}
-
-// computeNonce takes the incremental index and computes an eXclusive OR with
-// the least significant 8 bytes of the receivers' initial nonce (see sec.
-// 5.16.1 and 5.16.2). It returns the resulting nonce.
-func (wo *aeadCrypter) computeNextNonce() (nonce []byte) {
- if wo.packetTag == packetTypeSymmetricallyEncryptedIntegrityProtected {
- return append(wo.initialNonce, wo.chunkIndex...)
- }
-
- nonce = make([]byte, len(wo.initialNonce))
- copy(nonce, wo.initialNonce)
- offset := len(wo.initialNonce) - 8
- for i := 0; i < 8; i++ {
- nonce[i+offset] ^= wo.chunkIndex[i]
- }
- return
-}
-
-// incrementIndex performs an integer increment by 1 of the integer represented by the
-// slice, modifying it accordingly.
-func (wo *aeadCrypter) incrementIndex() error {
- index := wo.chunkIndex
- if len(index) == 0 {
- return errors.AEADError("Index has length 0")
- }
- for i := len(index) - 1; i >= 0; i-- {
- if index[i] < 255 {
- index[i]++
- return nil
- }
- index[i] = 0
- }
- return errors.AEADError("cannot further increment index")
-}
-
-// aeadDecrypter reads and decrypts bytes. It buffers extra decrypted bytes when
-// necessary, similar to aeadEncrypter.
-type aeadDecrypter struct {
- aeadCrypter // Embedded ciphertext opener
- reader io.Reader // 'reader' is a partialLengthReader
- peekedBytes []byte // Used to detect last chunk
- eof bool
-}
-
-// Read decrypts bytes and reads them into dst. It decrypts when necessary and
-// buffers extra decrypted bytes. It returns the number of bytes copied into dst
-// and an error.
-func (ar *aeadDecrypter) Read(dst []byte) (n int, err error) {
- // Return buffered plaintext bytes from previous calls
- if ar.buffer.Len() > 0 {
- return ar.buffer.Read(dst)
- }
-
- // Return EOF if we've previously validated the final tag
- if ar.eof {
- return 0, io.EOF
- }
-
- // Read a chunk
- tagLen := ar.aead.Overhead()
- cipherChunkBuf := new(bytes.Buffer)
- _, errRead := io.CopyN(cipherChunkBuf, ar.reader, int64(ar.chunkSize + tagLen))
- cipherChunk := cipherChunkBuf.Bytes()
- if errRead != nil && errRead != io.EOF {
- return 0, errRead
- }
- decrypted, errChunk := ar.openChunk(cipherChunk)
- if errChunk != nil {
- return 0, errChunk
- }
-
- // Return decrypted bytes, buffering if necessary
- if len(dst) < len(decrypted) {
- n = copy(dst, decrypted[:len(dst)])
- ar.buffer.Write(decrypted[len(dst):])
- } else {
- n = copy(dst, decrypted)
- }
-
- // Check final authentication tag
- if errRead == io.EOF {
- errChunk := ar.validateFinalTag(ar.peekedBytes)
- if errChunk != nil {
- return n, errChunk
- }
- ar.eof = true // Mark EOF for when we've returned all buffered data
- }
- return
-}
-
-// Close is a no-op. The final authentication tag of the stream was already
-// checked in the last Read call. In the future, this function could be used to
-// wipe the reader and peeked, decrypted bytes, if necessary.
-func (ar *aeadDecrypter) Close() (err error) {
- return nil
-}
-
-// openChunk decrypts and checks integrity of an encrypted chunk, returning
-// the underlying plaintext and an error. It accesses peeked bytes from next
-// chunk, to identify the last chunk and decrypt/validate accordingly.
-func (ar *aeadDecrypter) openChunk(data []byte) ([]byte, error) {
- tagLen := ar.aead.Overhead()
- // Restore carried bytes from last call
- chunkExtra := append(ar.peekedBytes, data...)
- // 'chunk' contains encrypted bytes, followed by an authentication tag.
- chunk := chunkExtra[:len(chunkExtra)-tagLen]
- ar.peekedBytes = chunkExtra[len(chunkExtra)-tagLen:]
-
- adata := ar.associatedData
- if ar.aeadCrypter.packetTag == packetTypeAEADEncrypted {
- adata = append(ar.associatedData, ar.chunkIndex...)
- }
-
- nonce := ar.computeNextNonce()
- plainChunk, err := ar.aead.Open(nil, nonce, chunk, adata)
- if err != nil {
- return nil, err
- }
- ar.bytesProcessed += len(plainChunk)
- if err = ar.aeadCrypter.incrementIndex(); err != nil {
- return nil, err
- }
- return plainChunk, nil
-}
-
-// Checks the summary tag. It takes into account the total decrypted bytes into
-// the associated data. It returns an error, or nil if the tag is valid.
-func (ar *aeadDecrypter) validateFinalTag(tag []byte) error {
- // Associated: tag, version, cipher, aead, chunk size, ...
- amountBytes := make([]byte, 8)
- binary.BigEndian.PutUint64(amountBytes, uint64(ar.bytesProcessed))
-
- adata := ar.associatedData
- if ar.aeadCrypter.packetTag == packetTypeAEADEncrypted {
- // ... index ...
- adata = append(ar.associatedData, ar.chunkIndex...)
- }
-
- // ... and total number of encrypted octets
- adata = append(adata, amountBytes...)
- nonce := ar.computeNextNonce()
- _, err := ar.aead.Open(nil, nonce, tag, adata)
- if err != nil {
- return err
- }
- return nil
-}
-
-// aeadEncrypter encrypts and writes bytes. It encrypts when necessary according
-// to the AEAD block size, and buffers the extra encrypted bytes for next write.
-type aeadEncrypter struct {
- aeadCrypter // Embedded plaintext sealer
- writer io.WriteCloser // 'writer' is a partialLengthWriter
-}
-
-
-// Write encrypts and writes bytes. It encrypts when necessary and buffers extra
-// plaintext bytes for next call. When the stream is finished, Close() MUST be
-// called to append the final tag.
-func (aw *aeadEncrypter) Write(plaintextBytes []byte) (n int, err error) {
- // Append plaintextBytes to existing buffered bytes
- n, err = aw.buffer.Write(plaintextBytes)
- if err != nil {
- return n, err
- }
- // Encrypt and write chunks
- for aw.buffer.Len() >= aw.chunkSize {
- plainChunk := aw.buffer.Next(aw.chunkSize)
- encryptedChunk, err := aw.sealChunk(plainChunk)
- if err != nil {
- return n, err
- }
- _, err = aw.writer.Write(encryptedChunk)
- if err != nil {
- return n, err
- }
- }
- return
-}
-
-// Close encrypts and writes the remaining buffered plaintext if any, appends
-// the final authentication tag, and closes the embedded writer. This function
-// MUST be called at the end of a stream.
-func (aw *aeadEncrypter) Close() (err error) {
- // Encrypt and write a chunk if there's buffered data left, or if we haven't
- // written any chunks yet.
- if aw.buffer.Len() > 0 || aw.bytesProcessed == 0 {
- plainChunk := aw.buffer.Bytes()
- lastEncryptedChunk, err := aw.sealChunk(plainChunk)
- if err != nil {
- return err
- }
- _, err = aw.writer.Write(lastEncryptedChunk)
- if err != nil {
- return err
- }
- }
- // Compute final tag (associated data: packet tag, version, cipher, aead,
- // chunk size...
- adata := aw.associatedData
-
- if aw.aeadCrypter.packetTag == packetTypeAEADEncrypted {
- // ... index ...
- adata = append(aw.associatedData, aw.chunkIndex...)
- }
-
- // ... and total number of encrypted octets
- amountBytes := make([]byte, 8)
- binary.BigEndian.PutUint64(amountBytes, uint64(aw.bytesProcessed))
- adata = append(adata, amountBytes...)
-
- nonce := aw.computeNextNonce()
- finalTag := aw.aead.Seal(nil, nonce, nil, adata)
- _, err = aw.writer.Write(finalTag)
- if err != nil {
- return err
- }
- return aw.writer.Close()
-}
-
-// sealChunk encrypts and authenticates the given chunk.
-func (aw *aeadEncrypter) sealChunk(data []byte) ([]byte, error) {
- if len(data) > aw.chunkSize {
- return nil, errors.AEADError("chunk exceeds maximum length")
- }
- if aw.associatedData == nil {
- return nil, errors.AEADError("can't seal without headers")
- }
- adata := aw.associatedData
- if aw.aeadCrypter.packetTag == packetTypeAEADEncrypted {
- adata = append(aw.associatedData, aw.chunkIndex...)
- }
-
- nonce := aw.computeNextNonce()
- encrypted := aw.aead.Seal(nil, nonce, data, adata)
- aw.bytesProcessed += len(data)
- if err := aw.aeadCrypter.incrementIndex(); err != nil {
- return nil, err
- }
- return encrypted, nil
-}
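
Two details of the deleted crypter are easy to miss in the full listing: the per-chunk nonce is the initial nonce with the 8-byte chunk counter XORed into its trailing bytes (the non-SEIPD branch of `computeNextNonce`), and that counter is a plain big-endian increment with carry. A self-contained sketch under those assumptions (helper names are illustrative):

```go
package main

import (
	"errors"
	"fmt"
)

// nextNonce mirrors computeNextNonce above: XOR the 8-byte chunk counter
// into the last 8 bytes of the initial nonce.
func nextNonce(initial, chunkIndex []byte) []byte {
	nonce := make([]byte, len(initial))
	copy(nonce, initial)
	offset := len(initial) - 8
	for i := 0; i < 8; i++ {
		nonce[i+offset] ^= chunkIndex[i]
	}
	return nonce
}

// increment mirrors incrementIndex above: a big-endian +1 with carry.
func increment(index []byte) error {
	for i := len(index) - 1; i >= 0; i-- {
		if index[i] < 255 {
			index[i]++
			return nil
		}
		index[i] = 0
	}
	return errors.New("cannot further increment index")
}

func main() {
	iv := []byte{0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf}
	idx := make([]byte, 8)
	for chunk := 0; chunk < 3; chunk++ {
		fmt.Printf("chunk %d nonce: %x\n", chunk, nextNonce(iv, idx))
		increment(idx)
	}
}
```
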
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_encrypted.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_encrypted.go
deleted file mode 100644
index 98bd876b..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_encrypted.go
+++ /dev/null
@@ -1,96 +0,0 @@
-// Copyright (C) 2019 ProtonTech AG
-
-package packet
-
-import (
- "io"
-
- "github.com/ProtonMail/go-crypto/openpgp/errors"
- "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm"
-)
-
-// AEADEncrypted represents an AEAD Encrypted Packet.
-// See https://www.ietf.org/archive/id/draft-koch-openpgp-2015-rfc4880bis-00.html#name-aead-encrypted-data-packet-t
-type AEADEncrypted struct {
- cipher CipherFunction
- mode AEADMode
- chunkSizeByte byte
- Contents io.Reader // Encrypted chunks and tags
- initialNonce []byte // Referred to as IV in RFC4880-bis
-}
-
-// Only currently defined version
-const aeadEncryptedVersion = 1
-
-func (ae *AEADEncrypted) parse(buf io.Reader) error {
- headerData := make([]byte, 4)
- if n, err := io.ReadFull(buf, headerData); n < 4 {
- return errors.AEADError("could not read aead header:" + err.Error())
- }
- // Read initial nonce
- mode := AEADMode(headerData[2])
- nonceLen := mode.IvLength()
-
- // This packet supports only EAX and OCB
- // https://www.ietf.org/archive/id/draft-koch-openpgp-2015-rfc4880bis-00.html#name-aead-encrypted-data-packet-t
- if nonceLen == 0 || mode > AEADModeOCB {
- return errors.AEADError("unknown mode")
- }
-
- initialNonce := make([]byte, nonceLen)
- if n, err := io.ReadFull(buf, initialNonce); n < nonceLen {
- return errors.AEADError("could not read aead nonce:" + err.Error())
- }
- ae.Contents = buf
- ae.initialNonce = initialNonce
- c := headerData[1]
- if _, ok := algorithm.CipherById[c]; !ok {
- return errors.UnsupportedError("unknown cipher: " + string(c))
- }
- ae.cipher = CipherFunction(c)
- ae.mode = mode
- ae.chunkSizeByte = headerData[3]
- return nil
-}
-
-// Decrypt returns a io.ReadCloser from which decrypted bytes can be read, or
-// an error.
-func (ae *AEADEncrypted) Decrypt(ciph CipherFunction, key []byte) (io.ReadCloser, error) {
- return ae.decrypt(key)
-}
-
-// decrypt prepares an aeadCrypter and returns a ReadCloser from which
-// decrypted bytes can be read (see aeadDecrypter.Read()).
-func (ae *AEADEncrypted) decrypt(key []byte) (io.ReadCloser, error) {
- blockCipher := ae.cipher.new(key)
- aead := ae.mode.new(blockCipher)
- // Carry the first tagLen bytes
- tagLen := ae.mode.TagLength()
- peekedBytes := make([]byte, tagLen)
- n, err := io.ReadFull(ae.Contents, peekedBytes)
- if n < tagLen || (err != nil && err != io.EOF) {
- return nil, errors.AEADError("Not enough data to decrypt:" + err.Error())
- }
- chunkSize := decodeAEADChunkSize(ae.chunkSizeByte)
- return &aeadDecrypter{
- aeadCrypter: aeadCrypter{
- aead: aead,
- chunkSize: chunkSize,
- initialNonce: ae.initialNonce,
- associatedData: ae.associatedData(),
- chunkIndex: make([]byte, 8),
- packetTag: packetTypeAEADEncrypted,
- },
- reader: ae.Contents,
- peekedBytes: peekedBytes}, nil
-}
-
-// associatedData for chunks: tag, version, cipher, mode, chunk size byte
-func (ae *AEADEncrypted) associatedData() []byte {
- return []byte{
- 0xD4,
- aeadEncryptedVersion,
- byte(ae.cipher),
- byte(ae.mode),
- ae.chunkSizeByte}
-}
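
The five octets of associated data built by `associatedData` above are just the packet's own header fields: the 0xD4 packet tag, the version, and the cipher, mode, and chunk-size bytes. A minimal sketch; the numeric IDs in `main` (9 = AES-256, 2 = OCB) follow RFC 4880/4880bis and are given here for illustration:

```go
package main

import "fmt"

// buildAssociatedData mirrors AEADEncrypted.associatedData above.
func buildAssociatedData(cipher, mode, chunkSizeByte byte) []byte {
	const aeadEncryptedVersion = 1
	return []byte{
		0xD4, // packet tag of the AEAD Encrypted Data packet
		aeadEncryptedVersion,
		cipher,
		mode,
		chunkSizeByte,
	}
}

func main() {
	fmt.Printf("% x\n", buildAssociatedData(9, 2, 12)) // d4 01 09 02 0c
}
```
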
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/compressed.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/compressed.go
deleted file mode 100644
index 2f5cad71..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/compressed.go
+++ /dev/null
@@ -1,125 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package packet
-
-import (
- "compress/bzip2"
- "compress/flate"
- "compress/zlib"
- "github.com/ProtonMail/go-crypto/openpgp/errors"
- "io"
- "strconv"
-)
-
-// Compressed represents a compressed OpenPGP packet. The decompressed contents
-// will contain more OpenPGP packets. See RFC 4880, section 5.6.
-type Compressed struct {
- Body io.Reader
-}
-
-const (
- NoCompression = flate.NoCompression
- BestSpeed = flate.BestSpeed
- BestCompression = flate.BestCompression
- DefaultCompression = flate.DefaultCompression
-)
-
-// CompressionConfig contains compressor configuration settings.
-type CompressionConfig struct {
- // Level is the compression level to use. It must be set to
- // between -1 and 9, with -1 causing the compressor to use the
- // default compression level, 0 causing the compressor to use
- // no compression and 1 to 9 representing increasing (better,
- // slower) compression levels. If Level is less than -1 or
-	// more than 9, a non-nil error will be returned during
- // encryption. See the constants above for convenient common
- // settings for Level.
- Level int
-}
-
-func (c *Compressed) parse(r io.Reader) error {
- var buf [1]byte
- _, err := readFull(r, buf[:])
- if err != nil {
- return err
- }
-
- switch buf[0] {
- case 0:
- c.Body = r
- case 1:
- c.Body = flate.NewReader(r)
- case 2:
- c.Body, err = zlib.NewReader(r)
- case 3:
- c.Body = bzip2.NewReader(r)
- default:
- err = errors.UnsupportedError("unknown compression algorithm: " + strconv.Itoa(int(buf[0])))
- }
-
- return err
-}
-
-// compressedWriteCloser represents the serialized compression stream
-// header and the compressor. Its Close() method ensures that both the
-// compressor and serialized stream header are closed. Its Write()
-// method writes to the compressor.
-type compressedWriteCloser struct {
- sh io.Closer // Stream Header
- c io.WriteCloser // Compressor
-}
-
-func (cwc compressedWriteCloser) Write(p []byte) (int, error) {
- return cwc.c.Write(p)
-}
-
-func (cwc compressedWriteCloser) Close() (err error) {
- err = cwc.c.Close()
- if err != nil {
- return err
- }
-
- return cwc.sh.Close()
-}
-
-// SerializeCompressed serializes a compressed data packet to w and
-// returns a WriteCloser to which the literal data packets themselves
-// can be written and which MUST be closed on completion. If cc is
-// nil, sensible defaults will be used to configure the compression
-// algorithm.
-func SerializeCompressed(w io.WriteCloser, algo CompressionAlgo, cc *CompressionConfig) (literaldata io.WriteCloser, err error) {
- compressed, err := serializeStreamHeader(w, packetTypeCompressed)
- if err != nil {
- return
- }
-
- _, err = compressed.Write([]byte{uint8(algo)})
- if err != nil {
- return
- }
-
- level := DefaultCompression
- if cc != nil {
- level = cc.Level
- }
-
- var compressor io.WriteCloser
- switch algo {
- case CompressionZIP:
- compressor, err = flate.NewWriter(compressed, level)
- case CompressionZLIB:
- compressor, err = zlib.NewWriterLevel(compressed, level)
- default:
- s := strconv.Itoa(int(algo))
- err = errors.UnsupportedError("Unsupported compression algorithm: " + s)
- }
- if err != nil {
- return
- }
-
- literaldata = compressedWriteCloser{compressed, compressor}
-
- return
-}
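
`Compressed.parse` above dispatches on a single algorithm octet at the start of the packet body. A self-contained sketch of the same dispatch using only the standard library (the helper name `newDecompressor` is mine):

```go
package main

import (
	"bytes"
	"compress/bzip2"
	"compress/flate"
	"compress/zlib"
	"fmt"
	"io"
)

// newDecompressor mirrors Compressed.parse above: the first octet of the
// packet body selects the algorithm for the rest of the stream.
func newDecompressor(algo byte, r io.Reader) (io.Reader, error) {
	switch algo {
	case 0:
		return r, nil // uncompressed
	case 1:
		return flate.NewReader(r), nil // ZIP
	case 2:
		return zlib.NewReader(r)
	case 3:
		return bzip2.NewReader(r), nil
	default:
		return nil, fmt.Errorf("unknown compression algorithm %d", algo)
	}
}

func main() {
	var buf bytes.Buffer
	w := zlib.NewWriter(&buf)
	w.Write([]byte("hello"))
	w.Close()

	r, err := newDecompressor(2, &buf)
	if err != nil {
		panic(err)
	}
	out, _ := io.ReadAll(r)
	fmt.Println(string(out)) // hello
}
```

Note that, as in the deleted code, bzip2 is read-only: the standard library offers no bzip2 compressor, which is why `SerializeCompressed` supports only ZIP and ZLIB.
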
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/config.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/config.go
deleted file mode 100644
index 82ae5399..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/config.go
+++ /dev/null
@@ -1,224 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package packet
-
-import (
- "crypto"
- "crypto/rand"
- "io"
- "math/big"
- "time"
-)
-
-// Config collects a number of parameters along with sensible defaults.
-// A nil *Config is valid and results in all default values.
-type Config struct {
- // Rand provides the source of entropy.
- // If nil, the crypto/rand Reader is used.
- Rand io.Reader
- // DefaultHash is the default hash function to be used.
- // If zero, SHA-256 is used.
- DefaultHash crypto.Hash
- // DefaultCipher is the cipher to be used.
- // If zero, AES-128 is used.
- DefaultCipher CipherFunction
- // Time returns the current time as the number of seconds since the
- // epoch. If Time is nil, time.Now is used.
- Time func() time.Time
- // DefaultCompressionAlgo is the compression algorithm to be
- // applied to the plaintext before encryption. If zero, no
- // compression is done.
- DefaultCompressionAlgo CompressionAlgo
- // CompressionConfig configures the compression settings.
- CompressionConfig *CompressionConfig
- // S2KCount is only used for symmetric encryption. It
- // determines the strength of the passphrase stretching when
- // the said passphrase is hashed to produce a key. S2KCount
- // should be between 1024 and 65011712, inclusive. If Config
-	// is nil or S2KCount is 0, the value 65536 is used. Not all
- // values in the above range can be represented. S2KCount will
- // be rounded up to the next representable value if it cannot
-	// be encoded exactly. When set, it is strongly encouraged to
- // use a value that is at least 65536. See RFC 4880 Section
- // 3.7.1.3.
- S2KCount int
- // RSABits is the number of bits in new RSA keys made with NewEntity.
- // If zero, then 2048 bit keys are created.
- RSABits int
- // The public key algorithm to use - will always create a signing primary
- // key and encryption subkey.
- Algorithm PublicKeyAlgorithm
- // Some known primes that are optionally prepopulated by the caller
- RSAPrimes []*big.Int
- // Curve configures the desired packet.Curve if the Algorithm is PubKeyAlgoECDSA,
- // PubKeyAlgoEdDSA, or PubKeyAlgoECDH. If empty Curve25519 is used.
- Curve Curve
- // AEADConfig configures the use of the new AEAD Encrypted Data Packet,
- // defined in the draft of the next version of the OpenPGP specification.
- // If a non-nil AEADConfig is passed, usage of this packet is enabled. By
- // default, it is disabled. See the documentation of AEADConfig for more
- // configuration options related to AEAD.
- // **Note: using this option may break compatibility with other OpenPGP
- // implementations, as well as future versions of this library.**
- AEADConfig *AEADConfig
- // V5Keys configures version 5 key generation. If false, this package still
- // supports version 5 keys, but produces version 4 keys.
- V5Keys bool
- // "The validity period of the key. This is the number of seconds after
- // the key creation time that the key expires. If this is not present
- // or has a value of zero, the key never expires. This is found only on
- // a self-signature.""
- // https://tools.ietf.org/html/rfc4880#section-5.2.3.6
- KeyLifetimeSecs uint32
- // "The validity period of the signature. This is the number of seconds
- // after the signature creation time that the signature expires. If
- // this is not present or has a value of zero, it never expires."
- // https://tools.ietf.org/html/rfc4880#section-5.2.3.10
- SigLifetimeSecs uint32
- // SigningKeyId is used to specify the signing key to use (by Key ID).
- // By default, the signing key is selected automatically, preferring
- // signing subkeys if available.
- SigningKeyId uint64
- // SigningIdentity is used to specify a user ID (packet Signer's User ID, type 28)
- // when producing a generic certification signature onto an existing user ID.
- // The identity must be present in the signer Entity.
- SigningIdentity string
- // InsecureAllowUnauthenticatedMessages controls, whether it is tolerated to read
- // encrypted messages without Modification Detection Code (MDC).
- // MDC is mandated by the IETF OpenPGP Crypto Refresh draft and has long been implemented
- // in most OpenPGP implementations. Messages without MDC are considered unnecessarily
- // insecure and should be prevented whenever possible.
- // In case one needs to deal with messages from very old OpenPGP implementations, there
-	// might be no other way than to tolerate the missing MDC. Setting this flag allows this
- // mode of operation. It should be considered a measure of last resort.
- InsecureAllowUnauthenticatedMessages bool
- // KnownNotations is a map of Notation Data names to bools, which controls
- // the notation names that are allowed to be present in critical Notation Data
- // signature subpackets.
- KnownNotations map[string]bool
- // SignatureNotations is a list of Notations to be added to any signatures.
- SignatureNotations []*Notation
-}
-
-func (c *Config) Random() io.Reader {
- if c == nil || c.Rand == nil {
- return rand.Reader
- }
- return c.Rand
-}
-
-func (c *Config) Hash() crypto.Hash {
- if c == nil || uint(c.DefaultHash) == 0 {
- return crypto.SHA256
- }
- return c.DefaultHash
-}
-
-func (c *Config) Cipher() CipherFunction {
- if c == nil || uint8(c.DefaultCipher) == 0 {
- return CipherAES128
- }
- return c.DefaultCipher
-}
-
-func (c *Config) Now() time.Time {
- if c == nil || c.Time == nil {
- return time.Now()
- }
- return c.Time()
-}
-
-// KeyLifetime returns the validity period of the key.
-func (c *Config) KeyLifetime() uint32 {
- if c == nil {
- return 0
- }
- return c.KeyLifetimeSecs
-}
-
-// SigLifetime returns the validity period of the signature.
-func (c *Config) SigLifetime() uint32 {
- if c == nil {
- return 0
- }
- return c.SigLifetimeSecs
-}
-
-func (c *Config) Compression() CompressionAlgo {
- if c == nil {
- return CompressionNone
- }
- return c.DefaultCompressionAlgo
-}
-
-func (c *Config) PasswordHashIterations() int {
- if c == nil || c.S2KCount == 0 {
- return 0
- }
- return c.S2KCount
-}
-
-func (c *Config) RSAModulusBits() int {
- if c == nil || c.RSABits == 0 {
- return 2048
- }
- return c.RSABits
-}
-
-func (c *Config) PublicKeyAlgorithm() PublicKeyAlgorithm {
- if c == nil || c.Algorithm == 0 {
- return PubKeyAlgoRSA
- }
- return c.Algorithm
-}
-
-func (c *Config) CurveName() Curve {
- if c == nil || c.Curve == "" {
- return Curve25519
- }
- return c.Curve
-}
-
-func (c *Config) AEAD() *AEADConfig {
- if c == nil {
- return nil
- }
- return c.AEADConfig
-}
-
-func (c *Config) SigningKey() uint64 {
- if c == nil {
- return 0
- }
- return c.SigningKeyId
-}
-
-func (c *Config) SigningUserId() string {
- if c == nil {
- return ""
- }
- return c.SigningIdentity
-}
-
-func (c *Config) AllowUnauthenticatedMessages() bool {
- if c == nil {
- return false
- }
- return c.InsecureAllowUnauthenticatedMessages
-}
-
-func (c *Config) KnownNotation(notationName string) bool {
- if c == nil {
- return false
- }
- return c.KnownNotations[notationName]
-}
-
-func (c *Config) Notations() []*Notation {
- if c == nil {
- return nil
- }
- return c.SignatureNotations
-}
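
Every accessor in the deleted `Config` follows the same nil-receiver idiom, which is what makes the documented claim "a nil *Config is valid and results in all default values" work. A minimal sketch of the pattern with a single field (a stand-in type, not the package's):

```go
package main

import (
	"fmt"
	"time"
)

// Config stands in for the deleted packet.Config; one field is enough to
// show the idiom.
type Config struct {
	Time func() time.Time
}

// Now is safe to call on a nil receiver and falls back to time.Now,
// exactly like the accessors above.
func (c *Config) Now() time.Time {
	if c == nil || c.Time == nil {
		return time.Now()
	}
	return c.Time()
}

func main() {
	var c *Config // nil is a valid, all-defaults configuration
	fmt.Println(c.Now().IsZero()) // false: the default was applied
}
```
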
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/encrypted_key.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/encrypted_key.go
deleted file mode 100644
index eeff2902..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/encrypted_key.go
+++ /dev/null
@@ -1,286 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package packet
-
-import (
- "crypto"
- "crypto/rsa"
- "encoding/binary"
- "io"
- "math/big"
- "strconv"
-
- "github.com/ProtonMail/go-crypto/openpgp/ecdh"
- "github.com/ProtonMail/go-crypto/openpgp/elgamal"
- "github.com/ProtonMail/go-crypto/openpgp/errors"
- "github.com/ProtonMail/go-crypto/openpgp/internal/encoding"
-)
-
-const encryptedKeyVersion = 3
-
-// EncryptedKey represents a public-key encrypted session key. See RFC 4880,
-// section 5.1.
-type EncryptedKey struct {
- KeyId uint64
- Algo PublicKeyAlgorithm
- CipherFunc CipherFunction // only valid after a successful Decrypt for a v3 packet
- Key []byte // only valid after a successful Decrypt
-
- encryptedMPI1, encryptedMPI2 encoding.Field
-}
-
-func (e *EncryptedKey) parse(r io.Reader) (err error) {
- var buf [10]byte
- _, err = readFull(r, buf[:])
- if err != nil {
- return
- }
- if buf[0] != encryptedKeyVersion {
- return errors.UnsupportedError("unknown EncryptedKey version " + strconv.Itoa(int(buf[0])))
- }
- e.KeyId = binary.BigEndian.Uint64(buf[1:9])
- e.Algo = PublicKeyAlgorithm(buf[9])
- switch e.Algo {
- case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
- e.encryptedMPI1 = new(encoding.MPI)
- if _, err = e.encryptedMPI1.ReadFrom(r); err != nil {
- return
- }
- case PubKeyAlgoElGamal:
- e.encryptedMPI1 = new(encoding.MPI)
- if _, err = e.encryptedMPI1.ReadFrom(r); err != nil {
- return
- }
-
- e.encryptedMPI2 = new(encoding.MPI)
- if _, err = e.encryptedMPI2.ReadFrom(r); err != nil {
- return
- }
- case PubKeyAlgoECDH:
- e.encryptedMPI1 = new(encoding.MPI)
- if _, err = e.encryptedMPI1.ReadFrom(r); err != nil {
- return
- }
-
- e.encryptedMPI2 = new(encoding.OID)
- if _, err = e.encryptedMPI2.ReadFrom(r); err != nil {
- return
- }
- }
- _, err = consumeAll(r)
- return
-}
-
-func checksumKeyMaterial(key []byte) uint16 {
- var checksum uint16
- for _, v := range key {
- checksum += uint16(v)
- }
- return checksum
-}
-
-// Decrypt decrypts an encrypted session key with the given private key. The
-// private key must have been decrypted first.
-// If config is nil, sensible defaults will be used.
-func (e *EncryptedKey) Decrypt(priv *PrivateKey, config *Config) error {
- if e.KeyId != 0 && e.KeyId != priv.KeyId {
- return errors.InvalidArgumentError("cannot decrypt encrypted session key for key id " + strconv.FormatUint(e.KeyId, 16) + " with private key id " + strconv.FormatUint(priv.KeyId, 16))
- }
- if e.Algo != priv.PubKeyAlgo {
- return errors.InvalidArgumentError("cannot decrypt encrypted session key of type " + strconv.Itoa(int(e.Algo)) + " with private key of type " + strconv.Itoa(int(priv.PubKeyAlgo)))
- }
- if priv.Dummy() {
- return errors.ErrDummyPrivateKey("dummy key found")
- }
-
- var err error
- var b []byte
-
- // TODO(agl): use session key decryption routines here to avoid
- // padding oracle attacks.
- switch priv.PubKeyAlgo {
- case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
- // Supports both *rsa.PrivateKey and crypto.Decrypter
- k := priv.PrivateKey.(crypto.Decrypter)
- b, err = k.Decrypt(config.Random(), padToKeySize(k.Public().(*rsa.PublicKey), e.encryptedMPI1.Bytes()), nil)
- case PubKeyAlgoElGamal:
- c1 := new(big.Int).SetBytes(e.encryptedMPI1.Bytes())
- c2 := new(big.Int).SetBytes(e.encryptedMPI2.Bytes())
- b, err = elgamal.Decrypt(priv.PrivateKey.(*elgamal.PrivateKey), c1, c2)
- case PubKeyAlgoECDH:
- vsG := e.encryptedMPI1.Bytes()
- m := e.encryptedMPI2.Bytes()
- oid := priv.PublicKey.oid.EncodedBytes()
- b, err = ecdh.Decrypt(priv.PrivateKey.(*ecdh.PrivateKey), vsG, m, oid, priv.PublicKey.Fingerprint[:])
- default:
- err = errors.InvalidArgumentError("cannot decrypt encrypted session key with private key of type " + strconv.Itoa(int(priv.PubKeyAlgo)))
- }
-
- if err != nil {
- return err
- }
-
- e.CipherFunc = CipherFunction(b[0])
- if !e.CipherFunc.IsSupported() {
- return errors.UnsupportedError("unsupported encryption function")
- }
-
- e.Key = b[1 : len(b)-2]
- expectedChecksum := uint16(b[len(b)-2])<<8 | uint16(b[len(b)-1])
- checksum := checksumKeyMaterial(e.Key)
- if checksum != expectedChecksum {
- return errors.StructuralError("EncryptedKey checksum incorrect")
- }
-
- return nil
-}
-
-// Serialize writes the encrypted key packet, e, to w.
-func (e *EncryptedKey) Serialize(w io.Writer) error {
- var mpiLen int
- switch e.Algo {
- case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
- mpiLen = int(e.encryptedMPI1.EncodedLength())
- case PubKeyAlgoElGamal:
- mpiLen = int(e.encryptedMPI1.EncodedLength()) + int(e.encryptedMPI2.EncodedLength())
- case PubKeyAlgoECDH:
- mpiLen = int(e.encryptedMPI1.EncodedLength()) + int(e.encryptedMPI2.EncodedLength())
- default:
- return errors.InvalidArgumentError("don't know how to serialize encrypted key type " + strconv.Itoa(int(e.Algo)))
- }
-
- err := serializeHeader(w, packetTypeEncryptedKey, 1 /* version */ +8 /* key id */ +1 /* algo */ +mpiLen)
- if err != nil {
- return err
- }
-
- w.Write([]byte{encryptedKeyVersion})
- binary.Write(w, binary.BigEndian, e.KeyId)
- w.Write([]byte{byte(e.Algo)})
-
- switch e.Algo {
- case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
- _, err := w.Write(e.encryptedMPI1.EncodedBytes())
- return err
- case PubKeyAlgoElGamal:
- if _, err := w.Write(e.encryptedMPI1.EncodedBytes()); err != nil {
- return err
- }
- _, err := w.Write(e.encryptedMPI2.EncodedBytes())
- return err
- case PubKeyAlgoECDH:
- if _, err := w.Write(e.encryptedMPI1.EncodedBytes()); err != nil {
- return err
- }
- _, err := w.Write(e.encryptedMPI2.EncodedBytes())
- return err
- default:
- panic("internal error")
- }
-}
-
-// SerializeEncryptedKey serializes an encrypted key packet to w that contains
-// key, encrypted to pub.
-// If config is nil, sensible defaults will be used.
-func SerializeEncryptedKey(w io.Writer, pub *PublicKey, cipherFunc CipherFunction, key []byte, config *Config) error {
- var buf [10]byte
- buf[0] = encryptedKeyVersion
- binary.BigEndian.PutUint64(buf[1:9], pub.KeyId)
- buf[9] = byte(pub.PubKeyAlgo)
-
- keyBlock := make([]byte, 1 /* cipher type */ +len(key)+2 /* checksum */)
- keyBlock[0] = byte(cipherFunc)
- copy(keyBlock[1:], key)
- checksum := checksumKeyMaterial(key)
- keyBlock[1+len(key)] = byte(checksum >> 8)
- keyBlock[1+len(key)+1] = byte(checksum)
-
- switch pub.PubKeyAlgo {
- case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
- return serializeEncryptedKeyRSA(w, config.Random(), buf, pub.PublicKey.(*rsa.PublicKey), keyBlock)
- case PubKeyAlgoElGamal:
- return serializeEncryptedKeyElGamal(w, config.Random(), buf, pub.PublicKey.(*elgamal.PublicKey), keyBlock)
- case PubKeyAlgoECDH:
- return serializeEncryptedKeyECDH(w, config.Random(), buf, pub.PublicKey.(*ecdh.PublicKey), keyBlock, pub.oid, pub.Fingerprint)
- case PubKeyAlgoDSA, PubKeyAlgoRSASignOnly:
- return errors.InvalidArgumentError("cannot encrypt to public key of type " + strconv.Itoa(int(pub.PubKeyAlgo)))
- }
-
- return errors.UnsupportedError("encrypting a key to public key of type " + strconv.Itoa(int(pub.PubKeyAlgo)))
-}
-
-func serializeEncryptedKeyRSA(w io.Writer, rand io.Reader, header [10]byte, pub *rsa.PublicKey, keyBlock []byte) error {
- cipherText, err := rsa.EncryptPKCS1v15(rand, pub, keyBlock)
- if err != nil {
- return errors.InvalidArgumentError("RSA encryption failed: " + err.Error())
- }
-
- cipherMPI := encoding.NewMPI(cipherText)
- packetLen := 10 /* header length */ + int(cipherMPI.EncodedLength())
-
- err = serializeHeader(w, packetTypeEncryptedKey, packetLen)
- if err != nil {
- return err
- }
- _, err = w.Write(header[:])
- if err != nil {
- return err
- }
- _, err = w.Write(cipherMPI.EncodedBytes())
- return err
-}
-
-func serializeEncryptedKeyElGamal(w io.Writer, rand io.Reader, header [10]byte, pub *elgamal.PublicKey, keyBlock []byte) error {
- c1, c2, err := elgamal.Encrypt(rand, pub, keyBlock)
- if err != nil {
- return errors.InvalidArgumentError("ElGamal encryption failed: " + err.Error())
- }
-
- packetLen := 10 /* header length */
- packetLen += 2 /* mpi size */ + (c1.BitLen()+7)/8
- packetLen += 2 /* mpi size */ + (c2.BitLen()+7)/8
-
- err = serializeHeader(w, packetTypeEncryptedKey, packetLen)
- if err != nil {
- return err
- }
- _, err = w.Write(header[:])
- if err != nil {
- return err
- }
- if _, err = w.Write(new(encoding.MPI).SetBig(c1).EncodedBytes()); err != nil {
- return err
- }
- _, err = w.Write(new(encoding.MPI).SetBig(c2).EncodedBytes())
- return err
-}
-
-func serializeEncryptedKeyECDH(w io.Writer, rand io.Reader, header [10]byte, pub *ecdh.PublicKey, keyBlock []byte, oid encoding.Field, fingerprint []byte) error {
- vsG, c, err := ecdh.Encrypt(rand, pub, keyBlock, oid.EncodedBytes(), fingerprint)
- if err != nil {
- return errors.InvalidArgumentError("ECDH encryption failed: " + err.Error())
- }
-
- g := encoding.NewMPI(vsG)
- m := encoding.NewOID(c)
-
- packetLen := 10 /* header length */
- packetLen += int(g.EncodedLength()) + int(m.EncodedLength())
-
- err = serializeHeader(w, packetTypeEncryptedKey, packetLen)
- if err != nil {
- return err
- }
-
- _, err = w.Write(header[:])
- if err != nil {
- return err
- }
- if _, err = w.Write(g.EncodedBytes()); err != nil {
- return err
- }
- _, err = w.Write(m.EncodedBytes())
- return err
-}
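
The `keyBlock` assembled by `SerializeEncryptedKey` above is one cipher-ID octet, the raw session key, and a two-octet checksum that is simply the 16-bit sum of the key octets (RFC 4880, section 5.1). A standalone sketch of building and verifying that block (helper names are mine):

```go
package main

import "fmt"

// checksum mirrors checksumKeyMaterial above: a 16-bit running sum.
func checksum(key []byte) uint16 {
	var sum uint16
	for _, v := range key {
		sum += uint16(v)
	}
	return sum
}

// buildKeyBlock mirrors the layout in SerializeEncryptedKey above.
func buildKeyBlock(cipherFunc byte, key []byte) []byte {
	block := make([]byte, 1+len(key)+2)
	block[0] = cipherFunc
	copy(block[1:], key)
	sum := checksum(key)
	block[1+len(key)] = byte(sum >> 8)
	block[1+len(key)+1] = byte(sum)
	return block
}

func main() {
	key := []byte{0x01, 0x02, 0x03, 0x04}
	block := buildKeyBlock(9, key) // 9 = AES-256 in RFC 4880
	got := uint16(block[len(block)-2])<<8 | uint16(block[len(block)-1])
	fmt.Println(got == checksum(key)) // true
}
```
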
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/literal.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/literal.go
deleted file mode 100644
index 4be98760..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/literal.go
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package packet
-
-import (
- "encoding/binary"
- "io"
-)
-
-// LiteralData represents an encrypted file. See RFC 4880, section 5.9.
-type LiteralData struct {
- Format uint8
- IsBinary bool
- FileName string
- Time uint32 // Unix epoch time. Either creation time or modification time. 0 means undefined.
- Body io.Reader
-}
-
-// ForEyesOnly returns whether the contents of the LiteralData have been marked
-// as especially sensitive.
-func (l *LiteralData) ForEyesOnly() bool {
- return l.FileName == "_CONSOLE"
-}
-
-func (l *LiteralData) parse(r io.Reader) (err error) {
- var buf [256]byte
-
- _, err = readFull(r, buf[:2])
- if err != nil {
- return
- }
-
- l.Format = buf[0]
- l.IsBinary = l.Format == 'b'
- fileNameLen := int(buf[1])
-
- _, err = readFull(r, buf[:fileNameLen])
- if err != nil {
- return
- }
-
- l.FileName = string(buf[:fileNameLen])
-
- _, err = readFull(r, buf[:4])
- if err != nil {
- return
- }
-
- l.Time = binary.BigEndian.Uint32(buf[:4])
- l.Body = r
- return
-}
-
-// SerializeLiteral serializes a literal data packet to w and returns a
-// WriteCloser to which the data itself can be written and which MUST be closed
-// on completion. The fileName is truncated to 255 bytes.
-func SerializeLiteral(w io.WriteCloser, isBinary bool, fileName string, time uint32) (plaintext io.WriteCloser, err error) {
- var buf [4]byte
- buf[0] = 't'
- if isBinary {
- buf[0] = 'b'
- }
- if len(fileName) > 255 {
- fileName = fileName[:255]
- }
- buf[1] = byte(len(fileName))
-
- inner, err := serializeStreamHeader(w, packetTypeLiteralData)
- if err != nil {
- return
- }
-
- _, err = inner.Write(buf[:2])
- if err != nil {
- return
- }
- _, err = inner.Write([]byte(fileName))
- if err != nil {
- return
- }
- binary.BigEndian.PutUint32(buf[:], time)
- _, err = inner.Write(buf[:])
- if err != nil {
- return
- }
-
- plaintext = inner
- return
-}
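
The literal-data header written by `SerializeLiteral` above is: one format octet ('b' or 't'), a one-octet filename length, the filename truncated to 255 bytes, and a four-octet big-endian timestamp. A minimal sketch of just the header layout (the builder function is hypothetical):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// buildLiteralHeader mirrors the bytes written by SerializeLiteral above.
func buildLiteralHeader(isBinary bool, fileName string, t uint32) []byte {
	format := byte('t')
	if isBinary {
		format = 'b'
	}
	if len(fileName) > 255 {
		fileName = fileName[:255]
	}
	hdr := make([]byte, 0, 2+len(fileName)+4)
	hdr = append(hdr, format, byte(len(fileName)))
	hdr = append(hdr, fileName...)
	var ts [4]byte
	binary.BigEndian.PutUint32(ts[:], t)
	return append(hdr, ts[:]...)
}

func main() {
	fmt.Printf("% x\n", buildLiteralHeader(true, "a.txt", 0))
}
```
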
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/notation.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/notation.go
deleted file mode 100644
index 2c3e3f50..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/notation.go
+++ /dev/null
@@ -1,29 +0,0 @@
-package packet
-
-// Notation type represents a Notation Data subpacket
-// see https://tools.ietf.org/html/rfc4880#section-5.2.3.16
-type Notation struct {
- Name string
- Value []byte
- IsCritical bool
- IsHumanReadable bool
-}
-
-func (notation *Notation) getData() []byte {
- nameData := []byte(notation.Name)
- nameLen := len(nameData)
- valueLen := len(notation.Value)
-
- data := make([]byte, 8+nameLen+valueLen)
- if notation.IsHumanReadable {
- data[0] = 0x80
- }
-
- data[4] = byte(nameLen >> 8)
- data[5] = byte(nameLen)
- data[6] = byte(valueLen >> 8)
- data[7] = byte(valueLen)
- copy(data[8:8+nameLen], nameData)
- copy(data[8+nameLen:], notation.Value)
- return data
-}
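
`getData` above emits four flag octets (0x80 in the first marks the notation human-readable), two-octet name and value lengths, then the name and value. A sketch of the inverse parse, written from that layout (the parser itself is mine, not part of the deleted package):

```go
package main

import "fmt"

// parseNotation inverts Notation.getData above.
func parseNotation(data []byte) (name string, value []byte, human bool, ok bool) {
	if len(data) < 8 {
		return
	}
	human = data[0]&0x80 != 0
	nameLen := int(data[4])<<8 | int(data[5])
	valueLen := int(data[6])<<8 | int(data[7])
	if len(data) != 8+nameLen+valueLen {
		return
	}
	return string(data[8 : 8+nameLen]), data[8+nameLen:], human, true
}

func main() {
	// Hand-built equivalent of getData for name "k", value "v", human-readable.
	data := []byte{0x80, 0, 0, 0, 0, 1, 0, 1, 'k', 'v'}
	name, value, human, ok := parseNotation(data)
	fmt.Println(name, string(value), human, ok) // k v true true
}
```
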
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/ocfb.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/ocfb.go
deleted file mode 100644
index 4f26d0a0..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/ocfb.go
+++ /dev/null
@@ -1,137 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// OpenPGP CFB Mode. http://tools.ietf.org/html/rfc4880#section-13.9
-
-package packet
-
-import (
- "crypto/cipher"
-)
-
-type ocfbEncrypter struct {
- b cipher.Block
- fre []byte
- outUsed int
-}
-
-// An OCFBResyncOption determines if the "resynchronization step" of OCFB is
-// performed.
-type OCFBResyncOption bool
-
-const (
- OCFBResync OCFBResyncOption = true
- OCFBNoResync OCFBResyncOption = false
-)
-
-// NewOCFBEncrypter returns a cipher.Stream which encrypts data with OpenPGP's
-// cipher feedback mode using the given cipher.Block, and an initial amount of
-// ciphertext. randData must be random bytes and be the same length as the
-// cipher.Block's block size. Resync determines if the "resynchronization step"
-// from RFC 4880, 13.9 step 7 is performed. Different parts of OpenPGP vary on
-// this point.
-func NewOCFBEncrypter(block cipher.Block, randData []byte, resync OCFBResyncOption) (cipher.Stream, []byte) {
- blockSize := block.BlockSize()
- if len(randData) != blockSize {
- return nil, nil
- }
-
- x := &ocfbEncrypter{
- b: block,
- fre: make([]byte, blockSize),
- outUsed: 0,
- }
- prefix := make([]byte, blockSize+2)
-
- block.Encrypt(x.fre, x.fre)
- for i := 0; i < blockSize; i++ {
- prefix[i] = randData[i] ^ x.fre[i]
- }
-
- block.Encrypt(x.fre, prefix[:blockSize])
- prefix[blockSize] = x.fre[0] ^ randData[blockSize-2]
- prefix[blockSize+1] = x.fre[1] ^ randData[blockSize-1]
-
- if resync {
- block.Encrypt(x.fre, prefix[2:])
- } else {
- x.fre[0] = prefix[blockSize]
- x.fre[1] = prefix[blockSize+1]
- x.outUsed = 2
- }
- return x, prefix
-}
-
-func (x *ocfbEncrypter) XORKeyStream(dst, src []byte) {
- for i := 0; i < len(src); i++ {
- if x.outUsed == len(x.fre) {
- x.b.Encrypt(x.fre, x.fre)
- x.outUsed = 0
- }
-
- x.fre[x.outUsed] ^= src[i]
- dst[i] = x.fre[x.outUsed]
- x.outUsed++
- }
-}
-
-type ocfbDecrypter struct {
- b cipher.Block
- fre []byte
- outUsed int
-}
-
-// NewOCFBDecrypter returns a cipher.Stream which decrypts data with OpenPGP's
-// cipher feedback mode using the given cipher.Block. Prefix must be the first
-// blockSize + 2 bytes of the ciphertext, where blockSize is the cipher.Block's
-// block size. On successful exit, blockSize+2 bytes of decrypted data are written into
-// prefix. Resync determines if the "resynchronization step" from RFC 4880,
-// 13.9 step 7 is performed. Different parts of OpenPGP vary on this point.
-func NewOCFBDecrypter(block cipher.Block, prefix []byte, resync OCFBResyncOption) cipher.Stream {
- blockSize := block.BlockSize()
- if len(prefix) != blockSize+2 {
- return nil
- }
-
- x := &ocfbDecrypter{
- b: block,
- fre: make([]byte, blockSize),
- outUsed: 0,
- }
- prefixCopy := make([]byte, len(prefix))
- copy(prefixCopy, prefix)
-
- block.Encrypt(x.fre, x.fre)
- for i := 0; i < blockSize; i++ {
- prefixCopy[i] ^= x.fre[i]
- }
-
- block.Encrypt(x.fre, prefix[:blockSize])
- prefixCopy[blockSize] ^= x.fre[0]
- prefixCopy[blockSize+1] ^= x.fre[1]
-
- if resync {
- block.Encrypt(x.fre, prefix[2:])
- } else {
- x.fre[0] = prefix[blockSize]
- x.fre[1] = prefix[blockSize+1]
- x.outUsed = 2
- }
- copy(prefix, prefixCopy)
- return x
-}
-
-func (x *ocfbDecrypter) XORKeyStream(dst, src []byte) {
- for i := 0; i < len(src); i++ {
- if x.outUsed == len(x.fre) {
- x.b.Encrypt(x.fre, x.fre)
- x.outUsed = 0
- }
-
- c := src[i]
- dst[i] = x.fre[x.outUsed] ^ src[i]
- x.fre[x.outUsed] = c
- x.outUsed++
- }
-}
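
For context, the removed `NewOCFBEncrypter`/`NewOCFBDecrypter` pair round-trips as below: the encrypter returns a blockSize+2-byte ciphertext prefix, and the decrypter must be seeded with exactly that prefix. This assumes the go-crypto module still exposes these functions at the version on the module graph; treat it as a usage sketch of the API shown above rather than a supported recipe:

```go
package main

import (
	"crypto/aes"
	"crypto/rand"
	"fmt"

	"github.com/ProtonMail/go-crypto/openpgp/packet"
)

func main() {
	key := make([]byte, 16)
	if _, err := rand.Read(key); err != nil {
		panic(err)
	}
	block, err := aes.NewCipher(key)
	if err != nil {
		panic(err)
	}

	// randData must match the cipher's block size; the returned prefix is
	// the initial blockSize+2 bytes of ciphertext.
	randData := make([]byte, block.BlockSize())
	if _, err := rand.Read(randData); err != nil {
		panic(err)
	}
	enc, prefix := packet.NewOCFBEncrypter(block, randData, packet.OCFBResync)

	plaintext := []byte("hello, OCFB")
	ciphertext := make([]byte, len(plaintext))
	enc.XORKeyStream(ciphertext, plaintext)

	dec := packet.NewOCFBDecrypter(block, prefix, packet.OCFBResync)
	recovered := make([]byte, len(ciphertext))
	dec.XORKeyStream(recovered, ciphertext)
	fmt.Println(string(recovered)) // hello, OCFB
}
```
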
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/one_pass_signature.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/one_pass_signature.go
deleted file mode 100644
index fff119e6..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/one_pass_signature.go
+++ /dev/null
@@ -1,73 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package packet
-
-import (
- "crypto"
- "encoding/binary"
- "github.com/ProtonMail/go-crypto/openpgp/errors"
- "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm"
- "io"
- "strconv"
-)
-
-// OnePassSignature represents a one-pass signature packet. See RFC 4880,
-// section 5.4.
-type OnePassSignature struct {
- SigType SignatureType
- Hash crypto.Hash
- PubKeyAlgo PublicKeyAlgorithm
- KeyId uint64
- IsLast bool
-}
-
-const onePassSignatureVersion = 3
-
-func (ops *OnePassSignature) parse(r io.Reader) (err error) {
- var buf [13]byte
-
- _, err = readFull(r, buf[:])
- if err != nil {
- return
- }
- if buf[0] != onePassSignatureVersion {
- err = errors.UnsupportedError("one-pass-signature packet version " + strconv.Itoa(int(buf[0])))
- }
-
- var ok bool
- ops.Hash, ok = algorithm.HashIdToHashWithSha1(buf[2])
- if !ok {
- return errors.UnsupportedError("hash function: " + strconv.Itoa(int(buf[2])))
- }
-
- ops.SigType = SignatureType(buf[1])
- ops.PubKeyAlgo = PublicKeyAlgorithm(buf[3])
- ops.KeyId = binary.BigEndian.Uint64(buf[4:12])
- ops.IsLast = buf[12] != 0
- return
-}
-
-// Serialize marshals the given OnePassSignature to w.
-func (ops *OnePassSignature) Serialize(w io.Writer) error {
- var buf [13]byte
- buf[0] = onePassSignatureVersion
- buf[1] = uint8(ops.SigType)
- var ok bool
- buf[2], ok = algorithm.HashToHashId(ops.Hash)
- if !ok {
- return errors.UnsupportedError("hash type: " + strconv.Itoa(int(ops.Hash)))
- }
- buf[3] = uint8(ops.PubKeyAlgo)
- binary.BigEndian.PutUint64(buf[4:12], ops.KeyId)
- if ops.IsLast {
- buf[12] = 1
- }
-
- if err := serializeHeader(w, packetTypeOnePassSignature, len(buf)); err != nil {
- return err
- }
- _, err := w.Write(buf[:])
- return err
-}
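
The one-pass signature body above is a fixed 13-octet layout: version, signature type, hash ID, public-key algorithm, an 8-octet key ID, and a "last packet" flag. A sketch of building it by hand (the RFC 4880 IDs in `main` are given for illustration):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// buildOnePassBody mirrors OnePassSignature.Serialize above.
func buildOnePassBody(sigType, hashID, pubKeyAlgo byte, keyID uint64, isLast bool) []byte {
	var buf [13]byte
	buf[0] = 3 // onePassSignatureVersion
	buf[1] = sigType
	buf[2] = hashID
	buf[3] = pubKeyAlgo
	binary.BigEndian.PutUint64(buf[4:12], keyID)
	if isLast {
		buf[12] = 1
	}
	return buf[:]
}

func main() {
	// 0x00 = binary signature, 8 = SHA-256, 1 = RSA (RFC 4880 IDs).
	body := buildOnePassBody(0x00, 8, 1, 0x1122334455667788, true)
	fmt.Printf("% x\n", body)
}
```
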
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/opaque.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/opaque.go
deleted file mode 100644
index 4f820407..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/opaque.go
+++ /dev/null
@@ -1,171 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package packet
-
-import (
- "bytes"
- "io"
- "io/ioutil"
-
- "github.com/ProtonMail/go-crypto/openpgp/errors"
-)
-
-// OpaquePacket represents an OpenPGP packet as raw, unparsed data. This is
-// useful for splitting and storing the original packet contents separately,
-// handling unsupported packet types or accessing parts of the packet not yet
-// implemented by this package.
-type OpaquePacket struct {
- // Packet type
- Tag uint8
- // Reason why the packet was parsed opaquely
- Reason error
- // Binary contents of the packet data
- Contents []byte
-}
-
-func (op *OpaquePacket) parse(r io.Reader) (err error) {
- op.Contents, err = ioutil.ReadAll(r)
- return
-}
-
-// Serialize marshals the packet to a writer in its original form, including
-// the packet header.
-func (op *OpaquePacket) Serialize(w io.Writer) (err error) {
- err = serializeHeader(w, packetType(op.Tag), len(op.Contents))
- if err == nil {
- _, err = w.Write(op.Contents)
- }
- return
-}
-
-// Parse attempts to parse the opaque contents into a structure supported by
-// this package. If the packet is not known then the result will be another
-// OpaquePacket.
-func (op *OpaquePacket) Parse() (p Packet, err error) {
- hdr := bytes.NewBuffer(nil)
- err = serializeHeader(hdr, packetType(op.Tag), len(op.Contents))
- if err != nil {
- op.Reason = err
- return op, err
- }
- p, err = Read(io.MultiReader(hdr, bytes.NewBuffer(op.Contents)))
- if err != nil {
- op.Reason = err
- p = op
- }
- return
-}
-
-// OpaqueReader reads OpaquePackets from an io.Reader.
-type OpaqueReader struct {
- r io.Reader
-}
-
-func NewOpaqueReader(r io.Reader) *OpaqueReader {
- return &OpaqueReader{r: r}
-}
-
-// Read the next OpaquePacket.
-func (or *OpaqueReader) Next() (op *OpaquePacket, err error) {
- tag, _, contents, err := readHeader(or.r)
- if err != nil {
- return
- }
- op = &OpaquePacket{Tag: uint8(tag), Reason: err}
- err = op.parse(contents)
- if err != nil {
- consumeAll(contents)
- }
- return
-}
-
-// OpaqueSubpacket represents an unparsed OpenPGP subpacket,
-// as found in signature and user attribute packets.
-type OpaqueSubpacket struct {
- SubType uint8
- EncodedLength []byte // Store the original encoded length for signature verifications.
- Contents []byte
-}
-
-// OpaqueSubpackets extracts opaque, unparsed OpenPGP subpackets from
-// their byte representation.
-func OpaqueSubpackets(contents []byte) (result []*OpaqueSubpacket, err error) {
- var (
- subHeaderLen int
- subPacket *OpaqueSubpacket
- )
- for len(contents) > 0 {
- subHeaderLen, subPacket, err = nextSubpacket(contents)
- if err != nil {
- break
- }
- result = append(result, subPacket)
- contents = contents[subHeaderLen+len(subPacket.Contents):]
- }
- return
-}
-
-func nextSubpacket(contents []byte) (subHeaderLen int, subPacket *OpaqueSubpacket, err error) {
- // RFC 4880, section 5.2.3.1
- var subLen uint32
- var encodedLength []byte
- if len(contents) < 1 {
- goto Truncated
- }
- subPacket = &OpaqueSubpacket{}
- switch {
- case contents[0] < 192:
- subHeaderLen = 2 // 1 length byte, 1 subtype byte
- if len(contents) < subHeaderLen {
- goto Truncated
- }
- encodedLength = contents[0:1]
- subLen = uint32(contents[0])
- contents = contents[1:]
- case contents[0] < 255:
- subHeaderLen = 3 // 2 length bytes, 1 subtype
- if len(contents) < subHeaderLen {
- goto Truncated
- }
- encodedLength = contents[0:2]
- subLen = uint32(contents[0]-192)<<8 + uint32(contents[1]) + 192
- contents = contents[2:]
- default:
- subHeaderLen = 6 // 5 length bytes, 1 subtype
- if len(contents) < subHeaderLen {
- goto Truncated
- }
- encodedLength = contents[0:5]
- subLen = uint32(contents[1])<<24 |
- uint32(contents[2])<<16 |
- uint32(contents[3])<<8 |
- uint32(contents[4])
- contents = contents[5:]
-
- }
- if subLen > uint32(len(contents)) || subLen == 0 {
- goto Truncated
- }
- subPacket.SubType = contents[0]
- subPacket.EncodedLength = encodedLength
- subPacket.Contents = contents[1:subLen]
- return
-Truncated:
- err = errors.StructuralError("subpacket truncated")
- return
-}
-
-func (osp *OpaqueSubpacket) Serialize(w io.Writer) (err error) {
- buf := make([]byte, 6)
- copy(buf, osp.EncodedLength)
- n := len(osp.EncodedLength)
-
- buf[n] = osp.SubType
- if _, err = w.Write(buf[:n+1]); err != nil {
- return
- }
- _, err = w.Write(osp.Contents)
- return
-}
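For reviewers double-checking the subpacket framing this file handled: the one-, two-, and five-octet length forms of RFC 4880, section 5.2.3.1 are compact enough to sketch standalone. The snippet below is illustrative only; `decodeSubpacketLength` is our name, and the bounds and error handling of the original `nextSubpacket` are elided.

```go
package main

import "fmt"

// decodeSubpacketLength decodes an OpenPGP signature-subpacket length
// (RFC 4880, section 5.2.3.1), returning the body length and the number
// of length octets consumed. Bounds checks are omitted for brevity.
func decodeSubpacketLength(b []byte) (bodyLen uint32, lengthOctets int) {
	switch {
	case b[0] < 192:
		// One-octet form: lengths 0..191.
		return uint32(b[0]), 1
	case b[0] < 255:
		// Two-octet form: lengths 192..16319, carried with a 192 offset.
		return uint32(b[0]-192)<<8 + uint32(b[1]) + 192, 2
	default:
		// Five-octet form: a 0xFF marker plus four big-endian octets.
		return uint32(b[1])<<24 | uint32(b[2])<<16 | uint32(b[3])<<8 | uint32(b[4]), 5
	}
}

func main() {
	fmt.Println(decodeSubpacketLength([]byte{190}))             // 190 1
	fmt.Println(decodeSubpacketLength([]byte{192, 0}))          // 192 2
	fmt.Println(decodeSubpacketLength([]byte{255, 0, 0, 1, 0})) // 256 5
}
```

Note that the decoded body length includes the subtype octet, which is why the deleted code slices `contents[1:subLen]` after skipping the length header.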
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet.go
deleted file mode 100644
index f73f6f40..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet.go
+++ /dev/null
@@ -1,551 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package packet implements parsing and serialization of OpenPGP packets, as
-// specified in RFC 4880.
-package packet // import "github.com/ProtonMail/go-crypto/openpgp/packet"
-
-import (
- "bytes"
- "crypto/cipher"
- "crypto/rsa"
- "io"
-
- "github.com/ProtonMail/go-crypto/openpgp/errors"
- "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm"
-)
-
-// readFull is the same as io.ReadFull except that reading zero bytes returns
-// ErrUnexpectedEOF rather than EOF.
-func readFull(r io.Reader, buf []byte) (n int, err error) {
- n, err = io.ReadFull(r, buf)
- if err == io.EOF {
- err = io.ErrUnexpectedEOF
- }
- return
-}
-
-// readLength reads an OpenPGP length from r. See RFC 4880, section 4.2.2.
-func readLength(r io.Reader) (length int64, isPartial bool, err error) {
- var buf [4]byte
- _, err = readFull(r, buf[:1])
- if err != nil {
- return
- }
- switch {
- case buf[0] < 192:
- length = int64(buf[0])
- case buf[0] < 224:
- length = int64(buf[0]-192) << 8
- _, err = readFull(r, buf[0:1])
- if err != nil {
- return
- }
- length += int64(buf[0]) + 192
- case buf[0] < 255:
- length = int64(1) << (buf[0] & 0x1f)
- isPartial = true
- default:
- _, err = readFull(r, buf[0:4])
- if err != nil {
- return
- }
- length = int64(buf[0])<<24 |
- int64(buf[1])<<16 |
- int64(buf[2])<<8 |
- int64(buf[3])
- }
- return
-}
-
-// partialLengthReader wraps an io.Reader and handles OpenPGP partial lengths.
-// The continuation lengths are parsed and removed from the stream and EOF is
-// returned at the end of the packet. See RFC 4880, section 4.2.2.4.
-type partialLengthReader struct {
- r io.Reader
- remaining int64
- isPartial bool
-}
-
-func (r *partialLengthReader) Read(p []byte) (n int, err error) {
- for r.remaining == 0 {
- if !r.isPartial {
- return 0, io.EOF
- }
- r.remaining, r.isPartial, err = readLength(r.r)
- if err != nil {
- return 0, err
- }
- }
-
- toRead := int64(len(p))
- if toRead > r.remaining {
- toRead = r.remaining
- }
-
- n, err = r.r.Read(p[:int(toRead)])
- r.remaining -= int64(n)
- if n < int(toRead) && err == io.EOF {
- err = io.ErrUnexpectedEOF
- }
- return
-}
-
-// partialLengthWriter writes a stream of data using OpenPGP partial lengths.
-// See RFC 4880, section 4.2.2.4.
-type partialLengthWriter struct {
- w io.WriteCloser
- buf bytes.Buffer
- lengthByte [1]byte
-}
-
-func (w *partialLengthWriter) Write(p []byte) (n int, err error) {
- bufLen := w.buf.Len()
- if bufLen > 512 {
- for power := uint(30); ; power-- {
- l := 1 << power
- if bufLen >= l {
- w.lengthByte[0] = 224 + uint8(power)
- _, err = w.w.Write(w.lengthByte[:])
- if err != nil {
- return
- }
- var m int
- m, err = w.w.Write(w.buf.Next(l))
- if err != nil {
- return
- }
- if m != l {
- return 0, io.ErrShortWrite
- }
- break
- }
- }
- }
- return w.buf.Write(p)
-}
-
-func (w *partialLengthWriter) Close() (err error) {
- len := w.buf.Len()
- err = serializeLength(w.w, len)
- if err != nil {
- return err
- }
- _, err = w.buf.WriteTo(w.w)
- if err != nil {
- return err
- }
- return w.w.Close()
-}
-
-// A spanReader is an io.LimitReader, but it returns ErrUnexpectedEOF if the
-// underlying Reader returns EOF before the limit has been reached.
-type spanReader struct {
- r io.Reader
- n int64
-}
-
-func (l *spanReader) Read(p []byte) (n int, err error) {
- if l.n <= 0 {
- return 0, io.EOF
- }
- if int64(len(p)) > l.n {
- p = p[0:l.n]
- }
- n, err = l.r.Read(p)
- l.n -= int64(n)
- if l.n > 0 && err == io.EOF {
- err = io.ErrUnexpectedEOF
- }
- return
-}
-
-// readHeader parses a packet header and returns an io.Reader which will return
-// the contents of the packet. See RFC 4880, section 4.2.
-func readHeader(r io.Reader) (tag packetType, length int64, contents io.Reader, err error) {
- var buf [4]byte
- _, err = io.ReadFull(r, buf[:1])
- if err != nil {
- return
- }
- if buf[0]&0x80 == 0 {
- err = errors.StructuralError("tag byte does not have MSB set")
- return
- }
- if buf[0]&0x40 == 0 {
- // Old format packet
- tag = packetType((buf[0] & 0x3f) >> 2)
- lengthType := buf[0] & 3
- if lengthType == 3 {
- length = -1
- contents = r
- return
- }
- lengthBytes := 1 << lengthType
- _, err = readFull(r, buf[0:lengthBytes])
- if err != nil {
- return
- }
- for i := 0; i < lengthBytes; i++ {
- length <<= 8
- length |= int64(buf[i])
- }
- contents = &spanReader{r, length}
- return
- }
-
- // New format packet
- tag = packetType(buf[0] & 0x3f)
- length, isPartial, err := readLength(r)
- if err != nil {
- return
- }
- if isPartial {
- contents = &partialLengthReader{
- remaining: length,
- isPartial: true,
- r: r,
- }
- length = -1
- } else {
- contents = &spanReader{r, length}
- }
- return
-}
-
-// serializeHeader writes an OpenPGP packet header to w. See RFC 4880, section
-// 4.2.
-func serializeHeader(w io.Writer, ptype packetType, length int) (err error) {
- err = serializeType(w, ptype)
- if err != nil {
- return
- }
- return serializeLength(w, length)
-}
-
-// serializeType writes an OpenPGP packet type to w. See RFC 4880, section
-// 4.2.
-func serializeType(w io.Writer, ptype packetType) (err error) {
- var buf [1]byte
- buf[0] = 0x80 | 0x40 | byte(ptype)
- _, err = w.Write(buf[:])
- return
-}
-
-// serializeLength writes an OpenPGP packet length to w. See RFC 4880, section
-// 4.2.2.
-func serializeLength(w io.Writer, length int) (err error) {
- var buf [5]byte
- var n int
-
- if length < 192 {
- buf[0] = byte(length)
- n = 1
- } else if length < 8384 {
- length -= 192
- buf[0] = 192 + byte(length>>8)
- buf[1] = byte(length)
- n = 2
- } else {
- buf[0] = 255
- buf[1] = byte(length >> 24)
- buf[2] = byte(length >> 16)
- buf[3] = byte(length >> 8)
- buf[4] = byte(length)
- n = 5
- }
-
- _, err = w.Write(buf[:n])
- return
-}
-
-// serializeStreamHeader writes an OpenPGP packet header to w where the
-// length of the packet is unknown. It returns an io.WriteCloser which can be
-// used to write the contents of the packet. See RFC 4880, section 4.2.
-func serializeStreamHeader(w io.WriteCloser, ptype packetType) (out io.WriteCloser, err error) {
- err = serializeType(w, ptype)
- if err != nil {
- return
- }
- out = &partialLengthWriter{w: w}
- return
-}
-
-// Packet represents an OpenPGP packet. Users are expected to try casting
-// instances of this interface to specific packet types.
-type Packet interface {
- parse(io.Reader) error
-}
-
-// consumeAll reads from the given Reader until error, returning the number of
-// bytes read.
-func consumeAll(r io.Reader) (n int64, err error) {
- var m int
- var buf [1024]byte
-
- for {
- m, err = r.Read(buf[:])
- n += int64(m)
- if err == io.EOF {
- err = nil
- return
- }
- if err != nil {
- return
- }
- }
-}
-
-// packetType represents the numeric ids of the different OpenPGP packet types. See
-// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-2
-type packetType uint8
-
-const (
- packetTypeEncryptedKey packetType = 1
- packetTypeSignature packetType = 2
- packetTypeSymmetricKeyEncrypted packetType = 3
- packetTypeOnePassSignature packetType = 4
- packetTypePrivateKey packetType = 5
- packetTypePublicKey packetType = 6
- packetTypePrivateSubkey packetType = 7
- packetTypeCompressed packetType = 8
- packetTypeSymmetricallyEncrypted packetType = 9
- packetTypeLiteralData packetType = 11
- packetTypeUserId packetType = 13
- packetTypePublicSubkey packetType = 14
- packetTypeUserAttribute packetType = 17
- packetTypeSymmetricallyEncryptedIntegrityProtected packetType = 18
- packetTypeAEADEncrypted packetType = 20
-)
-
-// EncryptedDataPacket holds encrypted data. It is currently implemented by
-// SymmetricallyEncrypted and AEADEncrypted.
-type EncryptedDataPacket interface {
- Decrypt(CipherFunction, []byte) (io.ReadCloser, error)
-}
-
-// Read reads a single OpenPGP packet from the given io.Reader. If there is an
-// error parsing a packet, the whole packet is consumed from the input.
-func Read(r io.Reader) (p Packet, err error) {
- tag, _, contents, err := readHeader(r)
- if err != nil {
- return
- }
-
- switch tag {
- case packetTypeEncryptedKey:
- p = new(EncryptedKey)
- case packetTypeSignature:
- p = new(Signature)
- case packetTypeSymmetricKeyEncrypted:
- p = new(SymmetricKeyEncrypted)
- case packetTypeOnePassSignature:
- p = new(OnePassSignature)
- case packetTypePrivateKey, packetTypePrivateSubkey:
- pk := new(PrivateKey)
- if tag == packetTypePrivateSubkey {
- pk.IsSubkey = true
- }
- p = pk
- case packetTypePublicKey, packetTypePublicSubkey:
- isSubkey := tag == packetTypePublicSubkey
- p = &PublicKey{IsSubkey: isSubkey}
- case packetTypeCompressed:
- p = new(Compressed)
- case packetTypeSymmetricallyEncrypted:
- p = new(SymmetricallyEncrypted)
- case packetTypeLiteralData:
- p = new(LiteralData)
- case packetTypeUserId:
- p = new(UserId)
- case packetTypeUserAttribute:
- p = new(UserAttribute)
- case packetTypeSymmetricallyEncryptedIntegrityProtected:
- se := new(SymmetricallyEncrypted)
- se.IntegrityProtected = true
- p = se
- case packetTypeAEADEncrypted:
- p = new(AEADEncrypted)
- default:
- err = errors.UnknownPacketTypeError(tag)
- }
- if p != nil {
- err = p.parse(contents)
- }
- if err != nil {
- consumeAll(contents)
- }
- return
-}
-
-// SignatureType represents the different semantic meanings of an OpenPGP
-// signature. See RFC 4880, section 5.2.1.
-type SignatureType uint8
-
-const (
- SigTypeBinary SignatureType = 0x00
- SigTypeText = 0x01
- SigTypeGenericCert = 0x10
- SigTypePersonaCert = 0x11
- SigTypeCasualCert = 0x12
- SigTypePositiveCert = 0x13
- SigTypeSubkeyBinding = 0x18
- SigTypePrimaryKeyBinding = 0x19
- SigTypeDirectSignature = 0x1F
- SigTypeKeyRevocation = 0x20
- SigTypeSubkeyRevocation = 0x28
- SigTypeCertificationRevocation = 0x30
-)
-
-// PublicKeyAlgorithm represents the different public key systems specified for
-// OpenPGP. See
-// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-12
-type PublicKeyAlgorithm uint8
-
-const (
- PubKeyAlgoRSA PublicKeyAlgorithm = 1
- PubKeyAlgoElGamal PublicKeyAlgorithm = 16
- PubKeyAlgoDSA PublicKeyAlgorithm = 17
- // RFC 6637, Section 5.
- PubKeyAlgoECDH PublicKeyAlgorithm = 18
- PubKeyAlgoECDSA PublicKeyAlgorithm = 19
- // https://www.ietf.org/archive/id/draft-koch-eddsa-for-openpgp-04.txt
- PubKeyAlgoEdDSA PublicKeyAlgorithm = 22
-
- // Deprecated in RFC 4880, Section 13.5. Use key flags instead.
- PubKeyAlgoRSAEncryptOnly PublicKeyAlgorithm = 2
- PubKeyAlgoRSASignOnly PublicKeyAlgorithm = 3
-)
-
-// CanEncrypt returns true if it's possible to encrypt a message to a public
-// key of the given type.
-func (pka PublicKeyAlgorithm) CanEncrypt() bool {
- switch pka {
- case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoElGamal, PubKeyAlgoECDH:
- return true
- }
- return false
-}
-
-// CanSign returns true if it's possible for a public key of the given type to
-// sign a message.
-func (pka PublicKeyAlgorithm) CanSign() bool {
- switch pka {
- case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA, PubKeyAlgoECDSA, PubKeyAlgoEdDSA:
- return true
- }
- return false
-}
-
-// CipherFunction represents the different block ciphers specified for OpenPGP. See
-// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-13
-type CipherFunction algorithm.CipherFunction
-
-const (
- Cipher3DES CipherFunction = 2
- CipherCAST5 CipherFunction = 3
- CipherAES128 CipherFunction = 7
- CipherAES192 CipherFunction = 8
- CipherAES256 CipherFunction = 9
-)
-
-// KeySize returns the key size, in bytes, of cipher.
-func (cipher CipherFunction) KeySize() int {
- return algorithm.CipherFunction(cipher).KeySize()
-}
-
-// IsSupported returns true if the cipher is supported by the library
-func (cipher CipherFunction) IsSupported() bool {
- return algorithm.CipherFunction(cipher).KeySize() > 0
-}
-
-// blockSize returns the block size, in bytes, of cipher.
-func (cipher CipherFunction) blockSize() int {
- return algorithm.CipherFunction(cipher).BlockSize()
-}
-
-// new returns a fresh instance of the given cipher.
-func (cipher CipherFunction) new(key []byte) (block cipher.Block) {
- return algorithm.CipherFunction(cipher).New(key)
-}
-
-// padToKeySize left-pads an MPI with zeroes to match the length of the
-// specified RSA public key.
-func padToKeySize(pub *rsa.PublicKey, b []byte) []byte {
- k := (pub.N.BitLen() + 7) / 8
- if len(b) >= k {
- return b
- }
- bb := make([]byte, k)
- copy(bb[len(bb)-len(b):], b)
- return bb
-}
-
-// CompressionAlgo represents the different compression algorithms
-// supported by OpenPGP (except for BZIP2, which is not currently
-// supported). See Section 9.3 of RFC 4880.
-type CompressionAlgo uint8
-
-const (
- CompressionNone CompressionAlgo = 0
- CompressionZIP CompressionAlgo = 1
- CompressionZLIB CompressionAlgo = 2
-)
-
-// AEADMode represents the different Authenticated Encryption with Associated
-// Data modes specified for OpenPGP.
-// See https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#section-9.6
-type AEADMode algorithm.AEADMode
-
-const (
- AEADModeEAX AEADMode = 1
- AEADModeOCB AEADMode = 2
- AEADModeGCM AEADMode = 3
-)
-
-func (mode AEADMode) IvLength() int {
- return algorithm.AEADMode(mode).NonceLength()
-}
-
-func (mode AEADMode) TagLength() int {
- return algorithm.AEADMode(mode).TagLength()
-}
-
-// new returns a fresh instance of the given mode.
-func (mode AEADMode) new(block cipher.Block) cipher.AEAD {
- return algorithm.AEADMode(mode).New(block)
-}
-
-// ReasonForRevocation represents a revocation reason code as per RFC4880
-// section 5.2.3.23.
-type ReasonForRevocation uint8
-
-const (
- NoReason ReasonForRevocation = 0
- KeySuperseded ReasonForRevocation = 1
- KeyCompromised ReasonForRevocation = 2
- KeyRetired ReasonForRevocation = 3
-)
-
-// Curve is a mapping to supported ECC curves for key generation.
-// See https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-06.html#name-curve-specific-wire-formats
-type Curve string
-
-const (
- Curve25519 Curve = "Curve25519"
- Curve448 Curve = "Curve448"
- CurveNistP256 Curve = "P256"
- CurveNistP384 Curve = "P384"
- CurveNistP521 Curve = "P521"
- CurveSecP256k1 Curve = "SecP256k1"
- CurveBrainpoolP256 Curve = "BrainpoolP256"
- CurveBrainpoolP384 Curve = "BrainpoolP384"
- CurveBrainpoolP512 Curve = "BrainpoolP512"
-)
-
-// TrustLevel represents a trust level per RFC4880 5.2.3.13
-type TrustLevel uint8
-
-// TrustAmount represents a trust amount per RFC4880 5.2.3.13
-type TrustAmount uint8
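One subtlety worth keeping in mind when reviewing the removal of `readLength`/`serializeLength`: the new-format packet length encoding (RFC 4880, section 4.2.2) switches representation at 192 and 8384, and the two-octet form carries an offset of 192. A minimal self-contained sketch of the encoder side, with an illustrative helper name:

```go
package main

import "fmt"

// encodeLength mirrors the deleted serializeLength: one octet for
// lengths up to 191, two octets (with a 192 offset) up to 8383, and a
// five-octet form (0xFF marker plus 32-bit big-endian length) above that.
func encodeLength(length int) []byte {
	switch {
	case length < 192:
		return []byte{byte(length)}
	case length < 8384:
		length -= 192
		return []byte{192 + byte(length>>8), byte(length)}
	default:
		return []byte{255, byte(length >> 24), byte(length >> 16), byte(length >> 8), byte(length)}
	}
}

func main() {
	for _, n := range []int{0, 191, 192, 8383, 8384, 1 << 20} {
		fmt.Printf("%7d -> %x\n", n, encodeLength(n))
	}
}
```

First octets 224..254 are the partial body lengths handled by `partialLengthReader`/`partialLengthWriter` above; this encoder never emits them.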
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key.go
deleted file mode 100644
index 2898fa74..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key.go
+++ /dev/null
@@ -1,739 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package packet
-
-import (
- "bytes"
- "crypto"
- "crypto/cipher"
- "crypto/dsa"
- "crypto/rand"
- "crypto/rsa"
- "crypto/sha1"
- "io"
- "io/ioutil"
- "math/big"
- "strconv"
- "time"
-
- "github.com/ProtonMail/go-crypto/openpgp/ecdh"
- "github.com/ProtonMail/go-crypto/openpgp/ecdsa"
- "github.com/ProtonMail/go-crypto/openpgp/eddsa"
- "github.com/ProtonMail/go-crypto/openpgp/elgamal"
- "github.com/ProtonMail/go-crypto/openpgp/errors"
- "github.com/ProtonMail/go-crypto/openpgp/internal/encoding"
- "github.com/ProtonMail/go-crypto/openpgp/s2k"
-)
-
-// PrivateKey represents a possibly encrypted private key. See RFC 4880,
-// section 5.5.3.
-type PrivateKey struct {
- PublicKey
- Encrypted bool // if true then the private key is unavailable until Decrypt has been called.
- encryptedData []byte
- cipher CipherFunction
- s2k func(out, in []byte)
- // An *{rsa|dsa|elgamal|ecdh|ecdsa|ed25519}.PrivateKey or
- // crypto.Signer/crypto.Decrypter (Decryptor RSA only).
- PrivateKey interface{}
- sha1Checksum bool
- iv []byte
-
- // Type of encryption of the S2K packet
- // Allowed values are 0 (Not encrypted), 254 (SHA1), or
- // 255 (2-byte checksum)
- s2kType S2KType
- // Full parameters of the S2K packet
- s2kParams *s2k.Params
-}
-
-// S2KType is the S2K usage type of a secret-key packet.
-type S2KType uint8
-
-const (
-	// S2KNON indicates unencrypted key material
-	S2KNON S2KType = 0
-	// S2KSHA1 indicates a SHA-1 checksum
-	S2KSHA1 S2KType = 254
-	// S2KCHECKSUM indicates a two-octet checksum
-	S2KCHECKSUM S2KType = 255
-)
-
-func NewRSAPrivateKey(creationTime time.Time, priv *rsa.PrivateKey) *PrivateKey {
- pk := new(PrivateKey)
- pk.PublicKey = *NewRSAPublicKey(creationTime, &priv.PublicKey)
- pk.PrivateKey = priv
- return pk
-}
-
-func NewDSAPrivateKey(creationTime time.Time, priv *dsa.PrivateKey) *PrivateKey {
- pk := new(PrivateKey)
- pk.PublicKey = *NewDSAPublicKey(creationTime, &priv.PublicKey)
- pk.PrivateKey = priv
- return pk
-}
-
-func NewElGamalPrivateKey(creationTime time.Time, priv *elgamal.PrivateKey) *PrivateKey {
- pk := new(PrivateKey)
- pk.PublicKey = *NewElGamalPublicKey(creationTime, &priv.PublicKey)
- pk.PrivateKey = priv
- return pk
-}
-
-func NewECDSAPrivateKey(creationTime time.Time, priv *ecdsa.PrivateKey) *PrivateKey {
- pk := new(PrivateKey)
- pk.PublicKey = *NewECDSAPublicKey(creationTime, &priv.PublicKey)
- pk.PrivateKey = priv
- return pk
-}
-
-func NewEdDSAPrivateKey(creationTime time.Time, priv *eddsa.PrivateKey) *PrivateKey {
- pk := new(PrivateKey)
- pk.PublicKey = *NewEdDSAPublicKey(creationTime, &priv.PublicKey)
- pk.PrivateKey = priv
- return pk
-}
-
-func NewECDHPrivateKey(creationTime time.Time, priv *ecdh.PrivateKey) *PrivateKey {
- pk := new(PrivateKey)
- pk.PublicKey = *NewECDHPublicKey(creationTime, &priv.PublicKey)
- pk.PrivateKey = priv
- return pk
-}
-
-// NewSignerPrivateKey creates a PrivateKey from a crypto.Signer that
-// implements RSA, ECDSA or EdDSA.
-func NewSignerPrivateKey(creationTime time.Time, signer interface{}) *PrivateKey {
- pk := new(PrivateKey)
-	// In general, the public keys should be used as pointers. We still
- // type-switch on the values, for backwards-compatibility.
- switch pubkey := signer.(type) {
- case *rsa.PrivateKey:
- pk.PublicKey = *NewRSAPublicKey(creationTime, &pubkey.PublicKey)
- case rsa.PrivateKey:
- pk.PublicKey = *NewRSAPublicKey(creationTime, &pubkey.PublicKey)
- case *ecdsa.PrivateKey:
- pk.PublicKey = *NewECDSAPublicKey(creationTime, &pubkey.PublicKey)
- case ecdsa.PrivateKey:
- pk.PublicKey = *NewECDSAPublicKey(creationTime, &pubkey.PublicKey)
- case *eddsa.PrivateKey:
- pk.PublicKey = *NewEdDSAPublicKey(creationTime, &pubkey.PublicKey)
- case eddsa.PrivateKey:
- pk.PublicKey = *NewEdDSAPublicKey(creationTime, &pubkey.PublicKey)
- default:
- panic("openpgp: unknown signer type in NewSignerPrivateKey")
- }
- pk.PrivateKey = signer
- return pk
-}
-
-// NewDecrypterPrivateKey creates a PrivateKey from a *{rsa|elgamal|ecdh}.PrivateKey.
-func NewDecrypterPrivateKey(creationTime time.Time, decrypter interface{}) *PrivateKey {
- pk := new(PrivateKey)
- switch priv := decrypter.(type) {
- case *rsa.PrivateKey:
- pk.PublicKey = *NewRSAPublicKey(creationTime, &priv.PublicKey)
- case *elgamal.PrivateKey:
- pk.PublicKey = *NewElGamalPublicKey(creationTime, &priv.PublicKey)
- case *ecdh.PrivateKey:
- pk.PublicKey = *NewECDHPublicKey(creationTime, &priv.PublicKey)
- default:
- panic("openpgp: unknown decrypter type in NewDecrypterPrivateKey")
- }
- pk.PrivateKey = decrypter
- return pk
-}
-
-func (pk *PrivateKey) parse(r io.Reader) (err error) {
- err = (&pk.PublicKey).parse(r)
- if err != nil {
- return
- }
- v5 := pk.PublicKey.Version == 5
-
- var buf [1]byte
- _, err = readFull(r, buf[:])
- if err != nil {
- return
- }
- pk.s2kType = S2KType(buf[0])
- var optCount [1]byte
- if v5 {
- if _, err = readFull(r, optCount[:]); err != nil {
- return
- }
- }
-
- switch pk.s2kType {
- case S2KNON:
- pk.s2k = nil
- pk.Encrypted = false
- case S2KSHA1, S2KCHECKSUM:
- if v5 && pk.s2kType == S2KCHECKSUM {
- return errors.StructuralError("wrong s2k identifier for version 5")
- }
- _, err = readFull(r, buf[:])
- if err != nil {
- return
- }
- pk.cipher = CipherFunction(buf[0])
- if pk.cipher != 0 && !pk.cipher.IsSupported() {
- return errors.UnsupportedError("unsupported cipher function in private key")
- }
-		// TODO: see _gcry_ecc_eddsa_ensure_compact in libgcrypt
- if err != nil {
- return
- }
- if pk.s2kParams.Dummy() {
- return
- }
- pk.s2k, err = pk.s2kParams.Function()
- if err != nil {
- return
- }
- pk.Encrypted = true
- if pk.s2kType == S2KSHA1 {
- pk.sha1Checksum = true
- }
- default:
- return errors.UnsupportedError("deprecated s2k function in private key")
- }
-
- if pk.Encrypted {
- blockSize := pk.cipher.blockSize()
- if blockSize == 0 {
- return errors.UnsupportedError("unsupported cipher in private key: " + strconv.Itoa(int(pk.cipher)))
- }
- pk.iv = make([]byte, blockSize)
- _, err = readFull(r, pk.iv)
- if err != nil {
- return
- }
- }
-
- var privateKeyData []byte
- if v5 {
- var n [4]byte /* secret material four octet count */
- _, err = readFull(r, n[:])
- if err != nil {
- return
- }
- count := uint32(uint32(n[0])<<24 | uint32(n[1])<<16 | uint32(n[2])<<8 | uint32(n[3]))
- if !pk.Encrypted {
- count = count + 2 /* two octet checksum */
- }
- privateKeyData = make([]byte, count)
- _, err = readFull(r, privateKeyData)
- if err != nil {
- return
- }
- } else {
- privateKeyData, err = ioutil.ReadAll(r)
- if err != nil {
- return
- }
- }
- if !pk.Encrypted {
- if len(privateKeyData) < 2 {
- return errors.StructuralError("truncated private key data")
- }
- var sum uint16
- for i := 0; i < len(privateKeyData)-2; i++ {
- sum += uint16(privateKeyData[i])
- }
- if privateKeyData[len(privateKeyData)-2] != uint8(sum>>8) ||
- privateKeyData[len(privateKeyData)-1] != uint8(sum) {
- return errors.StructuralError("private key checksum failure")
- }
- privateKeyData = privateKeyData[:len(privateKeyData)-2]
- return pk.parsePrivateKey(privateKeyData)
- }
-
- pk.encryptedData = privateKeyData
- return
-}
-
-// Dummy returns true if the private key is a dummy key. This is a GNU extension.
-func (pk *PrivateKey) Dummy() bool {
- return pk.s2kParams.Dummy()
-}
-
-func mod64kHash(d []byte) uint16 {
- var h uint16
- for _, b := range d {
- h += uint16(b)
- }
- return h
-}
-
-func (pk *PrivateKey) Serialize(w io.Writer) (err error) {
- contents := bytes.NewBuffer(nil)
- err = pk.PublicKey.serializeWithoutHeaders(contents)
- if err != nil {
- return
- }
- if _, err = contents.Write([]byte{uint8(pk.s2kType)}); err != nil {
- return
- }
-
- optional := bytes.NewBuffer(nil)
- if pk.Encrypted || pk.Dummy() {
- optional.Write([]byte{uint8(pk.cipher)})
- if err := pk.s2kParams.Serialize(optional); err != nil {
- return err
- }
- if pk.Encrypted {
- optional.Write(pk.iv)
- }
- }
- if pk.Version == 5 {
- contents.Write([]byte{uint8(optional.Len())})
- }
- io.Copy(contents, optional)
-
- if !pk.Dummy() {
- l := 0
- var priv []byte
- if !pk.Encrypted {
- buf := bytes.NewBuffer(nil)
- err = pk.serializePrivateKey(buf)
- if err != nil {
- return err
- }
- l = buf.Len()
- checksum := mod64kHash(buf.Bytes())
- buf.Write([]byte{byte(checksum >> 8), byte(checksum)})
- priv = buf.Bytes()
- } else {
- priv, l = pk.encryptedData, len(pk.encryptedData)
- }
-
- if pk.Version == 5 {
- contents.Write([]byte{byte(l >> 24), byte(l >> 16), byte(l >> 8), byte(l)})
- }
- contents.Write(priv)
- }
-
- ptype := packetTypePrivateKey
- if pk.IsSubkey {
- ptype = packetTypePrivateSubkey
- }
- err = serializeHeader(w, ptype, contents.Len())
- if err != nil {
- return
- }
- _, err = io.Copy(w, contents)
- if err != nil {
- return
- }
- return
-}
-
-func serializeRSAPrivateKey(w io.Writer, priv *rsa.PrivateKey) error {
- if _, err := w.Write(new(encoding.MPI).SetBig(priv.D).EncodedBytes()); err != nil {
- return err
- }
- if _, err := w.Write(new(encoding.MPI).SetBig(priv.Primes[1]).EncodedBytes()); err != nil {
- return err
- }
- if _, err := w.Write(new(encoding.MPI).SetBig(priv.Primes[0]).EncodedBytes()); err != nil {
- return err
- }
- _, err := w.Write(new(encoding.MPI).SetBig(priv.Precomputed.Qinv).EncodedBytes())
- return err
-}
-
-func serializeDSAPrivateKey(w io.Writer, priv *dsa.PrivateKey) error {
- _, err := w.Write(new(encoding.MPI).SetBig(priv.X).EncodedBytes())
- return err
-}
-
-func serializeElGamalPrivateKey(w io.Writer, priv *elgamal.PrivateKey) error {
- _, err := w.Write(new(encoding.MPI).SetBig(priv.X).EncodedBytes())
- return err
-}
-
-func serializeECDSAPrivateKey(w io.Writer, priv *ecdsa.PrivateKey) error {
- _, err := w.Write(encoding.NewMPI(priv.MarshalIntegerSecret()).EncodedBytes())
- return err
-}
-
-func serializeEdDSAPrivateKey(w io.Writer, priv *eddsa.PrivateKey) error {
- _, err := w.Write(encoding.NewMPI(priv.MarshalByteSecret()).EncodedBytes())
- return err
-}
-
-func serializeECDHPrivateKey(w io.Writer, priv *ecdh.PrivateKey) error {
- _, err := w.Write(encoding.NewMPI(priv.MarshalByteSecret()).EncodedBytes())
- return err
-}
-
-// Decrypt decrypts an encrypted private key using a passphrase.
-func (pk *PrivateKey) Decrypt(passphrase []byte) error {
- if pk.Dummy() {
- return errors.ErrDummyPrivateKey("dummy key found")
- }
- if !pk.Encrypted {
- return nil
- }
-
- key := make([]byte, pk.cipher.KeySize())
- pk.s2k(key, passphrase)
- block := pk.cipher.new(key)
- cfb := cipher.NewCFBDecrypter(block, pk.iv)
-
- data := make([]byte, len(pk.encryptedData))
- cfb.XORKeyStream(data, pk.encryptedData)
-
- if pk.sha1Checksum {
- if len(data) < sha1.Size {
- return errors.StructuralError("truncated private key data")
- }
- h := sha1.New()
- h.Write(data[:len(data)-sha1.Size])
- sum := h.Sum(nil)
- if !bytes.Equal(sum, data[len(data)-sha1.Size:]) {
- return errors.StructuralError("private key checksum failure")
- }
- data = data[:len(data)-sha1.Size]
- } else {
- if len(data) < 2 {
- return errors.StructuralError("truncated private key data")
- }
- var sum uint16
- for i := 0; i < len(data)-2; i++ {
- sum += uint16(data[i])
- }
- if data[len(data)-2] != uint8(sum>>8) ||
- data[len(data)-1] != uint8(sum) {
- return errors.StructuralError("private key checksum failure")
- }
- data = data[:len(data)-2]
- }
-
- err := pk.parsePrivateKey(data)
- if _, ok := err.(errors.KeyInvalidError); ok {
- return errors.KeyInvalidError("invalid key parameters")
- }
- if err != nil {
- return err
- }
-
- // Mark key as unencrypted
- pk.s2kType = S2KNON
- pk.s2k = nil
- pk.Encrypted = false
- pk.encryptedData = nil
-
- return nil
-}
-
-// Encrypt encrypts an unencrypted private key using a passphrase.
-func (pk *PrivateKey) Encrypt(passphrase []byte) error {
- priv := bytes.NewBuffer(nil)
- err := pk.serializePrivateKey(priv)
- if err != nil {
- return err
- }
-
-	// Default config for private-key encryption
- pk.cipher = CipherAES256
- s2kConfig := &s2k.Config{
- S2KMode: 3, //Iterated
- S2KCount: 65536,
- Hash: crypto.SHA256,
- }
-
- pk.s2kParams, err = s2k.Generate(rand.Reader, s2kConfig)
- if err != nil {
- return err
- }
- privateKeyBytes := priv.Bytes()
- key := make([]byte, pk.cipher.KeySize())
-
- pk.sha1Checksum = true
- pk.s2k, err = pk.s2kParams.Function()
- if err != nil {
- return err
- }
- pk.s2k(key, passphrase)
- block := pk.cipher.new(key)
- pk.iv = make([]byte, pk.cipher.blockSize())
- _, err = rand.Read(pk.iv)
- if err != nil {
- return err
- }
- cfb := cipher.NewCFBEncrypter(block, pk.iv)
-
- if pk.sha1Checksum {
- pk.s2kType = S2KSHA1
- h := sha1.New()
- h.Write(privateKeyBytes)
- sum := h.Sum(nil)
- privateKeyBytes = append(privateKeyBytes, sum...)
- } else {
- pk.s2kType = S2KCHECKSUM
- var sum uint16
- for _, b := range privateKeyBytes {
- sum += uint16(b)
- }
- priv.Write([]byte{uint8(sum >> 8), uint8(sum)})
- }
-
- pk.encryptedData = make([]byte, len(privateKeyBytes))
- cfb.XORKeyStream(pk.encryptedData, privateKeyBytes)
- pk.Encrypted = true
- pk.PrivateKey = nil
- return err
-}
-
-func (pk *PrivateKey) serializePrivateKey(w io.Writer) (err error) {
- switch priv := pk.PrivateKey.(type) {
- case *rsa.PrivateKey:
- err = serializeRSAPrivateKey(w, priv)
- case *dsa.PrivateKey:
- err = serializeDSAPrivateKey(w, priv)
- case *elgamal.PrivateKey:
- err = serializeElGamalPrivateKey(w, priv)
- case *ecdsa.PrivateKey:
- err = serializeECDSAPrivateKey(w, priv)
- case *eddsa.PrivateKey:
- err = serializeEdDSAPrivateKey(w, priv)
- case *ecdh.PrivateKey:
- err = serializeECDHPrivateKey(w, priv)
- default:
- err = errors.InvalidArgumentError("unknown private key type")
- }
- return
-}
-
-func (pk *PrivateKey) parsePrivateKey(data []byte) (err error) {
- switch pk.PublicKey.PubKeyAlgo {
- case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoRSAEncryptOnly:
- return pk.parseRSAPrivateKey(data)
- case PubKeyAlgoDSA:
- return pk.parseDSAPrivateKey(data)
- case PubKeyAlgoElGamal:
- return pk.parseElGamalPrivateKey(data)
- case PubKeyAlgoECDSA:
- return pk.parseECDSAPrivateKey(data)
- case PubKeyAlgoECDH:
- return pk.parseECDHPrivateKey(data)
- case PubKeyAlgoEdDSA:
- return pk.parseEdDSAPrivateKey(data)
- }
- panic("impossible")
-}
-
-func (pk *PrivateKey) parseRSAPrivateKey(data []byte) (err error) {
- rsaPub := pk.PublicKey.PublicKey.(*rsa.PublicKey)
- rsaPriv := new(rsa.PrivateKey)
- rsaPriv.PublicKey = *rsaPub
-
- buf := bytes.NewBuffer(data)
- d := new(encoding.MPI)
- if _, err := d.ReadFrom(buf); err != nil {
- return err
- }
-
- p := new(encoding.MPI)
- if _, err := p.ReadFrom(buf); err != nil {
- return err
- }
-
- q := new(encoding.MPI)
- if _, err := q.ReadFrom(buf); err != nil {
- return err
- }
-
- rsaPriv.D = new(big.Int).SetBytes(d.Bytes())
- rsaPriv.Primes = make([]*big.Int, 2)
- rsaPriv.Primes[0] = new(big.Int).SetBytes(p.Bytes())
- rsaPriv.Primes[1] = new(big.Int).SetBytes(q.Bytes())
- if err := rsaPriv.Validate(); err != nil {
- return errors.KeyInvalidError(err.Error())
- }
- rsaPriv.Precompute()
- pk.PrivateKey = rsaPriv
-
- return nil
-}
-
-func (pk *PrivateKey) parseDSAPrivateKey(data []byte) (err error) {
- dsaPub := pk.PublicKey.PublicKey.(*dsa.PublicKey)
- dsaPriv := new(dsa.PrivateKey)
- dsaPriv.PublicKey = *dsaPub
-
- buf := bytes.NewBuffer(data)
- x := new(encoding.MPI)
- if _, err := x.ReadFrom(buf); err != nil {
- return err
- }
-
- dsaPriv.X = new(big.Int).SetBytes(x.Bytes())
- if err := validateDSAParameters(dsaPriv); err != nil {
- return err
- }
- pk.PrivateKey = dsaPriv
-
- return nil
-}
-
-func (pk *PrivateKey) parseElGamalPrivateKey(data []byte) (err error) {
- pub := pk.PublicKey.PublicKey.(*elgamal.PublicKey)
- priv := new(elgamal.PrivateKey)
- priv.PublicKey = *pub
-
- buf := bytes.NewBuffer(data)
- x := new(encoding.MPI)
- if _, err := x.ReadFrom(buf); err != nil {
- return err
- }
-
- priv.X = new(big.Int).SetBytes(x.Bytes())
- if err := validateElGamalParameters(priv); err != nil {
- return err
- }
- pk.PrivateKey = priv
-
- return nil
-}
-
-func (pk *PrivateKey) parseECDSAPrivateKey(data []byte) (err error) {
- ecdsaPub := pk.PublicKey.PublicKey.(*ecdsa.PublicKey)
- ecdsaPriv := ecdsa.NewPrivateKey(*ecdsaPub)
-
- buf := bytes.NewBuffer(data)
- d := new(encoding.MPI)
- if _, err := d.ReadFrom(buf); err != nil {
- return err
- }
-
- if err := ecdsaPriv.UnmarshalIntegerSecret(d.Bytes()); err != nil {
- return err
- }
- if err := ecdsa.Validate(ecdsaPriv); err != nil {
- return err
- }
- pk.PrivateKey = ecdsaPriv
-
- return nil
-}
-
-func (pk *PrivateKey) parseECDHPrivateKey(data []byte) (err error) {
- ecdhPub := pk.PublicKey.PublicKey.(*ecdh.PublicKey)
- ecdhPriv := ecdh.NewPrivateKey(*ecdhPub)
-
- buf := bytes.NewBuffer(data)
- d := new(encoding.MPI)
- if _, err := d.ReadFrom(buf); err != nil {
- return err
- }
-
- if err := ecdhPriv.UnmarshalByteSecret(d.Bytes()); err != nil {
- return err
- }
-
- if err := ecdh.Validate(ecdhPriv); err != nil {
- return err
- }
-
- pk.PrivateKey = ecdhPriv
-
- return nil
-}
-
-func (pk *PrivateKey) parseEdDSAPrivateKey(data []byte) (err error) {
- eddsaPub := pk.PublicKey.PublicKey.(*eddsa.PublicKey)
- eddsaPriv := eddsa.NewPrivateKey(*eddsaPub)
- eddsaPriv.PublicKey = *eddsaPub
-
- buf := bytes.NewBuffer(data)
- d := new(encoding.MPI)
- if _, err := d.ReadFrom(buf); err != nil {
- return err
- }
-
- if err = eddsaPriv.UnmarshalByteSecret(d.Bytes()); err != nil {
- return err
- }
-
- if err := eddsa.Validate(eddsaPriv); err != nil {
- return err
- }
-
- pk.PrivateKey = eddsaPriv
-
- return nil
-}
-
-func validateDSAParameters(priv *dsa.PrivateKey) error {
- p := priv.P // group prime
- q := priv.Q // subgroup order
- g := priv.G // g has order q mod p
- x := priv.X // secret
- y := priv.Y // y == g**x mod p
- one := big.NewInt(1)
- // expect g, y >= 2 and g < p
- if g.Cmp(one) <= 0 || y.Cmp(one) <= 0 || g.Cmp(p) > 0 {
- return errors.KeyInvalidError("dsa: invalid group")
- }
- // expect p > q
- if p.Cmp(q) <= 0 {
- return errors.KeyInvalidError("dsa: invalid group prime")
- }
- // q should be large enough and divide p-1
- pSub1 := new(big.Int).Sub(p, one)
- if q.BitLen() < 150 || new(big.Int).Mod(pSub1, q).Cmp(big.NewInt(0)) != 0 {
- return errors.KeyInvalidError("dsa: invalid order")
- }
- // confirm that g has order q mod p
- if !q.ProbablyPrime(32) || new(big.Int).Exp(g, q, p).Cmp(one) != 0 {
- return errors.KeyInvalidError("dsa: invalid order")
- }
- // check y
- if new(big.Int).Exp(g, x, p).Cmp(y) != 0 {
- return errors.KeyInvalidError("dsa: mismatching values")
- }
-
- return nil
-}
-
-func validateElGamalParameters(priv *elgamal.PrivateKey) error {
- p := priv.P // group prime
- g := priv.G // g has order p-1 mod p
- x := priv.X // secret
- y := priv.Y // y == g**x mod p
- one := big.NewInt(1)
- // Expect g, y >= 2 and g < p
- if g.Cmp(one) <= 0 || y.Cmp(one) <= 0 || g.Cmp(p) > 0 {
- return errors.KeyInvalidError("elgamal: invalid group")
- }
- if p.BitLen() < 1024 {
- return errors.KeyInvalidError("elgamal: group order too small")
- }
- pSub1 := new(big.Int).Sub(p, one)
- if new(big.Int).Exp(g, pSub1, p).Cmp(one) != 0 {
- return errors.KeyInvalidError("elgamal: invalid group")
- }
- // Since p-1 is not prime, g might have a smaller order that divides p-1.
- // We cannot confirm the exact order of g, but we make sure it is not too small.
- gExpI := new(big.Int).Set(g)
- i := 1
- threshold := 2 << 17 // we want order > threshold
- for i < threshold {
- i++ // we check every order to make sure key validation is not easily bypassed by guessing y'
- gExpI.Mod(new(big.Int).Mul(gExpI, g), p)
- if gExpI.Cmp(one) == 0 {
- return errors.KeyInvalidError("elgamal: order too small")
- }
- }
- // Check y
- if new(big.Int).Exp(g, x, p).Cmp(y) != 0 {
- return errors.KeyInvalidError("elgamal: mismatching values")
- }
-
- return nil
-}
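Since the checksum logic appears twice above (inline in `parse` and in `Decrypt`), here is the two-octet checksum over unencrypted secret-key material (the `mod64kHash` sum; RFC 4880, section 5.5.3) as a standalone sketch. The helper name is ours, not the library's:

```go
package main

import "fmt"

// checksum16 sums all octets of the secret-key material modulo 65536,
// matching the deleted mod64kHash and the inline loops in parse/Decrypt.
func checksum16(data []byte) uint16 {
	var sum uint16
	for _, b := range data {
		sum += uint16(b)
	}
	return sum
}

func main() {
	body := []byte{0x01, 0xff, 0x80}
	sum := checksum16(body)
	// Serialized form: key material followed by the big-endian checksum,
	// which is what Serialize appends and parse verifies.
	serialized := append(append([]byte{}, body...), byte(sum>>8), byte(sum))
	fmt.Printf("% x\n", serialized)
}
```

This checksum only detects accidental corruption; the S2KSHA1 mode above replaces it with a SHA-1 digest over the key material.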
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key_test_data.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key_test_data.go
deleted file mode 100644
index 029b8f1a..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key_test_data.go
+++ /dev/null
@@ -1,12 +0,0 @@
-package packet
-
-// Generated with `gpg --export-secret-keys "Test Key 2"`
-const privKeyRSAHex = "9501fe044cc349a8010400b70ca0010e98c090008d45d1ee8f9113bd5861fd57b88bacb7c68658747663f1e1a3b5a98f32fda6472373c024b97359cd2efc88ff60f77751adfbf6af5e615e6a1408cfad8bf0cea30b0d5f53aa27ad59089ba9b15b7ebc2777a25d7b436144027e3bcd203909f147d0e332b240cf63d3395f5dfe0df0a6c04e8655af7eacdf0011010001fe0303024a252e7d475fd445607de39a265472aa74a9320ba2dac395faa687e9e0336aeb7e9a7397e511b5afd9dc84557c80ac0f3d4d7bfec5ae16f20d41c8c84a04552a33870b930420e230e179564f6d19bb153145e76c33ae993886c388832b0fa042ddda7f133924f3854481533e0ede31d51278c0519b29abc3bf53da673e13e3e1214b52413d179d7f66deee35cac8eacb060f78379d70ef4af8607e68131ff529439668fc39c9ce6dfef8a5ac234d234802cbfb749a26107db26406213ae5c06d4673253a3cbee1fcbae58d6ab77e38d6e2c0e7c6317c48e054edadb5a40d0d48acb44643d998139a8a66bb820be1f3f80185bc777d14b5954b60effe2448a036d565c6bc0b915fcea518acdd20ab07bc1529f561c58cd044f723109b93f6fd99f876ff891d64306b5d08f48bab59f38695e9109c4dec34013ba3153488ce070268381ba923ee1eb77125b36afcb4347ec3478c8f2735b06ef17351d872e577fa95d0c397c88c71b59629a36aec"
-
-// Generated by `gpg --export-secret-keys` followed by a manual extraction of
-// the ElGamal subkey from the packets.
-const privKeyElGamalHex = "9d0157044df9ee1a100400eb8e136a58ec39b582629cdadf830bc64e0a94ed8103ca8bb247b27b11b46d1d25297ef4bcc3071785ba0c0bedfe89eabc5287fcc0edf81ab5896c1c8e4b20d27d79813c7aede75320b33eaeeaa586edc00fd1036c10133e6ba0ff277245d0d59d04b2b3421b7244aca5f4a8d870c6f1c1fbff9e1c26699a860b9504f35ca1d700030503fd1ededd3b840795be6d9ccbe3c51ee42e2f39233c432b831ddd9c4e72b7025a819317e47bf94f9ee316d7273b05d5fcf2999c3a681f519b1234bbfa6d359b4752bd9c3f77d6b6456cde152464763414ca130f4e91d91041432f90620fec0e6d6b5116076c2985d5aeaae13be492b9b329efcaf7ee25120159a0a30cd976b42d7afe030302dae7eb80db744d4960c4df930d57e87fe81412eaace9f900e6c839817a614ddb75ba6603b9417c33ea7b6c93967dfa2bcff3fa3c74a5ce2c962db65b03aece14c96cbd0038fc"
-
-// pkcs1PrivKeyHex is a PKCS#1, RSA private key.
-// Generated by `openssl genrsa 1024 | openssl rsa -outform DER | xxd -p`
-const pkcs1PrivKeyHex = "3082025d02010002818100e98edfa1c3b35884a54d0b36a6a603b0290fa85e49e30fa23fc94fef9c6790bc4849928607aa48d809da326fb42a969d06ad756b98b9c1a90f5d4a2b6d0ac05953c97f4da3120164a21a679793ce181c906dc01d235cc085ddcdf6ea06c389b6ab8885dfd685959e693138856a68a7e5db263337ff82a088d583a897cf2d59e9020301000102818100b6d5c9eb70b02d5369b3ee5b520a14490b5bde8a317d36f7e4c74b7460141311d1e5067735f8f01d6f5908b2b96fbd881f7a1ab9a84d82753e39e19e2d36856be960d05ac9ef8e8782ea1b6d65aee28fdfe1d61451e8cff0adfe84322f12cf455028b581cf60eb9e0e140ba5d21aeba6c2634d7c65318b9a665fc01c3191ca21024100fa5e818da3705b0fa33278bb28d4b6f6050388af2d4b75ec9375dd91ccf2e7d7068086a8b82a8f6282e4fbbdb8a7f2622eb97295249d87acea7f5f816f54d347024100eecf9406d7dc49cdfb95ab1eff4064de84c7a30f64b2798936a0d2018ba9eb52e4b636f82e96c49cc63b80b675e91e40d1b2e4017d4b9adaf33ab3d9cf1c214f024100c173704ace742c082323066226a4655226819a85304c542b9dacbeacbf5d1881ee863485fcf6f59f3a604f9b42289282067447f2b13dfeed3eab7851fc81e0550240741fc41f3fc002b382eed8730e33c5d8de40256e4accee846667f536832f711ab1d4590e7db91a8a116ac5bff3be13d3f9243ff2e976662aa9b395d907f8e9c9024046a5696c9ef882363e06c9fa4e2f5b580906452befba03f4a99d0f873697ef1f851d2226ca7934b30b7c3e80cb634a67172bbbf4781735fe3e09263e2dd723e7"
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key.go
deleted file mode 100644
index e0f5f74a..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key.go
+++ /dev/null
@@ -1,802 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package packet
-
-import (
- "crypto"
- "crypto/dsa"
- "crypto/rsa"
- "crypto/sha1"
- "crypto/sha256"
- _ "crypto/sha512"
- "encoding/binary"
- "fmt"
- "hash"
- "io"
- "math/big"
- "strconv"
- "time"
-
- "github.com/ProtonMail/go-crypto/openpgp/ecdh"
- "github.com/ProtonMail/go-crypto/openpgp/ecdsa"
- "github.com/ProtonMail/go-crypto/openpgp/eddsa"
- "github.com/ProtonMail/go-crypto/openpgp/elgamal"
- "github.com/ProtonMail/go-crypto/openpgp/errors"
- "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm"
- "github.com/ProtonMail/go-crypto/openpgp/internal/ecc"
- "github.com/ProtonMail/go-crypto/openpgp/internal/encoding"
-)
-
-type kdfHashFunction byte
-type kdfAlgorithm byte
-
-// PublicKey represents an OpenPGP public key. See RFC 4880, section 5.5.2.
-type PublicKey struct {
- Version int
- CreationTime time.Time
- PubKeyAlgo PublicKeyAlgorithm
- PublicKey interface{} // *rsa.PublicKey, *dsa.PublicKey, *ecdsa.PublicKey or *eddsa.PublicKey
- Fingerprint []byte
- KeyId uint64
- IsSubkey bool
-
- // RFC 4880 fields
- n, e, p, q, g, y encoding.Field
-
- // RFC 6637 fields
- // oid contains the OID byte sequence identifying the elliptic curve used
- oid encoding.Field
-
- // kdf stores key derivation function parameters
- // used for ECDH encryption. See RFC 6637, Section 9.
- kdf encoding.Field
-}
-
-// UpgradeToV5 updates the version of the key to v5, and updates all necessary
-// fields.
-func (pk *PublicKey) UpgradeToV5() {
- pk.Version = 5
- pk.setFingerprintAndKeyId()
-}
-
-// signingKey provides a convenient abstraction over signature verification
-// for v3 and v4 public keys.
-type signingKey interface {
- SerializeForHash(io.Writer) error
- SerializeSignaturePrefix(io.Writer)
- serializeWithoutHeaders(io.Writer) error
-}
-
-// NewRSAPublicKey returns a PublicKey that wraps the given rsa.PublicKey.
-func NewRSAPublicKey(creationTime time.Time, pub *rsa.PublicKey) *PublicKey {
- pk := &PublicKey{
- Version: 4,
- CreationTime: creationTime,
- PubKeyAlgo: PubKeyAlgoRSA,
- PublicKey: pub,
- n: new(encoding.MPI).SetBig(pub.N),
- e: new(encoding.MPI).SetBig(big.NewInt(int64(pub.E))),
- }
-
- pk.setFingerprintAndKeyId()
- return pk
-}
-
-// NewDSAPublicKey returns a PublicKey that wraps the given dsa.PublicKey.
-func NewDSAPublicKey(creationTime time.Time, pub *dsa.PublicKey) *PublicKey {
- pk := &PublicKey{
- Version: 4,
- CreationTime: creationTime,
- PubKeyAlgo: PubKeyAlgoDSA,
- PublicKey: pub,
- p: new(encoding.MPI).SetBig(pub.P),
- q: new(encoding.MPI).SetBig(pub.Q),
- g: new(encoding.MPI).SetBig(pub.G),
- y: new(encoding.MPI).SetBig(pub.Y),
- }
-
- pk.setFingerprintAndKeyId()
- return pk
-}
-
-// NewElGamalPublicKey returns a PublicKey that wraps the given elgamal.PublicKey.
-func NewElGamalPublicKey(creationTime time.Time, pub *elgamal.PublicKey) *PublicKey {
- pk := &PublicKey{
- Version: 4,
- CreationTime: creationTime,
- PubKeyAlgo: PubKeyAlgoElGamal,
- PublicKey: pub,
- p: new(encoding.MPI).SetBig(pub.P),
- g: new(encoding.MPI).SetBig(pub.G),
- y: new(encoding.MPI).SetBig(pub.Y),
- }
-
- pk.setFingerprintAndKeyId()
- return pk
-}
-
-func NewECDSAPublicKey(creationTime time.Time, pub *ecdsa.PublicKey) *PublicKey {
- pk := &PublicKey{
- Version: 4,
- CreationTime: creationTime,
- PubKeyAlgo: PubKeyAlgoECDSA,
- PublicKey: pub,
- p: encoding.NewMPI(pub.MarshalPoint()),
- }
-
- curveInfo := ecc.FindByCurve(pub.GetCurve())
- if curveInfo == nil {
- panic("unknown elliptic curve")
- }
- pk.oid = curveInfo.Oid
- pk.setFingerprintAndKeyId()
- return pk
-}
-
-func NewECDHPublicKey(creationTime time.Time, pub *ecdh.PublicKey) *PublicKey {
- var pk *PublicKey
- var kdf = encoding.NewOID([]byte{0x1, pub.Hash.Id(), pub.Cipher.Id()})
- pk = &PublicKey{
- Version: 4,
- CreationTime: creationTime,
- PubKeyAlgo: PubKeyAlgoECDH,
- PublicKey: pub,
- p: encoding.NewMPI(pub.MarshalPoint()),
- kdf: kdf,
- }
-
- curveInfo := ecc.FindByCurve(pub.GetCurve())
-
- if curveInfo == nil {
- panic("unknown elliptic curve")
- }
-
- pk.oid = curveInfo.Oid
- pk.setFingerprintAndKeyId()
- return pk
-}
-
-func NewEdDSAPublicKey(creationTime time.Time, pub *eddsa.PublicKey) *PublicKey {
- curveInfo := ecc.FindByCurve(pub.GetCurve())
- pk := &PublicKey{
- Version: 4,
- CreationTime: creationTime,
- PubKeyAlgo: PubKeyAlgoEdDSA,
- PublicKey: pub,
- oid: curveInfo.Oid,
- // Native point format, see draft-koch-eddsa-for-openpgp-04, Appendix B
- p: encoding.NewMPI(pub.MarshalPoint()),
- }
-
- pk.setFingerprintAndKeyId()
- return pk
-}
-
-func (pk *PublicKey) parse(r io.Reader) (err error) {
- // RFC 4880, section 5.5.2
- var buf [6]byte
- _, err = readFull(r, buf[:])
- if err != nil {
- return
- }
- if buf[0] != 4 && buf[0] != 5 {
- return errors.UnsupportedError("public key version " + strconv.Itoa(int(buf[0])))
- }
-
- pk.Version = int(buf[0])
- if pk.Version == 5 {
- var n [4]byte
- _, err = readFull(r, n[:])
- if err != nil {
- return
- }
- }
- pk.CreationTime = time.Unix(int64(uint32(buf[1])<<24|uint32(buf[2])<<16|uint32(buf[3])<<8|uint32(buf[4])), 0)
- pk.PubKeyAlgo = PublicKeyAlgorithm(buf[5])
- switch pk.PubKeyAlgo {
- case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
- err = pk.parseRSA(r)
- case PubKeyAlgoDSA:
- err = pk.parseDSA(r)
- case PubKeyAlgoElGamal:
- err = pk.parseElGamal(r)
- case PubKeyAlgoECDSA:
- err = pk.parseECDSA(r)
- case PubKeyAlgoECDH:
- err = pk.parseECDH(r)
- case PubKeyAlgoEdDSA:
- err = pk.parseEdDSA(r)
- default:
- err = errors.UnsupportedError("public key type: " + strconv.Itoa(int(pk.PubKeyAlgo)))
- }
- if err != nil {
- return
- }
-
- pk.setFingerprintAndKeyId()
- return
-}
-
-func (pk *PublicKey) setFingerprintAndKeyId() {
- // RFC 4880, section 12.2
- if pk.Version == 5 {
- fingerprint := sha256.New()
- pk.SerializeForHash(fingerprint)
- pk.Fingerprint = make([]byte, 32)
- copy(pk.Fingerprint, fingerprint.Sum(nil))
- pk.KeyId = binary.BigEndian.Uint64(pk.Fingerprint[:8])
- } else {
- fingerprint := sha1.New()
- pk.SerializeForHash(fingerprint)
- pk.Fingerprint = make([]byte, 20)
- copy(pk.Fingerprint, fingerprint.Sum(nil))
- pk.KeyId = binary.BigEndian.Uint64(pk.Fingerprint[12:20])
- }
-}
-
-// parseRSA parses RSA public key material from the given Reader. See RFC 4880,
-// section 5.5.2.
-func (pk *PublicKey) parseRSA(r io.Reader) (err error) {
- pk.n = new(encoding.MPI)
- if _, err = pk.n.ReadFrom(r); err != nil {
- return
- }
- pk.e = new(encoding.MPI)
- if _, err = pk.e.ReadFrom(r); err != nil {
- return
- }
-
- if len(pk.e.Bytes()) > 3 {
- err = errors.UnsupportedError("large public exponent")
- return
- }
- rsa := &rsa.PublicKey{
- N: new(big.Int).SetBytes(pk.n.Bytes()),
- E: 0,
- }
- for i := 0; i < len(pk.e.Bytes()); i++ {
- rsa.E <<= 8
- rsa.E |= int(pk.e.Bytes()[i])
- }
- pk.PublicKey = rsa
- return
-}
-
-// parseDSA parses DSA public key material from the given Reader. See RFC 4880,
-// section 5.5.2.
-func (pk *PublicKey) parseDSA(r io.Reader) (err error) {
- pk.p = new(encoding.MPI)
- if _, err = pk.p.ReadFrom(r); err != nil {
- return
- }
- pk.q = new(encoding.MPI)
- if _, err = pk.q.ReadFrom(r); err != nil {
- return
- }
- pk.g = new(encoding.MPI)
- if _, err = pk.g.ReadFrom(r); err != nil {
- return
- }
- pk.y = new(encoding.MPI)
- if _, err = pk.y.ReadFrom(r); err != nil {
- return
- }
-
- dsa := new(dsa.PublicKey)
- dsa.P = new(big.Int).SetBytes(pk.p.Bytes())
- dsa.Q = new(big.Int).SetBytes(pk.q.Bytes())
- dsa.G = new(big.Int).SetBytes(pk.g.Bytes())
- dsa.Y = new(big.Int).SetBytes(pk.y.Bytes())
- pk.PublicKey = dsa
- return
-}
-
-// parseElGamal parses ElGamal public key material from the given Reader. See
-// RFC 4880, section 5.5.2.
-func (pk *PublicKey) parseElGamal(r io.Reader) (err error) {
- pk.p = new(encoding.MPI)
- if _, err = pk.p.ReadFrom(r); err != nil {
- return
- }
- pk.g = new(encoding.MPI)
- if _, err = pk.g.ReadFrom(r); err != nil {
- return
- }
- pk.y = new(encoding.MPI)
- if _, err = pk.y.ReadFrom(r); err != nil {
- return
- }
-
- elgamal := new(elgamal.PublicKey)
- elgamal.P = new(big.Int).SetBytes(pk.p.Bytes())
- elgamal.G = new(big.Int).SetBytes(pk.g.Bytes())
- elgamal.Y = new(big.Int).SetBytes(pk.y.Bytes())
- pk.PublicKey = elgamal
- return
-}
-
-// parseECDSA parses ECDSA public key material from the given Reader. See
-// RFC 6637, Section 9.
-func (pk *PublicKey) parseECDSA(r io.Reader) (err error) {
- pk.oid = new(encoding.OID)
- if _, err = pk.oid.ReadFrom(r); err != nil {
- return
- }
- pk.p = new(encoding.MPI)
- if _, err = pk.p.ReadFrom(r); err != nil {
- return
- }
-
- curveInfo := ecc.FindByOid(pk.oid)
- if curveInfo == nil {
- return errors.UnsupportedError(fmt.Sprintf("unknown oid: %x", pk.oid))
- }
-
- c, ok := curveInfo.Curve.(ecc.ECDSACurve)
- if !ok {
- return errors.UnsupportedError(fmt.Sprintf("unsupported oid: %x", pk.oid))
- }
-
- ecdsaKey := ecdsa.NewPublicKey(c)
- err = ecdsaKey.UnmarshalPoint(pk.p.Bytes())
- pk.PublicKey = ecdsaKey
-
- return
-}
-
-// parseECDH parses ECDH public key material from the given Reader. See
-// RFC 6637, Section 9.
-func (pk *PublicKey) parseECDH(r io.Reader) (err error) {
- pk.oid = new(encoding.OID)
- if _, err = pk.oid.ReadFrom(r); err != nil {
- return
- }
- pk.p = new(encoding.MPI)
- if _, err = pk.p.ReadFrom(r); err != nil {
- return
- }
- pk.kdf = new(encoding.OID)
- if _, err = pk.kdf.ReadFrom(r); err != nil {
- return
- }
-
- curveInfo := ecc.FindByOid(pk.oid)
-
- if curveInfo == nil {
- return errors.UnsupportedError(fmt.Sprintf("unknown oid: %x", pk.oid))
- }
-
- c, ok := curveInfo.Curve.(ecc.ECDHCurve)
- if !ok {
- return errors.UnsupportedError(fmt.Sprintf("unsupported oid: %x", pk.oid))
- }
-
- if kdfLen := len(pk.kdf.Bytes()); kdfLen < 3 {
- return errors.UnsupportedError("unsupported ECDH KDF length: " + strconv.Itoa(kdfLen))
- }
- if reserved := pk.kdf.Bytes()[0]; reserved != 0x01 {
- return errors.UnsupportedError("unsupported KDF reserved field: " + strconv.Itoa(int(reserved)))
- }
- kdfHash, ok := algorithm.HashById[pk.kdf.Bytes()[1]]
- if !ok {
- return errors.UnsupportedError("unsupported ECDH KDF hash: " + strconv.Itoa(int(pk.kdf.Bytes()[1])))
- }
- kdfCipher, ok := algorithm.CipherById[pk.kdf.Bytes()[2]]
- if !ok {
- return errors.UnsupportedError("unsupported ECDH KDF cipher: " + strconv.Itoa(int(pk.kdf.Bytes()[2])))
- }
-
- ecdhKey := ecdh.NewPublicKey(c, kdfHash, kdfCipher)
- err = ecdhKey.UnmarshalPoint(pk.p.Bytes())
- pk.PublicKey = ecdhKey
-
- return
-}
-
-func (pk *PublicKey) parseEdDSA(r io.Reader) (err error) {
- pk.oid = new(encoding.OID)
- if _, err = pk.oid.ReadFrom(r); err != nil {
- return
- }
- curveInfo := ecc.FindByOid(pk.oid)
- if curveInfo == nil {
- return errors.UnsupportedError(fmt.Sprintf("unknown oid: %x", pk.oid))
- }
-
- c, ok := curveInfo.Curve.(ecc.EdDSACurve)
- if !ok {
- return errors.UnsupportedError(fmt.Sprintf("unsupported oid: %x", pk.oid))
- }
-
- pk.p = new(encoding.MPI)
- if _, err = pk.p.ReadFrom(r); err != nil {
- return
- }
-
- pub := eddsa.NewPublicKey(c)
-
- switch flag := pk.p.Bytes()[0]; flag {
- case 0x04:
- // TODO: see _grcy_ecc_eddsa_ensure_compact in grcypt
- return errors.UnsupportedError("unsupported EdDSA compression: " + strconv.Itoa(int(flag)))
- case 0x40:
- err = pub.UnmarshalPoint(pk.p.Bytes())
- default:
- return errors.UnsupportedError("unsupported EdDSA compression: " + strconv.Itoa(int(flag)))
- }
-
- pk.PublicKey = pub
- return
-}
-
-// SerializeForHash serializes the PublicKey to w with the special packet
-// header format needed for hashing.
-func (pk *PublicKey) SerializeForHash(w io.Writer) error {
- pk.SerializeSignaturePrefix(w)
- return pk.serializeWithoutHeaders(w)
-}
-
-// SerializeSignaturePrefix writes the prefix for this public key to the given Writer.
-// The prefix is used when calculating a signature over this public key. See
-// RFC 4880, section 5.2.4.
-func (pk *PublicKey) SerializeSignaturePrefix(w io.Writer) {
- var pLength = pk.algorithmSpecificByteCount()
- if pk.Version == 5 {
- pLength += 10 // version, timestamp (4), algorithm, key octet count (4).
- w.Write([]byte{
- 0x9A,
- byte(pLength >> 24),
- byte(pLength >> 16),
- byte(pLength >> 8),
- byte(pLength),
- })
- return
- }
- pLength += 6
- w.Write([]byte{0x99, byte(pLength >> 8), byte(pLength)})
-}
-
-func (pk *PublicKey) Serialize(w io.Writer) (err error) {
- length := 6 // 6 byte header
- length += pk.algorithmSpecificByteCount()
- if pk.Version == 5 {
- length += 4 // octet key count
- }
- packetType := packetTypePublicKey
- if pk.IsSubkey {
- packetType = packetTypePublicSubkey
- }
- err = serializeHeader(w, packetType, length)
- if err != nil {
- return
- }
- return pk.serializeWithoutHeaders(w)
-}
-
-func (pk *PublicKey) algorithmSpecificByteCount() int {
- length := 0
- switch pk.PubKeyAlgo {
- case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
- length += int(pk.n.EncodedLength())
- length += int(pk.e.EncodedLength())
- case PubKeyAlgoDSA:
- length += int(pk.p.EncodedLength())
- length += int(pk.q.EncodedLength())
- length += int(pk.g.EncodedLength())
- length += int(pk.y.EncodedLength())
- case PubKeyAlgoElGamal:
- length += int(pk.p.EncodedLength())
- length += int(pk.g.EncodedLength())
- length += int(pk.y.EncodedLength())
- case PubKeyAlgoECDSA:
- length += int(pk.oid.EncodedLength())
- length += int(pk.p.EncodedLength())
- case PubKeyAlgoECDH:
- length += int(pk.oid.EncodedLength())
- length += int(pk.p.EncodedLength())
- length += int(pk.kdf.EncodedLength())
- case PubKeyAlgoEdDSA:
- length += int(pk.oid.EncodedLength())
- length += int(pk.p.EncodedLength())
- default:
- panic("unknown public key algorithm")
- }
- return length
-}
-
-// serializeWithoutHeaders marshals the PublicKey to w in the form of an
-// OpenPGP public key packet, not including the packet header.
-func (pk *PublicKey) serializeWithoutHeaders(w io.Writer) (err error) {
- t := uint32(pk.CreationTime.Unix())
- if _, err = w.Write([]byte{
- byte(pk.Version),
- byte(t >> 24), byte(t >> 16), byte(t >> 8), byte(t),
- byte(pk.PubKeyAlgo),
- }); err != nil {
- return
- }
-
- if pk.Version == 5 {
- n := pk.algorithmSpecificByteCount()
- if _, err = w.Write([]byte{
- byte(n >> 24), byte(n >> 16), byte(n >> 8), byte(n),
- }); err != nil {
- return
- }
- }
-
- switch pk.PubKeyAlgo {
- case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
- if _, err = w.Write(pk.n.EncodedBytes()); err != nil {
- return
- }
- _, err = w.Write(pk.e.EncodedBytes())
- return
- case PubKeyAlgoDSA:
- if _, err = w.Write(pk.p.EncodedBytes()); err != nil {
- return
- }
- if _, err = w.Write(pk.q.EncodedBytes()); err != nil {
- return
- }
- if _, err = w.Write(pk.g.EncodedBytes()); err != nil {
- return
- }
- _, err = w.Write(pk.y.EncodedBytes())
- return
- case PubKeyAlgoElGamal:
- if _, err = w.Write(pk.p.EncodedBytes()); err != nil {
- return
- }
- if _, err = w.Write(pk.g.EncodedBytes()); err != nil {
- return
- }
- _, err = w.Write(pk.y.EncodedBytes())
- return
- case PubKeyAlgoECDSA:
- if _, err = w.Write(pk.oid.EncodedBytes()); err != nil {
- return
- }
- _, err = w.Write(pk.p.EncodedBytes())
- return
- case PubKeyAlgoECDH:
- if _, err = w.Write(pk.oid.EncodedBytes()); err != nil {
- return
- }
- if _, err = w.Write(pk.p.EncodedBytes()); err != nil {
- return
- }
- _, err = w.Write(pk.kdf.EncodedBytes())
- return
- case PubKeyAlgoEdDSA:
- if _, err = w.Write(pk.oid.EncodedBytes()); err != nil {
- return
- }
- _, err = w.Write(pk.p.EncodedBytes())
- return
- }
- return errors.InvalidArgumentError("bad public-key algorithm")
-}
-
-// CanSign returns true iff this public key can generate signatures
-func (pk *PublicKey) CanSign() bool {
- return pk.PubKeyAlgo != PubKeyAlgoRSAEncryptOnly && pk.PubKeyAlgo != PubKeyAlgoElGamal && pk.PubKeyAlgo != PubKeyAlgoECDH
-}
-
-// VerifySignature returns nil iff sig is a valid signature, made by this
-// public key, of the data hashed into signed. signed is mutated by this call.
-func (pk *PublicKey) VerifySignature(signed hash.Hash, sig *Signature) (err error) {
- if !pk.CanSign() {
- return errors.InvalidArgumentError("public key cannot generate signatures")
- }
- if sig.Version == 5 && (sig.SigType == 0x00 || sig.SigType == 0x01) {
- sig.AddMetadataToHashSuffix()
- }
- signed.Write(sig.HashSuffix)
- hashBytes := signed.Sum(nil)
- if hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1] {
- return errors.SignatureError("hash tag doesn't match")
- }
-
- if pk.PubKeyAlgo != sig.PubKeyAlgo {
- return errors.InvalidArgumentError("public key and signature use different algorithms")
- }
-
- switch pk.PubKeyAlgo {
- case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
- rsaPublicKey, _ := pk.PublicKey.(*rsa.PublicKey)
- err = rsa.VerifyPKCS1v15(rsaPublicKey, sig.Hash, hashBytes, padToKeySize(rsaPublicKey, sig.RSASignature.Bytes()))
- if err != nil {
- return errors.SignatureError("RSA verification failure")
- }
- return nil
- case PubKeyAlgoDSA:
- dsaPublicKey, _ := pk.PublicKey.(*dsa.PublicKey)
- // Need to truncate hashBytes to match FIPS 186-3 section 4.6.
- subgroupSize := (dsaPublicKey.Q.BitLen() + 7) / 8
- if len(hashBytes) > subgroupSize {
- hashBytes = hashBytes[:subgroupSize]
- }
- if !dsa.Verify(dsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.DSASigR.Bytes()), new(big.Int).SetBytes(sig.DSASigS.Bytes())) {
- return errors.SignatureError("DSA verification failure")
- }
- return nil
- case PubKeyAlgoECDSA:
- ecdsaPublicKey := pk.PublicKey.(*ecdsa.PublicKey)
- if !ecdsa.Verify(ecdsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.ECDSASigR.Bytes()), new(big.Int).SetBytes(sig.ECDSASigS.Bytes())) {
- return errors.SignatureError("ECDSA verification failure")
- }
- return nil
- case PubKeyAlgoEdDSA:
- eddsaPublicKey := pk.PublicKey.(*eddsa.PublicKey)
- if !eddsa.Verify(eddsaPublicKey, hashBytes, sig.EdDSASigR.Bytes(), sig.EdDSASigS.Bytes()) {
- return errors.SignatureError("EdDSA verification failure")
- }
- return nil
- default:
- return errors.SignatureError("Unsupported public key algorithm used in signature")
- }
-}
-
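The hash-tag comparison above is only a cheap pre-check before the expensive public-key operation, and the DSA branch clips the digest to the subgroup size per FIPS 186-3. A minimal stdlib-only sketch of both steps (the message and the 256-bit subgroup size are illustrative):

// hashtag_check.go — the cheap two-octet pre-check and the FIPS 186-3
// digest truncation from VerifySignature, in isolation.
package main

import (
	"crypto/sha256"
	"fmt"
)

func main() {
	// The signer stores the first two digest octets in the signature.
	digest := sha256.Sum256([]byte("signed message"))
	hashTag := [2]byte{digest[0], digest[1]}

	// The verifier recomputes the digest; on tampered data these octets
	// would differ and verification stops before any key math.
	recomputed := sha256.Sum256([]byte("signed message"))
	if recomputed[0] != hashTag[0] || recomputed[1] != hashTag[1] {
		fmt.Println("hash tag doesn't match")
		return
	}

	// For DSA the digest is then clipped to the subgroup size.
	qBits := 256 // illustrative subgroup size
	hashBytes := recomputed[:]
	if subgroupSize := (qBits + 7) / 8; len(hashBytes) > subgroupSize {
		hashBytes = hashBytes[:subgroupSize]
	}
	fmt.Printf("verifying over %d digest octets\n", len(hashBytes))
}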
-// keySignatureHash returns a Hash of the message that needs to be signed for
-// pk to assert a subkey relationship to signed.
-func keySignatureHash(pk, signed signingKey, hashFunc crypto.Hash) (h hash.Hash, err error) {
- if !hashFunc.Available() {
- return nil, errors.UnsupportedError("hash function")
- }
- h = hashFunc.New()
-
- // RFC 4880, section 5.2.4
- err = pk.SerializeForHash(h)
- if err != nil {
- return nil, err
- }
-
- err = signed.SerializeForHash(h)
- return
-}
-
-// VerifyKeySignature returns nil iff sig is a valid signature, made by this
-// public key, of signed.
-func (pk *PublicKey) VerifyKeySignature(signed *PublicKey, sig *Signature) error {
- h, err := keySignatureHash(pk, signed, sig.Hash)
- if err != nil {
- return err
- }
- if err = pk.VerifySignature(h, sig); err != nil {
- return err
- }
-
- if sig.FlagSign {
- // Signing subkeys must be cross-signed. See
- // https://www.gnupg.org/faq/subkey-cross-certify.html.
- if sig.EmbeddedSignature == nil {
- return errors.StructuralError("signing subkey is missing cross-signature")
- }
- // Verify the cross-signature. This is calculated over the same
- // data as the main signature, so we cannot just recursively
- // call signed.VerifyKeySignature(...)
- if h, err = keySignatureHash(pk, signed, sig.EmbeddedSignature.Hash); err != nil {
- return errors.StructuralError("error while hashing for cross-signature: " + err.Error())
- }
- if err := signed.VerifySignature(h, sig.EmbeddedSignature); err != nil {
- return errors.StructuralError("error while verifying cross-signature: " + err.Error())
- }
- }
-
- return nil
-}
-
-func keyRevocationHash(pk signingKey, hashFunc crypto.Hash) (h hash.Hash, err error) {
- if !hashFunc.Available() {
- return nil, errors.UnsupportedError("hash function")
- }
- h = hashFunc.New()
-
- // RFC 4880, section 5.2.4
- err = pk.SerializeForHash(h)
-
- return
-}
-
-// VerifyRevocationSignature returns nil iff sig is a valid signature, made by this
-// public key.
-func (pk *PublicKey) VerifyRevocationSignature(sig *Signature) (err error) {
- h, err := keyRevocationHash(pk, sig.Hash)
- if err != nil {
- return err
- }
- return pk.VerifySignature(h, sig)
-}
-
-// VerifySubkeyRevocationSignature returns nil iff sig is a valid subkey revocation signature,
-// made by this public key, of signed.
-func (pk *PublicKey) VerifySubkeyRevocationSignature(sig *Signature, signed *PublicKey) (err error) {
- h, err := keySignatureHash(pk, signed, sig.Hash)
- if err != nil {
- return err
- }
- return pk.VerifySignature(h, sig)
-}
-
-// userIdSignatureHash returns a Hash of the message that needs to be signed
-// to assert that pk is a valid key for id.
-func userIdSignatureHash(id string, pk *PublicKey, hashFunc crypto.Hash) (h hash.Hash, err error) {
- if !hashFunc.Available() {
- return nil, errors.UnsupportedError("hash function")
- }
- h = hashFunc.New()
-
- // RFC 4880, section 5.2.4
- pk.SerializeSignaturePrefix(h)
- pk.serializeWithoutHeaders(h)
-
- var buf [5]byte
- buf[0] = 0xb4
- buf[1] = byte(len(id) >> 24)
- buf[2] = byte(len(id) >> 16)
- buf[3] = byte(len(id) >> 8)
- buf[4] = byte(len(id))
- h.Write(buf[:])
- h.Write([]byte(id))
-
- return
-}
-
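userIdSignatureHash frames the identity with a constant 0xb4 octet and a four-octet big-endian length before hashing it after the key material (RFC 4880, section 5.2.4). A self-contained sketch of just that framing:

// uid_framing.go — builds the 0xb4-prefixed user ID frame that is hashed
// after the key material in a certification signature.
package main

import (
	"encoding/binary"
	"fmt"
)

func userIdFrame(id string) []byte {
	frame := make([]byte, 5+len(id))
	frame[0] = 0xb4 // constant tag for user ID certifications
	binary.BigEndian.PutUint32(frame[1:5], uint32(len(id)))
	copy(frame[5:], id)
	return frame
}

func main() {
	fmt.Printf("%% x\n", userIdFrame("Alice <alice@example.com>"))
}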
-// VerifyUserIdSignature returns nil iff sig is a valid signature, made by this
-// public key, asserting that id is an identity of pub.
-func (pk *PublicKey) VerifyUserIdSignature(id string, pub *PublicKey, sig *Signature) (err error) {
- h, err := userIdSignatureHash(id, pub, sig.Hash)
- if err != nil {
- return err
- }
- return pk.VerifySignature(h, sig)
-}
-
-// KeyIdString returns the public key's key ID (the last eight bytes of its
-// fingerprint) in capital hex (e.g. "6C7EE1B8621CC013").
-func (pk *PublicKey) KeyIdString() string {
- return fmt.Sprintf("%X", pk.Fingerprint[12:20])
-}
-
-// KeyIdShortString returns the short key ID (the last four bytes of the
-// fingerprint) in capital hex, as shown by gpg --list-keys (e.g. "621CC013").
-func (pk *PublicKey) KeyIdShortString() string {
- return fmt.Sprintf("%X", pk.Fingerprint[16:20])
-}
-
-// BitLength returns the bit length for the given public key.
-func (pk *PublicKey) BitLength() (bitLength uint16, err error) {
- switch pk.PubKeyAlgo {
- case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
- bitLength = pk.n.BitLength()
- case PubKeyAlgoDSA:
- bitLength = pk.p.BitLength()
- case PubKeyAlgoElGamal:
- bitLength = pk.p.BitLength()
- case PubKeyAlgoECDSA:
- bitLength = pk.p.BitLength()
- case PubKeyAlgoECDH:
- bitLength = pk.p.BitLength()
- case PubKeyAlgoEdDSA:
- bitLength = pk.p.BitLength()
- default:
- err = errors.InvalidArgumentError("bad public-key algorithm")
- }
- return
-}
-
-// KeyExpired returns whether sig is a self-signature of a key that has
-// expired or was created in the future.
-func (pk *PublicKey) KeyExpired(sig *Signature, currentTime time.Time) bool {
- if pk.CreationTime.After(currentTime) {
- return true
- }
- if sig.KeyLifetimeSecs == nil || *sig.KeyLifetimeSecs == 0 {
- return false
- }
- expiry := pk.CreationTime.Add(time.Duration(*sig.KeyLifetimeSecs) * time.Second)
- return currentTime.After(expiry)
-}
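KeyIdString and KeyIdShortString above simply slice the tail of the 20-byte v4 fingerprint. A sketch reproducing both, reusing the rsaFingerprintHex value from the test-data file removed below:

// keyid_sketch.go — derives the long and short key IDs from a v4
// fingerprint, mirroring KeyIdString/KeyIdShortString.
package main

import (
	"encoding/hex"
	"fmt"
	"log"
)

func main() {
	// 20-byte v4 fingerprint (rsaFingerprintHex from the package test data).
	fp, err := hex.DecodeString("5fb74b1d03b1e3cb31bc2f8aa34d7e18c20c31bb")
	if err != nil || len(fp) != 20 {
		log.Fatal("bad fingerprint")
	}
	fmt.Printf("long key ID:  %X\n", fp[12:20]) // last 8 bytes
	fmt.Printf("short key ID: %X\n", fp[16:20]) // last 4 bytes
}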
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key_test_data.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key_test_data.go
deleted file mode 100644
index b255f1f6..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key_test_data.go
+++ /dev/null
@@ -1,24 +0,0 @@
-package packet
-
-const rsaFingerprintHex = "5fb74b1d03b1e3cb31bc2f8aa34d7e18c20c31bb"
-
-const rsaPkDataHex = "988d044d3c5c10010400b1d13382944bd5aba23a4312968b5095d14f947f600eb478e14a6fcb16b0e0cac764884909c020bc495cfcc39a935387c661507bdb236a0612fb582cac3af9b29cc2c8c70090616c41b662f4da4c1201e195472eb7f4ae1ccbcbf9940fe21d985e379a5563dde5b9a23d35f1cfaa5790da3b79db26f23695107bfaca8e7b5bcd0011010001"
-
-const dsaFingerprintHex = "eece4c094db002103714c63c8e8fbe54062f19ed"
-
-const dsaPkDataHex = "9901a2044d432f89110400cd581334f0d7a1e1bdc8b9d6d8c0baf68793632735d2bb0903224cbaa1dfbf35a60ee7a13b92643421e1eb41aa8d79bea19a115a677f6b8ba3c7818ce53a6c2a24a1608bd8b8d6e55c5090cbde09dd26e356267465ae25e69ec8bdd57c7bbb2623e4d73336f73a0a9098f7f16da2e25252130fd694c0e8070c55a812a423ae7f00a0ebf50e70c2f19c3520a551bd4b08d30f23530d3d03ff7d0bf4a53a64a09dc5e6e6e35854b7d70c882b0c60293401958b1bd9e40abec3ea05ba87cf64899299d4bd6aa7f459c201d3fbbd6c82004bdc5e8a9eb8082d12054cc90fa9d4ec251a843236a588bf49552441817436c4f43326966fe85447d4e6d0acf8fa1ef0f014730770603ad7634c3088dc52501c237328417c31c89ed70400b2f1a98b0bf42f11fefc430704bebbaa41d9f355600c3facee1e490f64208e0e094ea55e3a598a219a58500bf78ac677b670a14f4e47e9cf8eab4f368cc1ddcaa18cc59309d4cc62dd4f680e73e6cc3e1ce87a84d0925efbcb26c575c093fc42eecf45135fabf6403a25c2016e1774c0484e440a18319072c617cc97ac0a3bb0"
-
-const ecdsaFingerprintHex = "9892270b38b8980b05c8d56d43fe956c542ca00b"
-
-const ecdsaPkDataHex = "9893045071c29413052b8104002304230401f4867769cedfa52c325018896245443968e52e51d0c2df8d939949cb5b330f2921711fbee1c9b9dddb95d15cb0255e99badeddda7cc23d9ddcaacbc290969b9f24019375d61c2e4e3b36953a28d8b2bc95f78c3f1d592fb24499be348656a7b17e3963187b4361afe497bc5f9f81213f04069f8e1fb9e6a6290ae295ca1a92b894396cb4"
-
-const ecdhFingerprintHex = "722354df2475a42164d1d49faa8b938f9a201946"
-
-const ecdhPkDataHex = "b90073044d53059212052b810400220303042faa84024a20b6735c4897efa5bfb41bf85b7eefeab5ca0cb9ffc8ea04a46acb25534a577694f9e25340a4ab5223a9dd1eda530c8aa2e6718db10d7e672558c7736fe09369ea5739a2a3554bf16d41faa50562f11c6d39bbd5dffb6b9a9ec91803010909"
-
-const eddsaFingerprintHex = "b2d5e5ec0e6deca6bc8eeeb00907e75e1dd99ad8"
-
-const eddsaPkDataHex = "98330456e2132b16092b06010401da470f01010740bbda39266affa511a8c2d02edf690fb784b0499c4406185811a163539ef11dc1b41d74657374696e67203c74657374696e674074657374696e672e636f6d3e8879041316080021050256e2132b021b03050b09080702061508090a0b020416020301021e01021780000a09100907e75e1dd99ad86d0c00fe39d2008359352782bc9b61ac382584cd8eff3f57a18c2287e3afeeb05d1f04ba00fe2d0bc1ddf3ff8adb9afa3e7d9287244b4ec567f3db4d60b74a9b5465ed528203"
-
-// Source: https://sites.google.com/site/brainhub/pgpecckeys#TOC-ECC-NIST-P-384-key
-const ecc384PubHex = `99006f044d53059213052b81040022030304f6b8c5aced5b84ef9f4a209db2e4a9dfb70d28cb8c10ecd57674a9fa5a67389942b62d5e51367df4c7bfd3f8e500feecf07ed265a621a8ebbbe53e947ec78c677eba143bd1533c2b350e1c29f82313e1e1108eba063be1e64b10e6950e799c2db42465635f6473615f64685f333834203c6f70656e70677040627261696e6875622e6f72673e8900cb04101309005305024d530592301480000000002000077072656665727265642d656d61696c2d656e636f64696e67407067702e636f6d7067706d696d65040b090807021901051b03000000021602051e010000000415090a08000a0910098033880f54719fca2b0180aa37350968bd5f115afd8ce7bc7b103822152dbff06d0afcda835329510905b98cb469ba208faab87c7412b799e7b633017f58364ea480e8a1a3f253a0c5f22c446e8be9a9fce6210136ee30811abbd49139de28b5bdf8dc36d06ae748579e9ff503b90073044d53059212052b810400220303042faa84024a20b6735c4897efa5bfb41bf85b7eefeab5ca0cb9ffc8ea04a46acb25534a577694f9e25340a4ab5223a9dd1eda530c8aa2e6718db10d7e672558c7736fe09369ea5739a2a3554bf16d41faa50562f11c6d39bbd5dffb6b9a9ec9180301090989008404181309000c05024d530592051b0c000000000a0910098033880f54719f80970180eee7a6d8fcee41ee4f9289df17f9bcf9d955dca25c583b94336f3a2b2d4986dc5cf417b8d2dc86f741a9e1a6d236c0e3017d1c76575458a0cfb93ae8a2b274fcc65ceecd7a91eec83656ba13219969f06945b48c56bd04152c3a0553c5f2f4bd1267`
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/reader.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/reader.go
deleted file mode 100644
index 10215fe5..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/reader.go
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package packet
-
-import (
- "io"
-
- "github.com/ProtonMail/go-crypto/openpgp/errors"
-)
-
-// Reader reads packets from an io.Reader and allows packets to be 'unread' so
-// that they are returned by the next call to Next.
-type Reader struct {
- q []Packet
- readers []io.Reader
-}
-
-// New io.Readers are pushed when a compressed or encrypted packet is processed
-// and recursively treated as a new source of packets. However, a carefully
-// crafted packet can trigger an infinite recursive sequence of packets. See
-// http://mumble.net/~campbell/misc/pgp-quine
-// https://web.nvd.nist.gov/view/vuln/detail?vulnId=CVE-2013-4402
-// This constant limits the number of recursive packets that may be pushed.
-const maxReaders = 32
-
-// Next returns the most recently unread Packet, or reads another packet from
-// the top-most io.Reader. Unknown packet types are skipped.
-func (r *Reader) Next() (p Packet, err error) {
- if len(r.q) > 0 {
- p = r.q[len(r.q)-1]
- r.q = r.q[:len(r.q)-1]
- return
- }
-
- for len(r.readers) > 0 {
- p, err = Read(r.readers[len(r.readers)-1])
- if err == nil {
- return
- }
- if err == io.EOF {
- r.readers = r.readers[:len(r.readers)-1]
- continue
- }
- // TODO: Add strict mode that rejects unknown packets, instead of ignoring them.
- if _, ok := err.(errors.UnknownPacketTypeError); ok {
- continue
- }
- if _, ok := err.(errors.UnsupportedError); ok {
- switch p.(type) {
- case *SymmetricallyEncrypted, *AEADEncrypted, *Compressed, *LiteralData:
- return nil, err
- }
- continue
- }
- return nil, err
- }
-
- return nil, io.EOF
-}
-
-// Push causes the Reader to start reading from a new io.Reader. When an EOF
-// error is seen from the new io.Reader, it is popped and the Reader continues
-// to read from the next most recent io.Reader. Push returns a StructuralError
-// if pushing the reader would exceed the maximum recursion level, otherwise it
-// returns nil.
-func (r *Reader) Push(reader io.Reader) (err error) {
- if len(r.readers) >= maxReaders {
- return errors.StructuralError("too many layers of packets")
- }
- r.readers = append(r.readers, reader)
- return nil
-}
-
-// Unread causes the given Packet to be returned from the next call to Next.
-func (r *Reader) Unread(p Packet) {
- r.q = append(r.q, p)
-}
-
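-// NewReader returns a Reader that reads packets from r.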
-func NewReader(r io.Reader) *Reader {
- return &Reader{
- q: nil,
- readers: []io.Reader{r},
- }
-}
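The unread queue and the capped reader stack are the whole trick here: Unread pushes onto a LIFO queue that Next consults first, and Push refuses to nest past maxReaders so a crafted packet quine cannot recurse forever. A stdlib-only miniature of the same pattern, with strings standing in for packets:

// reader_stack.go — a toy version of the packet.Reader pattern: a LIFO
// unread queue plus a depth-capped stack of sources.
package main

import (
	"errors"
	"fmt"
)

const maxSources = 32 // mirrors maxReaders: caps recursion depth

type stackReader struct {
	unread  []string   // items pushed back by Unread, served LIFO
	sources [][]string // nested sources; top of stack is read first
}

func (r *stackReader) Next() (string, error) {
	if n := len(r.unread); n > 0 {
		item := r.unread[n-1]
		r.unread = r.unread[:n-1]
		return item, nil
	}
	for len(r.sources) > 0 {
		top := r.sources[len(r.sources)-1]
		if len(top) == 0 { // source exhausted: pop, like the io.EOF case
			r.sources = r.sources[:len(r.sources)-1]
			continue
		}
		r.sources[len(r.sources)-1] = top[1:]
		return top[0], nil
	}
	return "", errors.New("EOF")
}

func (r *stackReader) Push(items []string) error {
	if len(r.sources) >= maxSources {
		return errors.New("too many layers") // the anti-quine guard
	}
	r.sources = append(r.sources, items)
	return nil
}

func (r *stackReader) Unread(item string) { r.unread = append(r.unread, item) }

func main() {
	r := &stackReader{sources: [][]string{{"sig", "literal"}}}
	p, _ := r.Next()
	r.Unread(p)                        // put it back; served before any source
	_ = r.Push([]string{"compressed"}) // nested source, read before the rest
	for {
		p, err := r.Next()
		if err != nil {
			break
		}
		fmt.Println(p) // prints: sig, compressed, literal
	}
}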
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go
deleted file mode 100644
index 9f0b1b19..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go
+++ /dev/null
@@ -1,1068 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package packet
-
-import (
- "bytes"
- "crypto"
- "crypto/dsa"
- "encoding/binary"
- "hash"
- "io"
- "strconv"
- "time"
-
- "github.com/ProtonMail/go-crypto/openpgp/ecdsa"
- "github.com/ProtonMail/go-crypto/openpgp/eddsa"
- "github.com/ProtonMail/go-crypto/openpgp/errors"
- "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm"
- "github.com/ProtonMail/go-crypto/openpgp/internal/encoding"
-)
-
-const (
- // See RFC 4880, section 5.2.3.21 for details.
- KeyFlagCertify = 1 << iota
- KeyFlagSign
- KeyFlagEncryptCommunications
- KeyFlagEncryptStorage
- KeyFlagSplitKey
- KeyFlagAuthenticate
- _
- KeyFlagGroupKey
-)
-
-// Signature represents a signature. See RFC 4880, section 5.2.
-type Signature struct {
- Version int
- SigType SignatureType
- PubKeyAlgo PublicKeyAlgorithm
- Hash crypto.Hash
-
- // HashSuffix is extra data that is hashed in after the signed data.
- HashSuffix []byte
- // HashTag contains the first two bytes of the hash for fast rejection
- // of bad signed data.
- HashTag [2]byte
-
- // Metadata includes format, filename and time, and is protected by v5
- // signatures of type 0x00 or 0x01. This metadata is included in the hash
- // computation; if nil, six 0x00 bytes are used instead. See section 5.2.4.
- Metadata *LiteralData
-
- CreationTime time.Time
-
- RSASignature encoding.Field
- DSASigR, DSASigS encoding.Field
- ECDSASigR, ECDSASigS encoding.Field
- EdDSASigR, EdDSASigS encoding.Field
-
- // rawSubpackets contains the unparsed subpackets, in order.
- rawSubpackets []outputSubpacket
-
- // The following are optional so are nil when not included in the
- // signature.
-
- SigLifetimeSecs, KeyLifetimeSecs *uint32
- PreferredSymmetric, PreferredHash, PreferredCompression []uint8
- PreferredCipherSuites [][2]uint8
- IssuerKeyId *uint64
- IssuerFingerprint []byte
- SignerUserId *string
- IsPrimaryId *bool
- Notations []*Notation
-
- // TrustLevel and TrustAmount can be set by the signer to assert that
- // the key is not only valid but also trustworthy at the specified
- // level.
- // See RFC 4880, section 5.2.3.13 for details.
- TrustLevel TrustLevel
- TrustAmount TrustAmount
-
- // TrustRegularExpression can be used in conjunction with trust Signature
- // packets to limit the scope of the trust that is extended.
- // See RFC 4880, section 5.2.3.14 for details.
- TrustRegularExpression *string
-
- // PolicyURI can be set to the URI of a document that describes the
- // policy under which the signature was issued. See RFC 4880, section
- // 5.2.3.20 for details.
- PolicyURI string
-
- // FlagsValid is set if any flags were given. See RFC 4880, section
- // 5.2.3.21 for details.
- FlagsValid bool
- FlagCertify, FlagSign, FlagEncryptCommunications, FlagEncryptStorage, FlagSplitKey, FlagAuthenticate, FlagGroupKey bool
-
- // RevocationReason is set if this signature has been revoked.
- // See RFC 4880, section 5.2.3.23 for details.
- RevocationReason *ReasonForRevocation
- RevocationReasonText string
-
- // In a self-signature, these flags are set if there is a features subpacket
- // indicating that the issuer implementation supports these features.
- // See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh#features-subpacket
- SEIPDv1, SEIPDv2 bool
-
- // EmbeddedSignature, if non-nil, is a signature of the parent key, by
- // this key. This prevents an attacker from claiming another's signing
- // subkey as their own.
- EmbeddedSignature *Signature
-
- outSubpackets []outputSubpacket
-}
-
-func (sig *Signature) parse(r io.Reader) (err error) {
- // RFC 4880, section 5.2.3
- var buf [5]byte
- _, err = readFull(r, buf[:1])
- if err != nil {
- return
- }
- if buf[0] != 4 && buf[0] != 5 {
- err = errors.UnsupportedError("signature packet version " + strconv.Itoa(int(buf[0])))
- return
- }
- sig.Version = int(buf[0])
- _, err = readFull(r, buf[:5])
- if err != nil {
- return
- }
- sig.SigType = SignatureType(buf[0])
- sig.PubKeyAlgo = PublicKeyAlgorithm(buf[1])
- switch sig.PubKeyAlgo {
- case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA, PubKeyAlgoECDSA, PubKeyAlgoEdDSA:
- default:
- err = errors.UnsupportedError("public key algorithm " + strconv.Itoa(int(sig.PubKeyAlgo)))
- return
- }
-
- var ok bool
-
- if sig.Version < 5 {
- sig.Hash, ok = algorithm.HashIdToHashWithSha1(buf[2])
- } else {
- sig.Hash, ok = algorithm.HashIdToHash(buf[2])
- }
-
- if !ok {
- return errors.UnsupportedError("hash function " + strconv.Itoa(int(buf[2])))
- }
-
- hashedSubpacketsLength := int(buf[3])<<8 | int(buf[4])
- hashedSubpackets := make([]byte, hashedSubpacketsLength)
- _, err = readFull(r, hashedSubpackets)
- if err != nil {
- return
- }
- err = sig.buildHashSuffix(hashedSubpackets)
- if err != nil {
- return
- }
-
- err = parseSignatureSubpackets(sig, hashedSubpackets, true)
- if err != nil {
- return
- }
-
- _, err = readFull(r, buf[:2])
- if err != nil {
- return
- }
- unhashedSubpacketsLength := int(buf[0])<<8 | int(buf[1])
- unhashedSubpackets := make([]byte, unhashedSubpacketsLength)
- _, err = readFull(r, unhashedSubpackets)
- if err != nil {
- return
- }
- err = parseSignatureSubpackets(sig, unhashedSubpackets, false)
- if err != nil {
- return
- }
-
- _, err = readFull(r, sig.HashTag[:2])
- if err != nil {
- return
- }
-
- switch sig.PubKeyAlgo {
- case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
- sig.RSASignature = new(encoding.MPI)
- _, err = sig.RSASignature.ReadFrom(r)
- case PubKeyAlgoDSA:
- sig.DSASigR = new(encoding.MPI)
- if _, err = sig.DSASigR.ReadFrom(r); err != nil {
- return
- }
-
- sig.DSASigS = new(encoding.MPI)
- _, err = sig.DSASigS.ReadFrom(r)
- case PubKeyAlgoECDSA:
- sig.ECDSASigR = new(encoding.MPI)
- if _, err = sig.ECDSASigR.ReadFrom(r); err != nil {
- return
- }
-
- sig.ECDSASigS = new(encoding.MPI)
- _, err = sig.ECDSASigS.ReadFrom(r)
- case PubKeyAlgoEdDSA:
- sig.EdDSASigR = new(encoding.MPI)
- if _, err = sig.EdDSASigR.ReadFrom(r); err != nil {
- return
- }
-
- sig.EdDSASigS = new(encoding.MPI)
- if _, err = sig.EdDSASigS.ReadFrom(r); err != nil {
- return
- }
- default:
- panic("unreachable")
- }
- return
-}
-
-// parseSignatureSubpackets parses subpackets of the main signature packet. See
-// RFC 4880, section 5.2.3.1.
-func parseSignatureSubpackets(sig *Signature, subpackets []byte, isHashed bool) (err error) {
- for len(subpackets) > 0 {
- subpackets, err = parseSignatureSubpacket(sig, subpackets, isHashed)
- if err != nil {
- return
- }
- }
-
- if sig.CreationTime.IsZero() {
- err = errors.StructuralError("no creation time in signature")
- }
-
- return
-}
-
-type signatureSubpacketType uint8
-
-const (
- creationTimeSubpacket signatureSubpacketType = 2
- signatureExpirationSubpacket signatureSubpacketType = 3
- trustSubpacket signatureSubpacketType = 5
- regularExpressionSubpacket signatureSubpacketType = 6
- keyExpirationSubpacket signatureSubpacketType = 9
- prefSymmetricAlgosSubpacket signatureSubpacketType = 11
- issuerSubpacket signatureSubpacketType = 16
- notationDataSubpacket signatureSubpacketType = 20
- prefHashAlgosSubpacket signatureSubpacketType = 21
- prefCompressionSubpacket signatureSubpacketType = 22
- primaryUserIdSubpacket signatureSubpacketType = 25
- policyUriSubpacket signatureSubpacketType = 26
- keyFlagsSubpacket signatureSubpacketType = 27
- signerUserIdSubpacket signatureSubpacketType = 28
- reasonForRevocationSubpacket signatureSubpacketType = 29
- featuresSubpacket signatureSubpacketType = 30
- embeddedSignatureSubpacket signatureSubpacketType = 32
- issuerFingerprintSubpacket signatureSubpacketType = 33
- prefCipherSuitesSubpacket signatureSubpacketType = 39
-)
-
-// parseSignatureSubpacket parses a single subpacket. len(subpacket) is >= 1.
-func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (rest []byte, err error) {
- // RFC 4880, section 5.2.3.1
- var (
- length uint32
- packetType signatureSubpacketType
- isCritical bool
- )
- switch {
- case subpacket[0] < 192:
- length = uint32(subpacket[0])
- subpacket = subpacket[1:]
- case subpacket[0] < 255:
- if len(subpacket) < 2 {
- goto Truncated
- }
- length = uint32(subpacket[0]-192)<<8 + uint32(subpacket[1]) + 192
- subpacket = subpacket[2:]
- default:
- if len(subpacket) < 5 {
- goto Truncated
- }
- length = uint32(subpacket[1])<<24 |
- uint32(subpacket[2])<<16 |
- uint32(subpacket[3])<<8 |
- uint32(subpacket[4])
- subpacket = subpacket[5:]
- }
- if length > uint32(len(subpacket)) {
- goto Truncated
- }
- rest = subpacket[length:]
- subpacket = subpacket[:length]
- if len(subpacket) == 0 {
- err = errors.StructuralError("zero length signature subpacket")
- return
- }
- packetType = signatureSubpacketType(subpacket[0] & 0x7f)
- isCritical = subpacket[0]&0x80 == 0x80
- subpacket = subpacket[1:]
- sig.rawSubpackets = append(sig.rawSubpackets, outputSubpacket{isHashed, packetType, isCritical, subpacket})
- if !isHashed &&
- packetType != issuerSubpacket &&
- packetType != issuerFingerprintSubpacket &&
- packetType != embeddedSignatureSubpacket {
- return
- }
- switch packetType {
- case creationTimeSubpacket:
- if len(subpacket) != 4 {
- err = errors.StructuralError("signature creation time not four bytes")
- return
- }
- t := binary.BigEndian.Uint32(subpacket)
- sig.CreationTime = time.Unix(int64(t), 0)
- case signatureExpirationSubpacket:
- // Signature expiration time, section 5.2.3.10
- if len(subpacket) != 4 {
- err = errors.StructuralError("expiration subpacket with bad length")
- return
- }
- sig.SigLifetimeSecs = new(uint32)
- *sig.SigLifetimeSecs = binary.BigEndian.Uint32(subpacket)
- case trustSubpacket:
- // Trust level and amount, section 5.2.3.13
- sig.TrustLevel = TrustLevel(subpacket[0])
- sig.TrustAmount = TrustAmount(subpacket[1])
- case regularExpressionSubpacket:
- // Trust regular expression, section 5.2.3.14
- // RFC specifies the string should be null-terminated; remove a null byte from the end
- if subpacket[len(subpacket)-1] != 0x00 {
- err = errors.StructuralError("expected regular expression to be null-terminated")
- return
- }
- trustRegularExpression := string(subpacket[:len(subpacket)-1])
- sig.TrustRegularExpression = &trustRegularExpression
- case keyExpirationSubpacket:
- // Key expiration time, section 5.2.3.6
- if len(subpacket) != 4 {
- err = errors.StructuralError("key expiration subpacket with bad length")
- return
- }
- sig.KeyLifetimeSecs = new(uint32)
- *sig.KeyLifetimeSecs = binary.BigEndian.Uint32(subpacket)
- case prefSymmetricAlgosSubpacket:
- // Preferred symmetric algorithms, section 5.2.3.7
- sig.PreferredSymmetric = make([]byte, len(subpacket))
- copy(sig.PreferredSymmetric, subpacket)
- case issuerSubpacket:
- // Issuer, section 5.2.3.5
- if sig.Version > 4 {
- err = errors.StructuralError("issuer subpacket found in v5 key")
- return
- }
- if len(subpacket) != 8 {
- err = errors.StructuralError("issuer subpacket with bad length")
- return
- }
- sig.IssuerKeyId = new(uint64)
- *sig.IssuerKeyId = binary.BigEndian.Uint64(subpacket)
- case notationDataSubpacket:
- // Notation data, section 5.2.3.16
- if len(subpacket) < 8 {
- err = errors.StructuralError("notation data subpacket with bad length")
- return
- }
-
- nameLength := uint32(subpacket[4])<<8 | uint32(subpacket[5])
- valueLength := uint32(subpacket[6])<<8 | uint32(subpacket[7])
- if len(subpacket) != int(nameLength)+int(valueLength)+8 {
- err = errors.StructuralError("notation data subpacket with bad length")
- return
- }
-
- notation := Notation{
- IsHumanReadable: (subpacket[0] & 0x80) == 0x80,
- Name: string(subpacket[8 : nameLength+8]),
- Value: subpacket[nameLength+8 : valueLength+nameLength+8],
- IsCritical: isCritical,
- }
-
- sig.Notations = append(sig.Notations, &notation)
- case prefHashAlgosSubpacket:
- // Preferred hash algorithms, section 5.2.3.8
- sig.PreferredHash = make([]byte, len(subpacket))
- copy(sig.PreferredHash, subpacket)
- case prefCompressionSubpacket:
- // Preferred compression algorithms, section 5.2.3.9
- sig.PreferredCompression = make([]byte, len(subpacket))
- copy(sig.PreferredCompression, subpacket)
- case primaryUserIdSubpacket:
- // Primary User ID, section 5.2.3.19
- if len(subpacket) != 1 {
- err = errors.StructuralError("primary user id subpacket with bad length")
- return
- }
- sig.IsPrimaryId = new(bool)
- if subpacket[0] > 0 {
- *sig.IsPrimaryId = true
- }
- case keyFlagsSubpacket:
- // Key flags, section 5.2.3.21
- if len(subpacket) == 0 {
- err = errors.StructuralError("empty key flags subpacket")
- return
- }
- sig.FlagsValid = true
- if subpacket[0]&KeyFlagCertify != 0 {
- sig.FlagCertify = true
- }
- if subpacket[0]&KeyFlagSign != 0 {
- sig.FlagSign = true
- }
- if subpacket[0]&KeyFlagEncryptCommunications != 0 {
- sig.FlagEncryptCommunications = true
- }
- if subpacket[0]&KeyFlagEncryptStorage != 0 {
- sig.FlagEncryptStorage = true
- }
- if subpacket[0]&KeyFlagSplitKey != 0 {
- sig.FlagSplitKey = true
- }
- if subpacket[0]&KeyFlagAuthenticate != 0 {
- sig.FlagAuthenticate = true
- }
- if subpacket[0]&KeyFlagGroupKey != 0 {
- sig.FlagGroupKey = true
- }
- case signerUserIdSubpacket:
- userId := string(subpacket)
- sig.SignerUserId = &userId
- case reasonForRevocationSubpacket:
- // Reason For Revocation, section 5.2.3.23
- if len(subpacket) == 0 {
- err = errors.StructuralError("empty revocation reason subpacket")
- return
- }
- sig.RevocationReason = new(ReasonForRevocation)
- *sig.RevocationReason = ReasonForRevocation(subpacket[0])
- sig.RevocationReasonText = string(subpacket[1:])
- case featuresSubpacket:
- // Features subpacket, section 5.2.3.24 specifies a very general
- // mechanism for OpenPGP implementations to signal support for new
- // features.
- if len(subpacket) > 0 {
- if subpacket[0]&0x01 != 0 {
- sig.SEIPDv1 = true
- }
- // 0x02 and 0x04 are reserved
- if subpacket[0]&0x08 != 0 {
- sig.SEIPDv2 = true
- }
- }
- case embeddedSignatureSubpacket:
- // Only usage is in signatures that cross-certify
- // signing subkeys. section 5.2.3.26 describes the
- // format, with its usage described in section 11.1
- if sig.EmbeddedSignature != nil {
- err = errors.StructuralError("Cannot have multiple embedded signatures")
- return
- }
- sig.EmbeddedSignature = new(Signature)
- // Embedded signatures are required to be v4 signatures; see
- // section 12.1. However, we only parse v4 signatures in this
- // file anyway.
- if err := sig.EmbeddedSignature.parse(bytes.NewBuffer(subpacket)); err != nil {
- return nil, err
- }
- if sigType := sig.EmbeddedSignature.SigType; sigType != SigTypePrimaryKeyBinding {
- return nil, errors.StructuralError("cross-signature has unexpected type " + strconv.Itoa(int(sigType)))
- }
- case policyUriSubpacket:
- // Policy URI, section 5.2.3.20
- sig.PolicyURI = string(subpacket)
- case issuerFingerprintSubpacket:
- v, l := subpacket[0], len(subpacket[1:])
- if v == 5 && l != 32 || v != 5 && l != 20 {
- return nil, errors.StructuralError("bad fingerprint length")
- }
- sig.IssuerFingerprint = make([]byte, l)
- copy(sig.IssuerFingerprint, subpacket[1:])
- sig.IssuerKeyId = new(uint64)
- if v == 5 {
- *sig.IssuerKeyId = binary.BigEndian.Uint64(subpacket[1:9])
- } else {
- *sig.IssuerKeyId = binary.BigEndian.Uint64(subpacket[13:21])
- }
- case prefCipherSuitesSubpacket:
- // Preferred AEAD cipher suites
- // See https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#name-preferred-aead-ciphersuites
- if len(subpacket)%2 != 0 {
- err = errors.StructuralError("invalid aead cipher suite length")
- return
- }
-
- sig.PreferredCipherSuites = make([][2]byte, len(subpacket)/2)
-
- for i := 0; i < len(subpacket)/2; i++ {
- sig.PreferredCipherSuites[i] = [2]uint8{subpacket[2*i], subpacket[2*i+1]}
- }
- default:
- if isCritical {
- err = errors.UnsupportedError("unknown critical signature subpacket type " + strconv.Itoa(int(packetType)))
- return
- }
- }
- return
-
-Truncated:
- err = errors.StructuralError("signature subpacket truncated")
- return
-}
-
-// subpacketLengthLength returns the length, in bytes, of an encoded length value.
-func subpacketLengthLength(length int) int {
- if length < 192 {
- return 1
- }
- if length < 16320 {
- return 2
- }
- return 5
-}
-
-func (sig *Signature) CheckKeyIdOrFingerprint(pk *PublicKey) bool {
- if sig.IssuerFingerprint != nil && len(sig.IssuerFingerprint) >= 20 {
- return bytes.Equal(sig.IssuerFingerprint, pk.Fingerprint)
- }
- return sig.IssuerKeyId != nil && *sig.IssuerKeyId == pk.KeyId
-}
-
-// serializeSubpacketLength marshals the given length into to.
-func serializeSubpacketLength(to []byte, length int) int {
- // RFC 4880, Section 4.2.2.
- if length < 192 {
- to[0] = byte(length)
- return 1
- }
- if length < 16320 {
- length -= 192
- to[0] = byte((length >> 8) + 192)
- to[1] = byte(length)
- return 2
- }
- to[0] = 255
- to[1] = byte(length >> 24)
- to[2] = byte(length >> 16)
- to[3] = byte(length >> 8)
- to[4] = byte(length)
- return 5
-}
-
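The one/two/five-octet length format used by subpacketLengthLength and serializeSubpacketLength (RFC 4880, section 4.2.2) is easiest to trust alongside its decoder and a roundtrip over the boundary values. A standalone sketch:

// subpacket_len.go — encode/decode roundtrip for RFC 4880 subpacket
// lengths (one, two, or five octets).
package main

import "fmt"

func encodeLen(to []byte, length int) int {
	if length < 192 { // one-octet form: 0..191
		to[0] = byte(length)
		return 1
	}
	if length < 16320 { // two-octet form: 192..16319
		length -= 192
		to[0] = byte((length >> 8) + 192)
		to[1] = byte(length)
		return 2
	}
	// five-octet form: 255 marker plus a four-octet big-endian length
	to[0] = 255
	to[1], to[2], to[3], to[4] = byte(length>>24), byte(length>>16), byte(length>>8), byte(length)
	return 5
}

func decodeLen(b []byte) (length, n int) {
	switch {
	case b[0] < 192:
		return int(b[0]), 1
	case b[0] < 255:
		return int(b[0]-192)<<8 + int(b[1]) + 192, 2
	default:
		return int(b[1])<<24 | int(b[2])<<16 | int(b[3])<<8 | int(b[4]), 5
	}
}

func main() {
	var buf [5]byte
	for _, l := range []int{0, 191, 192, 16319, 16320, 1 << 20} {
		n := encodeLen(buf[:], l)
		got, m := decodeLen(buf[:n])
		fmt.Printf("len=%-8d octets=%d roundtrip=%v\n", l, n, got == l && m == n)
	}
}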
-// subpacketsLength returns the serialized length, in bytes, of the given
-// subpackets.
-func subpacketsLength(subpackets []outputSubpacket, hashed bool) (length int) {
- for _, subpacket := range subpackets {
- if subpacket.hashed == hashed {
- length += subpacketLengthLength(len(subpacket.contents) + 1)
- length += 1 // type byte
- length += len(subpacket.contents)
- }
- }
- return
-}
-
-// serializeSubpackets marshals the given subpackets into to.
-func serializeSubpackets(to []byte, subpackets []outputSubpacket, hashed bool) {
- for _, subpacket := range subpackets {
- if subpacket.hashed == hashed {
- n := serializeSubpacketLength(to, len(subpacket.contents)+1)
- to[n] = byte(subpacket.subpacketType)
- if subpacket.isCritical {
- to[n] |= 0x80
- }
- to = to[1+n:]
- n = copy(to, subpacket.contents)
- to = to[n:]
- }
- }
- return
-}
-
-// SigExpired returns whether sig is a signature that has expired or was
-// created in the future.
-func (sig *Signature) SigExpired(currentTime time.Time) bool {
- if sig.CreationTime.After(currentTime) {
- return true
- }
- if sig.SigLifetimeSecs == nil || *sig.SigLifetimeSecs == 0 {
- return false
- }
- expiry := sig.CreationTime.Add(time.Duration(*sig.SigLifetimeSecs) * time.Second)
- return currentTime.After(expiry)
-}
-
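The expiry rule is short but easy to invert by accident: a nil or zero lifetime means the signature never expires, while a creation time in the future makes it invalid immediately. The same check in isolation:

// sig_expiry.go — the SigExpired rule: future-dated signatures are
// invalid, and lifetime nil/0 means no expiry.
package main

import (
	"fmt"
	"time"
)

func expired(created time.Time, lifetimeSecs *uint32, now time.Time) bool {
	if created.After(now) {
		return true // created in the future
	}
	if lifetimeSecs == nil || *lifetimeSecs == 0 {
		return false // no expiry set
	}
	return now.After(created.Add(time.Duration(*lifetimeSecs) * time.Second))
}

func main() {
	now := time.Now()
	day := uint32(24 * 3600)
	fmt.Println(expired(now.Add(-48*time.Hour), &day, now)) // true: past lifetime
	fmt.Println(expired(now.Add(-1*time.Hour), nil, now))   // false: no lifetime
	fmt.Println(expired(now.Add(1*time.Hour), nil, now))    // true: future-dated
}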
-// buildHashSuffix constructs the HashSuffix member of sig in preparation for signing.
-func (sig *Signature) buildHashSuffix(hashedSubpackets []byte) (err error) {
- var hashId byte
- var ok bool
-
- if sig.Version < 5 {
- hashId, ok = algorithm.HashToHashIdWithSha1(sig.Hash)
- } else {
- hashId, ok = algorithm.HashToHashId(sig.Hash)
- }
-
- if !ok {
- sig.HashSuffix = nil
- return errors.InvalidArgumentError("hash cannot be represented in OpenPGP: " + strconv.Itoa(int(sig.Hash)))
- }
-
- hashedFields := bytes.NewBuffer([]byte{
- uint8(sig.Version),
- uint8(sig.SigType),
- uint8(sig.PubKeyAlgo),
- uint8(hashId),
- uint8(len(hashedSubpackets) >> 8),
- uint8(len(hashedSubpackets)),
- })
- hashedFields.Write(hashedSubpackets)
-
- var l uint64 = uint64(6 + len(hashedSubpackets))
- if sig.Version == 5 {
- hashedFields.Write([]byte{0x05, 0xff})
- hashedFields.Write([]byte{
- uint8(l >> 56), uint8(l >> 48), uint8(l >> 40), uint8(l >> 32),
- uint8(l >> 24), uint8(l >> 16), uint8(l >> 8), uint8(l),
- })
- } else {
- hashedFields.Write([]byte{0x04, 0xff})
- hashedFields.Write([]byte{
- uint8(l >> 24), uint8(l >> 16), uint8(l >> 8), uint8(l),
- })
- }
- sig.HashSuffix = make([]byte, hashedFields.Len())
- copy(sig.HashSuffix, hashedFields.Bytes())
- return
-}
-
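For a v4 signature the suffix built above is the six fixed header octets, the hashed subpackets, and a 0x04 0xff trailer followed by the four-octet big-endian count of everything before the trailer. A sketch assembling it from illustrative field values:

// hash_suffix.go — assembles a v4 signature hash suffix as buildHashSuffix
// does; all field values are illustrative.
package main

import (
	"bytes"
	"fmt"
)

func v4HashSuffix(sigType, pubKeyAlgo, hashId byte, hashedSubpackets []byte) []byte {
	var b bytes.Buffer
	b.Write([]byte{
		4, sigType, pubKeyAlgo, hashId,
		byte(len(hashedSubpackets) >> 8), byte(len(hashedSubpackets)),
	})
	b.Write(hashedSubpackets)
	// Trailer: version, 0xff, then the big-endian count of bytes hashed
	// before the trailer itself.
	l := uint32(6 + len(hashedSubpackets))
	b.Write([]byte{0x04, 0xff, byte(l >> 24), byte(l >> 16), byte(l >> 8), byte(l)})
	return b.Bytes()
}

func main() {
	// SigType 0x00 (binary), RSA (algo 1), SHA-256 (hash id 8), plus a
	// dummy blob standing in for real hashed subpackets.
	fmt.Printf("%% x\n", v4HashSuffix(0x00, 1, 8, []byte{0x05, 0x02, 0, 0, 0, 0}))
}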
-func (sig *Signature) signPrepareHash(h hash.Hash) (digest []byte, err error) {
- hashedSubpacketsLen := subpacketsLength(sig.outSubpackets, true)
- hashedSubpackets := make([]byte, hashedSubpacketsLen)
- serializeSubpackets(hashedSubpackets, sig.outSubpackets, true)
- err = sig.buildHashSuffix(hashedSubpackets)
- if err != nil {
- return
- }
- if sig.Version == 5 && (sig.SigType == 0x00 || sig.SigType == 0x01) {
- sig.AddMetadataToHashSuffix()
- }
-
- h.Write(sig.HashSuffix)
- digest = h.Sum(nil)
- copy(sig.HashTag[:], digest)
- return
-}
-
-// Sign signs a message with a private key. The hash, h, must contain
-// the hash of the message to be signed and will be mutated by this function.
-// On success, the signature is stored in sig. Call Serialize to write it out.
-// If config is nil, sensible defaults will be used.
-func (sig *Signature) Sign(h hash.Hash, priv *PrivateKey, config *Config) (err error) {
- if priv.Dummy() {
- return errors.ErrDummyPrivateKey("dummy key found")
- }
- sig.Version = priv.PublicKey.Version
- sig.IssuerFingerprint = priv.PublicKey.Fingerprint
- sig.outSubpackets, err = sig.buildSubpackets(priv.PublicKey)
- if err != nil {
- return err
- }
- digest, err := sig.signPrepareHash(h)
- if err != nil {
- return
- }
- switch priv.PubKeyAlgo {
- case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
- // supports both *rsa.PrivateKey and crypto.Signer
- sigdata, err := priv.PrivateKey.(crypto.Signer).Sign(config.Random(), digest, sig.Hash)
- if err == nil {
- sig.RSASignature = encoding.NewMPI(sigdata)
- }
- case PubKeyAlgoDSA:
- dsaPriv := priv.PrivateKey.(*dsa.PrivateKey)
-
- // Need to truncate hashBytes to match FIPS 186-3 section 4.6.
- subgroupSize := (dsaPriv.Q.BitLen() + 7) / 8
- if len(digest) > subgroupSize {
- digest = digest[:subgroupSize]
- }
- r, s, err := dsa.Sign(config.Random(), dsaPriv, digest)
- if err == nil {
- sig.DSASigR = new(encoding.MPI).SetBig(r)
- sig.DSASigS = new(encoding.MPI).SetBig(s)
- }
- case PubKeyAlgoECDSA:
- sk := priv.PrivateKey.(*ecdsa.PrivateKey)
- r, s, err := ecdsa.Sign(config.Random(), sk, digest)
-
- if err == nil {
- sig.ECDSASigR = new(encoding.MPI).SetBig(r)
- sig.ECDSASigS = new(encoding.MPI).SetBig(s)
- }
- case PubKeyAlgoEdDSA:
- sk := priv.PrivateKey.(*eddsa.PrivateKey)
- r, s, err := eddsa.Sign(sk, digest)
- if err == nil {
- sig.EdDSASigR = encoding.NewMPI(r)
- sig.EdDSASigS = encoding.NewMPI(s)
- }
- default:
- err = errors.UnsupportedError("public key algorithm: " + strconv.Itoa(int(sig.PubKeyAlgo)))
- }
-
- return
-}
-
-// SignUserId computes a signature from priv, asserting that pub is a valid
-// key for the identity id. On success, the signature is stored in sig. Call
-// Serialize to write it out.
-// If config is nil, sensible defaults will be used.
-func (sig *Signature) SignUserId(id string, pub *PublicKey, priv *PrivateKey, config *Config) error {
- if priv.Dummy() {
- return errors.ErrDummyPrivateKey("dummy key found")
- }
- h, err := userIdSignatureHash(id, pub, sig.Hash)
- if err != nil {
- return err
- }
- return sig.Sign(h, priv, config)
-}
-
-// CrossSignKey computes a signature from signingKey on pub hashed using hashKey. On success,
-// the signature is stored in sig. Call Serialize to write it out.
-// If config is nil, sensible defaults will be used.
-func (sig *Signature) CrossSignKey(pub *PublicKey, hashKey *PublicKey, signingKey *PrivateKey,
- config *Config) error {
- h, err := keySignatureHash(hashKey, pub, sig.Hash)
- if err != nil {
- return err
- }
- return sig.Sign(h, signingKey, config)
-}
-
-// SignKey computes a signature from priv, asserting that pub is a subkey. On
-// success, the signature is stored in sig. Call Serialize to write it out.
-// If config is nil, sensible defaults will be used.
-func (sig *Signature) SignKey(pub *PublicKey, priv *PrivateKey, config *Config) error {
- if priv.Dummy() {
- return errors.ErrDummyPrivateKey("dummy key found")
- }
- h, err := keySignatureHash(&priv.PublicKey, pub, sig.Hash)
- if err != nil {
- return err
- }
- return sig.Sign(h, priv, config)
-}
-
-// RevokeKey computes a revocation signature of pub using priv. On success, the signature is
-// stored in sig. Call Serialize to write it out.
-// If config is nil, sensible defaults will be used.
-func (sig *Signature) RevokeKey(pub *PublicKey, priv *PrivateKey, config *Config) error {
- h, err := keyRevocationHash(pub, sig.Hash)
- if err != nil {
- return err
- }
- return sig.Sign(h, priv, config)
-}
-
-// RevokeSubkey computes a subkey revocation signature of pub using priv.
-// On success, the signature is stored in sig. Call Serialize to write it out.
-// If config is nil, sensible defaults will be used.
-func (sig *Signature) RevokeSubkey(pub *PublicKey, priv *PrivateKey, config *Config) error {
- // Identical to a subkey binding signature
- return sig.SignKey(pub, priv, config)
-}
-
-// Serialize marshals sig to w. Sign, SignUserId or SignKey must have been
-// called first.
-func (sig *Signature) Serialize(w io.Writer) (err error) {
- if len(sig.outSubpackets) == 0 {
- sig.outSubpackets = sig.rawSubpackets
- }
- if sig.RSASignature == nil && sig.DSASigR == nil && sig.ECDSASigR == nil && sig.EdDSASigR == nil {
- return errors.InvalidArgumentError("Signature: need to call Sign, SignUserId or SignKey before Serialize")
- }
-
- sigLength := 0
- switch sig.PubKeyAlgo {
- case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
- sigLength = int(sig.RSASignature.EncodedLength())
- case PubKeyAlgoDSA:
- sigLength = int(sig.DSASigR.EncodedLength())
- sigLength += int(sig.DSASigS.EncodedLength())
- case PubKeyAlgoECDSA:
- sigLength = int(sig.ECDSASigR.EncodedLength())
- sigLength += int(sig.ECDSASigS.EncodedLength())
- case PubKeyAlgoEdDSA:
- sigLength = int(sig.EdDSASigR.EncodedLength())
- sigLength += int(sig.EdDSASigS.EncodedLength())
- default:
- panic("impossible")
- }
-
- unhashedSubpacketsLen := subpacketsLength(sig.outSubpackets, false)
- length := len(sig.HashSuffix) - 6 /* trailer not included */ +
- 2 /* length of unhashed subpackets */ + unhashedSubpacketsLen +
- 2 /* hash tag */ + sigLength
- if sig.Version == 5 {
- length -= 4 // eight-octet instead of four-octet big endian
- }
- err = serializeHeader(w, packetTypeSignature, length)
- if err != nil {
- return
- }
- err = sig.serializeBody(w)
- if err != nil {
- return err
- }
- return
-}
-
-func (sig *Signature) serializeBody(w io.Writer) (err error) {
- hashedSubpacketsLen := uint16(uint16(sig.HashSuffix[4])<<8) | uint16(sig.HashSuffix[5])
- fields := sig.HashSuffix[:6+hashedSubpacketsLen]
- _, err = w.Write(fields)
- if err != nil {
- return
- }
-
- unhashedSubpacketsLen := subpacketsLength(sig.outSubpackets, false)
- unhashedSubpackets := make([]byte, 2+unhashedSubpacketsLen)
- unhashedSubpackets[0] = byte(unhashedSubpacketsLen >> 8)
- unhashedSubpackets[1] = byte(unhashedSubpacketsLen)
- serializeSubpackets(unhashedSubpackets[2:], sig.outSubpackets, false)
-
- _, err = w.Write(unhashedSubpackets)
- if err != nil {
- return
- }
- _, err = w.Write(sig.HashTag[:])
- if err != nil {
- return
- }
-
- switch sig.PubKeyAlgo {
- case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
- _, err = w.Write(sig.RSASignature.EncodedBytes())
- case PubKeyAlgoDSA:
- if _, err = w.Write(sig.DSASigR.EncodedBytes()); err != nil {
- return
- }
- _, err = w.Write(sig.DSASigS.EncodedBytes())
- case PubKeyAlgoECDSA:
- if _, err = w.Write(sig.ECDSASigR.EncodedBytes()); err != nil {
- return
- }
- _, err = w.Write(sig.ECDSASigS.EncodedBytes())
- case PubKeyAlgoEdDSA:
- if _, err = w.Write(sig.EdDSASigR.EncodedBytes()); err != nil {
- return
- }
- _, err = w.Write(sig.EdDSASigS.EncodedBytes())
- default:
- panic("impossible")
- }
- return
-}
-
-// outputSubpacket represents a subpacket to be marshaled.
-type outputSubpacket struct {
- hashed bool // true if this subpacket is in the hashed area.
- subpacketType signatureSubpacketType
- isCritical bool
- contents []byte
-}
-
-func (sig *Signature) buildSubpackets(issuer PublicKey) (subpackets []outputSubpacket, err error) {
- creationTime := make([]byte, 4)
- binary.BigEndian.PutUint32(creationTime, uint32(sig.CreationTime.Unix()))
- subpackets = append(subpackets, outputSubpacket{true, creationTimeSubpacket, false, creationTime})
-
- if sig.IssuerKeyId != nil && sig.Version == 4 {
- keyId := make([]byte, 8)
- binary.BigEndian.PutUint64(keyId, *sig.IssuerKeyId)
- subpackets = append(subpackets, outputSubpacket{true, issuerSubpacket, true, keyId})
- }
- if sig.IssuerFingerprint != nil {
- contents := append([]uint8{uint8(issuer.Version)}, sig.IssuerFingerprint...)
- subpackets = append(subpackets, outputSubpacket{true, issuerFingerprintSubpacket, sig.Version == 5, contents})
- }
- if sig.SignerUserId != nil {
- subpackets = append(subpackets, outputSubpacket{true, signerUserIdSubpacket, false, []byte(*sig.SignerUserId)})
- }
- if sig.SigLifetimeSecs != nil && *sig.SigLifetimeSecs != 0 {
- sigLifetime := make([]byte, 4)
- binary.BigEndian.PutUint32(sigLifetime, *sig.SigLifetimeSecs)
- subpackets = append(subpackets, outputSubpacket{true, signatureExpirationSubpacket, true, sigLifetime})
- }
-
- // Key flags may only appear in self-signatures or certification signatures.
-
- if sig.FlagsValid {
- var flags byte
- if sig.FlagCertify {
- flags |= KeyFlagCertify
- }
- if sig.FlagSign {
- flags |= KeyFlagSign
- }
- if sig.FlagEncryptCommunications {
- flags |= KeyFlagEncryptCommunications
- }
- if sig.FlagEncryptStorage {
- flags |= KeyFlagEncryptStorage
- }
- if sig.FlagSplitKey {
- flags |= KeyFlagSplitKey
- }
- if sig.FlagAuthenticate {
- flags |= KeyFlagAuthenticate
- }
- if sig.FlagGroupKey {
- flags |= KeyFlagGroupKey
- }
- subpackets = append(subpackets, outputSubpacket{true, keyFlagsSubpacket, false, []byte{flags}})
- }
-
- for _, notation := range sig.Notations {
- subpackets = append(
- subpackets,
- outputSubpacket{
- true,
- notationDataSubpacket,
- notation.IsCritical,
- notation.getData(),
- })
- }
-
- // The following subpackets may only appear in self-signatures.
-
- var features = byte(0x00)
- if sig.SEIPDv1 {
- features |= 0x01
- }
- if sig.SEIPDv2 {
- features |= 0x08
- }
-
- if features != 0x00 {
- subpackets = append(subpackets, outputSubpacket{true, featuresSubpacket, false, []byte{features}})
- }
-
- if sig.TrustLevel != 0 {
- subpackets = append(subpackets, outputSubpacket{true, trustSubpacket, true, []byte{byte(sig.TrustLevel), byte(sig.TrustAmount)}})
- }
-
- if sig.TrustRegularExpression != nil {
- // RFC specifies the string should be null-terminated; add a null byte to the end
- subpackets = append(subpackets, outputSubpacket{true, regularExpressionSubpacket, true, []byte(*sig.TrustRegularExpression + "\000")})
- }
-
- if sig.KeyLifetimeSecs != nil && *sig.KeyLifetimeSecs != 0 {
- keyLifetime := make([]byte, 4)
- binary.BigEndian.PutUint32(keyLifetime, *sig.KeyLifetimeSecs)
- subpackets = append(subpackets, outputSubpacket{true, keyExpirationSubpacket, true, keyLifetime})
- }
-
- if sig.IsPrimaryId != nil && *sig.IsPrimaryId {
- subpackets = append(subpackets, outputSubpacket{true, primaryUserIdSubpacket, false, []byte{1}})
- }
-
- if len(sig.PreferredSymmetric) > 0 {
- subpackets = append(subpackets, outputSubpacket{true, prefSymmetricAlgosSubpacket, false, sig.PreferredSymmetric})
- }
-
- if len(sig.PreferredHash) > 0 {
- subpackets = append(subpackets, outputSubpacket{true, prefHashAlgosSubpacket, false, sig.PreferredHash})
- }
-
- if len(sig.PreferredCompression) > 0 {
- subpackets = append(subpackets, outputSubpacket{true, prefCompressionSubpacket, false, sig.PreferredCompression})
- }
-
- if len(sig.PolicyURI) > 0 {
- subpackets = append(subpackets, outputSubpacket{true, policyUriSubpacket, false, []uint8(sig.PolicyURI)})
- }
-
- if len(sig.PreferredCipherSuites) > 0 {
- serialized := make([]byte, len(sig.PreferredCipherSuites)*2)
- for i, cipherSuite := range sig.PreferredCipherSuites {
- serialized[2*i] = cipherSuite[0]
- serialized[2*i+1] = cipherSuite[1]
- }
- subpackets = append(subpackets, outputSubpacket{true, prefCipherSuitesSubpacket, false, serialized})
- }
-
- // Revocation reason appears only in revocation signatures and is serialized as per section 5.2.3.23.
- if sig.RevocationReason != nil {
- subpackets = append(subpackets, outputSubpacket{true, reasonForRevocationSubpacket, true,
- append([]uint8{uint8(*sig.RevocationReason)}, []uint8(sig.RevocationReasonText)...)})
- }
-
- // EmbeddedSignature appears only in subkeys capable of signing and is serialized as per section 5.2.3.26.
- if sig.EmbeddedSignature != nil {
- var buf bytes.Buffer
- err = sig.EmbeddedSignature.serializeBody(&buf)
- if err != nil {
- return
- }
- subpackets = append(subpackets, outputSubpacket{true, embeddedSignatureSubpacket, true, buf.Bytes()})
- }
-
- return
-}
-
-// AddMetadataToHashSuffix modifies the current hash suffix to include metadata
-// (format, filename, and time). Version 5 keys protect this data by including
-// it in the hash computation. See section 5.2.4.
-func (sig *Signature) AddMetadataToHashSuffix() {
- if sig == nil || sig.Version != 5 {
- return
- }
- if sig.SigType != 0x00 && sig.SigType != 0x01 {
- return
- }
- lit := sig.Metadata
- if lit == nil {
- // This will translate into six 0x00 bytes.
- lit = &LiteralData{}
- }
-
- // Extract the current byte count
- n := sig.HashSuffix[len(sig.HashSuffix)-8:]
- l := uint64(
- uint64(n[0])<<56 | uint64(n[1])<<48 | uint64(n[2])<<40 | uint64(n[3])<<32 |
- uint64(n[4])<<24 | uint64(n[5])<<16 | uint64(n[6])<<8 | uint64(n[7]))
-
- suffix := bytes.NewBuffer(nil)
- suffix.Write(sig.HashSuffix[:l])
-
- // Add the metadata
- var buf [4]byte
- buf[0] = lit.Format
- fileName := lit.FileName
- if len(lit.FileName) > 255 {
- fileName = fileName[:255]
- }
- buf[1] = byte(len(fileName))
- suffix.Write(buf[:2])
- suffix.Write([]byte(fileName))
- binary.BigEndian.PutUint32(buf[:], lit.Time)
- suffix.Write(buf[:])
-
- // Update the counter and restore trailing bytes
- l = uint64(suffix.Len())
- suffix.Write([]byte{0x05, 0xff})
- suffix.Write([]byte{
- uint8(l >> 56), uint8(l >> 48), uint8(l >> 40), uint8(l >> 32),
- uint8(l >> 24), uint8(l >> 16), uint8(l >> 8), uint8(l),
- })
- sig.HashSuffix = suffix.Bytes()
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetric_key_encrypted.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetric_key_encrypted.go
deleted file mode 100644
index bc2caf0e..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetric_key_encrypted.go
+++ /dev/null
@@ -1,303 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package packet
-
-import (
- "bytes"
- "crypto/cipher"
- "crypto/sha256"
- "io"
- "strconv"
-
- "github.com/ProtonMail/go-crypto/openpgp/errors"
- "github.com/ProtonMail/go-crypto/openpgp/s2k"
- "golang.org/x/crypto/hkdf"
-)
-
-// This is the largest session key that we'll support. Since OpenPGP supports
-// at most a 256-bit cipher, this is large enough to also contain the auth tag.
-const maxSessionKeySizeInBytes = 64
-
-// SymmetricKeyEncrypted represents a passphrase protected session key. See RFC
-// 4880, section 5.3.
-type SymmetricKeyEncrypted struct {
- Version int
- CipherFunc CipherFunction
- Mode AEADMode
- s2k func(out, in []byte)
- iv []byte
- encryptedKey []byte // Contains also the authentication tag for AEAD
-}
-
-// parse parses a SymmetricKeyEncrypted packet as specified in
-// https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#name-symmetric-key-encrypted-ses
-func (ske *SymmetricKeyEncrypted) parse(r io.Reader) error {
- var buf [1]byte
-
- // Version
- if _, err := readFull(r, buf[:]); err != nil {
- return err
- }
- ske.Version = int(buf[0])
- if ske.Version != 4 && ske.Version != 5 {
- return errors.UnsupportedError("unknown SymmetricKeyEncrypted version")
- }
-
- if ske.Version == 5 {
- // Scalar octet count
- if _, err := readFull(r, buf[:]); err != nil {
- return err
- }
- }
-
- // Cipher function
- if _, err := readFull(r, buf[:]); err != nil {
- return err
- }
- ske.CipherFunc = CipherFunction(buf[0])
- if !ske.CipherFunc.IsSupported() {
- return errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(buf[0])))
- }
-
- if ske.Version == 5 {
- // AEAD mode
- if _, err := readFull(r, buf[:]); err != nil {
- return errors.StructuralError("cannot read AEAD octet from packet")
- }
- ske.Mode = AEADMode(buf[0])
-
- // Scalar octet count
- if _, err := readFull(r, buf[:]); err != nil {
- return err
- }
- }
-
- var err error
- if ske.s2k, err = s2k.Parse(r); err != nil {
- if _, ok := err.(errors.ErrDummyPrivateKey); ok {
- return errors.UnsupportedError("missing key GNU extension in session key")
- }
- return err
- }
-
- if ske.Version == 5 {
- // AEAD IV
- iv := make([]byte, ske.Mode.IvLength())
- _, err := readFull(r, iv)
- if err != nil {
- return errors.StructuralError("cannot read AEAD IV")
- }
-
- ske.iv = iv
- }
-
- encryptedKey := make([]byte, maxSessionKeySizeInBytes)
- // The session key may follow. We just have to try to read to find
- // out. If it exists then we limit it to maxSessionKeySizeInBytes.
- n, err := readFull(r, encryptedKey)
- if err != nil && err != io.ErrUnexpectedEOF {
- return err
- }
-
- if n != 0 {
- if n == maxSessionKeySizeInBytes {
- return errors.UnsupportedError("oversized encrypted session key")
- }
- ske.encryptedKey = encryptedKey[:n]
- }
- return nil
-}
-
-// Decrypt attempts to decrypt an encrypted session key and returns the key and
-// the cipher to use when decrypting a subsequent Symmetrically Encrypted Data
-// packet.
-func (ske *SymmetricKeyEncrypted) Decrypt(passphrase []byte) ([]byte, CipherFunction, error) {
- key := make([]byte, ske.CipherFunc.KeySize())
- ske.s2k(key, passphrase)
- if len(ske.encryptedKey) == 0 {
- return key, ske.CipherFunc, nil
- }
- switch ske.Version {
- case 4:
- plaintextKey, cipherFunc, err := ske.decryptV4(key)
- return plaintextKey, cipherFunc, err
- case 5:
- plaintextKey, err := ske.decryptV5(key)
- return plaintextKey, CipherFunction(0), err
- }
- err := errors.UnsupportedError("unknown SymmetricKeyEncrypted version")
- return nil, CipherFunction(0), err
-}
-
-func (ske *SymmetricKeyEncrypted) decryptV4(key []byte) ([]byte, CipherFunction, error) {
- // the IV is all zeros
- iv := make([]byte, ske.CipherFunc.blockSize())
- c := cipher.NewCFBDecrypter(ske.CipherFunc.new(key), iv)
- plaintextKey := make([]byte, len(ske.encryptedKey))
- c.XORKeyStream(plaintextKey, ske.encryptedKey)
- cipherFunc := CipherFunction(plaintextKey[0])
- if cipherFunc.blockSize() == 0 {
- return nil, ske.CipherFunc, errors.UnsupportedError(
- "unknown cipher: " + strconv.Itoa(int(cipherFunc)))
- }
- plaintextKey = plaintextKey[1:]
- if len(plaintextKey) != cipherFunc.KeySize() {
- return nil, cipherFunc, errors.StructuralError(
- "length of decrypted key not equal to cipher keysize")
- }
- return plaintextKey, cipherFunc, nil
-}
-
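decryptV4 below reverses a small construction: one octet naming the session key's cipher, then the key itself, all CFB-encrypted under the S2K-derived key with an all-zero IV. A stdlib roundtrip of that framing; AES-128 stands in for OpenPGP symmetric algorithm 7, and the zeroed keys are placeholders:

// skesk_v4.go — roundtrip of the v4 SKESK body framing: CFB(zero IV, KEK)
// over [cipher-algo octet || session key]. Keys are zeroed placeholders.
package main

import (
	"bytes"
	"crypto/aes"
	"crypto/cipher"
	"fmt"
)

func main() {
	kek := make([]byte, 16)        // really derived via S2K from a passphrase
	sessionKey := make([]byte, 16) // really random
	const algoAES128 = 7           // OpenPGP symmetric algorithm ID for AES-128

	block, err := aes.NewCipher(kek)
	if err != nil {
		panic(err)
	}
	iv := make([]byte, block.BlockSize()) // the IV is all zeros

	// Encrypt [algo || key]: this is the packet's encryptedKey field.
	plain := append([]byte{algoAES128}, sessionKey...)
	enc := make([]byte, len(plain))
	cipher.NewCFBEncrypter(block, iv).XORKeyStream(enc, plain)

	// Decrypt and re-parse, mirroring decryptV4.
	dec := make([]byte, len(enc))
	cipher.NewCFBDecrypter(block, iv).XORKeyStream(dec, enc)
	fmt.Printf("algo=%d key-ok=%v\n", dec[0], bytes.Equal(dec[1:], sessionKey))
}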
-func (ske *SymmetricKeyEncrypted) decryptV5(key []byte) ([]byte, error) {
- adata := []byte{0xc3, byte(5), byte(ske.CipherFunc), byte(ske.Mode)}
- aead := getEncryptedKeyAeadInstance(ske.CipherFunc, ske.Mode, key, adata)
-
- plaintextKey, err := aead.Open(nil, ske.iv, ske.encryptedKey, adata)
- if err != nil {
- return nil, err
- }
- return plaintextKey, nil
-}
-
-// SerializeSymmetricKeyEncrypted serializes a symmetric key packet to w.
-// The packet contains a random session key, encrypted by a key derived from
-// the given passphrase. The session key is returned and must be passed to
-// SerializeSymmetricallyEncrypted.
-// If config is nil, sensible defaults will be used.
-func SerializeSymmetricKeyEncrypted(w io.Writer, passphrase []byte, config *Config) (key []byte, err error) {
- cipherFunc := config.Cipher()
-
- sessionKey := make([]byte, cipherFunc.KeySize())
- _, err = io.ReadFull(config.Random(), sessionKey)
- if err != nil {
- return
- }
-
- err = SerializeSymmetricKeyEncryptedReuseKey(w, sessionKey, passphrase, config)
- if err != nil {
- return
- }
-
- key = sessionKey
- return
-}
-
-// SerializeSymmetricKeyEncryptedReuseKey serializes a symmetric key packet to w.
-// The packet contains the given session key, encrypted by a key derived from
-// the given passphrase. The given session key must then be passed to
-// SerializeSymmetricallyEncrypted.
-// If config is nil, sensible defaults will be used.
-func SerializeSymmetricKeyEncryptedReuseKey(w io.Writer, sessionKey []byte, passphrase []byte, config *Config) (err error) {
- var version int
- if config.AEAD() != nil {
- version = 5
- } else {
- version = 4
- }
- cipherFunc := config.Cipher()
- // cipherFunc must be AES
- if !cipherFunc.IsSupported() || cipherFunc < CipherAES128 || cipherFunc > CipherAES256 {
- return errors.UnsupportedError("unsupported cipher: " + strconv.Itoa(int(cipherFunc)))
- }
-
- keySize := cipherFunc.KeySize()
- s2kBuf := new(bytes.Buffer)
- keyEncryptingKey := make([]byte, keySize)
- // s2k.Serialize salts and stretches the passphrase, and writes the
- // resulting key to keyEncryptingKey and the s2k descriptor to s2kBuf.
- err = s2k.Serialize(s2kBuf, keyEncryptingKey, config.Random(), passphrase, &s2k.Config{Hash: config.Hash(), S2KCount: config.PasswordHashIterations()})
- if err != nil {
- return
- }
- s2kBytes := s2kBuf.Bytes()
-
- var packetLength int
- switch version {
- case 4:
- packetLength = 2 /* header */ + len(s2kBytes) + 1 /* cipher type */ + keySize
- case 5:
- ivLen := config.AEAD().Mode().IvLength()
- tagLen := config.AEAD().Mode().TagLength()
- packetLength = 5 + len(s2kBytes) + ivLen + keySize + tagLen
- }
- err = serializeHeader(w, packetTypeSymmetricKeyEncrypted, packetLength)
- if err != nil {
- return
- }
-
- // Symmetric Key Encrypted Version
- buf := []byte{byte(version)}
-
- if version == 5 {
- // Scalar octet count
- buf = append(buf, byte(3 + len(s2kBytes) + config.AEAD().Mode().IvLength()))
- }
-
- // Cipher function
- buf = append(buf, byte(cipherFunc))
-
- if version == 5 {
- // AEAD mode
- buf = append(buf, byte(config.AEAD().Mode()))
-
- // Scalar octet count
- buf = append(buf, byte(len(s2kBytes)))
- }
- _, err = w.Write(buf)
- if err != nil {
- return
- }
- _, err = w.Write(s2kBytes)
- if err != nil {
- return
- }
-
- switch version {
- case 4:
- iv := make([]byte, cipherFunc.blockSize())
- c := cipher.NewCFBEncrypter(cipherFunc.new(keyEncryptingKey), iv)
- encryptedCipherAndKey := make([]byte, keySize+1)
- c.XORKeyStream(encryptedCipherAndKey, buf[1:])
- c.XORKeyStream(encryptedCipherAndKey[1:], sessionKey)
- _, err = w.Write(encryptedCipherAndKey)
- if err != nil {
- return
- }
- case 5:
- mode := config.AEAD().Mode()
- adata := []byte{0xc3, byte(5), byte(cipherFunc), byte(mode)}
- aead := getEncryptedKeyAeadInstance(cipherFunc, mode, keyEncryptingKey, adata)
-
- // Sample iv using random reader
- iv := make([]byte, config.AEAD().Mode().IvLength())
- _, err = io.ReadFull(config.Random(), iv)
- if err != nil {
- return
- }
- // Seal and write (encryptedData includes auth. tag)
- encryptedData := aead.Seal(nil, iv, sessionKey, adata)
- _, err = w.Write(iv)
- if err != nil {
- return
- }
- _, err = w.Write(encryptedData)
- if err != nil {
- return
- }
- }
-
- return
-}
-
-func getEncryptedKeyAeadInstance(c CipherFunction, mode AEADMode, inputKey, associatedData []byte) (aead cipher.AEAD) {
- hkdfReader := hkdf.New(sha256.New, inputKey, []byte{}, associatedData)
-
- encryptionKey := make([]byte, c.KeySize())
- _, _ = readFull(hkdfReader, encryptionKey)
-
- blockCipher := c.new(encryptionKey)
- return mode.new(blockCipher)
-}
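
For context on how these two serializers compose, here is a minimal sketch (not part of this change; the package and function names are illustrative) that passphrase-encrypts a payload using only the signatures visible above:

package sketch

import (
	"bytes"
	"io"

	"github.com/ProtonMail/go-crypto/openpgp/packet"
)

// encryptWithPassphrase writes a SKESK packet for a fresh session key,
// then opens the integrity-protected data packet encrypted under it.
func encryptWithPassphrase(passphrase, payload []byte) ([]byte, error) {
	var buf bytes.Buffer
	config := &packet.Config{} // zero value falls back to sensible defaults

	// Generate a fresh session key and write the SKESK packet protecting it.
	sessionKey := make([]byte, config.Cipher().KeySize())
	if _, err := io.ReadFull(config.Random(), sessionKey); err != nil {
		return nil, err
	}
	if err := packet.SerializeSymmetricKeyEncryptedReuseKey(&buf, sessionKey, passphrase, config); err != nil {
		return nil, err
	}

	// Everything written to w is encrypted under sessionKey. A real message
	// would write OpenPGP packets (e.g. a literal data packet) here.
	w, err := packet.SerializeSymmetricallyEncrypted(&buf, config.Cipher(), false, packet.CipherSuite{}, sessionKey, config)
	if err != nil {
		return nil, err
	}
	if _, err := w.Write(payload); err != nil {
		return nil, err
	}
	if err := w.Close(); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}
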
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted.go
deleted file mode 100644
index dc1a2403..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted.go
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package packet
-
-import (
- "io"
-
- "github.com/ProtonMail/go-crypto/openpgp/errors"
-)
-
-const aeadSaltSize = 32
-
-// SymmetricallyEncrypted represents a symmetrically encrypted byte string. The
-// encrypted Contents will consist of more OpenPGP packets. See RFC 4880,
-// sections 5.7 and 5.13.
-type SymmetricallyEncrypted struct {
- Version int
- Contents io.Reader // contains tag for version 2
- IntegrityProtected bool // If true it is type 18 (with MDC or AEAD). False is packet type 9
-
- // Specific to version 1
- prefix []byte
-
- // Specific to version 2
- cipher CipherFunction
- mode AEADMode
- chunkSizeByte byte
- salt [aeadSaltSize]byte
-}
-
-const (
- symmetricallyEncryptedVersionMdc = 1
- symmetricallyEncryptedVersionAead = 2
-)
-
-func (se *SymmetricallyEncrypted) parse(r io.Reader) error {
- if se.IntegrityProtected {
- // See RFC 4880, section 5.13.
- var buf [1]byte
- _, err := readFull(r, buf[:])
- if err != nil {
- return err
- }
-
- switch buf[0] {
- case symmetricallyEncryptedVersionMdc:
- se.Version = symmetricallyEncryptedVersionMdc
- case symmetricallyEncryptedVersionAead:
- se.Version = symmetricallyEncryptedVersionAead
- if err := se.parseAead(r); err != nil {
- return err
- }
- default:
- return errors.UnsupportedError("unknown SymmetricallyEncrypted version")
- }
- }
- se.Contents = r
- return nil
-}
-
-// Decrypt returns a ReadCloser, from which the decrypted Contents of the
-// packet can be read. An incorrect key will only be detected after trying
-// to decrypt the entire data.
-func (se *SymmetricallyEncrypted) Decrypt(c CipherFunction, key []byte) (io.ReadCloser, error) {
- if se.Version == symmetricallyEncryptedVersionAead {
- return se.decryptAead(key)
- }
-
- return se.decryptMdc(c, key)
-}
-
-// SerializeSymmetricallyEncrypted serializes a symmetrically encrypted packet
-// to w and returns a WriteCloser to which the to-be-encrypted packets can be
-// written.
-// If config is nil, sensible defaults will be used.
-func SerializeSymmetricallyEncrypted(w io.Writer, c CipherFunction, aeadSupported bool, cipherSuite CipherSuite, key []byte, config *Config) (Contents io.WriteCloser, err error) {
- writeCloser := noOpCloser{w}
- ciphertext, err := serializeStreamHeader(writeCloser, packetTypeSymmetricallyEncryptedIntegrityProtected)
- if err != nil {
- return
- }
-
- if aeadSupported {
- return serializeSymmetricallyEncryptedAead(ciphertext, cipherSuite, config.AEADConfig.ChunkSizeByte(), config.Random(), key)
- }
-
- return serializeSymmetricallyEncryptedMdc(ciphertext, c, key, config)
-}
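
The Decrypt contract above is easy to misuse: as the doc comment notes, a wrong key or tampering only surfaces after the entire stream has been processed, and the check itself runs at Close. A hedged sketch of the correct calling pattern (helper name is illustrative):

package sketch

import (
	"io"

	"github.com/ProtonMail/go-crypto/openpgp/packet"
)

// decryptSEIPD drains and then closes the reader returned by Decrypt so the
// trailing integrity check actually runs; skipping Close would accept
// tampered ciphertext.
func decryptSEIPD(se *packet.SymmetricallyEncrypted, c packet.CipherFunction, key []byte) ([]byte, error) {
	rc, err := se.Decrypt(c, key)
	if err != nil {
		return nil, err
	}
	plaintext, err := io.ReadAll(rc)
	if err != nil {
		return nil, err
	}
	// Close verifies the MDC (v1) or the final AEAD chunk (v2); an error
	// here means the ciphertext was modified or the key was wrong.
	if err := rc.Close(); err != nil {
		return nil, err
	}
	return plaintext, nil
}
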
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_aead.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_aead.go
deleted file mode 100644
index 241800c0..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_aead.go
+++ /dev/null
@@ -1,155 +0,0 @@
-// Copyright 2023 Proton AG. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package packet
-
-import (
- "crypto/cipher"
- "crypto/sha256"
- "github.com/ProtonMail/go-crypto/openpgp/errors"
- "golang.org/x/crypto/hkdf"
- "io"
-)
-
-// parseAead parses a V2 SEIPD packet (AEAD) as specified in
-// https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#section-5.13.2
-func (se *SymmetricallyEncrypted) parseAead(r io.Reader) error {
- headerData := make([]byte, 3)
- if n, err := io.ReadFull(r, headerData); n < 3 {
- return errors.StructuralError("could not read aead header: " + err.Error())
- }
-
- // Cipher
- se.cipher = CipherFunction(headerData[0])
- // cipherFunc must have block size 16 to use AEAD
- if se.cipher.blockSize() != 16 {
- return errors.UnsupportedError("invalid aead cipher: " + string(se.cipher))
- }
-
- // Mode
- se.mode = AEADMode(headerData[1])
- if se.mode.TagLength() == 0 {
- return errors.UnsupportedError("unknown aead mode: " + string(se.mode))
- }
-
- // Chunk size
- se.chunkSizeByte = headerData[2]
- if se.chunkSizeByte > 16 {
- return errors.UnsupportedError("invalid aead chunk size byte: " + string(se.chunkSizeByte))
- }
-
- // Salt
- if n, err := io.ReadFull(r, se.salt[:]); n < aeadSaltSize {
- return errors.StructuralError("could not read aead salt: " + err.Error())
- }
-
- return nil
-}
-
-// associatedData for chunks: tag, version, cipher, mode, chunk size byte
-func (se *SymmetricallyEncrypted) associatedData() []byte {
- return []byte{
- 0xD2,
- symmetricallyEncryptedVersionAead,
- byte(se.cipher),
- byte(se.mode),
- se.chunkSizeByte,
- }
-}
-
-// decryptAead decrypts a V2 SEIPD packet (AEAD) as specified in
-// https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#section-5.13.2
-func (se *SymmetricallyEncrypted) decryptAead(inputKey []byte) (io.ReadCloser, error) {
- aead, nonce := getSymmetricallyEncryptedAeadInstance(se.cipher, se.mode, inputKey, se.salt[:], se.associatedData())
-
- // Carry the first tagLen bytes
- tagLen := se.mode.TagLength()
- peekedBytes := make([]byte, tagLen)
- n, err := io.ReadFull(se.Contents, peekedBytes)
- if n < tagLen || (err != nil && err != io.EOF) {
- return nil, errors.StructuralError("not enough data to decrypt:" + err.Error())
- }
-
- return &aeadDecrypter{
- aeadCrypter: aeadCrypter{
- aead: aead,
- chunkSize: decodeAEADChunkSize(se.chunkSizeByte),
- initialNonce: nonce,
- associatedData: se.associatedData(),
- chunkIndex: make([]byte, 8),
- packetTag: packetTypeSymmetricallyEncryptedIntegrityProtected,
- },
- reader: se.Contents,
- peekedBytes: peekedBytes,
- }, nil
-}
-
-// serializeSymmetricallyEncryptedAead encrypts to a writer a V2 SEIPD packet (AEAD) as specified in
-// https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#section-5.13.2
-func serializeSymmetricallyEncryptedAead(ciphertext io.WriteCloser, cipherSuite CipherSuite, chunkSizeByte byte, rand io.Reader, inputKey []byte) (Contents io.WriteCloser, err error) {
- // cipherFunc must have block size 16 to use AEAD
- if cipherSuite.Cipher.blockSize() != 16 {
- return nil, errors.InvalidArgumentError("invalid aead cipher function")
- }
-
- if cipherSuite.Cipher.KeySize() != len(inputKey) {
- return nil, errors.InvalidArgumentError("error in aead serialization: bad key length")
- }
-
- // Data for en/decryption: tag, version, cipher, aead mode, chunk size
- prefix := []byte{
- 0xD2,
- symmetricallyEncryptedVersionAead,
- byte(cipherSuite.Cipher),
- byte(cipherSuite.Mode),
- chunkSizeByte,
- }
-
- // Write the header (which corresponds to the prefix, minus its first byte)
- n, err := ciphertext.Write(prefix[1:])
- if err != nil || n < 4 {
- return nil, err
- }
-
- // Random salt
- salt := make([]byte, aeadSaltSize)
- if _, err := rand.Read(salt); err != nil {
- return nil, err
- }
-
- if _, err := ciphertext.Write(salt); err != nil {
- return nil, err
- }
-
- aead, nonce := getSymmetricallyEncryptedAeadInstance(cipherSuite.Cipher, cipherSuite.Mode, inputKey, salt, prefix)
-
- return &aeadEncrypter{
- aeadCrypter: aeadCrypter{
- aead: aead,
- chunkSize: decodeAEADChunkSize(chunkSizeByte),
- associatedData: prefix,
- chunkIndex: make([]byte, 8),
- initialNonce: nonce,
- packetTag: packetTypeSymmetricallyEncryptedIntegrityProtected,
- },
- writer: ciphertext,
- }, nil
-}
-
-func getSymmetricallyEncryptedAeadInstance(c CipherFunction, mode AEADMode, inputKey, salt, associatedData []byte) (aead cipher.AEAD, nonce []byte) {
- hkdfReader := hkdf.New(sha256.New, inputKey, salt, associatedData)
-
- encryptionKey := make([]byte, c.KeySize())
- _, _ = readFull(hkdfReader, encryptionKey)
-
- // Last 64 bits of nonce are the counter
- nonce = make([]byte, mode.IvLength()-8)
-
- _, _ = readFull(hkdfReader, nonce)
-
- blockCipher := c.new(encryptionKey)
- aead = mode.new(blockCipher)
-
- return
-}
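
The HKDF split in getSymmetricallyEncryptedAeadInstance is worth spelling out: a single HKDF-SHA256 stream, keyed by the session key and salted per message, yields the chunk-encryption key followed by the nonce prefix, while the final 8 nonce bytes are reserved for the chunk counter. A standalone sketch of that derivation (helper name illustrative; assumes golang.org/x/crypto/hkdf as vendored here):

package sketch

import (
	"crypto/sha256"
	"io"

	"golang.org/x/crypto/hkdf"
)

// deriveSEIPDv2 mirrors getSymmetricallyEncryptedAeadInstance above: read
// keyLen bytes of chunk key, then ivLen-8 bytes of nonce prefix, from one
// HKDF-SHA256 stream over (sessionKey, salt, aad).
func deriveSEIPDv2(sessionKey, salt, aad []byte, keyLen, ivLen int) (key, noncePrefix []byte, err error) {
	r := hkdf.New(sha256.New, sessionKey, salt, aad)
	key = make([]byte, keyLen)
	if _, err = io.ReadFull(r, key); err != nil {
		return nil, nil, err
	}
	noncePrefix = make([]byte, ivLen-8) // last 64 bits are the chunk counter
	if _, err = io.ReadFull(r, noncePrefix); err != nil {
		return nil, nil, err
	}
	return key, noncePrefix, nil
}
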
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_mdc.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_mdc.go
deleted file mode 100644
index 3e070f8b..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_mdc.go
+++ /dev/null
@@ -1,256 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package packet
-
-import (
- "crypto/cipher"
- "crypto/sha1"
- "crypto/subtle"
- "hash"
- "io"
- "strconv"
-
- "github.com/ProtonMail/go-crypto/openpgp/errors"
-)
-
-// seMdcReader wraps an io.Reader with a no-op Close method.
-type seMdcReader struct {
- in io.Reader
-}
-
-func (ser seMdcReader) Read(buf []byte) (int, error) {
- return ser.in.Read(buf)
-}
-
-func (ser seMdcReader) Close() error {
- return nil
-}
-
-func (se *SymmetricallyEncrypted) decryptMdc(c CipherFunction, key []byte) (io.ReadCloser, error) {
- if !c.IsSupported() {
- return nil, errors.UnsupportedError("unsupported cipher: " + strconv.Itoa(int(c)))
- }
-
- if len(key) != c.KeySize() {
- return nil, errors.InvalidArgumentError("SymmetricallyEncrypted: incorrect key length")
- }
-
- if se.prefix == nil {
- se.prefix = make([]byte, c.blockSize()+2)
- _, err := readFull(se.Contents, se.prefix)
- if err != nil {
- return nil, err
- }
- } else if len(se.prefix) != c.blockSize()+2 {
- return nil, errors.InvalidArgumentError("can't try ciphers with different block lengths")
- }
-
- ocfbResync := OCFBResync
- if se.IntegrityProtected {
- // MDC packets use a different form of OCFB mode.
- ocfbResync = OCFBNoResync
- }
-
- s := NewOCFBDecrypter(c.new(key), se.prefix, ocfbResync)
-
- plaintext := cipher.StreamReader{S: s, R: se.Contents}
-
- if se.IntegrityProtected {
- // IntegrityProtected packets have an embedded hash that we need to check.
- h := sha1.New()
- h.Write(se.prefix)
- return &seMDCReader{in: plaintext, h: h}, nil
- }
-
- // Otherwise, we just need to wrap plaintext so that it's a valid ReadCloser.
- return seMdcReader{plaintext}, nil
-}
-
-const mdcTrailerSize = 1 /* tag byte */ + 1 /* length byte */ + sha1.Size
-
-// An seMDCReader wraps an io.Reader, maintains a running hash and keeps hold
-// of the most recent 22 bytes (mdcTrailerSize). Upon EOF, those bytes form an
-// MDC packet containing a hash of the previous Contents which is checked
-// against the running hash. See RFC 4880, section 5.13.
-type seMDCReader struct {
- in io.Reader
- h hash.Hash
- trailer [mdcTrailerSize]byte
- scratch [mdcTrailerSize]byte
- trailerUsed int
- error bool
- eof bool
-}
-
-func (ser *seMDCReader) Read(buf []byte) (n int, err error) {
- if ser.error {
- err = io.ErrUnexpectedEOF
- return
- }
- if ser.eof {
- err = io.EOF
- return
- }
-
- // If we haven't yet filled the trailer buffer then we must do that
- // first.
- for ser.trailerUsed < mdcTrailerSize {
- n, err = ser.in.Read(ser.trailer[ser.trailerUsed:])
- ser.trailerUsed += n
- if err == io.EOF {
- if ser.trailerUsed != mdcTrailerSize {
- n = 0
- err = io.ErrUnexpectedEOF
- ser.error = true
- return
- }
- ser.eof = true
- n = 0
- return
- }
-
- if err != nil {
- n = 0
- return
- }
- }
-
- // If it's a short read then we read into a temporary buffer and shift
- // the data into the caller's buffer.
- if len(buf) <= mdcTrailerSize {
- n, err = readFull(ser.in, ser.scratch[:len(buf)])
- copy(buf, ser.trailer[:n])
- ser.h.Write(buf[:n])
- copy(ser.trailer[:], ser.trailer[n:])
- copy(ser.trailer[mdcTrailerSize-n:], ser.scratch[:])
- if n < len(buf) {
- ser.eof = true
- err = io.EOF
- }
- return
- }
-
- n, err = ser.in.Read(buf[mdcTrailerSize:])
- copy(buf, ser.trailer[:])
- ser.h.Write(buf[:n])
- copy(ser.trailer[:], buf[n:])
-
- if err == io.EOF {
- ser.eof = true
- }
- return
-}
-
-// This is a new-format packet tag byte for a type 19 (Integrity Protected) packet.
-const mdcPacketTagByte = byte(0x80) | 0x40 | 19
-
-func (ser *seMDCReader) Close() error {
- if ser.error {
- return errors.ErrMDCMissing
- }
-
- for !ser.eof {
- // We haven't seen EOF so we need to read to the end
- var buf [1024]byte
- _, err := ser.Read(buf[:])
- if err == io.EOF {
- break
- }
- if err != nil {
- return errors.ErrMDCMissing
- }
- }
-
- ser.h.Write(ser.trailer[:2])
-
- final := ser.h.Sum(nil)
- if subtle.ConstantTimeCompare(final, ser.trailer[2:]) != 1 {
- return errors.ErrMDCHashMismatch
- }
- // The hash already includes the MDC header, but we still check its value
- // to confirm encryption correctness
- if ser.trailer[0] != mdcPacketTagByte || ser.trailer[1] != sha1.Size {
- return errors.ErrMDCMissing
- }
- return nil
-}
-
-// An seMDCWriter writes through to an io.WriteCloser while maintaining a
-// running hash of the data written. On close, it emits an MDC packet
-// containing the running hash.
-type seMDCWriter struct {
- w io.WriteCloser
- h hash.Hash
-}
-
-func (w *seMDCWriter) Write(buf []byte) (n int, err error) {
- w.h.Write(buf)
- return w.w.Write(buf)
-}
-
-func (w *seMDCWriter) Close() (err error) {
- var buf [mdcTrailerSize]byte
-
- buf[0] = mdcPacketTagByte
- buf[1] = sha1.Size
- w.h.Write(buf[:2])
- digest := w.h.Sum(nil)
- copy(buf[2:], digest)
-
- _, err = w.w.Write(buf[:])
- if err != nil {
- return
- }
- return w.w.Close()
-}
-
-// noOpCloser is like an ioutil.NopCloser, but for an io.Writer.
-type noOpCloser struct {
- w io.Writer
-}
-
-func (c noOpCloser) Write(data []byte) (n int, err error) {
- return c.w.Write(data)
-}
-
-func (c noOpCloser) Close() error {
- return nil
-}
-
-func serializeSymmetricallyEncryptedMdc(ciphertext io.WriteCloser, c CipherFunction, key []byte, config *Config) (Contents io.WriteCloser, err error) {
- // Disallow old cipher suites
- if !c.IsSupported() || c < CipherAES128 {
- return nil, errors.InvalidArgumentError("invalid mdc cipher function")
- }
-
- if c.KeySize() != len(key) {
- return nil, errors.InvalidArgumentError("error in mdc serialization: bad key length")
- }
-
- _, err = ciphertext.Write([]byte{symmetricallyEncryptedVersionMdc})
- if err != nil {
- return
- }
-
- block := c.new(key)
- blockSize := block.BlockSize()
- iv := make([]byte, blockSize)
- _, err = config.Random().Read(iv)
- if err != nil {
- return
- }
- s, prefix := NewOCFBEncrypter(block, iv, OCFBNoResync)
- _, err = ciphertext.Write(prefix)
- if err != nil {
- return
- }
- plaintext := cipher.StreamWriter{S: s, W: ciphertext}
-
- h := sha1.New()
- h.Write(iv)
- h.Write(iv[blockSize-2:])
- Contents = &seMDCWriter{w: plaintext, h: h}
- return
-}
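
As a worked example of the trailer seMDCWriter.Close emits: the tag byte is 0x80|0x40|19 = 0xD3 (a new-format header for packet type 19), the length byte is sha1.Size (20), and the digest covers everything written so far plus those two header bytes, giving the 22-byte mdcTrailerSize. A minimal sketch (names illustrative):

package sketch

import (
	"crypto/sha1"
	"hash"
)

// New-format packet tag byte for a type 19 (MDC) packet: 0x80|0x40|19 = 0xD3.
const mdcTag = byte(0x80) | 0x40 | 19

// appendMDCTrailer mirrors seMDCWriter.Close above: the two header bytes
// (tag and length) are themselves hashed before the digest is taken, and
// the 22-byte trailer is tag || length || SHA-1 digest.
func appendMDCTrailer(h hash.Hash, out []byte) []byte {
	header := []byte{mdcTag, sha1.Size}
	h.Write(header)
	return append(append(out, header...), h.Sum(nil)...)
}
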
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userattribute.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userattribute.go
deleted file mode 100644
index 88ec72c6..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userattribute.go
+++ /dev/null
@@ -1,101 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package packet
-
-import (
- "bytes"
- "image"
- "image/jpeg"
- "io"
- "io/ioutil"
-)
-
-const UserAttrImageSubpacket = 1
-
-// UserAttribute is capable of storing other types of data about a user
-// beyond name, email and a text comment. In practice, user attributes are typically used
-// to store a signed thumbnail photo JPEG image of the user.
-// See RFC 4880, section 5.12.
-type UserAttribute struct {
- Contents []*OpaqueSubpacket
-}
-
-// NewUserAttributePhoto creates a user attribute packet
-// containing the given images.
-func NewUserAttributePhoto(photos ...image.Image) (uat *UserAttribute, err error) {
- uat = new(UserAttribute)
- for _, photo := range photos {
- var buf bytes.Buffer
- // RFC 4880, Section 5.12.1.
- data := []byte{
- 0x10, 0x00, // Little-endian image header length (16 bytes)
- 0x01, // Image header version 1
- 0x01, // JPEG
- 0, 0, 0, 0, // 12 reserved octets, must be all zero.
- 0, 0, 0, 0,
- 0, 0, 0, 0}
- if _, err = buf.Write(data); err != nil {
- return
- }
- if err = jpeg.Encode(&buf, photo, nil); err != nil {
- return
- }
-
- lengthBuf := make([]byte, 5)
- n := serializeSubpacketLength(lengthBuf, len(buf.Bytes())+1)
- lengthBuf = lengthBuf[:n]
-
- uat.Contents = append(uat.Contents, &OpaqueSubpacket{
- SubType: UserAttrImageSubpacket,
- EncodedLength: lengthBuf,
- Contents: buf.Bytes(),
- })
- }
- return
-}
-
-// NewUserAttribute creates a new user attribute packet containing the given subpackets.
-func NewUserAttribute(contents ...*OpaqueSubpacket) *UserAttribute {
- return &UserAttribute{Contents: contents}
-}
-
-func (uat *UserAttribute) parse(r io.Reader) (err error) {
- // RFC 4880, section 5.13
- b, err := ioutil.ReadAll(r)
- if err != nil {
- return
- }
- uat.Contents, err = OpaqueSubpackets(b)
- return
-}
-
-// Serialize marshals the user attribute to w in the form of an OpenPGP packet, including
-// header.
-func (uat *UserAttribute) Serialize(w io.Writer) (err error) {
- var buf bytes.Buffer
- for _, sp := range uat.Contents {
- err = sp.Serialize(&buf)
- if err != nil {
- return err
- }
- }
- if err = serializeHeader(w, packetTypeUserAttribute, buf.Len()); err != nil {
- return err
- }
- _, err = w.Write(buf.Bytes())
- return
-}
-
-// ImageData returns zero or more byte slices, each containing
-// JPEG File Interchange Format (JFIF), for each photo in the
-// user attribute packet.
-func (uat *UserAttribute) ImageData() (imageData [][]byte) {
- for _, sp := range uat.Contents {
- if sp.SubType == UserAttrImageSubpacket && len(sp.Contents) > 16 {
- imageData = append(imageData, sp.Contents[16:])
- }
- }
- return
-}
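
A short usage sketch for the photo-attribute API above (helper name illustrative; any image.Image that jpeg.Encode accepts will do):

package sketch

import (
	"bytes"
	"image"

	"github.com/ProtonMail/go-crypto/openpgp/packet"
)

// photoAttributePacket wraps an image in a photo user attribute and
// serializes it as a complete OpenPGP packet, header included.
func photoAttributePacket(img image.Image) ([]byte, error) {
	uat, err := packet.NewUserAttributePhoto(img)
	if err != nil {
		return nil, err
	}
	var buf bytes.Buffer
	if err := uat.Serialize(&buf); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}
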
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userid.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userid.go
deleted file mode 100644
index 614fbafd..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userid.go
+++ /dev/null
@@ -1,167 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package packet
-
-import (
- "io"
- "io/ioutil"
- "strings"
-)
-
-// UserId contains text that is intended to represent the name and email
-// address of the key holder. See RFC 4880, section 5.11. By convention, this
-// takes the form "Full Name (Comment) "
-type UserId struct {
- Id string // By convention, this takes the form "Full Name (Comment) <email@example.com>" which is split out in the fields below.
-
- Name, Comment, Email string
-}
-
-func hasInvalidCharacters(s string) bool {
- for _, c := range s {
- switch c {
- case '(', ')', '<', '>', 0:
- return true
- }
- }
- return false
-}
-
-// NewUserId returns a UserId or nil if any of the arguments contain invalid
-// characters. The invalid characters are '\x00', '(', ')', '<' and '>'
-func NewUserId(name, comment, email string) *UserId {
- // RFC 4880 doesn't deal with the structure of userid strings; the
- // name, comment and email form is just a convention. However, there's
- // no convention about escaping the metacharacters and GPG just refuses
- // to create user ids where, say, the name contains a '('. We mirror
- // this behaviour.
-
- if hasInvalidCharacters(name) || hasInvalidCharacters(comment) || hasInvalidCharacters(email) {
- return nil
- }
-
- uid := new(UserId)
- uid.Name, uid.Comment, uid.Email = name, comment, email
- uid.Id = name
- if len(comment) > 0 {
- if len(uid.Id) > 0 {
- uid.Id += " "
- }
- uid.Id += "("
- uid.Id += comment
- uid.Id += ")"
- }
- if len(email) > 0 {
- if len(uid.Id) > 0 {
- uid.Id += " "
- }
- uid.Id += "<"
- uid.Id += email
- uid.Id += ">"
- }
- return uid
-}
-
-func (uid *UserId) parse(r io.Reader) (err error) {
- // RFC 4880, section 5.11
- b, err := ioutil.ReadAll(r)
- if err != nil {
- return
- }
- uid.Id = string(b)
- uid.Name, uid.Comment, uid.Email = parseUserId(uid.Id)
- return
-}
-
-// Serialize marshals uid to w in the form of an OpenPGP packet, including
-// header.
-func (uid *UserId) Serialize(w io.Writer) error {
- err := serializeHeader(w, packetTypeUserId, len(uid.Id))
- if err != nil {
- return err
- }
- _, err = w.Write([]byte(uid.Id))
- return err
-}
-
-// parseUserId extracts the name, comment and email from a user id string that
-// is formatted as "Full Name (Comment) ".
-func parseUserId(id string) (name, comment, email string) {
- var n, c, e struct {
- start, end int
- }
- var state int
-
- for offset, rune := range id {
- switch state {
- case 0:
- // Entering name
- n.start = offset
- state = 1
- fallthrough
- case 1:
- // In name
- if rune == '(' {
- state = 2
- n.end = offset
- } else if rune == '<' {
- state = 5
- n.end = offset
- }
- case 2:
- // Entering comment
- c.start = offset
- state = 3
- fallthrough
- case 3:
- // In comment
- if rune == ')' {
- state = 4
- c.end = offset
- }
- case 4:
- // Between comment and email
- if rune == '<' {
- state = 5
- }
- case 5:
- // Entering email
- e.start = offset
- state = 6
- fallthrough
- case 6:
- // In email
- if rune == '>' {
- state = 7
- e.end = offset
- }
- default:
- // After email
- }
- }
- switch state {
- case 1:
- // ended in the name
- n.end = len(id)
- case 3:
- // ended in comment
- c.end = len(id)
- case 6:
- // ended in email
- e.end = len(id)
- }
-
- name = strings.TrimSpace(id[n.start:n.end])
- comment = strings.TrimSpace(id[c.start:c.end])
- email = strings.TrimSpace(id[e.start:e.end])
-
- // RFC 2822 3.4: alternate simple form of a mailbox
- if email == "" && strings.ContainsRune(name, '@') {
- email = name
- name = ""
- }
-
- return
-}
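
A small sketch of the round-trip convention these two functions share (function name illustrative):

package sketch

import (
	"fmt"

	"github.com/ProtonMail/go-crypto/openpgp/packet"
)

// userIdExamples shows the convention parseUserId inverts: NewUserId
// assembles the "Name (Comment) <email>" form and returns nil if any field
// contains one of the metacharacters '(', ')', '<', '>' or NUL.
func userIdExamples() {
	uid := packet.NewUserId("Alice", "work", "alice@example.com")
	fmt.Println(uid.Id) // Alice (work) <alice@example.com>

	if packet.NewUserId("Bob (ops)", "", "") == nil {
		fmt.Println("rejected: name contains '('")
	}
}
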
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/read.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/read.go
deleted file mode 100644
index e910e184..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/read.go
+++ /dev/null
@@ -1,590 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package openpgp implements high-level operations on OpenPGP messages.
-package openpgp // import "github.com/ProtonMail/go-crypto/openpgp"
-
-import (
- "crypto"
- _ "crypto/sha256"
- _ "crypto/sha512"
- "hash"
- "io"
- "strconv"
-
- "github.com/ProtonMail/go-crypto/openpgp/armor"
- "github.com/ProtonMail/go-crypto/openpgp/errors"
- "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm"
- "github.com/ProtonMail/go-crypto/openpgp/packet"
- _ "golang.org/x/crypto/sha3"
-)
-
-// SignatureType is the armor type for a PGP signature.
-var SignatureType = "PGP SIGNATURE"
-
-// readArmored reads an armored block with the given type.
-func readArmored(r io.Reader, expectedType string) (body io.Reader, err error) {
- block, err := armor.Decode(r)
- if err != nil {
- return
- }
-
- if block.Type != expectedType {
- return nil, errors.InvalidArgumentError("expected '" + expectedType + "', got: " + block.Type)
- }
-
- return block.Body, nil
-}
-
-// MessageDetails contains the result of parsing an OpenPGP encrypted and/or
-// signed message.
-type MessageDetails struct {
- IsEncrypted bool // true if the message was encrypted.
- EncryptedToKeyIds []uint64 // the list of recipient key ids.
- IsSymmetricallyEncrypted bool // true if a passphrase could have decrypted the message.
- DecryptedWith Key // the private key used to decrypt the message, if any.
- IsSigned bool // true if the message is signed.
- SignedByKeyId uint64 // the key id of the signer, if any.
- SignedBy *Key // the key of the signer, if available.
- LiteralData *packet.LiteralData // the metadata of the contents
- UnverifiedBody io.Reader // the contents of the message.
-
- // If IsSigned is true and SignedBy is non-zero then the signature will
- // be verified as UnverifiedBody is read. The signature cannot be
- // checked until the whole of UnverifiedBody is read so UnverifiedBody
- // must be consumed until EOF before the data can be trusted. Even if a
- // message isn't signed (or the signer is unknown) the data may contain
- // an authentication code that is only checked once UnverifiedBody has
- // been consumed. Once EOF has been seen, the following fields are
- // valid. (An authentication code failure is reported as a
- // SignatureError error when reading from UnverifiedBody.)
- Signature *packet.Signature // the signature packet itself.
- SignatureError error // nil if the signature is good.
- UnverifiedSignatures []*packet.Signature // all other unverified signature packets.
-
- decrypted io.ReadCloser
-}
-
-// A PromptFunction is used as a callback by functions that may need to decrypt
-// a private key, or prompt for a passphrase. It is called with a list of
-// acceptable, encrypted private keys and a boolean that indicates whether a
-// passphrase is usable. It should either decrypt a private key or return a
-// passphrase to try. If the decrypted private key or given passphrase isn't
-// correct, the function will be called again, forever. Any error returned will
-// be passed up.
-type PromptFunction func(keys []Key, symmetric bool) ([]byte, error)
-
-// A keyEnvelopePair is used to store a private key with the envelope that
-// contains a symmetric key, encrypted with that key.
-type keyEnvelopePair struct {
- key Key
- encryptedKey *packet.EncryptedKey
-}
-
-// ReadMessage parses an OpenPGP message that may be signed and/or encrypted.
-// The given KeyRing should contain both public keys (for signature
-// verification) and, possibly encrypted, private keys for decrypting.
-// If config is nil, sensible defaults will be used.
-func ReadMessage(r io.Reader, keyring KeyRing, prompt PromptFunction, config *packet.Config) (md *MessageDetails, err error) {
- var p packet.Packet
-
- var symKeys []*packet.SymmetricKeyEncrypted
- var pubKeys []keyEnvelopePair
- // Integrity protected encrypted packet: SymmetricallyEncrypted or AEADEncrypted
- var edp packet.EncryptedDataPacket
-
- packets := packet.NewReader(r)
- md = new(MessageDetails)
- md.IsEncrypted = true
-
- // The message, if encrypted, starts with a number of packets
- // containing an encrypted decryption key. The decryption key is either
- // encrypted to a public key, or with a passphrase. This loop
- // collects these packets.
-ParsePackets:
- for {
- p, err = packets.Next()
- if err != nil {
- return nil, err
- }
- switch p := p.(type) {
- case *packet.SymmetricKeyEncrypted:
- // This packet contains the decryption key encrypted with a passphrase.
- md.IsSymmetricallyEncrypted = true
- symKeys = append(symKeys, p)
- case *packet.EncryptedKey:
- // This packet contains the decryption key encrypted to a public key.
- md.EncryptedToKeyIds = append(md.EncryptedToKeyIds, p.KeyId)
- switch p.Algo {
- case packet.PubKeyAlgoRSA, packet.PubKeyAlgoRSAEncryptOnly, packet.PubKeyAlgoElGamal, packet.PubKeyAlgoECDH:
- break
- default:
- continue
- }
- if keyring != nil {
- var keys []Key
- if p.KeyId == 0 {
- keys = keyring.DecryptionKeys()
- } else {
- keys = keyring.KeysById(p.KeyId)
- }
- for _, k := range keys {
- pubKeys = append(pubKeys, keyEnvelopePair{k, p})
- }
- }
- case *packet.SymmetricallyEncrypted:
- if !p.IntegrityProtected && !config.AllowUnauthenticatedMessages() {
- return nil, errors.UnsupportedError("message is not integrity protected")
- }
- edp = p
- break ParsePackets
- case *packet.AEADEncrypted:
- edp = p
- break ParsePackets
- case *packet.Compressed, *packet.LiteralData, *packet.OnePassSignature:
- // This message isn't encrypted.
- if len(symKeys) != 0 || len(pubKeys) != 0 {
- return nil, errors.StructuralError("key material not followed by encrypted message")
- }
- packets.Unread(p)
- return readSignedMessage(packets, nil, keyring, config)
- }
- }
-
- var candidates []Key
- var decrypted io.ReadCloser
-
- // Now that we have the list of encrypted keys we need to decrypt at
- // least one of them or, if we cannot, we need to call the prompt
- // function so that it can decrypt a key or give us a passphrase.
-FindKey:
- for {
- // See if any of the keys already have a private key available
- candidates = candidates[:0]
- candidateFingerprints := make(map[string]bool)
-
- for _, pk := range pubKeys {
- if pk.key.PrivateKey == nil {
- continue
- }
- if !pk.key.PrivateKey.Encrypted {
- if len(pk.encryptedKey.Key) == 0 {
- errDec := pk.encryptedKey.Decrypt(pk.key.PrivateKey, config)
- if errDec != nil {
- continue
- }
- }
- // Try to decrypt symmetrically encrypted
- decrypted, err = edp.Decrypt(pk.encryptedKey.CipherFunc, pk.encryptedKey.Key)
- if err != nil && err != errors.ErrKeyIncorrect {
- return nil, err
- }
- if decrypted != nil {
- md.DecryptedWith = pk.key
- break FindKey
- }
- } else {
- fpr := string(pk.key.PublicKey.Fingerprint[:])
- if v := candidateFingerprints[fpr]; v {
- continue
- }
- candidates = append(candidates, pk.key)
- candidateFingerprints[fpr] = true
- }
- }
-
- if len(candidates) == 0 && len(symKeys) == 0 {
- return nil, errors.ErrKeyIncorrect
- }
-
- if prompt == nil {
- return nil, errors.ErrKeyIncorrect
- }
-
- passphrase, err := prompt(candidates, len(symKeys) != 0)
- if err != nil {
- return nil, err
- }
-
- // Try the symmetric passphrase first
- if len(symKeys) != 0 && passphrase != nil {
- for _, s := range symKeys {
- key, cipherFunc, err := s.Decrypt(passphrase)
- // In v4, with a wrong passphrase, session key decryption is very likely to result in an invalid cipherFunc:
- // only in fewer than 5% of cases will we proceed to decrypt the data
- if err == nil {
- decrypted, err = edp.Decrypt(cipherFunc, key)
- if err != nil {
- return nil, err
- }
- if decrypted != nil {
- break FindKey
- }
- }
- }
- }
- }
-
- md.decrypted = decrypted
- if err := packets.Push(decrypted); err != nil {
- return nil, err
- }
- mdFinal, sensitiveParsingErr := readSignedMessage(packets, md, keyring, config)
- if sensitiveParsingErr != nil {
- return nil, errors.StructuralError("parsing error")
- }
- return mdFinal, nil
-}
-
-// readSignedMessage reads a possibly signed message. If mdin is non-nil, that
-// structure is updated and returned; otherwise a fresh MessageDetails is used.
-func readSignedMessage(packets *packet.Reader, mdin *MessageDetails, keyring KeyRing, config *packet.Config) (md *MessageDetails, err error) {
- if mdin == nil {
- mdin = new(MessageDetails)
- }
- md = mdin
-
- var p packet.Packet
- var h hash.Hash
- var wrappedHash hash.Hash
- var prevLast bool
-FindLiteralData:
- for {
- p, err = packets.Next()
- if err != nil {
- return nil, err
- }
- switch p := p.(type) {
- case *packet.Compressed:
- if err := packets.Push(p.Body); err != nil {
- return nil, err
- }
- case *packet.OnePassSignature:
- if prevLast {
- return nil, errors.UnsupportedError("nested signature packets")
- }
-
- if p.IsLast {
- prevLast = true
- }
-
- h, wrappedHash, err = hashForSignature(p.Hash, p.SigType)
- if err != nil {
- md.SignatureError = err
- }
-
- md.IsSigned = true
- md.SignedByKeyId = p.KeyId
- if keyring != nil {
- keys := keyring.KeysByIdUsage(p.KeyId, packet.KeyFlagSign)
- if len(keys) > 0 {
- md.SignedBy = &keys[0]
- }
- }
- case *packet.LiteralData:
- md.LiteralData = p
- break FindLiteralData
- }
- }
-
- if md.IsSigned && md.SignatureError == nil {
- md.UnverifiedBody = &signatureCheckReader{packets, h, wrappedHash, md, config}
- } else if md.decrypted != nil {
- md.UnverifiedBody = checkReader{md}
- } else {
- md.UnverifiedBody = md.LiteralData.Body
- }
-
- return md, nil
-}
-
-// hashForSignature returns a pair of hashes that can be used to verify a
-// signature. The signature may specify that the contents of the signed message
-// should be preprocessed (i.e. to normalize line endings). Thus this function
-// returns two hashes. The second should be used to hash the message itself and
-// performs any needed preprocessing.
-func hashForSignature(hashFunc crypto.Hash, sigType packet.SignatureType) (hash.Hash, hash.Hash, error) {
- if _, ok := algorithm.HashToHashIdWithSha1(hashFunc); !ok {
- return nil, nil, errors.UnsupportedError("unsupported hash function")
- }
- if !hashFunc.Available() {
- return nil, nil, errors.UnsupportedError("hash not available: " + strconv.Itoa(int(hashFunc)))
- }
- h := hashFunc.New()
-
- switch sigType {
- case packet.SigTypeBinary:
- return h, h, nil
- case packet.SigTypeText:
- return h, NewCanonicalTextHash(h), nil
- }
-
- return nil, nil, errors.UnsupportedError("unsupported signature type: " + strconv.Itoa(int(sigType)))
-}
-
-// checkReader wraps an io.Reader from a LiteralData packet. When it sees EOF
-// it closes the ReadCloser from any SymmetricallyEncrypted packet to trigger
-// MDC checks.
-type checkReader struct {
- md *MessageDetails
-}
-
-func (cr checkReader) Read(buf []byte) (int, error) {
- n, sensitiveParsingError := cr.md.LiteralData.Body.Read(buf)
- if sensitiveParsingError == io.EOF {
- mdcErr := cr.md.decrypted.Close()
- if mdcErr != nil {
- return n, mdcErr
- }
- return n, io.EOF
- }
-
- if sensitiveParsingError != nil {
- return n, errors.StructuralError("parsing error")
- }
-
- return n, nil
-}
-
-// signatureCheckReader wraps an io.Reader from a LiteralData packet and hashes
-// the data as it is read. When it sees an EOF from the underlying io.Reader
-// it parses and checks a trailing Signature packet and triggers any MDC checks.
-type signatureCheckReader struct {
- packets *packet.Reader
- h, wrappedHash hash.Hash
- md *MessageDetails
- config *packet.Config
-}
-
-func (scr *signatureCheckReader) Read(buf []byte) (int, error) {
- n, sensitiveParsingError := scr.md.LiteralData.Body.Read(buf)
-
- // Hash only if required
- if scr.md.SignedBy != nil {
- scr.wrappedHash.Write(buf[:n])
- }
-
- if sensitiveParsingError == io.EOF {
- var p packet.Packet
- var readError error
- var sig *packet.Signature
-
- p, readError = scr.packets.Next()
- for readError == nil {
- var ok bool
- if sig, ok = p.(*packet.Signature); ok {
- if sig.Version == 5 && (sig.SigType == 0x00 || sig.SigType == 0x01) {
- sig.Metadata = scr.md.LiteralData
- }
-
- // If signature KeyID matches
- if scr.md.SignedBy != nil && *sig.IssuerKeyId == scr.md.SignedByKeyId {
- key := scr.md.SignedBy
- signatureError := key.PublicKey.VerifySignature(scr.h, sig)
- if signatureError == nil {
- signatureError = checkSignatureDetails(key, sig, scr.config)
- }
- scr.md.Signature = sig
- scr.md.SignatureError = signatureError
- } else {
- scr.md.UnverifiedSignatures = append(scr.md.UnverifiedSignatures, sig)
- }
- }
-
- p, readError = scr.packets.Next()
- }
-
- if scr.md.SignedBy != nil && scr.md.Signature == nil {
- if scr.md.UnverifiedSignatures == nil {
- scr.md.SignatureError = errors.StructuralError("LiteralData not followed by signature")
- } else {
- scr.md.SignatureError = errors.StructuralError("No matching signature found")
- }
- }
-
- // The SymmetricallyEncrypted packet, if any, might have an
- // unsigned hash of its own. In order to check this we need to
- // close that Reader.
- if scr.md.decrypted != nil {
- mdcErr := scr.md.decrypted.Close()
- if mdcErr != nil {
- return n, mdcErr
- }
- }
- return n, io.EOF
- }
-
- if sensitiveParsingError != nil {
- return n, errors.StructuralError("parsing error")
- }
-
- return n, nil
-}
-
-// VerifyDetachedSignature takes a signed file and a detached signature and
-// returns the signature packet and the entity the signature was signed by,
-// if any, and a possible signature verification error.
-// If the signer isn't known, ErrUnknownIssuer is returned.
-func VerifyDetachedSignature(keyring KeyRing, signed, signature io.Reader, config *packet.Config) (sig *packet.Signature, signer *Entity, err error) {
- var expectedHashes []crypto.Hash
- return verifyDetachedSignature(keyring, signed, signature, expectedHashes, config)
-}
-
-// VerifyDetachedSignatureAndHash performs the same actions as
-// VerifyDetachedSignature and checks that the expected hash functions were used.
-func VerifyDetachedSignatureAndHash(keyring KeyRing, signed, signature io.Reader, expectedHashes []crypto.Hash, config *packet.Config) (sig *packet.Signature, signer *Entity, err error) {
- return verifyDetachedSignature(keyring, signed, signature, expectedHashes, config)
-}
-
-// CheckDetachedSignature takes a signed file and a detached signature and
-// returns the entity the signature was signed by, if any, and a possible
-// signature verification error. If the signer isn't known,
-// ErrUnknownIssuer is returned.
-func CheckDetachedSignature(keyring KeyRing, signed, signature io.Reader, config *packet.Config) (signer *Entity, err error) {
- var expectedHashes []crypto.Hash
- return CheckDetachedSignatureAndHash(keyring, signed, signature, expectedHashes, config)
-}
-
-// CheckDetachedSignatureAndHash performs the same actions as
-// CheckDetachedSignature and checks that the expected hash functions were used.
-func CheckDetachedSignatureAndHash(keyring KeyRing, signed, signature io.Reader, expectedHashes []crypto.Hash, config *packet.Config) (signer *Entity, err error) {
- _, signer, err = verifyDetachedSignature(keyring, signed, signature, expectedHashes, config)
- return
-}
-
-func verifyDetachedSignature(keyring KeyRing, signed, signature io.Reader, expectedHashes []crypto.Hash, config *packet.Config) (sig *packet.Signature, signer *Entity, err error) {
- var issuerKeyId uint64
- var hashFunc crypto.Hash
- var sigType packet.SignatureType
- var keys []Key
- var p packet.Packet
-
- expectedHashesLen := len(expectedHashes)
- packets := packet.NewReader(signature)
- for {
- p, err = packets.Next()
- if err == io.EOF {
- return nil, nil, errors.ErrUnknownIssuer
- }
- if err != nil {
- return nil, nil, err
- }
-
- var ok bool
- sig, ok = p.(*packet.Signature)
- if !ok {
- return nil, nil, errors.StructuralError("non signature packet found")
- }
- if sig.IssuerKeyId == nil {
- return nil, nil, errors.StructuralError("signature doesn't have an issuer")
- }
- issuerKeyId = *sig.IssuerKeyId
- hashFunc = sig.Hash
- sigType = sig.SigType
-
- for i, expectedHash := range expectedHashes {
- if hashFunc == expectedHash {
- break
- }
- if i+1 == expectedHashesLen {
- return nil, nil, errors.StructuralError("hash algorithm mismatch with cleartext message headers")
- }
- }
-
- keys = keyring.KeysByIdUsage(issuerKeyId, packet.KeyFlagSign)
- if len(keys) > 0 {
- break
- }
- }
-
- if len(keys) == 0 {
- panic("unreachable")
- }
-
- h, wrappedHash, err := hashForSignature(hashFunc, sigType)
- if err != nil {
- return nil, nil, err
- }
-
- if _, err := io.Copy(wrappedHash, signed); err != nil && err != io.EOF {
- return nil, nil, err
- }
-
- for _, key := range keys {
- err = key.PublicKey.VerifySignature(h, sig)
- if err == nil {
- return sig, key.Entity, checkSignatureDetails(&key, sig, config)
- }
- }
-
- return nil, nil, err
-}
-
-// CheckArmoredDetachedSignature performs the same actions as
-// CheckDetachedSignature but expects the signature to be armored.
-func CheckArmoredDetachedSignature(keyring KeyRing, signed, signature io.Reader, config *packet.Config) (signer *Entity, err error) {
- body, err := readArmored(signature, SignatureType)
- if err != nil {
- return
- }
-
- return CheckDetachedSignature(keyring, signed, body, config)
-}
-
-// checkSignatureDetails returns an error if:
-// - The signature (or one of the binding signatures mentioned below)
-// has an unknown critical notation data subpacket
-// - The primary key of the signing entity is revoked
-// The signature was signed by a subkey and:
-// - The signing subkey is revoked
-// - The primary identity is revoked
-// - The signature is expired
-// - The primary key of the signing entity is expired according to the
-// primary identity binding signature
-// The signature was signed by a subkey and:
-// - The signing subkey is expired according to the subkey binding signature
-// - The signing subkey binding signature is expired
-// - The signing subkey cross-signature is expired
-// NOTE: The order of these checks is important, as the caller may choose to
-// ignore ErrSignatureExpired or ErrKeyExpired errors, but should never
-// ignore any other errors.
-// TODO: Also return an error if:
-// - The primary key is expired according to a direct-key signature
-// - (For V5 keys only:) The direct-key signature (exists and) is expired
-func checkSignatureDetails(key *Key, signature *packet.Signature, config *packet.Config) error {
- now := config.Now()
- primaryIdentity := key.Entity.PrimaryIdentity()
- signedBySubKey := key.PublicKey != key.Entity.PrimaryKey
- sigsToCheck := []*packet.Signature{signature, primaryIdentity.SelfSignature}
- if signedBySubKey {
- sigsToCheck = append(sigsToCheck, key.SelfSignature, key.SelfSignature.EmbeddedSignature)
- }
- for _, sig := range sigsToCheck {
- for _, notation := range sig.Notations {
- if notation.IsCritical && !config.KnownNotation(notation.Name) {
- return errors.SignatureError("unknown critical notation: " + notation.Name)
- }
- }
- }
- if key.Entity.Revoked(now) || // primary key is revoked
- (signedBySubKey && key.Revoked(now)) || // subkey is revoked
- primaryIdentity.Revoked(now) { // primary identity is revoked
- return errors.ErrKeyRevoked
- }
- if key.Entity.PrimaryKey.KeyExpired(primaryIdentity.SelfSignature, now) { // primary key is expired
- return errors.ErrKeyExpired
- }
- if signedBySubKey {
- if key.PublicKey.KeyExpired(key.SelfSignature, now) { // subkey is expired
- return errors.ErrKeyExpired
- }
- }
- for _, sig := range sigsToCheck {
- if sig.SigExpired(now) { // any of the relevant signatures are expired
- return errors.ErrSignatureExpired
- }
- }
- return nil
-}
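
A hedged usage sketch of ReadMessage following the MessageDetails contract documented above (helper name illustrative):

package sketch

import (
	"fmt"
	"io"

	"github.com/ProtonMail/go-crypto/openpgp"
)

// readAndVerify reads UnverifiedBody to EOF before consulting
// Signature/SignatureError, because verification happens as the body is
// consumed; reading to EOF also triggers any MDC/AEAD checks.
func readAndVerify(msg io.Reader, keyring openpgp.KeyRing) ([]byte, error) {
	md, err := openpgp.ReadMessage(msg, keyring, nil, nil)
	if err != nil {
		return nil, err
	}
	body, err := io.ReadAll(md.UnverifiedBody)
	if err != nil {
		return nil, err
	}
	if md.IsSigned && md.SignatureError != nil {
		return nil, fmt.Errorf("invalid signature: %w", md.SignatureError)
	}
	return body, nil
}
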
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/read_write_test_data.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/read_write_test_data.go
deleted file mode 100644
index db6dad5c..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/read_write_test_data.go
+++ /dev/null
@@ -1,274 +0,0 @@
-package openpgp
-
-const testKey1KeyId uint64 = 0xA34D7E18C20C31BB
-const testKey3KeyId uint64 = 0x338934250CCC0360
-const testKeyP256KeyId uint64 = 0xd44a2c495918513e
-
-const signedInput = "Signed message\nline 2\nline 3\n"
-const signedTextInput = "Signed message\r\nline 2\r\nline 3\r\n"
-
-const recipientUnspecifiedHex = "848c0300000000000000000103ff62d4d578d03cf40c3da998dfe216c074fa6ddec5e31c197c9666ba292830d91d18716a80f699f9d897389a90e6d62d0238f5f07a5248073c0f24920e4bc4a30c2d17ee4e0cae7c3d4aaa4e8dced50e3010a80ee692175fa0385f62ecca4b56ee6e9980aa3ec51b61b077096ac9e800edaf161268593eedb6cc7027ff5cb32745d250010d407a6221ae22ef18469b444f2822478c4d190b24d36371a95cb40087cdd42d9399c3d06a53c0673349bfb607927f20d1e122bde1e2bf3aa6cae6edf489629bcaa0689539ae3b718914d88ededc3b"
-
-const detachedSignatureHex = "889c04000102000605024d449cd1000a0910a34d7e18c20c31bb167603ff57718d09f28a519fdc7b5a68b6a3336da04df85e38c5cd5d5bd2092fa4629848a33d85b1729402a2aab39c3ac19f9d573f773cc62c264dc924c067a79dfd8a863ae06c7c8686120760749f5fd9b1e03a64d20a7df3446ddc8f0aeadeaeba7cbaee5c1e366d65b6a0c6cc749bcb912d2f15013f812795c2e29eb7f7b77f39ce77"
-
-const detachedSignatureTextHex = "889c04010102000605024d449d21000a0910a34d7e18c20c31bbc8c60400a24fbef7342603a41cb1165767bd18985d015fb72fe05db42db36cfb2f1d455967f1e491194fbf6cf88146222b23bf6ffbd50d17598d976a0417d3192ff9cc0034fd00f287b02e90418bbefe609484b09231e4e7a5f3562e199bf39909ab5276c4d37382fe088f6b5c3426fc1052865da8b3ab158672d58b6264b10823dc4b39"
-
-const detachedSignatureDSAHex = "884604001102000605024d6c4eac000a0910338934250ccc0360f18d00a087d743d6405ed7b87755476629600b8b694a39e900a0abff8126f46faf1547c1743c37b21b4ea15b8f83"
-
-const detachedSignatureP256Hex = "885e0400130a0006050256e5bb00000a0910d44a2c495918513edef001009841a4f792beb0befccb35c8838a6a87d9b936beaa86db6745ddc7b045eee0cf00fd1ac1f78306b17e965935dd3f8bae4587a76587e4af231efe19cc4011a8434817"
-
-// The plaintext is https://www.gutenberg.org/cache/epub/1080/pg1080.txt
-const modestProposalSha512 = "lbbrB1+WP3T9AaC9OQqBdOcCjgeEQadlulXsNPgVx0tyqPzDHwUugZ2gE7V0ESKAw6kAVfgkcuvfgxAAGaeHtw=="
-
-const testKeys1And2Hex = "988d044d3c5c10010400b1d13382944bd5aba23a4312968b5095d14f947f600eb478e14a6fcb16b0e0cac764884909c020bc495cfcc39a935387c661507bdb236a0612fb582cac3af9b29cc2c8c70090616c41b662f4da4c1201e195472eb7f4ae1ccbcbf9940fe21d985e379a5563dde5b9a23d35f1cfaa5790da3b79db26f23695107bfaca8e7b5bcd0011010001b41054657374204b6579203120285253412988b804130102002205024d3c5c10021b03060b090807030206150802090a0b0416020301021e01021780000a0910a34d7e18c20c31bbb5b304009cc45fe610b641a2c146331be94dade0a396e73ca725e1b25c21708d9cab46ecca5ccebc23055879df8f99eea39b377962a400f2ebdc36a7c99c333d74aeba346315137c3ff9d0a09b0273299090343048afb8107cf94cbd1400e3026f0ccac7ecebbc4d78588eb3e478fe2754d3ca664bcf3eac96ca4a6b0c8d7df5102f60f6b0020003b88d044d3c5c10010400b201df61d67487301f11879d514f4248ade90c8f68c7af1284c161098de4c28c2850f1ec7b8e30f959793e571542ffc6532189409cb51c3d30dad78c4ad5165eda18b20d9826d8707d0f742e2ab492103a85bbd9ddf4f5720f6de7064feb0d39ee002219765bb07bcfb8b877f47abe270ddeda4f676108cecb6b9bb2ad484a4f0011010001889f04180102000905024d3c5c10021b0c000a0910a34d7e18c20c31bb1a03040085c8d62e16d05dc4e9dad64953c8a2eed8b6c12f92b1575eeaa6dcf7be9473dd5b24b37b6dffbb4e7c99ed1bd3cb11634be19b3e6e207bed7505c7ca111ccf47cb323bf1f8851eb6360e8034cbff8dd149993c959de89f8f77f38e7e98b8e3076323aa719328e2b408db5ec0d03936efd57422ba04f925cdc7b4c1af7590e40ab0020003988d044d3c5c33010400b488c3e5f83f4d561f317817538d9d0397981e9aef1321ca68ebfae1cf8b7d388e19f4b5a24a82e2fbbf1c6c26557a6c5845307a03d815756f564ac7325b02bc83e87d5480a8fae848f07cb891f2d51ce7df83dcafdc12324517c86d472cc0ee10d47a68fd1d9ae49a6c19bbd36d82af597a0d88cc9c49de9df4e696fc1f0b5d0011010001b42754657374204b6579203220285253412c20656e637279707465642070726976617465206b65792988b804130102002205024d3c5c33021b03060b090807030206150802090a0b0416020301021e01021780000a0910d4984f961e35246b98940400908a73b6a6169f700434f076c6c79015a49bee37130eaf23aaa3cfa9ce60bfe4acaa7bc95f1146ada5867e0079babb38804891f4f0b8ebca57a86b249dee786161a755b7a342e68ccf3f78ed6440a93a6626beb9a37aa66afcd4f888790cb4bb46d94a4ae3eb3d7d3e6b00f6bfec940303e89ec5b32a1eaaacce66497d539328b0020003b88d044d3c5c33010400a4e913f9442abcc7f1804ccab27d2f787ffa592077ca935a8bb23165bd8d57576acac647cc596b2c3f814518cc8c82953c7a4478f32e0cf645630a5ba38d9618ef2bc3add69d459ae3dece5cab778938d988239f8c5ae437807075e06c828019959c644ff05ef6a5a1dab72227c98e3a040b0cf219026640698d7a13d8538a570011010001889f04180102000905024d3c5c33021b0c000a0910d4984f961e35246b26c703ff7ee29ef53bc1ae1ead533c408fa136db508434e233d6e62be621e031e5940bbd4c08142aed0f82217e7c3e1ec8de574bc06ccf3c36633be41ad78a9eacd209f861cae7b064100758545cc9dd83db71806dc1cfd5fb9ae5c7474bba0c19c44034ae61bae5eca379383339dece94ff56ff7aa44a582f3e5c38f45763af577c0934b0020003"
-
-const testKeys1And2PrivateHex = "9501d8044d3c5c10010400b1d13382944bd5aba23a4312968b5095d14f947f600eb478e14a6fcb16b0e0cac764884909c020bc495cfcc39a935387c661507bdb236a0612fb582cac3af9b29cc2c8c70090616c41b662f4da4c1201e195472eb7f4ae1ccbcbf9940fe21d985e379a5563dde5b9a23d35f1cfaa5790da3b79db26f23695107bfaca8e7b5bcd00110100010003ff4d91393b9a8e3430b14d6209df42f98dc927425b881f1209f319220841273a802a97c7bdb8b3a7740b3ab5866c4d1d308ad0d3a79bd1e883aacf1ac92dfe720285d10d08752a7efe3c609b1d00f17f2805b217be53999a7da7e493bfc3e9618fd17018991b8128aea70a05dbce30e4fbe626aa45775fa255dd9177aabf4df7cf0200c1ded12566e4bc2bb590455e5becfb2e2c9796482270a943343a7835de41080582c2be3caf5981aa838140e97afa40ad652a0b544f83eb1833b0957dce26e47b0200eacd6046741e9ce2ec5beb6fb5e6335457844fb09477f83b050a96be7da043e17f3a9523567ed40e7a521f818813a8b8a72209f1442844843ccc7eb9805442570200bdafe0438d97ac36e773c7162028d65844c4d463e2420aa2228c6e50dc2743c3d6c72d0d782a5173fe7be2169c8a9f4ef8a7cf3e37165e8c61b89c346cdc6c1799d2b41054657374204b6579203120285253412988b804130102002205024d3c5c10021b03060b090807030206150802090a0b0416020301021e01021780000a0910a34d7e18c20c31bbb5b304009cc45fe610b641a2c146331be94dade0a396e73ca725e1b25c21708d9cab46ecca5ccebc23055879df8f99eea39b377962a400f2ebdc36a7c99c333d74aeba346315137c3ff9d0a09b0273299090343048afb8107cf94cbd1400e3026f0ccac7ecebbc4d78588eb3e478fe2754d3ca664bcf3eac96ca4a6b0c8d7df5102f60f6b00200009d01d8044d3c5c10010400b201df61d67487301f11879d514f4248ade90c8f68c7af1284c161098de4c28c2850f1ec7b8e30f959793e571542ffc6532189409cb51c3d30dad78c4ad5165eda18b20d9826d8707d0f742e2ab492103a85bbd9ddf4f5720f6de7064feb0d39ee002219765bb07bcfb8b877f47abe270ddeda4f676108cecb6b9bb2ad484a4f00110100010003fd17a7490c22a79c59281fb7b20f5e6553ec0c1637ae382e8adaea295f50241037f8997cf42c1ce26417e015091451b15424b2c59eb8d4161b0975630408e394d3b00f88d4b4e18e2cc85e8251d4753a27c639c83f5ad4a571c4f19d7cd460b9b73c25ade730c99df09637bd173d8e3e981ac64432078263bb6dc30d3e974150dd0200d0ee05be3d4604d2146fb0457f31ba17c057560785aa804e8ca5530a7cd81d3440d0f4ba6851efcfd3954b7e68908fc0ba47f7ac37bf559c6c168b70d3a7c8cd0200da1c677c4bce06a068070f2b3733b0a714e88d62aa3f9a26c6f5216d48d5c2b5624144f3807c0df30be66b3268eeeca4df1fbded58faf49fc95dc3c35f134f8b01fd1396b6c0fc1b6c4f0eb8f5e44b8eace1e6073e20d0b8bc5385f86f1cf3f050f66af789f3ef1fc107b7f4421e19e0349c730c68f0a226981f4e889054fdb4dc149e8e889f04180102000905024d3c5c10021b0c000a0910a34d7e18c20c31bb1a03040085c8d62e16d05dc4e9dad64953c8a2eed8b6c12f92b1575eeaa6dcf7be9473dd5b24b37b6dffbb4e7c99ed1bd3cb11634be19b3e6e207bed7505c7ca111ccf47cb323bf1f8851eb6360e8034cbff8dd149993c959de89f8f77f38e7e98b8e3076323aa719328e2b408db5ec0d03936efd57422ba04f925cdc7b4c1af7590e40ab00200009501fe044d3c5c33010400b488c3e5f83f4d561f317817538d9d0397981e9aef1321ca68ebfae1cf8b7d388e19f4b5a24a82e2fbbf1c6c26557a6c5845307a03d815756f564ac7325b02bc83e87d5480a8fae848f07cb891f2d51ce7df83dcafdc12324517c86d472cc0ee10d47a68fd1d9ae49a6c19bbd36d82af597a0d88cc9c49de9df4e696fc1f0b5d0011010001fe030302e9030f3c783e14856063f16938530e148bc57a7aa3f3e4f90df9dceccdc779bc0835e1ad3d006e4a8d7b36d08b8e0de5a0d947254ecfbd22037e6572b426bcfdc517796b224b0036ff90bc574b5509bede85512f2eefb520fb4b02aa523ba739bff424a6fe81c5041f253f8d757e69a503d3563a104d0d49e9e890b9d0c26f96b55b743883b472caa7050c4acfd4a21f875bdf1258d88bd61224d303dc9df77f743137d51e6d5246b88c406780528fd9a3e15bab5452e5b93970d9dcc79f48b38651b9f15bfbcf6da452837e9cc70683d1bdca94507870f743e4ad902005812488dd342f836e72869afd00ce1850eea4cfa53ce10e3608e13d3c149394ee3cbd0e23d018fcbcb6e2ec5a1a22972d1d462ca05355d0d290dd2751e550d5efb38c6c8
9686344df64852bf4ff86638708f644e8ec6bd4af9b50d8541cb91891a431326ab2e332faa7ae86cfb6e0540aa63160c1e5cdd5a4add518b303fff0a20117c6bc77f7cfbaf36b04c865c6c2b42754657374204b6579203220285253412c20656e637279707465642070726976617465206b65792988b804130102002205024d3c5c33021b03060b090807030206150802090a0b0416020301021e01021780000a0910d4984f961e35246b98940400908a73b6a6169f700434f076c6c79015a49bee37130eaf23aaa3cfa9ce60bfe4acaa7bc95f1146ada5867e0079babb38804891f4f0b8ebca57a86b249dee786161a755b7a342e68ccf3f78ed6440a93a6626beb9a37aa66afcd4f888790cb4bb46d94a4ae3eb3d7d3e6b00f6bfec940303e89ec5b32a1eaaacce66497d539328b00200009d01fe044d3c5c33010400a4e913f9442abcc7f1804ccab27d2f787ffa592077ca935a8bb23165bd8d57576acac647cc596b2c3f814518cc8c82953c7a4478f32e0cf645630a5ba38d9618ef2bc3add69d459ae3dece5cab778938d988239f8c5ae437807075e06c828019959c644ff05ef6a5a1dab72227c98e3a040b0cf219026640698d7a13d8538a570011010001fe030302e9030f3c783e148560f936097339ae381d63116efcf802ff8b1c9360767db5219cc987375702a4123fd8657d3e22700f23f95020d1b261eda5257e9a72f9a918e8ef22dd5b3323ae03bbc1923dd224db988cadc16acc04b120a9f8b7e84da9716c53e0334d7b66586ddb9014df604b41be1e960dcfcbc96f4ed150a1a0dd070b9eb14276b9b6be413a769a75b519a53d3ecc0c220e85cd91ca354d57e7344517e64b43b6e29823cbd87eae26e2b2e78e6dedfbb76e3e9f77bcb844f9a8932eb3db2c3f9e44316e6f5d60e9e2a56e46b72abe6b06dc9a31cc63f10023d1f5e12d2a3ee93b675c96f504af0001220991c88db759e231b3320dcedf814dcf723fd9857e3d72d66a0f2af26950b915abdf56c1596f46a325bf17ad4810d3535fb02a259b247ac3dbd4cc3ecf9c51b6c07cebb009c1506fba0a89321ec8683e3fd009a6e551d50243e2d5092fefb3321083a4bad91320dc624bd6b5dddf93553e3d53924c05bfebec1fb4bd47e89a1a889f04180102000905024d3c5c33021b0c000a0910d4984f961e35246b26c703ff7ee29ef53bc1ae1ead533c408fa136db508434e233d6e62be621e031e5940bbd4c08142aed0f82217e7c3e1ec8de574bc06ccf3c36633be41ad78a9eacd209f861cae7b064100758545cc9dd83db71806dc1cfd5fb9ae5c7474bba0c19c44034ae61bae5eca379383339dece94ff56ff7aa44a582f3e5c38f45763af577c0934b0020000"
-
-const dsaElGamalTestKeysHex = "9501e1044dfcb16a110400aa3e5c1a1f43dd28c2ffae8abf5cfce555ee874134d8ba0a0f7b868ce2214beddc74e5e1e21ded354a95d18acdaf69e5e342371a71fbb9093162e0c5f3427de413a7f2c157d83f5cd2f9d791256dc4f6f0e13f13c3302af27f2384075ab3021dff7a050e14854bbde0a1094174855fc02f0bae8e00a340d94a1f22b32e48485700a0cec672ac21258fb95f61de2ce1af74b2c4fa3e6703ff698edc9be22c02ae4d916e4fa223f819d46582c0516235848a77b577ea49018dcd5e9e15cff9dbb4663a1ae6dd7580fa40946d40c05f72814b0f88481207e6c0832c3bded4853ebba0a7e3bd8e8c66df33d5a537cd4acf946d1080e7a3dcea679cb2b11a72a33a2b6a9dc85f466ad2ddf4c3db6283fa645343286971e3dd700703fc0c4e290d45767f370831a90187e74e9972aae5bff488eeff7d620af0362bfb95c1a6c3413ab5d15a2e4139e5d07a54d72583914661ed6a87cce810be28a0aa8879a2dd39e52fb6fe800f4f181ac7e328f740cde3d09a05cecf9483e4cca4253e60d4429ffd679d9996a520012aad119878c941e3cf151459873bdfc2a9563472fe0303027a728f9feb3b864260a1babe83925ce794710cfd642ee4ae0e5b9d74cee49e9c67b6cd0ea5dfbb582132195a121356a1513e1bca73e5b80c58c7ccb4164453412f456c47616d616c2054657374204b65792031886204131102002205024dfcb16a021b03060b090807030206150802090a0b0416020301021e01021780000a091033af447ccd759b09fadd00a0b8fd6f5a790bad7e9f2dbb7632046dc4493588db009c087c6a9ba9f7f49fab221587a74788c00db4889ab00200009d0157044dfcb16a1004008dec3f9291205255ccff8c532318133a6840739dd68b03ba942676f9038612071447bf07d00d559c5c0875724ea16a4c774f80d8338b55fca691a0522e530e604215b467bbc9ccfd483a1da99d7bc2648b4318fdbd27766fc8bfad3fddb37c62b8ae7ccfe9577e9b8d1e77c1d417ed2c2ef02d52f4da11600d85d3229607943700030503ff506c94c87c8cab778e963b76cf63770f0a79bf48fb49d3b4e52234620fc9f7657f9f8d56c96a2b7c7826ae6b57ebb2221a3fe154b03b6637cea7e6d98e3e45d87cf8dc432f723d3d71f89c5192ac8d7290684d2c25ce55846a80c9a7823f6acd9bb29fa6cd71f20bc90eccfca20451d0c976e460e672b000df49466408d527affe0303027a728f9feb3b864260abd761730327bca2aaa4ea0525c175e92bf240682a0e83b226f97ecb2e935b62c9a133858ce31b271fa8eb41f6a1b3cd72a63025ce1a75ee4180dcc284884904181102000905024dfcb16a021b0c000a091033af447ccd759b09dd0b009e3c3e7296092c81bee5a19929462caaf2fff3ae26009e218c437a2340e7ea628149af1ec98ec091a43992b00200009501e1044dfcb1be1104009f61faa61aa43df75d128cbe53de528c4aec49ce9360c992e70c77072ad5623de0a3a6212771b66b39a30dad6781799e92608316900518ec01184a85d872365b7d2ba4bacfb5882ea3c2473d3750dc6178cc1cf82147fb58caa28b28e9f12f6d1efcb0534abed644156c91cca4ab78834268495160b2400bc422beb37d237c2300a0cac94911b6d493bda1e1fbc6feeca7cb7421d34b03fe22cec6ccb39675bb7b94a335c2b7be888fd3906a1125f33301d8aa6ec6ee6878f46f73961c8d57a3e9544d8ef2a2cbfd4d52da665b1266928cfe4cb347a58c412815f3b2d2369dec04b41ac9a71cc9547426d5ab941cccf3b18575637ccfb42df1a802df3cfe0a999f9e7109331170e3a221991bf868543960f8c816c28097e503fe319db10fb98049f3a57d7c80c420da66d56f3644371631fad3f0ff4040a19a4fedc2d07727a1b27576f75a4d28c47d8246f27071e12d7a8de62aad216ddbae6aa02efd6b8a3e2818cda48526549791ab277e447b3a36c57cefe9b592f5eab73959743fcc8e83cbefec03a329b55018b53eec196765ae40ef9e20521a603c551efe0303020950d53a146bf9c66034d00c23130cce95576a2ff78016ca471276e8227fb30b1ffbd92e61804fb0c3eff9e30b1a826ee8f3e4730b4d86273ca977b4164453412f456c47616d616c2054657374204b65792032886204131102002205024dfcb1be021b03060b090807030206150802090a0b0416020301021e01021780000a0910a86bf526325b21b22bd9009e34511620415c974750a20df5cb56b182f3b48e6600a0a9466cb1a1305a84953445f77d461593f1d42bc1b00200009d0157044dfcb1be1004009565a951da1ee87119d600c077198f1c1bceb0f7aa54552489298e41ff788fa8f0d43a69871f0f6f77ebdfb14a4260cf9fbeb65d5844b4272a1904dd95136d06c3da745dc46327dd44a0f16f60135914368c8039a34033862261806bb2c5ce1152e28402546
97872c85441ccb7321431d75a747a4bfb1d2c66362b51ce76311700030503fc0ea76601c196768070b7365a200e6ddb09307f262d5f39eec467b5f5784e22abdf1aa49226f59ab37cb49969d8f5230ea65caf56015abda62604544ed526c5c522bf92bed178a078789f6c807b6d34885688024a5bed9e9f8c58d11d4b82487b44c5f470c5606806a0443b79cadb45e0f897a561a53f724e5349b9267c75ca17fe0303020950d53a146bf9c660bc5f4ce8f072465e2d2466434320c1e712272fafc20e342fe7608101580fa1a1a367e60486a7cd1246b7ef5586cf5e10b32762b710a30144f12dd17dd4884904181102000905024dfcb1be021b0c000a0910a86bf526325b21b2904c00a0b2b66b4b39ccffda1d10f3ea8d58f827e30a8b8e009f4255b2d8112a184e40cde43a34e8655ca7809370b0020000"
-
-const signedMessageHex = "a3019bc0cbccc0c4b8d8b74ee2108fe16ec6d3ca490cbe362d3f8333d3f352531472538b8b13d353b97232f352158c20943157c71c16064626063656269052062e4e01987e9b6fccff4b7df3a34c534b23e679cbec3bc0f8f6e64dfb4b55fe3f8efa9ce110ddb5cd79faf1d753c51aecfa669f7e7aa043436596cccc3359cb7dd6bbe9ecaa69e5989d9e57209571edc0b2fa7f57b9b79a64ee6e99ce1371395fee92fec2796f7b15a77c386ff668ee27f6d38f0baa6c438b561657377bf6acff3c5947befd7bf4c196252f1d6e5c524d0300"
-
-const signedTextMessageHex = "a3019bc0cbccc8c4b8d8b74ee2108fe16ec6d36a250cbece0c178233d3f352531472538b8b13d35379b97232f352158ca0b4312f57c71c1646462606365626906a062e4e019811591798ff99bf8afee860b0d8a8c2a85c3387e3bcf0bb3b17987f2bbcfab2aa526d930cbfd3d98757184df3995c9f3e7790e36e3e9779f06089d4c64e9e47dd6202cb6e9bc73c5d11bb59fbaf89d22d8dc7cf199ddf17af96e77c5f65f9bbed56f427bd8db7af37f6c9984bf9385efaf5f184f986fb3e6adb0ecfe35bbf92d16a7aa2a344fb0bc52fb7624f0200"
-
-const signedEncryptedMessageHex = "c18c032a67d68660df41c70103ff5a84c9a72f80e74ef0384c2d6a9ebfe2b09e06a8f298394f6d2abf174e40934ab0ec01fb2d0ddf21211c6fe13eb238563663b017a6b44edca552eb4736c4b7dc6ed907dd9e12a21b51b64b46f902f76fb7aaf805c1db8070574d8d0431a23e324a750f77fb72340a17a42300ee4ca8207301e95a731da229a63ab9c6b44541fbd2c11d016d810b3b3b2b38f15b5b40f0a4910332829c2062f1f7cc61f5b03677d73c54cafa1004ced41f315d46444946faae571d6f426e6dbd45d9780eb466df042005298adabf7ce0ef766dfeb94cd449c7ed0046c880339599c4711af073ce649b1e237c40b50a5536283e03bdbb7afad78bd08707715c67fb43295f905b4c479178809d429a8e167a9a8c6dfd8ab20b4edebdc38d6dec879a3202e1b752690d9bb5b0c07c5a227c79cc200e713a99251a4219d62ad5556900cf69bd384b6c8e726c7be267471d0d23af956da165af4af757246c2ebcc302b39e8ef2fccb4971b234fcda22d759ddb20e27269ee7f7fe67898a9de721bfa02ab0becaa046d00ea16cb1afc4e2eab40d0ac17121c565686e5cbd0cbdfbd9d6db5c70278b9c9db5a83176d04f61fbfbc4471d721340ede2746e5c312ded4f26787985af92b64fae3f253dbdde97f6a5e1996fd4d865599e32ff76325d3e9abe93184c02988ee89a4504356a4ef3b9b7a57cbb9637ca90af34a7676b9ef559325c3cca4e29d69fec1887f5440bb101361d744ad292a8547f22b4f22b419a42aa836169b89190f46d9560824cb2ac6e8771de8223216a5e647e132ab9eebcba89569ab339cb1c3d70fe806b31f4f4c600b4103b8d7583ebff16e43dcda551e6530f975122eb8b29"
-
-const verifiedSignatureEncryptedMessageHex = "c2b304000108000605026048f6d600210910a34d7e18c20c31bb1621045fb74b1d03b1e3cb31bc2f8aa34d7e18c20c31bb9a3b0400a32ddac1af259c1b0abab0041327ea04970944401978fb647dd1cf9aba4f164e43f0d8a9389501886474bdd4a6e77f6aea945c07dfbf87743835b44cc2c39a1f9aeecfa83135abc92e18e50396f2e6a06c44e0188b0081effbfb4160d28f118d4ff73dd199a102e47cffd8c7ff2bacd83ae72b5820c021a486766dd587b5da61"
-
-const unverifiedSignatureEncryptedMessageHex = "c2b304000108000605026048f6d600210910a34d7e18c20c31bb1621045fb74b1d03b1e3cb31bc2f8aa34d7e18c20c31bb9a3b0400a32ddac1af259c1b0abab0041327ea04970944401978fb647dd1cf9aba4f164e43f0d8a9389501886474bdd4a6e77f6aea945c07dfbf87743835b44cc2c39a1f9aeecfa83135abc92e18e50396f2e6a06c44e0188b0081effbfb4160d28f118d4ff73dd199a102e47cffd8c7ff2bacd83ae72b5820c021a486766dd587b5da61"
-
-const signedEncryptedMessage2Hex = "85010e03cf6a7abcd43e36731003fb057f5495b79db367e277cdbe4ab90d924ddee0c0381494112ff8c1238fb0184af35d1731573b01bc4c55ecacd2aafbe2003d36310487d1ecc9ac994f3fada7f9f7f5c3a64248ab7782906c82c6ff1303b69a84d9a9529c31ecafbcdb9ba87e05439897d87e8a2a3dec55e14df19bba7f7bd316291c002ae2efd24f83f9e3441203fc081c0c23dc3092a454ca8a082b27f631abf73aca341686982e8fbda7e0e7d863941d68f3de4a755c2964407f4b5e0477b3196b8c93d551dd23c8beef7d0f03fbb1b6066f78907faf4bf1677d8fcec72651124080e0b7feae6b476e72ab207d38d90b958759fdedfc3c6c35717c9dbfc979b3cfbbff0a76d24a5e57056bb88acbd2a901ef64bc6e4db02adc05b6250ff378de81dca18c1910ab257dff1b9771b85bb9bbe0a69f5989e6d1710a35e6dfcceb7d8fb5ccea8db3932b3d9ff3fe0d327597c68b3622aec8e3716c83a6c93f497543b459b58ba504ed6bcaa747d37d2ca746fe49ae0a6ce4a8b694234e941b5159ff8bd34b9023da2814076163b86f40eed7c9472f81b551452d5ab87004a373c0172ec87ea6ce42ccfa7dbdad66b745496c4873d8019e8c28d6b3"
-
-const signatureEncryptedMessage2Hex = "c24604001102000605024dfd0166000a091033af447ccd759b09bae600a096ec5e63ecf0a403085e10f75cc3bab327663282009f51fad9df457ed8d2b70d8a73c76e0443eac0f377"
-
-const symmetricallyEncryptedCompressedHex = "c32e040903085a357c1a7b5614ed00cc0d1d92f428162058b3f558a0fb0980d221ebac6c97d5eda4e0fe32f6e706e94dd263012d6ca1ef8c4bbd324098225e603a10c85ebf09cbf7b5aeeb5ce46381a52edc51038b76a8454483be74e6dcd1e50d5689a8ae7eceaeefed98a0023d49b22eb1f65c2aa1ef1783bb5e1995713b0457102ec3c3075fe871267ffa4b686ad5d52000d857"
-
-const dsaTestKeyHex = "9901a2044d6c49de110400cb5ce438cf9250907ac2ba5bf6547931270b89f7c4b53d9d09f4d0213a5ef2ec1f26806d3d259960f872a4a102ef1581ea3f6d6882d15134f21ef6a84de933cc34c47cc9106efe3bd84c6aec12e78523661e29bc1a61f0aab17fa58a627fd5fd33f5149153fbe8cd70edf3d963bc287ef875270ff14b5bfdd1bca4483793923b00a0fe46d76cb6e4cbdc568435cd5480af3266d610d303fe33ae8273f30a96d4d34f42fa28ce1112d425b2e3bf7ea553d526e2db6b9255e9dc7419045ce817214d1a0056dbc8d5289956a4b1b69f20f1105124096e6a438f41f2e2495923b0f34b70642607d45559595c7fe94d7fa85fc41bf7d68c1fd509ebeaa5f315f6059a446b9369c277597e4f474a9591535354c7e7f4fd98a08aa60400b130c24ff20bdfbf683313f5daebf1c9b34b3bdadfc77f2ddd72ee1fb17e56c473664bc21d66467655dd74b9005e3a2bacce446f1920cd7017231ae447b67036c9b431b8179deacd5120262d894c26bc015bffe3d827ba7087ad9b700d2ca1f6d16cc1786581e5dd065f293c31209300f9b0afcc3f7c08dd26d0a22d87580b4db41054657374204b65792033202844534129886204131102002205024d6c49de021b03060b090807030206150802090a0b0416020301021e01021780000a0910338934250ccc03607e0400a0bdb9193e8a6b96fc2dfc108ae848914b504481f100a09c4dc148cb693293a67af24dd40d2b13a9e36794"
-
-const dsaTestKeyPrivateHex = "9501bb044d6c49de110400cb5ce438cf9250907ac2ba5bf6547931270b89f7c4b53d9d09f4d0213a5ef2ec1f26806d3d259960f872a4a102ef1581ea3f6d6882d15134f21ef6a84de933cc34c47cc9106efe3bd84c6aec12e78523661e29bc1a61f0aab17fa58a627fd5fd33f5149153fbe8cd70edf3d963bc287ef875270ff14b5bfdd1bca4483793923b00a0fe46d76cb6e4cbdc568435cd5480af3266d610d303fe33ae8273f30a96d4d34f42fa28ce1112d425b2e3bf7ea553d526e2db6b9255e9dc7419045ce817214d1a0056dbc8d5289956a4b1b69f20f1105124096e6a438f41f2e2495923b0f34b70642607d45559595c7fe94d7fa85fc41bf7d68c1fd509ebeaa5f315f6059a446b9369c277597e4f474a9591535354c7e7f4fd98a08aa60400b130c24ff20bdfbf683313f5daebf1c9b34b3bdadfc77f2ddd72ee1fb17e56c473664bc21d66467655dd74b9005e3a2bacce446f1920cd7017231ae447b67036c9b431b8179deacd5120262d894c26bc015bffe3d827ba7087ad9b700d2ca1f6d16cc1786581e5dd065f293c31209300f9b0afcc3f7c08dd26d0a22d87580b4d00009f592e0619d823953577d4503061706843317e4fee083db41054657374204b65792033202844534129886204131102002205024d6c49de021b03060b090807030206150802090a0b0416020301021e01021780000a0910338934250ccc03607e0400a0bdb9193e8a6b96fc2dfc108ae848914b504481f100a09c4dc148cb693293a67af24dd40d2b13a9e36794"
-
-const p256TestKeyHex = "98520456e5b83813082a8648ce3d030107020304a2072cd6d21321266c758cc5b83fab0510f751cb8d91897cddb7047d8d6f185546e2107111b0a95cb8ef063c33245502af7a65f004d5919d93ee74eb71a66253b424502d3235362054657374204b6579203c696e76616c6964406578616d706c652e636f6d3e8879041313080021050256e5b838021b03050b09080702061508090a0b020416020301021e01021780000a0910d44a2c495918513e54e50100dfa64f97d9b47766fc1943c6314ba3f2b2a103d71ad286dc5b1efb96a345b0c80100dbc8150b54241f559da6ef4baacea6d31902b4f4b1bdc09b34bf0502334b7754b8560456e5b83812082a8648ce3d030107020304bfe3cea9cee13486f8d518aa487fecab451f25467d2bf08e58f63e5fa525d5482133e6a79299c274b068ef0be448152ad65cf11cf764348588ca4f6a0bcf22b6030108078861041813080009050256e5b838021b0c000a0910d44a2c495918513e4a4800ff49d589fa64024ad30be363a032e3a0e0e6f5db56ba4c73db850518bf0121b8f20100fd78e065f4c70ea5be9df319ea67e493b936fc78da834a71828043d3154af56e"
-
-const p256TestKeyPrivateHex = "94a50456e5b83813082a8648ce3d030107020304a2072cd6d21321266c758cc5b83fab0510f751cb8d91897cddb7047d8d6f185546e2107111b0a95cb8ef063c33245502af7a65f004d5919d93ee74eb71a66253fe070302f0c2bfb0b6c30f87ee1599472b8636477eab23ced13b271886a4b50ed34c9d8436af5af5b8f88921f0efba6ef8c37c459bbb88bc1c6a13bbd25c4ce9b1e97679569ee77645d469bf4b43de637f5561b424502d3235362054657374204b6579203c696e76616c6964406578616d706c652e636f6d3e8879041313080021050256e5b838021b03050b09080702061508090a0b020416020301021e01021780000a0910d44a2c495918513e54e50100dfa64f97d9b47766fc1943c6314ba3f2b2a103d71ad286dc5b1efb96a345b0c80100dbc8150b54241f559da6ef4baacea6d31902b4f4b1bdc09b34bf0502334b77549ca90456e5b83812082a8648ce3d030107020304bfe3cea9cee13486f8d518aa487fecab451f25467d2bf08e58f63e5fa525d5482133e6a79299c274b068ef0be448152ad65cf11cf764348588ca4f6a0bcf22b603010807fe0703027510012471a603cfee2968dce19f732721ddf03e966fd133b4e3c7a685b788705cbc46fb026dc94724b830c9edbaecd2fb2c662f23169516cacd1fe423f0475c364ecc10abcabcfd4bbbda1a36a1bd8861041813080009050256e5b838021b0c000a0910d44a2c495918513e4a4800ff49d589fa64024ad30be363a032e3a0e0e6f5db56ba4c73db850518bf0121b8f20100fd78e065f4c70ea5be9df319ea67e493b936fc78da834a71828043d3154af56e"
-
-const armoredPrivateKeyBlock = `-----BEGIN PGP PRIVATE KEY BLOCK-----
-Version: GnuPG v1.4.10 (GNU/Linux)
-
-lQHYBE2rFNoBBADFwqWQIW/DSqcB4yCQqnAFTJ27qS5AnB46ccAdw3u4Greeu3Bp
-idpoHdjULy7zSKlwR1EA873dO/k/e11Ml3dlAFUinWeejWaK2ugFP6JjiieSsrKn
-vWNicdCS4HTWn0X4sjl0ZiAygw6GNhqEQ3cpLeL0g8E9hnYzJKQ0LWJa0QARAQAB
-AAP/TB81EIo2VYNmTq0pK1ZXwUpxCrvAAIG3hwKjEzHcbQznsjNvPUihZ+NZQ6+X
-0HCfPAdPkGDCLCb6NavcSW+iNnLTrdDnSI6+3BbIONqWWdRDYJhqZCkqmG6zqSfL
-IdkJgCw94taUg5BWP/AAeQrhzjChvpMQTVKQL5mnuZbUCeMCAN5qrYMP2S9iKdnk
-VANIFj7656ARKt/nf4CBzxcpHTyB8+d2CtPDKCmlJP6vL8t58Jmih+kHJMvC0dzn
-gr5f5+sCAOOe5gt9e0am7AvQWhdbHVfJU0TQJx+m2OiCJAqGTB1nvtBLHdJnfdC9
-TnXXQ6ZXibqLyBies/xeY2sCKL5qtTMCAKnX9+9d/5yQxRyrQUHt1NYhaXZnJbHx
-q4ytu0eWz+5i68IYUSK69jJ1NWPM0T6SkqpB3KCAIv68VFm9PxqG1KmhSrQIVGVz
-dCBLZXmIuAQTAQIAIgUCTasU2gIbAwYLCQgHAwIGFQgCCQoLBBYCAwECHgECF4AA
-CgkQO9o98PRieSoLhgQAkLEZex02Qt7vGhZzMwuN0R22w3VwyYyjBx+fM3JFETy1
-ut4xcLJoJfIaF5ZS38UplgakHG0FQ+b49i8dMij0aZmDqGxrew1m4kBfjXw9B/v+
-eIqpODryb6cOSwyQFH0lQkXC040pjq9YqDsO5w0WYNXYKDnzRV0p4H1pweo2VDid
-AdgETasU2gEEAN46UPeWRqKHvA99arOxee38fBt2CI08iiWyI8T3J6ivtFGixSqV
-bRcPxYO/qLpVe5l84Nb3X71GfVXlc9hyv7CD6tcowL59hg1E/DC5ydI8K8iEpUmK
-/UnHdIY5h8/kqgGxkY/T/hgp5fRQgW1ZoZxLajVlMRZ8W4tFtT0DeA+JABEBAAEA
-A/0bE1jaaZKj6ndqcw86jd+QtD1SF+Cf21CWRNeLKnUds4FRRvclzTyUMuWPkUeX
-TaNNsUOFqBsf6QQ2oHUBBK4VCHffHCW4ZEX2cd6umz7mpHW6XzN4DECEzOVksXtc
-lUC1j4UB91DC/RNQqwX1IV2QLSwssVotPMPqhOi0ZLNY7wIA3n7DWKInxYZZ4K+6
-rQ+POsz6brEoRHwr8x6XlHenq1Oki855pSa1yXIARoTrSJkBtn5oI+f8AzrnN0BN
-oyeQAwIA/7E++3HDi5aweWrViiul9cd3rcsS0dEnksPhvS0ozCJiHsq/6GFmy7J8
-QSHZPteedBnZyNp5jR+H7cIfVN3KgwH/Skq4PsuPhDq5TKK6i8Pc1WW8MA6DXTdU
-nLkX7RGmMwjC0DBf7KWAlPjFaONAX3a8ndnz//fy1q7u2l9AZwrj1qa1iJ8EGAEC
-AAkFAk2rFNoCGwwACgkQO9o98PRieSo2/QP/WTzr4ioINVsvN1akKuekmEMI3LAp
-BfHwatufxxP1U+3Si/6YIk7kuPB9Hs+pRqCXzbvPRrI8NHZBmc8qIGthishdCYad
-AHcVnXjtxrULkQFGbGvhKURLvS9WnzD/m1K2zzwxzkPTzT9/Yf06O6Mal5AdugPL
-VrM0m72/jnpKo04=
-=zNCn
------END PGP PRIVATE KEY BLOCK-----`
-
-const e2ePublicKey = `-----BEGIN PGP PUBLIC KEY BLOCK-----
-Charset: UTF-8
-
-xv8AAABSBAAAAAATCCqGSM49AwEHAgME1LRoXSpOxtHXDUdmuvzchyg6005qIBJ4
-sfaSxX7QgH9RV2ONUhC+WiayCNADq+UMzuR/vunSr4aQffXvuGnR383/AAAAFDxk
-Z2lsQHlhaG9vLWluYy5jb20+wv8AAACGBBATCAA4/wAAAAWCVGvAG/8AAAACiwn/
-AAAACZC2VkQCOjdvYf8AAAAFlQgJCgv/AAAAA5YBAv8AAAACngEAAE1BAP0X8veD
-24IjmI5/C6ZAfVNXxgZZFhTAACFX75jUA3oD6AEAzoSwKf1aqH6oq62qhCN/pekX
-+WAsVMBhNwzLpqtCRjLO/wAAAFYEAAAAABIIKoZIzj0DAQcCAwT50ain7vXiIRv8
-B1DO3x3cE/aattZ5sHNixJzRCXi2vQIA5QmOxZ6b5jjUekNbdHG3SZi1a2Ak5mfX
-fRxC/5VGAwEIB8L/AAAAZQQYEwgAGP8AAAAFglRrwBz/AAAACZC2VkQCOjdvYQAA
-FJAA9isX3xtGyMLYwp2F3nXm7QEdY5bq5VUcD/RJlj792VwA/1wH0pCzVLl4Q9F9
-ex7En5r7rHR5xwX82Msc+Rq9dSyO
-=7MrZ
------END PGP PUBLIC KEY BLOCK-----`
-
-const dsaKeyWithSHA512 = `9901a2044f04b07f110400db244efecc7316553ee08d179972aab87bb1214de7692593fcf5b6feb1c80fba268722dd464748539b85b81d574cd2d7ad0ca2444de4d849b8756bad7768c486c83a824f9bba4af773d11742bdfb4ac3b89ef8cc9452d4aad31a37e4b630d33927bff68e879284a1672659b8b298222fc68f370f3e24dccacc4a862442b9438b00a0ea444a24088dc23e26df7daf8f43cba3bffc4fe703fe3d6cd7fdca199d54ed8ae501c30e3ec7871ea9cdd4cf63cfe6fc82281d70a5b8bb493f922cd99fba5f088935596af087c8d818d5ec4d0b9afa7f070b3d7c1dd32a84fca08d8280b4890c8da1dde334de8e3cad8450eed2a4a4fcc2db7b8e5528b869a74a7f0189e11ef097ef1253582348de072bb07a9fa8ab838e993cef0ee203ff49298723e2d1f549b00559f886cd417a41692ce58d0ac1307dc71d85a8af21b0cf6eaa14baf2922d3a70389bedf17cc514ba0febbd107675a372fe84b90162a9e88b14d4b1c6be855b96b33fb198c46f058568817780435b6936167ebb3724b680f32bf27382ada2e37a879b3d9de2abe0c3f399350afd1ad438883f4791e2e3b4184453412068617368207472756e636174696f6e207465737488620413110a002205024f04b07f021b03060b090807030206150802090a0b0416020301021e01021780000a0910ef20e0cefca131581318009e2bf3bf047a44d75a9bacd00161ee04d435522397009a03a60d51bd8a568c6c021c8d7cf1be8d990d6417b0020003`
-
-const unknownHashFunctionHex = `8a00000040040001990006050253863c24000a09103b4fe6acc0b21f32ffff0101010101010101010101010101010101010101010101010101010101010101010101010101`
-
-const rsaSignatureBadMPIlength = `8a00000040040001030006050253863c24000a09103b4fe6acc0b21f32ffff0101010101010101010101010101010101010101010101010101010101010101010101010101`
-
-const missingHashFunctionHex = `8a00000040040001030006050253863c24000a09103b4fe6acc0b21f32ffff0101010101010101010101010101010101010101010101010101010101010101010101010101`
-
-const campbellQuine = `a0b001000300fcffa0b001000d00f2ff000300fcffa0b001000d00f2ff8270a01c00000500faff8270a01c00000500faff000500faff001400ebff8270a01c00000500faff000500faff001400ebff428821c400001400ebff428821c400001400ebff428821c400001400ebff428821c400001400ebff428821c400000000ffff000000ffff000b00f4ff428821c400000000ffff000000ffff000b00f4ff0233214c40000100feff000233214c40000100feff0000`
-
-const keyV4forVerifyingSignedMessageV3 = `-----BEGIN PGP PUBLIC KEY BLOCK-----
-Comment: GPGTools - https://gpgtools.org
-
-mI0EVfxoFQEEAMBIqmbDfYygcvP6Phr1wr1XI41IF7Qixqybs/foBF8qqblD9gIY
-BKpXjnBOtbkcVOJ0nljd3/sQIfH4E0vQwK5/4YRQSI59eKOqd6Fx+fWQOLG+uu6z
-tewpeCj9LLHvibx/Sc7VWRnrznia6ftrXxJ/wHMezSab3tnGC0YPVdGNABEBAAG0
-JEdvY3J5cHRvIFRlc3QgS2V5IDx0aGVtYXhAZ21haWwuY29tPoi5BBMBCgAjBQJV
-/GgVAhsDBwsJCAcDAgEGFQgCCQoLBBYCAwECHgECF4AACgkQeXnQmhdGW9PFVAP+
-K7TU0qX5ArvIONIxh/WAweyOk884c5cE8f+3NOPOOCRGyVy0FId5A7MmD5GOQh4H
-JseOZVEVCqlmngEvtHZb3U1VYtVGE5WZ+6rQhGsMcWP5qaT4soYwMBlSYxgYwQcx
-YhN9qOr292f9j2Y//TTIJmZT4Oa+lMxhWdqTfX+qMgG4jQRV/GgVAQQArhFSiij1
-b+hT3dnapbEU+23Z1yTu1DfF6zsxQ4XQWEV3eR8v+8mEDDNcz8oyyF56k6UQ3rXi
-UMTIwRDg4V6SbZmaFbZYCOwp/EmXJ3rfhm7z7yzXj2OFN22luuqbyVhuL7LRdB0M
-pxgmjXb4tTvfgKd26x34S+QqUJ7W6uprY4sAEQEAAYifBBgBCgAJBQJV/GgVAhsM
-AAoJEHl50JoXRlvT7y8D/02ckx4OMkKBZo7viyrBw0MLG92i+DC2bs35PooHR6zz
-786mitjOp5z2QWNLBvxC70S0qVfCIz8jKupO1J6rq6Z8CcbLF3qjm6h1omUBf8Nd
-EfXKD2/2HV6zMKVknnKzIEzauh+eCKS2CeJUSSSryap/QLVAjRnckaES/OsEWhNB
-=RZia
------END PGP PUBLIC KEY BLOCK-----
-`
-
-const signedMessageV3 = `-----BEGIN PGP MESSAGE-----
-Comment: GPGTools - https://gpgtools.org
-
-owGbwMvMwMVYWXlhlrhb9GXG03JJDKF/MtxDMjKLFYAoUaEktbhEITe1uDgxPVWP
-q5NhKjMrWAVcC9evD8z/bF/uWNjqtk/X3y5/38XGRQHm/57rrDRYuGnTw597Xqka
-uM3137/hH3Os+Jf2dc0fXOITKwJvXJvecPVs0ta+Vg7ZO1MLn8w58Xx+6L58mbka
-DGHyU9yTueZE8D+QF/Tz28Y78dqtF56R1VPn9Xw4uJqrWYdd7b3vIZ1V6R4Nh05d
-iT57d/OhWwA=
-=hG7R
------END PGP MESSAGE-----
-`
-
-// https://mailarchive.ietf.org/arch/msg/openpgp/9SheW_LENE0Kxf7haNllovPyAdY/
-const v5PrivKey = `-----BEGIN PGP PRIVATE KEY BLOCK-----
-
-lGEFXJH05BYAAAAtCSsGAQQB2kcPAQEHQFhZlVcVVtwf+21xNQPX+ecMJJBL0MPd
-fj75iux+my8QAAAAAAAiAQCHZ1SnSUmWqxEsoI6facIVZQu6mph3cBFzzTvcm5lA
-Ng5ctBhlbW1hLmdvbGRtYW5AZXhhbXBsZS5uZXSIlgUTFggASCIhBRk0e8mHJGQC
-X5nfPsLgAA7ZiEiS4fez6kyUAJFZVptUBQJckfTkAhsDBQsJCAcCAyICAQYVCgkI
-CwIEFgIDAQIeBwIXgAAA9cAA/jiR3yMsZMeEQ40u6uzEoXa6UXeV/S3wwJAXRJy9
-M8s0AP9vuL/7AyTfFXwwzSjDnYmzS0qAhbLDQ643N+MXGBJ2BZxmBVyR9OQSAAAA
-MgorBgEEAZdVAQUBAQdA+nysrzml2UCweAqtpDuncSPlvrcBWKU0yfU0YvYWWAoD
-AQgHAAAAAAAiAP9OdAPppjU1WwpqjIItkxr+VPQRT8Zm/Riw7U3F6v3OiBFHiHoF
-GBYIACwiIQUZNHvJhyRkAl+Z3z7C4AAO2YhIkuH3s+pMlACRWVabVAUCXJH05AIb
-DAAAOSQBAP4BOOIR/sGLNMOfeb5fPs/02QMieoiSjIBnijhob2U5AQC+RtOHCHx7
-TcIYl5/Uyoi+FOvPLcNw4hOv2nwUzSSVAw==
-=IiS2
------END PGP PRIVATE KEY BLOCK-----`
-
-// Generated with the above private key
-const v5PrivKeyMsg = `-----BEGIN PGP MESSAGE-----
-Version: OpenPGP.js v4.10.7
-Comment: https://openpgpjs.org
-
-xA0DAQoWGTR7yYckZAIByxF1B21zZy50eHRfbIGSdGVzdMJ3BQEWCgAGBQJf
-bIGSACMiIQUZNHvJhyRkAl+Z3z7C4AAO2YhIkuH3s+pMlACRWVabVDQvAP9G
-y29VPonFXqi2zKkpZrvyvZxg+n5e8Nt9wNbuxeCd3QD/TtO2s+JvjrE4Siwv
-UQdl5MlBka1QSNbMq2Bz7XwNPg4=
-=6lbM
------END PGP MESSAGE-----`
-
-const keyWithExpiredCrossSig = `-----BEGIN PGP PUBLIC KEY BLOCK-----
-
-xsDNBF2lnPIBDAC5cL9PQoQLTMuhjbYvb4Ncuuo0bfmgPRFywX53jPhoFf4Zg6mv
-/seOXpgecTdOcVttfzC8ycIKrt3aQTiwOG/ctaR4Bk/t6ayNFfdUNxHWk4WCKzdz
-/56fW2O0F23qIRd8UUJp5IIlN4RDdRCtdhVQIAuzvp2oVy/LaS2kxQoKvph/5pQ/
-5whqsyroEWDJoSV0yOb25B/iwk/pLUFoyhDG9bj0kIzDxrEqW+7Ba8nocQlecMF3
-X5KMN5kp2zraLv9dlBBpWW43XktjcCZgMy20SouraVma8Je/ECwUWYUiAZxLIlMv
-9CurEOtxUw6N3RdOtLmYZS9uEnn5y1UkF88o8Nku890uk6BrewFzJyLAx5wRZ4F0
-qV/yq36UWQ0JB/AUGhHVPdFf6pl6eaxBwT5GXvbBUibtf8YI2og5RsgTWtXfU7eb
-SGXrl5ZMpbA6mbfhd0R8aPxWfmDWiIOhBufhMCvUHh1sApMKVZnvIff9/0Dca3wb
-vLIwa3T4CyshfT0AEQEAAc0hQm9iIEJhYmJhZ2UgPGJvYkBvcGVucGdwLmV4YW1w
-bGU+wsEABBMBCgATBYJeO2eVAgsJAxUICgKbAQIeAQAhCRD7/MgqAV5zMBYhBNGm
-bhojsYLJmA94jPv8yCoBXnMwKWUMAJ3FKZfJ2mXvh+GFqgymvK4NoKkDRPB0CbUN
-aDdG7ZOizQrWXo7Da2MYIZ6eZUDqBKLdhZ5gZfVnisDfu/yeCgpENaKib1MPHpA8
-nZQjnPejbBDomNqY8HRzr5jvXNlwywBpjWGtegCKUY9xbSynjbfzIlMrWL4S+Rfl
-+bOOQKRyYJWXmECmVyqY8cz2VUYmETjNcwC8VCDUxQnhtcCJ7Aej22hfYwVEPb/J
-BsJBPq8WECCiGfJ9Y2y6TF+62KzG9Kfs5hqUeHhQy8V4TSi479ewwL7DH86XmIIK
-chSANBS+7iyMtctjNZfmF9zYdGJFvjI/mbBR/lK66E515Inuf75XnL8hqlXuwqvG
-ni+i03Aet1DzULZEIio4uIU6ioc1lGO9h7K2Xn4S7QQH1QoISNMWqXibUR0RCGjw
-FsEDTt2QwJl8XXxoJCooM7BCcCQo+rMNVUHDjIwrdoQjPld3YZsUQQRcqH6bLuln
-cfn5ufl8zTGWKydoj/iTz8KcjZ7w187AzQRdpZzyAQwA1jC/XGxjK6ddgrRfW9j+
-s/U00++EvIsgTs2kr3Rg0GP7FLWV0YNtR1mpl55/bEl7yAxCDTkOgPUMXcaKlnQh
-6zrlt6H53mF6Bvs3inOHQvOsGtU0dqvb1vkTF0juLiJgPlM7pWv+pNQ6IA39vKoQ
-sTMBv4v5vYNXP9GgKbg8inUNT17BxzZYHfw5+q63ectgDm2on1e8CIRCZ76oBVwz
-dkVxoy3gjh1eENlk2D4P0uJNZzF1Q8GV67yLANGMCDICE/OkWn6daipYDzW4iJQt
-YPUWP4hWhjdm+CK+hg6IQUEn2Vtvi16D2blRP8BpUNNa4fNuylWVuJV76rIHvsLZ
-1pbM3LHpRgE8s6jivS3Rz3WRs0TmWCNnvHPqWizQ3VTy+r3UQVJ5AmhJDrZdZq9i
-aUIuZ01PoE1+CHiJwuxPtWvVAxf2POcm1M/F1fK1J0e+lKlQuyonTXqXR22Y41wr
-fP2aPk3nPSTW2DUAf3vRMZg57ZpRxLEhEMxcM4/LMR+PABEBAAHCwrIEGAEKAAkF
-gl8sAVYCmwIB3QkQ+/zIKgFeczDA+qAEGQEKAAwFgl47Z5UFgwB4TOAAIQkQfC+q
-Tfk8N7IWIQQd3OFfCSF87i87N2B8L6pN+Tw3st58C/0exp0X2U4LqicSHEOSqHZj
-jiysdqIELHGyo5DSPv92UFPp36aqjF9OFgtNNwSa56fmAVCD4+hor/fKARRIeIjF
-qdIC5Y/9a4B10NQFJa5lsvB38x/d39LI2kEoglZnqWgdJskROo3vNQF4KlIcm6FH
-dn4WI8UkC5oUUcrpZVMSKoacIaxLwqnXT42nIVgYYuqrd/ZagZZjG5WlrTOd5+NI
-zi/l0fWProcPHGLjmAh4Thu8i7omtVw1nQaMnq9I77ffg3cPDgXknYrLL+q8xXh/
-0mEJyIhnmPwllWCSZuLv9DrD5pOexFfdlwXhf6cLzNpW6QhXD/Tf5KrqIPr9aOv8
-9xaEEXWh0vEby2kIsI2++ft+vfdIyxYw/wKqx0awTSnuBV1rG3z1dswX4BfoY66x
-Bz3KOVqlz9+mG/FTRQwrgPvR+qgLCHbuotxoGN7fzW+PI75hQG5JQAqhsC9sHjQH
-UrI21/VUNwzfw3v5pYsWuFb5bdQ3ASJetICQiMy7IW8WIQTRpm4aI7GCyZgPeIz7
-/MgqAV5zMG6/C/wLpPl/9e6Hf5wmXIUwpZNQbNZvpiCcyx9sXsHXaycOQVxn3McZ
-nYOUP9/mobl1tIeDQyTNbkxWjU0zzJl8XQsDZerb5098pg+x7oGIL7M1vn5s5JMl
-owROourqF88JEtOBxLMxlAM7X4hB48xKQ3Hu9hS1GdnqLKki4MqRGl4l5FUwyGOM
-GjyS3TzkfiDJNwQxybQiC9n57ij20ieNyLfuWCMLcNNnZUgZtnF6wCctoq/0ZIWu
-a7nvuA/XC2WW9YjEJJiWdy5109pqac+qWiY11HWy/nms4gpMdxVpT0RhrKGWq4o0
-M5q3ZElOoeN70UO3OSbU5EVrG7gB1GuwF9mTHUVlV0veSTw0axkta3FGT//XfSpD
-lRrCkyLzwq0M+UUHQAuYpAfobDlDdnxxOD2jm5GyTzak3GSVFfjW09QFVO6HlGp5
-01/jtzkUiS6nwoHHkfnyn0beZuR8X6KlcrzLB0VFgQFLmkSM9cSOgYhD0PTu9aHb
-hW1Hj9AO8lzggBQ=
-=Nt+N
------END PGP PUBLIC KEY BLOCK-----
-`
-
-const sigFromKeyWithExpiredCrossSig = `-----BEGIN PGP SIGNATURE-----
-
-wsDzBAABCgAGBYJfLAFsACEJEHwvqk35PDeyFiEEHdzhXwkhfO4vOzdgfC+qTfk8
-N7KiqwwAts4QGB7v9bABCC2qkTxJhmStC0wQMcHRcjL/qAiVnmasQWmvE9KVsdm3
-AaXd8mIx4a37/RRvr9dYrY2eE4uw72cMqPxNja2tvVXkHQvk1oEUqfkvbXs4ypKI
-NyeTWjXNOTZEbg0hbm3nMy+Wv7zgB1CEvAsEboLDJlhGqPcD+X8a6CJGrBGUBUrv
-KVmZr3U6vEzClz3DBLpoddCQseJRhT4YM1nKmBlZ5quh2LFgTSpajv5OsZheqt9y
-EZAPbqmLhDmWRQwGzkWHKceKS7nZ/ox2WK6OS7Ob8ZGZkM64iPo6/EGj5Yc19vQN
-AGiIaPEGszBBWlOpHTPhNm0LB0nMWqqaT87oNYwP8CQuuxDb6rKJ2lffCmZH27Lb
-UbQZcH8J+0UhpeaiadPZxH5ATJAcenmVtVVMLVOFnm+eIlxzov9ntpgGYt8hLdXB
-ITEG9mMgp3TGS9ZzSifMZ8UGtHdp9QdBg8NEVPFzDOMGxpc/Bftav7RRRuPiAER+
-7A5CBid5
-=aQkm
------END PGP SIGNATURE-----
-`
-
-const signedMessageWithCriticalNotation = `-----BEGIN PGP MESSAGE-----
-
-owGbwMvMwMH4oOW7S46CznTG09xJDDE3Wl1KUotLuDousDAwcjBYiSmyXL+48d6x
-U1PSGUxcj8IUszKBVMpMaWAAAgEGZpAeh9SKxNyCnFS95PzcytRiBi5OAZjyXXzM
-f8WYLqv7TXP61Sa4rqT12CI3xaN73YS2pt089f96odCKaEPnWJ3iSGmzJaW/ug10
-2Zo8Wj2k4s7t8wt4H3HtTu+y5UZfV3VOO+l//sdE/o+Lsub8FZH7/eOq7OnbNp4n
-vwjE8mqJXetNMfj8r2SCyvkEnlVRYR+/mnge+ib56FdJ8uKtqSxyvgA=
-=fRXs
------END PGP MESSAGE-----`
-
-const criticalNotationSigner = `-----BEGIN PGP PUBLIC KEY BLOCK-----
-
-mI0EUmEvTgEEANyWtQQMOybQ9JltDqmaX0WnNPJeLILIM36sw6zL0nfTQ5zXSS3+
-fIF6P29lJFxpblWk02PSID5zX/DYU9/zjM2xPO8Oa4xo0cVTOTLj++Ri5mtr//f5
-GLsIXxFrBJhD/ghFsL3Op0GXOeLJ9A5bsOn8th7x6JucNKuaRB6bQbSPABEBAAG0
-JFRlc3QgTWNUZXN0aW5ndG9uIDx0ZXN0QGV4YW1wbGUuY29tPoi5BBMBAgAjBQJS
-YS9OAhsvBwsJCAcDAgEGFQgCCQoLBBYCAwECHgECF4AACgkQSmNhOk1uQJQwDAP6
-AgrTyqkRlJVqz2pb46TfbDM2TDF7o9CBnBzIGoxBhlRwpqALz7z2kxBDmwpQa+ki
-Bq3jZN/UosY9y8bhwMAlnrDY9jP1gdCo+H0sD48CdXybblNwaYpwqC8VSpDdTndf
-9j2wE/weihGp/DAdy/2kyBCaiOY1sjhUfJ1GogF49rC4jQRSYS9OAQQA6R/PtBFa
-JaT4jq10yqASk4sqwVMsc6HcifM5lSdxzExFP74naUMMyEsKHP53QxTF0Grqusag
-Qg/ZtgT0CN1HUM152y7ACOdp1giKjpMzOTQClqCoclyvWOFB+L/SwGEIJf7LSCEr
-woBuJifJc8xAVr0XX0JthoW+uP91eTQ3XpsAEQEAAYkBPQQYAQIACQUCUmEvTgIb
-LgCoCRBKY2E6TW5AlJ0gBBkBAgAGBQJSYS9OAAoJEOCE90RsICyXuqIEANmmiRCA
-SF7YK7PvFkieJNwzeK0V3F2lGX+uu6Y3Q/Zxdtwc4xR+me/CSBmsURyXTO29OWhP
-GLszPH9zSJU9BdDi6v0yNprmFPX/1Ng0Abn/sCkwetvjxC1YIvTLFwtUL/7v6NS2
-bZpsUxRTg9+cSrMWWSNjiY9qUKajm1tuzPDZXAUEAMNmAN3xXN/Kjyvj2OK2ck0X
-W748sl/tc3qiKPMJ+0AkMF7Pjhmh9nxqE9+QCEl7qinFqqBLjuzgUhBU4QlwX1GD
-AtNTq6ihLMD5v1d82ZC7tNatdlDMGWnIdvEMCv2GZcuIqDQ9rXWs49e7tq1NncLY
-hz3tYjKhoFTKEIq3y3Pp
-=h/aX
------END PGP PUBLIC KEY BLOCK-----`
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k.go
deleted file mode 100644
index d0b85834..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k.go
+++ /dev/null
@@ -1,339 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package s2k implements the various OpenPGP string-to-key transforms as
-// specified in RFC 4880 section 3.7.1.
-package s2k // import "github.com/ProtonMail/go-crypto/openpgp/s2k"
-
-import (
- "crypto"
- "hash"
- "io"
- "strconv"
-
- "github.com/ProtonMail/go-crypto/openpgp/errors"
- "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm"
-)
-
-// Config collects configuration parameters for s2k key-stretching
-// transformations. A nil *Config is valid and results in all default
-// values. Currently, Config is used only by the Serialize function in
-// this package.
-type Config struct {
-	// S2KMode is the mode of the s2k function.
-	// It can be 0 (simple), 1 (salted), or 3 (iterated);
-	// 2 is reserved and 100-110 are private/experimental.
-	S2KMode uint8
-	// Hash is the default hash function to be used. If
-	// zero, SHA256 is used.
-	Hash crypto.Hash
-	// S2KCount is only used for symmetric encryption. It
-	// determines the strength of the passphrase stretching when
-	// the passphrase is hashed to produce a key. S2KCount
-	// should be between 65536 and 65011712, inclusive. If Config
-	// is nil or S2KCount is 0, the value 16777216 is used. Not all
-	// values in the above range can be represented. S2KCount will
-	// be rounded up to the next representable value if it cannot
-	// be encoded exactly. See RFC 4880 Section 3.7.1.3.
-	S2KCount int
-}
-
-// Params contains all the parameters of the s2k packet.
-type Params struct {
-	// mode is the mode of the s2k function.
-	// It can be 0 (simple), 1 (salted), or 3 (iterated);
-	// 2 is reserved and 100-110 are private/experimental.
-	mode uint8
-	// hashId is the ID of the hash function used in any of the modes.
-	hashId byte
-	// salt is a byte array used as a salt in the hashing process.
-	salt []byte
-	// countByte is used to determine how many rounds of hashing are to
-	// be performed in s2k mode 3. See RFC 4880 Section 3.7.1.3.
-	countByte byte
-}
-
-func (c *Config) hash() crypto.Hash {
- if c == nil || uint(c.Hash) == 0 {
- return crypto.SHA256
- }
-
- return c.Hash
-}
-
-// EncodedCount returns the encoded iteration count octet derived from c.S2KCount.
-func (c *Config) EncodedCount() uint8 {
- if c == nil || c.S2KCount == 0 {
- return 224 // The common case. Corresponding to 16777216
- }
-
- i := c.S2KCount
-
- switch {
- case i < 65536:
- i = 65536
- case i > 65011712:
- i = 65011712
- }
-
- return encodeCount(i)
-}
-
-// encodeCount converts an iterative "count" in the range 1024 to
-// 65011712, inclusive, to an encoded count. The return value is the
-// octet that is actually stored in the GPG file. encodeCount panics
-// if i is not in the above range (encodedCount above takes care to
-// pass i in the correct range). See RFC 4880 Section 3.7.7.1.
-func encodeCount(i int) uint8 {
- if i < 65536 || i > 65011712 {
- panic("count arg i outside the required range")
- }
-
- for encoded := 96; encoded < 256; encoded++ {
- count := decodeCount(uint8(encoded))
- if count >= i {
- return uint8(encoded)
- }
- }
-
- return 255
-}
-
-// decodeCount returns the s2k mode 3 iterative "count" corresponding to
-// the encoded octet c.
-func decodeCount(c uint8) int {
- return (16 + int(c&15)) << (uint32(c>>4) + 6)
-}
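
// Editor's note: a minimal, self-contained sketch (not part of the vendored
// file) of the count encoding that encodeCount/decodeCount above implement.
// The low four bits of the octet are a mantissa and the high four bits an
// exponent: count = (16 + (c & 15)) << ((c >> 4) + 6), so the default octet
// 224 decodes to 16 << 20 = 16777216.
package main

import "fmt"

func decode(c uint8) int { return (16 + int(c&15)) << (uint32(c>>4) + 6) }

func main() {
	fmt.Println(decode(96))  // 65536, the smallest count EncodedCount emits
	fmt.Println(decode(224)) // 16777216, the default count
	fmt.Println(decode(255)) // 65011712, the maximum representable count
}
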
-
-// Simple writes to out the result of computing the Simple S2K function (RFC
-// 4880, section 3.7.1.1) using the given hash and input passphrase.
-func Simple(out []byte, h hash.Hash, in []byte) {
- Salted(out, h, in, nil)
-}
-
-var zero [1]byte
-
-// Salted writes to out the result of computing the Salted S2K function (RFC
-// 4880, section 3.7.1.2) using the given hash, input passphrase and salt.
-func Salted(out []byte, h hash.Hash, in []byte, salt []byte) {
- done := 0
- var digest []byte
-
- for i := 0; done < len(out); i++ {
- h.Reset()
- for j := 0; j < i; j++ {
- h.Write(zero[:])
- }
- h.Write(salt)
- h.Write(in)
- digest = h.Sum(digest[:0])
- n := copy(out[done:], digest)
- done += n
- }
-}
-
-// Iterated writes to out the result of computing the Iterated and Salted S2K
-// function (RFC 4880, section 3.7.1.3) using the given hash, input passphrase,
-// salt and iteration count.
-func Iterated(out []byte, h hash.Hash, in []byte, salt []byte, count int) {
- combined := make([]byte, len(in)+len(salt))
- copy(combined, salt)
- copy(combined[len(salt):], in)
-
- if count < len(combined) {
- count = len(combined)
- }
-
- done := 0
- var digest []byte
- for i := 0; done < len(out); i++ {
- h.Reset()
- for j := 0; j < i; j++ {
- h.Write(zero[:])
- }
- written := 0
- for written < count {
- if written+len(combined) > count {
- todo := count - written
- h.Write(combined[:todo])
- written = count
- } else {
- h.Write(combined)
- written += len(combined)
- }
- }
- digest = h.Sum(digest[:0])
- n := copy(out[done:], digest)
- done += n
- }
-}
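
// Editor's note: a hedged usage sketch (not part of the vendored file)
// showing how the Iterated helper above derives a fixed-size key from a
// passphrase. The salt, passphrase, and iteration count are illustrative
// values only; the import path matches the (pre-removal) vendored package.
package main

import (
	"crypto/sha256"
	"fmt"

	"github.com/ProtonMail/go-crypto/openpgp/s2k"
)

func main() {
	key := make([]byte, 32) // request a 256-bit key
	salt := []byte{1, 2, 3, 4, 5, 6, 7, 8}
	// 65536 is the smallest iteration count EncodedCount will produce.
	s2k.Iterated(key, sha256.New(), []byte("correct horse"), salt, 65536)
	fmt.Printf("derived key: %x\n", key)
}
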
-
-// Generate generates valid parameters from the given configuration.
-// It enforces the iterated and salted s2k method (mode 3).
-func Generate(rand io.Reader, c *Config) (*Params, error) {
-	hashId, ok := algorithm.HashToHashId(c.Hash)
-	if !ok {
-		return nil, errors.UnsupportedError("no such hash")
-	}
-
-	params := &Params{
-		mode: 3, // Enforce the iterated and salted method
-		hashId: hashId,
-		salt: make([]byte, 8),
-		countByte: c.EncodedCount(),
-	}
-
- if _, err := io.ReadFull(rand, params.salt); err != nil {
- return nil, err
- }
-
- return params, nil
-}
-
-// Parse reads a binary specification for a string-to-key transformation from r
-// and returns a function which performs that transform. If the S2K is a special
-// GNU extension that indicates that the private key is missing, then the error
-// returned is errors.ErrDummyPrivateKey.
-func Parse(r io.Reader) (f func(out, in []byte), err error) {
- params, err := ParseIntoParams(r)
- if err != nil {
- return nil, err
- }
-
- return params.Function()
-}
-
-// ParseIntoParams reads a binary specification for a string-to-key
-// transformation from r and returns a struct describing the s2k parameters.
-func ParseIntoParams(r io.Reader) (params *Params, err error) {
- var buf [9]byte
-
- _, err = io.ReadFull(r, buf[:2])
- if err != nil {
- return
- }
-
- params = &Params{
- mode: buf[0],
- hashId: buf[1],
- }
-
- switch params.mode {
- case 0:
- return params, nil
- case 1:
- _, err = io.ReadFull(r, buf[:8])
- if err != nil {
- return nil, err
- }
-
- params.salt = buf[:8]
- return params, nil
- case 3:
- _, err = io.ReadFull(r, buf[:9])
- if err != nil {
- return nil, err
- }
-
- params.salt = buf[:8]
- params.countByte = buf[8]
- return params, nil
- case 101:
- // This is a GNU extension. See
- // https://git.gnupg.org/cgi-bin/gitweb.cgi?p=gnupg.git;a=blob;f=doc/DETAILS;h=fe55ae16ab4e26d8356dc574c9e8bc935e71aef1;hb=23191d7851eae2217ecdac6484349849a24fd94a#l1109
- if _, err = io.ReadFull(r, buf[:4]); err != nil {
- return nil, err
- }
- if buf[0] == 'G' && buf[1] == 'N' && buf[2] == 'U' && buf[3] == 1 {
- return params, nil
- }
- return nil, errors.UnsupportedError("GNU S2K extension")
- }
-
- return nil, errors.UnsupportedError("S2K function")
-}
-
-// Dummy reports whether the S2K is the GNU dummy extension that marks a
-// missing private key.
-func (params *Params) Dummy() bool {
- return params != nil && params.mode == 101
-}
-
-// Function returns the string-to-key transformation function described
-// by params.
-func (params *Params) Function() (f func(out, in []byte), err error) {
- if params.Dummy() {
- return nil, errors.ErrDummyPrivateKey("dummy key found")
- }
- hashObj, ok := algorithm.HashIdToHashWithSha1(params.hashId)
- if !ok {
- return nil, errors.UnsupportedError("hash for S2K function: " + strconv.Itoa(int(params.hashId)))
- }
- if !hashObj.Available() {
- return nil, errors.UnsupportedError("hash not available: " + strconv.Itoa(int(hashObj)))
- }
-
- switch params.mode {
- case 0:
- f := func(out, in []byte) {
- Simple(out, hashObj.New(), in)
- }
-
- return f, nil
- case 1:
- f := func(out, in []byte) {
- Salted(out, hashObj.New(), in, params.salt)
- }
-
- return f, nil
- case 3:
- f := func(out, in []byte) {
- Iterated(out, hashObj.New(), in, params.salt, decodeCount(params.countByte))
- }
-
- return f, nil
- }
-
- return nil, errors.UnsupportedError("S2K function")
-}
-
-// Serialize writes the binary representation of the s2k parameters to w.
-func (params *Params) Serialize(w io.Writer) (err error) {
- if _, err = w.Write([]byte{params.mode}); err != nil {
- return
- }
- if _, err = w.Write([]byte{params.hashId}); err != nil {
- return
- }
- if params.Dummy() {
- _, err = w.Write(append([]byte("GNU"), 1))
- return
- }
- if params.mode > 0 {
- if _, err = w.Write(params.salt); err != nil {
- return
- }
- if params.mode == 3 {
- _, err = w.Write([]byte{params.countByte})
- }
- }
- return
-}
-
-// Serialize salts and stretches the given passphrase and writes the
-// resulting key into key. It also serializes an S2K descriptor to
-// w. The key stretching can be configured with c, which may be
-// nil. In that case, sensible defaults will be used.
-func Serialize(w io.Writer, key []byte, rand io.Reader, passphrase []byte, c *Config) error {
- params, err := Generate(rand, c)
- if err != nil {
- return err
- }
- err = params.Serialize(w)
- if err != nil {
- return err
- }
-
- f, err := params.Function()
- if err != nil {
- return err
- }
- f(key, passphrase)
- return nil
-}
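
// Editor's note: a hedged round-trip sketch (not part of the diff) for the
// package removed above, as it existed before this change: Serialize writes
// an S2K descriptor and stretches the passphrase into key, and Parse reads
// the descriptor back into an equivalent key-derivation function. The
// passphrase is illustrative.
package main

import (
	"bytes"
	"crypto/rand"
	"fmt"

	"github.com/ProtonMail/go-crypto/openpgp/s2k"
)

func main() {
	var buf bytes.Buffer
	key := make([]byte, 32)
	// nil *Config falls back to the defaults documented above
	// (SHA256, iterated + salted, count 16777216).
	if err := s2k.Serialize(&buf, key, rand.Reader, []byte("passphrase"), nil); err != nil {
		panic(err)
	}
	f, err := s2k.Parse(&buf) // returns func(out, in []byte)
	if err != nil {
		panic(err)
	}
	key2 := make([]byte, 32)
	f(key2, []byte("passphrase"))
	fmt.Println(bytes.Equal(key, key2)) // true: same descriptor, same key
}
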
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/write.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/write.go
deleted file mode 100644
index b3ae72f7..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/write.go
+++ /dev/null
@@ -1,583 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package openpgp
-
-import (
- "crypto"
- "hash"
- "io"
- "strconv"
- "time"
-
- "github.com/ProtonMail/go-crypto/openpgp/armor"
- "github.com/ProtonMail/go-crypto/openpgp/errors"
- "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm"
- "github.com/ProtonMail/go-crypto/openpgp/packet"
-)
-
-// DetachSign signs message with the private key from signer (which must
-// already have been decrypted) and writes the signature to w.
-// If config is nil, sensible defaults will be used.
-func DetachSign(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error {
- return detachSign(w, signer, message, packet.SigTypeBinary, config)
-}
-
-// ArmoredDetachSign signs message with the private key from signer (which
-// must already have been decrypted) and writes an armored signature to w.
-// If config is nil, sensible defaults will be used.
-func ArmoredDetachSign(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) (err error) {
- return armoredDetachSign(w, signer, message, packet.SigTypeBinary, config)
-}
-
-// DetachSignText signs message (after canonicalising the line endings) with
-// the private key from signer (which must already have been decrypted) and
-// writes the signature to w.
-// If config is nil, sensible defaults will be used.
-func DetachSignText(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error {
- return detachSign(w, signer, message, packet.SigTypeText, config)
-}
-
-// ArmoredDetachSignText signs message (after canonicalising the line endings)
-// with the private key from signer (which must already have been decrypted)
-// and writes an armored signature to w.
-// If config is nil, sensible defaults will be used.
-func ArmoredDetachSignText(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error {
- return armoredDetachSign(w, signer, message, packet.SigTypeText, config)
-}
-
-func armoredDetachSign(w io.Writer, signer *Entity, message io.Reader, sigType packet.SignatureType, config *packet.Config) (err error) {
- out, err := armor.Encode(w, SignatureType, nil)
- if err != nil {
- return
- }
- err = detachSign(out, signer, message, sigType, config)
- if err != nil {
- return
- }
- return out.Close()
-}
-
-func detachSign(w io.Writer, signer *Entity, message io.Reader, sigType packet.SignatureType, config *packet.Config) (err error) {
- signingKey, ok := signer.SigningKeyById(config.Now(), config.SigningKey())
- if !ok {
- return errors.InvalidArgumentError("no valid signing keys")
- }
- if signingKey.PrivateKey == nil {
- return errors.InvalidArgumentError("signing key doesn't have a private key")
- }
- if signingKey.PrivateKey.Encrypted {
- return errors.InvalidArgumentError("signing key is encrypted")
- }
- if _, ok := algorithm.HashToHashId(config.Hash()); !ok {
- return errors.InvalidArgumentError("invalid hash function")
- }
-
- sig := createSignaturePacket(signingKey.PublicKey, sigType, config)
-
- h, wrappedHash, err := hashForSignature(sig.Hash, sig.SigType)
- if err != nil {
- return
- }
- if _, err = io.Copy(wrappedHash, message); err != nil {
- return err
- }
-
- err = sig.Sign(h, signingKey.PrivateKey, config)
- if err != nil {
- return
- }
-
- return sig.Serialize(w)
-}
-
-// FileHints contains metadata about encrypted files. This metadata is, itself,
-// encrypted.
-type FileHints struct {
- // IsBinary can be set to hint that the contents are binary data.
- IsBinary bool
- // FileName hints at the name of the file that should be written. It's
- // truncated to 255 bytes if longer. It may be empty to suggest that the
- // file should not be written to disk. It may be equal to "_CONSOLE" to
- // suggest the data should not be written to disk.
- FileName string
- // ModTime contains the modification time of the file, or the zero time if not applicable.
- ModTime time.Time
-}
-
-// SymmetricallyEncrypt acts like gpg -c: it encrypts a file with a passphrase.
-// The resulting WriteCloser must be closed after the contents of the file have
-// been written.
-// If config is nil, sensible defaults will be used.
-func SymmetricallyEncrypt(ciphertext io.Writer, passphrase []byte, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) {
- if hints == nil {
- hints = &FileHints{}
- }
-
- key, err := packet.SerializeSymmetricKeyEncrypted(ciphertext, passphrase, config)
- if err != nil {
- return
- }
-
- var w io.WriteCloser
- cipherSuite := packet.CipherSuite{
- Cipher: config.Cipher(),
- Mode: config.AEAD().Mode(),
- }
- w, err = packet.SerializeSymmetricallyEncrypted(ciphertext, config.Cipher(), config.AEAD() != nil, cipherSuite, key, config)
- if err != nil {
- return
- }
-
- literalData := w
- if algo := config.Compression(); algo != packet.CompressionNone {
- var compConfig *packet.CompressionConfig
- if config != nil {
- compConfig = config.CompressionConfig
- }
- literalData, err = packet.SerializeCompressed(w, algo, compConfig)
- if err != nil {
- return
- }
- }
-
- var epochSeconds uint32
- if !hints.ModTime.IsZero() {
- epochSeconds = uint32(hints.ModTime.Unix())
- }
- return packet.SerializeLiteral(literalData, hints.IsBinary, hints.FileName, epochSeconds)
-}
-
-// intersectPreferences mutates and returns a prefix of a that contains only
-// the values in the intersection of a and b. The order of a is preserved.
-func intersectPreferences(a []uint8, b []uint8) (intersection []uint8) {
- var j int
- for _, v := range a {
- for _, v2 := range b {
- if v == v2 {
- a[j] = v
- j++
- break
- }
- }
- }
-
- return a[:j]
-}
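
// Editor's note: a tiny illustrative sketch (not part of the vendored file)
// of the in-place intersection above: the result preserves a's order and
// reuses a prefix of a's backing array. Values are arbitrary.
package main

import "fmt"

func intersect(a, b []uint8) []uint8 {
	j := 0
	for _, v := range a {
		for _, v2 := range b {
			if v == v2 {
				a[j] = v // compact matches to the front of a
				j++
				break
			}
		}
	}
	return a[:j]
}

func main() {
	fmt.Println(intersect([]uint8{9, 8, 7}, []uint8{7, 9})) // [9 7]
}
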
-
-// intersectCipherSuites mutates and returns a prefix of a that contains only
-// the cipher suites present in both a and b. The order of a is preserved.
-func intersectCipherSuites(a [][2]uint8, b [][2]uint8) (intersection [][2]uint8) {
- var j int
- for _, v := range a {
- for _, v2 := range b {
- if v[0] == v2[0] && v[1] == v2[1] {
- a[j] = v
- j++
- break
- }
- }
- }
-
- return a[:j]
-}
-
-func hashToHashId(h crypto.Hash) uint8 {
- v, ok := algorithm.HashToHashId(h)
- if !ok {
- panic("tried to convert unknown hash")
- }
- return v
-}
-
-// EncryptText encrypts a message to a number of recipients and, optionally,
-// signs it. Optional information that aids the recipients in processing the
-// message is contained in 'hints' and is also encrypted. The resulting
-// WriteCloser must be closed after the contents of the file have been
-// written. If config is nil, sensible defaults will be used. The signing is
-// done in text mode.
-func EncryptText(ciphertext io.Writer, to []*Entity, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) {
- return encrypt(ciphertext, ciphertext, to, signed, hints, packet.SigTypeText, config)
-}
-
-// Encrypt encrypts a message to a number of recipients and, optionally, signs
-// it. hints contains optional information, also encrypted, that aids
-// the recipients in processing the message. The resulting WriteCloser must
-// be closed after the contents of the file have been written.
-// If config is nil, sensible defaults will be used.
-func Encrypt(ciphertext io.Writer, to []*Entity, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) {
- return encrypt(ciphertext, ciphertext, to, signed, hints, packet.SigTypeBinary, config)
-}
-
-// EncryptSplit encrypts a message to a number of recipients and, optionally, signs
-// it. hints contains optional information, also encrypted, that aids
-// the recipients in processing the message. The resulting WriteCloser must
-// be closed after the contents of the file have been written.
-// If config is nil, sensible defaults will be used.
-func EncryptSplit(keyWriter io.Writer, dataWriter io.Writer, to []*Entity, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) {
- return encrypt(keyWriter, dataWriter, to, signed, hints, packet.SigTypeBinary, config)
-}
-
-// EncryptTextSplit encrypts a message to a number of recipients and, optionally, signs
-// it. hints contains optional information, also encrypted, that aids
-// the recipients in processing the message. The resulting WriteCloser must
-// be closed after the contents of the file have been written.
-// If config is nil, sensible defaults will be used.
-func EncryptTextSplit(keyWriter io.Writer, dataWriter io.Writer, to []*Entity, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) {
- return encrypt(keyWriter, dataWriter, to, signed, hints, packet.SigTypeText, config)
-}
-
-// writeAndSign writes the data as a payload package and, optionally, signs
-// it. hints contains optional information, also encrypted, that
-// aids the recipients in processing the message. The resulting
-// WriteCloser must be closed after the contents of the file have been
-// written. If config is nil, sensible defaults will be used.
-func writeAndSign(payload io.WriteCloser, candidateHashes []uint8, signed *Entity, hints *FileHints, sigType packet.SignatureType, config *packet.Config) (plaintext io.WriteCloser, err error) {
- var signer *packet.PrivateKey
- if signed != nil {
- signKey, ok := signed.SigningKeyById(config.Now(), config.SigningKey())
- if !ok {
- return nil, errors.InvalidArgumentError("no valid signing keys")
- }
- signer = signKey.PrivateKey
- if signer == nil {
- return nil, errors.InvalidArgumentError("no private key in signing key")
- }
- if signer.Encrypted {
- return nil, errors.InvalidArgumentError("signing key must be decrypted")
- }
- }
-
- var hash crypto.Hash
- for _, hashId := range candidateHashes {
- if h, ok := algorithm.HashIdToHash(hashId); ok && h.Available() {
- hash = h
- break
- }
- }
-
- // If the hash specified by config is a candidate, we'll use that.
- if configuredHash := config.Hash(); configuredHash.Available() {
- for _, hashId := range candidateHashes {
- if h, ok := algorithm.HashIdToHash(hashId); ok && h == configuredHash {
- hash = h
- break
- }
- }
- }
-
- if hash == 0 {
- hashId := candidateHashes[0]
- name, ok := algorithm.HashIdToString(hashId)
- if !ok {
- name = "#" + strconv.Itoa(int(hashId))
- }
- return nil, errors.InvalidArgumentError("cannot encrypt because no candidate hash functions are compiled in. (Wanted " + name + " in this case.)")
- }
-
- if signer != nil {
- ops := &packet.OnePassSignature{
- SigType: sigType,
- Hash: hash,
- PubKeyAlgo: signer.PubKeyAlgo,
- KeyId: signer.KeyId,
- IsLast: true,
- }
- if err := ops.Serialize(payload); err != nil {
- return nil, err
- }
- }
-
- if hints == nil {
- hints = &FileHints{}
- }
-
- w := payload
- if signer != nil {
-		// If we need to write a signature packet after the literal
-		// data, then we need to stop literalData from closing
-		// encryptedData.
-		w = noOpCloser{w}
-	}
- var epochSeconds uint32
- if !hints.ModTime.IsZero() {
- epochSeconds = uint32(hints.ModTime.Unix())
- }
- literalData, err := packet.SerializeLiteral(w, hints.IsBinary, hints.FileName, epochSeconds)
- if err != nil {
- return nil, err
- }
-
- if signer != nil {
- h, wrappedHash, err := hashForSignature(hash, sigType)
- if err != nil {
- return nil, err
- }
- metadata := &packet.LiteralData{
- Format: 't',
- FileName: hints.FileName,
- Time: epochSeconds,
- }
- if hints.IsBinary {
- metadata.Format = 'b'
- }
- return signatureWriter{payload, literalData, hash, wrappedHash, h, signer, sigType, config, metadata}, nil
- }
- return literalData, nil
-}
-
-// encrypt encrypts a message to a number of recipients and, optionally, signs
-// it. hints contains optional information, also encrypted, that aids
-// the recipients in processing the message. The resulting WriteCloser must
-// be closed after the contents of the file have been written.
-// If config is nil, sensible defaults will be used.
-func encrypt(keyWriter io.Writer, dataWriter io.Writer, to []*Entity, signed *Entity, hints *FileHints, sigType packet.SignatureType, config *packet.Config) (plaintext io.WriteCloser, err error) {
- if len(to) == 0 {
- return nil, errors.InvalidArgumentError("no encryption recipient provided")
- }
-
- // These are the possible ciphers that we'll use for the message.
- candidateCiphers := []uint8{
- uint8(packet.CipherAES256),
- uint8(packet.CipherAES128),
- }
-
- // These are the possible hash functions that we'll use for the signature.
- candidateHashes := []uint8{
- hashToHashId(crypto.SHA256),
- hashToHashId(crypto.SHA384),
- hashToHashId(crypto.SHA512),
- hashToHashId(crypto.SHA3_256),
- hashToHashId(crypto.SHA3_512),
- }
-
- // Prefer GCM if everyone supports it
- candidateCipherSuites := [][2]uint8{
- {uint8(packet.CipherAES256), uint8(packet.AEADModeGCM)},
- {uint8(packet.CipherAES256), uint8(packet.AEADModeEAX)},
- {uint8(packet.CipherAES256), uint8(packet.AEADModeOCB)},
- {uint8(packet.CipherAES128), uint8(packet.AEADModeGCM)},
- {uint8(packet.CipherAES128), uint8(packet.AEADModeEAX)},
- {uint8(packet.CipherAES128), uint8(packet.AEADModeOCB)},
- }
-
- candidateCompression := []uint8{
- uint8(packet.CompressionNone),
- uint8(packet.CompressionZIP),
- uint8(packet.CompressionZLIB),
- }
-
- encryptKeys := make([]Key, len(to))
-
- // AEAD is used only if config enables it and every key supports it
- aeadSupported := config.AEAD() != nil
-
- for i := range to {
- var ok bool
- encryptKeys[i], ok = to[i].EncryptionKey(config.Now())
- if !ok {
- return nil, errors.InvalidArgumentError("cannot encrypt a message to key id " + strconv.FormatUint(to[i].PrimaryKey.KeyId, 16) + " because it has no valid encryption keys")
- }
-
- sig := to[i].PrimaryIdentity().SelfSignature
-		if !sig.SEIPDv2 {
- aeadSupported = false
- }
-
- candidateCiphers = intersectPreferences(candidateCiphers, sig.PreferredSymmetric)
- candidateHashes = intersectPreferences(candidateHashes, sig.PreferredHash)
- candidateCipherSuites = intersectCipherSuites(candidateCipherSuites, sig.PreferredCipherSuites)
- candidateCompression = intersectPreferences(candidateCompression, sig.PreferredCompression)
- }
-
-	// If the intersection of supported algorithms is empty, fall back to
-	// the ones labelled as MUST, which every implementation supports.
- if len(candidateCiphers) == 0 {
- // https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#section-9.3
- candidateCiphers = []uint8{uint8(packet.CipherAES128)}
- }
- if len(candidateHashes) == 0 {
- // https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#hash-algos
- candidateHashes = []uint8{hashToHashId(crypto.SHA256)}
- }
- if len(candidateCipherSuites) == 0 {
- // https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#section-9.6
- candidateCipherSuites = [][2]uint8{{uint8(packet.CipherAES128), uint8(packet.AEADModeOCB)}}
- }
-
- cipher := packet.CipherFunction(candidateCiphers[0])
- aeadCipherSuite := packet.CipherSuite{
- Cipher: packet.CipherFunction(candidateCipherSuites[0][0]),
- Mode: packet.AEADMode(candidateCipherSuites[0][1]),
- }
-
- // If the cipher specified by config is a candidate, we'll use that.
- configuredCipher := config.Cipher()
- for _, c := range candidateCiphers {
- cipherFunc := packet.CipherFunction(c)
- if cipherFunc == configuredCipher {
- cipher = cipherFunc
- break
- }
- }
-
- symKey := make([]byte, cipher.KeySize())
- if _, err := io.ReadFull(config.Random(), symKey); err != nil {
- return nil, err
- }
-
- for _, key := range encryptKeys {
- if err := packet.SerializeEncryptedKey(keyWriter, key.PublicKey, cipher, symKey, config); err != nil {
- return nil, err
- }
- }
-
- var payload io.WriteCloser
- payload, err = packet.SerializeSymmetricallyEncrypted(dataWriter, cipher, aeadSupported, aeadCipherSuite, symKey, config)
- if err != nil {
- return
- }
-
- payload, err = handleCompression(payload, candidateCompression, config)
- if err != nil {
- return nil, err
- }
-
- return writeAndSign(payload, candidateHashes, signed, hints, sigType, config)
-}
-
-// Sign signs a message. The resulting WriteCloser must be closed after the
-// contents of the file have been written. hints contains optional information
-// that aids the recipients in processing the message.
-// If config is nil, sensible defaults will be used.
-func Sign(output io.Writer, signed *Entity, hints *FileHints, config *packet.Config) (input io.WriteCloser, err error) {
- if signed == nil {
- return nil, errors.InvalidArgumentError("no signer provided")
- }
-
- // These are the possible hash functions that we'll use for the signature.
- candidateHashes := []uint8{
- hashToHashId(crypto.SHA256),
- hashToHashId(crypto.SHA384),
- hashToHashId(crypto.SHA512),
- hashToHashId(crypto.SHA3_256),
- hashToHashId(crypto.SHA3_512),
- }
- defaultHashes := candidateHashes[0:1]
- preferredHashes := signed.PrimaryIdentity().SelfSignature.PreferredHash
- if len(preferredHashes) == 0 {
- preferredHashes = defaultHashes
- }
- candidateHashes = intersectPreferences(candidateHashes, preferredHashes)
- if len(candidateHashes) == 0 {
- return nil, errors.InvalidArgumentError("cannot sign because signing key shares no common algorithms with candidate hashes")
- }
-
- return writeAndSign(noOpCloser{output}, candidateHashes, signed, hints, packet.SigTypeBinary, config)
-}
-
-// signatureWriter hashes the contents of a message while passing it along to
-// literalData. When closed, it closes literalData, writes a signature packet
-// to encryptedData and then also closes encryptedData.
-type signatureWriter struct {
- encryptedData io.WriteCloser
- literalData io.WriteCloser
- hashType crypto.Hash
- wrappedHash hash.Hash
- h hash.Hash
- signer *packet.PrivateKey
- sigType packet.SignatureType
- config *packet.Config
- metadata *packet.LiteralData // V5 signatures protect document metadata
-}
-
-func (s signatureWriter) Write(data []byte) (int, error) {
- s.wrappedHash.Write(data)
- switch s.sigType {
- case packet.SigTypeBinary:
- return s.literalData.Write(data)
- case packet.SigTypeText:
- flag := 0
- return writeCanonical(s.literalData, data, &flag)
- }
- return 0, errors.UnsupportedError("unsupported signature type: " + strconv.Itoa(int(s.sigType)))
-}
-
-func (s signatureWriter) Close() error {
- sig := createSignaturePacket(&s.signer.PublicKey, s.sigType, s.config)
- sig.Hash = s.hashType
- sig.Metadata = s.metadata
-
- if err := sig.Sign(s.h, s.signer, s.config); err != nil {
- return err
- }
- if err := s.literalData.Close(); err != nil {
- return err
- }
- if err := sig.Serialize(s.encryptedData); err != nil {
- return err
- }
- return s.encryptedData.Close()
-}
-
-func createSignaturePacket(signer *packet.PublicKey, sigType packet.SignatureType, config *packet.Config) *packet.Signature {
- sigLifetimeSecs := config.SigLifetime()
- return &packet.Signature{
- Version: signer.Version,
- SigType: sigType,
- PubKeyAlgo: signer.PubKeyAlgo,
- Hash: config.Hash(),
- CreationTime: config.Now(),
- IssuerKeyId: &signer.KeyId,
- IssuerFingerprint: signer.Fingerprint,
- Notations: config.Notations(),
- SigLifetimeSecs: &sigLifetimeSecs,
- }
-}
-
-// noOpCloser is like an ioutil.NopCloser, but for an io.Writer.
-// TODO: we have two of these in OpenPGP packages alone. This probably needs
-// to be promoted somewhere more common.
-type noOpCloser struct {
- w io.Writer
-}
-
-func (c noOpCloser) Write(data []byte) (n int, err error) {
- return c.w.Write(data)
-}
-
-func (c noOpCloser) Close() error {
- return nil
-}
-
-func handleCompression(compressed io.WriteCloser, candidateCompression []uint8, config *packet.Config) (data io.WriteCloser, err error) {
- data = compressed
- confAlgo := config.Compression()
- if confAlgo == packet.CompressionNone {
- return
- }
-
-	// Fall back to the algorithm labelled as MUST:
-	// https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#section-9.4
-	finalAlgo := packet.CompressionNone
-	// If the compression algorithm specified by config is a candidate, use it.
- for _, c := range candidateCompression {
- if uint8(confAlgo) == c {
- finalAlgo = confAlgo
- break
- }
- }
-
- if finalAlgo != packet.CompressionNone {
- var compConfig *packet.CompressionConfig
- if config != nil {
- compConfig = config.CompressionConfig
- }
- data, err = packet.SerializeCompressed(compressed, finalAlgo, compConfig)
- if err != nil {
- return
- }
- }
- return data, nil
-}
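
// Editor's note: a hedged end-to-end sketch (not part of the diff) for the
// high-level write.go API removed above, as it existed before this change.
// NewEntity is assumed from the same openpgp package; the identity and
// message are illustrative. The key point is the WriteCloser contract the
// doc comments describe: the message is only finalized on Close.
package main

import (
	"bytes"
	"fmt"

	"github.com/ProtonMail/go-crypto/openpgp"
)

func main() {
	// Generate a throwaway recipient key with default (nil) config.
	recipient, err := openpgp.NewEntity("Test", "", "test@example.com", nil)
	if err != nil {
		panic(err)
	}

	var buf bytes.Buffer
	// nil signer, hints, and config: an unsigned message with defaults.
	w, err := openpgp.Encrypt(&buf, []*openpgp.Entity{recipient}, nil, nil, nil)
	if err != nil {
		panic(err)
	}
	if _, err := w.Write([]byte("hello")); err != nil {
		panic(err)
	}
	// Close flushes the literal-data and encryption packets.
	if err := w.Close(); err != nil {
		panic(err)
	}
	fmt.Printf("%d bytes of OpenPGP ciphertext\n", buf.Len())
}
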
diff --git a/vendor/github.com/bradleyfalzon/ghinstallation/v2/.golangci.yml b/vendor/github.com/bradleyfalzon/ghinstallation/v2/.golangci.yml
new file mode 100644
index 00000000..44bb8765
--- /dev/null
+++ b/vendor/github.com/bradleyfalzon/ghinstallation/v2/.golangci.yml
@@ -0,0 +1,37 @@
+version: "2"
+linters:
+ default: none
+ enable:
+ - errcheck
+ - gocritic
+ - gocyclo
+ - gosec
+ - govet
+ - ineffassign
+ - misspell
+ - promlinter
+ - revive
+ - staticcheck
+ - unconvert
+ - unused
+ exclusions:
+ generated: lax
+ presets:
+ - comments
+ - common-false-positives
+ - legacy
+ - std-error-handling
+ paths:
+ - third_party$
+ - builtin$
+ - examples$
+formatters:
+ enable:
+ - gofmt
+ - goimports
+ exclusions:
+ generated: lax
+ paths:
+ - third_party$
+ - builtin$
+ - examples$
diff --git a/vendor/github.com/bradleyfalzon/ghinstallation/v2/AUTHORS b/vendor/github.com/bradleyfalzon/ghinstallation/v2/AUTHORS
new file mode 100644
index 00000000..88ca0ddd
--- /dev/null
+++ b/vendor/github.com/bradleyfalzon/ghinstallation/v2/AUTHORS
@@ -0,0 +1,6 @@
+Billy Lynch
+Bradley Falzon
+Philippe Modard
+Ricardo Chimal, Jr
+Tatsuya Kamohara <17017563+kamontia@users.noreply.github.com>
+rob boll
diff --git a/vendor/github.com/bradleyfalzon/ghinstallation/v2/LICENSE b/vendor/github.com/bradleyfalzon/ghinstallation/v2/LICENSE
new file mode 100644
index 00000000..1c508b07
--- /dev/null
+++ b/vendor/github.com/bradleyfalzon/ghinstallation/v2/LICENSE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2019 ghinstallation AUTHORS
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/bradleyfalzon/ghinstallation/v2/README.md b/vendor/github.com/bradleyfalzon/ghinstallation/v2/README.md
new file mode 100644
index 00000000..cf5ea50b
--- /dev/null
+++ b/vendor/github.com/bradleyfalzon/ghinstallation/v2/README.md
@@ -0,0 +1,110 @@
+# ghinstallation
+
+[GoDoc](https://godoc.org/github.com/bradleyfalzon/ghinstallation/v2)
+
+`ghinstallation` provides `Transport`, an `http.RoundTripper` implementation
+that authenticates requests as a GitHub App installation.
+
+This library is designed to provide automatic authentication for
+https://github.com/google/go-github or your own HTTP client.
+
+See
+https://developer.github.com/apps/building-integrations/setting-up-and-registering-github-apps/about-authentication-options-for-github-apps/
+
+# Installation
+
+Get the package:
+
+```bash
+GO111MODULE=on go get -u github.com/bradleyfalzon/ghinstallation/v2
+```
+
+# GitHub Example
+
+```go
+import (
+	"log"
+	"net/http"
+
+	"github.com/bradleyfalzon/ghinstallation/v2"
+	"github.com/google/go-github/v72/github"
+)
+
+func main() {
+	// Shared transport to reuse TCP connections.
+	tr := http.DefaultTransport
+
+	// Wrap the shared transport for use with app ID 1, authenticating as installation ID 99.
+	itr, err := ghinstallation.NewKeyFromFile(tr, 1, 99, "2016-10-19.private-key.pem")
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Use the installation transport with github.com/google/go-github.
+	client := github.NewClient(&http.Client{Transport: itr})
+}
+```
+
+You can also use [`New()`](https://pkg.go.dev/github.com/bradleyfalzon/ghinstallation/v2#New) to load a key directly from a `[]byte`.
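+
+For instance, a minimal sketch (where `privateKeyBytes` is a placeholder for
+the PEM-encoded key, e.g. loaded from a secret store rather than from disk):
+
+```go
+itr, err := ghinstallation.New(http.DefaultTransport, 1, 99, privateKeyBytes)
+if err != nil {
+	log.Fatal(err)
+}
+client := github.NewClient(&http.Client{Transport: itr})
+```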
+
+# GitHub Enterprise Example
+
+For clients using GitHub Enterprise, set the base URL as follows:
+
+```go
+import (
+	"log"
+	"net/http"
+
+	"github.com/bradleyfalzon/ghinstallation/v2"
+	"github.com/google/go-github/v72/github"
+)
+
+const GitHubEnterpriseURL = "https://github.example.com/api/v3"
+
+func main() {
+	// Shared transport to reuse TCP connections.
+	tr := http.DefaultTransport
+
+	// Wrap the shared transport for use with app ID 1, authenticating as installation ID 99.
+	itr, err := ghinstallation.NewKeyFromFile(tr, 1, 99, "2016-10-19.private-key.pem")
+	if err != nil {
+		log.Fatal(err)
+	}
+	itr.BaseURL = GitHubEnterpriseURL
+
+	// Use the installation transport with github.com/google/go-github.
+	client, err := github.NewEnterpriseClient(GitHubEnterpriseURL, GitHubEnterpriseURL, &http.Client{Transport: itr})
+	if err != nil {
+		log.Fatal(err)
+	}
+}
+```
+
+## What are the app ID and installation ID?
+
+`app ID` is the GitHub App ID. \
+You can find it under: \
+Settings > Developer settings > GitHub Apps > (your app) > About
+
+`installation ID` is part of each webhook request GitHub sends to your app. \
+You can find the number by inspecting a delivered request under: \
+Settings > Developer settings > GitHub Apps > Advanced > Payload in the Request
+tab
+
+```
+WebHook request
+...
+  "installation": {
+    "id": `installation ID`
+  }
+```
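+
+If your application receives webhooks, a hypothetical helper along these lines
+(a sketch; `installationIDFromWebhook` is not part of this library, and `body`
+is assumed to hold the raw JSON request body) can extract the ID:
+
+```go
+import "encoding/json"
+
+func installationIDFromWebhook(body []byte) (int64, error) {
+	var event struct {
+		Installation struct {
+			ID int64 `json:"id"`
+		} `json:"installation"`
+	}
+	if err := json.Unmarshal(body, &event); err != nil {
+		return 0, err
+	}
+	return event.Installation.ID, nil
+}
+```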
+
+# Customizing signing behavior
+
+Users can customize signing behavior by passing in a
+[Signer](https://pkg.go.dev/github.com/bradleyfalzon/ghinstallation/v2#Signer)
+implementation when creating an
+[AppsTransport](https://pkg.go.dev/github.com/bradleyfalzon/ghinstallation/v2#AppsTransport).
+For example, this can be used to create tokens backed by keys in a KMS system.
+
+```go
+signer := &myCustomSigner{
+	key: "https://url/to/key/vault",
+}
+atr, err := NewAppsTransportWithOptions(http.DefaultTransport, 1, WithSigner(signer))
+if err != nil {
+	log.Fatal(err)
+}
+tr := NewFromAppsTransport(atr, 99)
+```
+
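+`myCustomSigner` above is not defined by this library. A minimal sketch of a
+conforming `Signer` (signing locally with an RSA key; a real KMS-backed signer
+would call out to the KMS service instead):
+
+```go
+import (
+	"crypto/rsa"
+
+	jwt "github.com/golang-jwt/jwt/v4"
+)
+
+type myCustomSigner struct {
+	key *rsa.PrivateKey // a KMS-backed signer would hold a key reference/URL instead
+}
+
+// Sign implements the Signer interface by signing the claims with the RSA key.
+func (s *myCustomSigner) Sign(claims jwt.Claims) (string, error) {
+	return jwt.NewWithClaims(jwt.SigningMethodRS256, claims).SignedString(s.key)
+}
+```
+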
+# License
+
+[Apache 2.0](LICENSE)
+
+# Dependencies
+
+- [github.com/golang-jwt/jwt](https://github.com/golang-jwt/jwt)
diff --git a/vendor/github.com/bradleyfalzon/ghinstallation/v2/appsTransport.go b/vendor/github.com/bradleyfalzon/ghinstallation/v2/appsTransport.go
new file mode 100644
index 00000000..ada64bcf
--- /dev/null
+++ b/vendor/github.com/bradleyfalzon/ghinstallation/v2/appsTransport.go
@@ -0,0 +1,121 @@
+package ghinstallation
+
+import (
+ "crypto/rsa"
+ "errors"
+ "fmt"
+ "net/http"
+ "os"
+ "strconv"
+ "time"
+
+ jwt "github.com/golang-jwt/jwt/v4"
+)
+
+// AppsTransport provides an http.RoundTripper by wrapping an existing
+// http.RoundTripper and providing GitHub Apps authentication as a
+// GitHub App.
+//
+// Client can also be overwritten; this is useful for swapping in a client
+// that provides retry logic if you experience retryable errors.
+//
+// See https://developer.github.com/apps/building-integrations/setting-up-and-registering-github-apps/about-authentication-options-for-github-apps/
+type AppsTransport struct {
+ BaseURL string // BaseURL is the scheme and host for GitHub API, defaults to https://api.github.com
+ Client Client // Client to use to refresh tokens, defaults to http.Client with provided transport
+ tr http.RoundTripper // tr is the underlying roundtripper being wrapped
+ signer Signer // signer signs JWT tokens.
+ appID int64 // appID is the GitHub App's ID
+}
+
+// NewAppsTransportKeyFromFile returns an AppsTransport using a private key from file.
+func NewAppsTransportKeyFromFile(tr http.RoundTripper, appID int64, privateKeyFile string) (*AppsTransport, error) {
+ privateKey, err := os.ReadFile(privateKeyFile)
+ if err != nil {
+ return nil, fmt.Errorf("could not read private key: %s", err)
+ }
+ return NewAppsTransport(tr, appID, privateKey)
+}
+
+// NewAppsTransport returns an AppsTransport using a private key. The key is parsed;
+// if parsing fails, the returned error is non-nil.
+//
+// The provided tr http.RoundTripper should be shared between multiple
+// installations to ensure reuse of underlying TCP connections.
+//
+// The returned Transport's RoundTrip method is safe to be used concurrently.
+func NewAppsTransport(tr http.RoundTripper, appID int64, privateKey []byte) (*AppsTransport, error) {
+ key, err := jwt.ParseRSAPrivateKeyFromPEM(privateKey)
+ if err != nil {
+ return nil, fmt.Errorf("could not parse private key: %s", err)
+ }
+ return NewAppsTransportFromPrivateKey(tr, appID, key), nil
+}
+
+// NewAppsTransportFromPrivateKey returns an AppsTransport using a crypto/rsa.(*PrivateKey).
+func NewAppsTransportFromPrivateKey(tr http.RoundTripper, appID int64, key *rsa.PrivateKey) *AppsTransport {
+ return &AppsTransport{
+ BaseURL: apiBaseURL,
+ Client: &http.Client{Transport: tr},
+ tr: tr,
+ signer: NewRSASigner(jwt.SigningMethodRS256, key),
+ appID: appID,
+ }
+}
+
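+// NewAppsTransportWithOptions returns an AppsTransport configured with the
+// supplied options. A Signer must be provided (for example via WithSigner);
+// otherwise an error is returned.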
+func NewAppsTransportWithOptions(tr http.RoundTripper, appID int64, opts ...AppsTransportOption) (*AppsTransport, error) {
+ t := &AppsTransport{
+ BaseURL: apiBaseURL,
+ Client: &http.Client{Transport: tr},
+ tr: tr,
+ appID: appID,
+ }
+ for _, fn := range opts {
+ fn(t)
+ }
+
+ if t.signer == nil {
+ return nil, errors.New("no signer provided")
+ }
+
+ return t, nil
+}
+
+// RoundTrip implements http.RoundTripper interface.
+func (t *AppsTransport) RoundTrip(req *http.Request) (*http.Response, error) {
+ // GitHub rejects expiry and issue timestamps that are not an integer,
+ // while the jwt-go library serializes to fractional timestamps.
+ // Truncate them before passing to jwt-go.
+ iss := time.Now().Add(-30 * time.Second).Truncate(time.Second)
+ exp := iss.Add(2 * time.Minute)
+ claims := &jwt.RegisteredClaims{
+ IssuedAt: jwt.NewNumericDate(iss),
+ ExpiresAt: jwt.NewNumericDate(exp),
+ Issuer: strconv.FormatInt(t.appID, 10),
+ }
+
+ ss, err := t.signer.Sign(claims)
+ if err != nil {
+ return nil, fmt.Errorf("could not sign jwt: %s", err)
+ }
+
+ req.Header.Set("Authorization", "Bearer "+ss)
+ req.Header.Add("Accept", acceptHeader)
+
+ resp, err := t.tr.RoundTrip(req)
+ return resp, err
+}
+
+// AppID returns the appID of the transport
+func (t *AppsTransport) AppID() int64 {
+ return t.appID
+}
+
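+// AppsTransportOption is a functional option for configuring an AppsTransport.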
+type AppsTransportOption func(*AppsTransport)
+
+// WithSigner configures the AppsTransport to use the given Signer for generating JWT tokens.
+func WithSigner(signer Signer) AppsTransportOption {
+ return func(at *AppsTransport) {
+ at.signer = signer
+ }
+}
diff --git a/vendor/github.com/bradleyfalzon/ghinstallation/v2/sign.go b/vendor/github.com/bradleyfalzon/ghinstallation/v2/sign.go
new file mode 100644
index 00000000..928e10ef
--- /dev/null
+++ b/vendor/github.com/bradleyfalzon/ghinstallation/v2/sign.go
@@ -0,0 +1,33 @@
+package ghinstallation
+
+import (
+ "crypto/rsa"
+
+ jwt "github.com/golang-jwt/jwt/v4"
+)
+
+// Signer is a JWT token signer. This is a wrapper around [jwt.SigningMethod] with predetermined
+// key material.
+type Signer interface {
+ // Sign signs the given claims and returns a JWT token string, as specified
+ // by [jwt.Token.SignedString]
+ Sign(claims jwt.Claims) (string, error)
+}
+
+// RSASigner signs JWT tokens using RSA keys.
+type RSASigner struct {
+ method *jwt.SigningMethodRSA
+ key *rsa.PrivateKey
+}
+
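+// NewRSASigner returns an RSASigner that signs JWT claims with the given RSA
+// key using the given signing method.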
+func NewRSASigner(method *jwt.SigningMethodRSA, key *rsa.PrivateKey) *RSASigner {
+ return &RSASigner{
+ method: method,
+ key: key,
+ }
+}
+
+// Sign signs the JWT claims with the RSA key.
+func (s *RSASigner) Sign(claims jwt.Claims) (string, error) {
+ return jwt.NewWithClaims(s.method, claims).SignedString(s.key)
+}
diff --git a/vendor/github.com/bradleyfalzon/ghinstallation/v2/transport.go b/vendor/github.com/bradleyfalzon/ghinstallation/v2/transport.go
new file mode 100644
index 00000000..7794dd9b
--- /dev/null
+++ b/vendor/github.com/bradleyfalzon/ghinstallation/v2/transport.go
@@ -0,0 +1,276 @@
+package ghinstallation
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "os"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/google/go-github/v72/github"
+)
+
+const (
+ acceptHeader = "application/vnd.github.v3+json"
+ apiBaseURL = "https://api.github.com"
+)
+
+// Transport provides an http.RoundTripper by wrapping an existing
+// http.RoundTripper and providing GitHub Apps authentication as an
+// installation.
+//
+// Client can also be overwritten; this is useful for swapping in a client
+// that provides retry logic if you experience retryable errors.
+//
+// See https://developer.github.com/apps/building-integrations/setting-up-and-registering-github-apps/about-authentication-options-for-github-apps/
+type Transport struct {
+ BaseURL string // BaseURL is the scheme and host for GitHub API, defaults to https://api.github.com
+ Client Client // Client to use to refresh tokens, defaults to http.Client with provided transport
+ tr http.RoundTripper // tr is the underlying roundtripper being wrapped
+ appID int64 // appID is the GitHub App's ID
+ installationID int64 // installationID is the GitHub App Installation ID
+ InstallationTokenOptions *github.InstallationTokenOptions // parameters restrict a token's access
+ appsTransport *AppsTransport
+
+ mu *sync.Mutex // mu protects token
+ token *accessToken // token is the installation's access token
+}
+
+// accessToken is an installation access token response from GitHub
+type accessToken struct {
+ Token string `json:"token"`
+ ExpiresAt time.Time `json:"expires_at"`
+ Permissions github.InstallationPermissions `json:"permissions,omitempty"`
+ Repositories []github.Repository `json:"repositories,omitempty"`
+}
+
+// HTTPError represents a custom error for failing HTTP operations.
+// An example in our use case: the refresh access token operation.
+// It enables the caller to inspect the root cause and response.
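+//
+// Callers can recover an *HTTPError from a wrapped error chain with errors.As:
+//
+//	var httpErr *HTTPError
+//	if errors.As(err, &httpErr) {
+//		// inspect httpErr.Response or httpErr.RootCause
+//	}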
+type HTTPError struct {
+ Message string
+ RootCause error
+ InstallationID int64
+ Response *http.Response
+}
+
+func (e *HTTPError) Error() string {
+ return e.Message
+}
+
+// Unwrap implements the standard library's error wrapping. It unwraps to the root cause.
+func (e *HTTPError) Unwrap() error {
+ return e.RootCause
+}
+
+var _ http.RoundTripper = &Transport{}
+
+// NewKeyFromFile returns a Transport using a private key from file.
+func NewKeyFromFile(tr http.RoundTripper, appID, installationID int64, privateKeyFile string) (*Transport, error) {
+ privateKey, err := os.ReadFile(privateKeyFile)
+ if err != nil {
+ return nil, fmt.Errorf("could not read private key: %s", err)
+ }
+ return New(tr, appID, installationID, privateKey)
+}
+
+// Client is an HTTP client which sends an http.Request and returns an
+// http.Response or an error.
+type Client interface {
+ Do(*http.Request) (*http.Response, error)
+}
+
+// New returns a Transport using a private key. The key is parsed;
+// if parsing fails, the returned error is non-nil.
+//
+// The provided tr http.RoundTripper should be shared between multiple
+// installations to ensure reuse of underlying TCP connections.
+//
+// The returned Transport's RoundTrip method is safe to be used concurrently.
+func New(tr http.RoundTripper, appID, installationID int64, privateKey []byte) (*Transport, error) {
+ atr, err := NewAppsTransport(tr, appID, privateKey)
+ if err != nil {
+ return nil, err
+ }
+
+ return NewFromAppsTransport(atr, installationID), nil
+}
+
+// NewFromAppsTransport returns a Transport using an existing *AppsTransport.
+func NewFromAppsTransport(atr *AppsTransport, installationID int64) *Transport {
+ return &Transport{
+ BaseURL: atr.BaseURL,
+ Client: &http.Client{Transport: atr.tr},
+ tr: atr.tr,
+ appID: atr.appID,
+ installationID: installationID,
+ appsTransport: atr,
+ mu: &sync.Mutex{},
+ }
+}
+
+// RoundTrip implements http.RoundTripper interface.
+func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
+ reqBodyClosed := false
+ if req.Body != nil {
+ defer func() {
+ if !reqBodyClosed {
+ req.Body.Close()
+ }
+ }()
+ }
+
+ token, err := t.Token(req.Context())
+ if err != nil {
+ return nil, err
+ }
+
+ creq := cloneRequest(req) // per RoundTripper contract
+ creq.Header.Set("Authorization", "token "+token)
+
+ if creq.Header.Get("Accept") == "" { // We only add an "Accept" header to avoid overwriting the expected behavior.
+ creq.Header.Add("Accept", acceptHeader)
+ }
+ reqBodyClosed = true // req.Body is assumed to be closed by the tr RoundTripper.
+ resp, err := t.tr.RoundTrip(creq)
+ return resp, err
+}
+
+func (at *accessToken) getRefreshTime() time.Time {
+ return at.ExpiresAt.Add(-time.Minute)
+}
+
+func (at *accessToken) isExpired() bool {
+ return at == nil || at.getRefreshTime().Before(time.Now())
+}
+
+// Token checks the active token's expiration and renews it if necessary, returning
+// a valid access token. If renewal fails, an error is returned.
+func (t *Transport) Token(ctx context.Context) (string, error) {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+ if t.token.isExpired() {
+ // Token is not set or expired/nearly expired, so refresh
+ if err := t.refreshToken(ctx); err != nil {
+ return "", fmt.Errorf("could not refresh installation id %v's token: %w", t.installationID, err)
+ }
+ }
+
+ return t.token.Token, nil
+}
+
+// Permissions returns a transport token's GitHub installation permissions.
+func (t *Transport) Permissions() (github.InstallationPermissions, error) {
+ if t.token == nil {
+ return github.InstallationPermissions{}, fmt.Errorf("Permissions() = nil, err: nil token")
+ }
+ return t.token.Permissions, nil
+}
+
+// Repositories returns a transport token's GitHub repositories.
+func (t *Transport) Repositories() ([]github.Repository, error) {
+ if t.token == nil {
+ return nil, fmt.Errorf("Repositories() = nil, err: nil token")
+ }
+ return t.token.Repositories, nil
+}
+
+// Expiry returns a transport token's expiration time and refresh time. There is a small grace period
+// built in where a token will be refreshed before it expires. expiresAt is the actual token expiry,
+// and refreshAt is when a call to Token() will cause it to be refreshed.
+func (t *Transport) Expiry() (expiresAt time.Time, refreshAt time.Time, err error) {
+ if t.token == nil {
+ return time.Time{}, time.Time{}, errors.New("Expiry() = unknown, err: nil token")
+ }
+ return t.token.ExpiresAt, t.token.getRefreshTime(), nil
+}
+
+// AppID returns the app ID associated with the transport
+func (t *Transport) AppID() int64 {
+ return t.appID
+}
+
+// InstallationID returns the installation ID associated with the transport
+func (t *Transport) InstallationID() int64 {
+ return t.installationID
+}
+
+func (t *Transport) refreshToken(ctx context.Context) error {
+ // Convert InstallationTokenOptions into a ReadWriter to pass as an argument to http.NewRequest.
+ body, err := GetReadWriter(t.InstallationTokenOptions)
+ if err != nil {
+ return fmt.Errorf("could not convert installation token parameters into json: %s", err)
+ }
+
+ requestURL := fmt.Sprintf("%s/app/installations/%v/access_tokens", strings.TrimRight(t.BaseURL, "/"), t.installationID)
+ req, err := http.NewRequest("POST", requestURL, body)
+ if err != nil {
+ return fmt.Errorf("could not create request: %s", err)
+ }
+
+ // Set Content and Accept headers.
+ if body != nil {
+ req.Header.Set("Content-Type", "application/json")
+ }
+ req.Header.Set("Accept", acceptHeader)
+
+ if ctx != nil {
+ req = req.WithContext(ctx)
+ }
+
+ t.appsTransport.BaseURL = t.BaseURL
+ t.appsTransport.Client = t.Client
+ resp, err := t.appsTransport.RoundTrip(req)
+ e := &HTTPError{
+ RootCause: err,
+ InstallationID: t.installationID,
+ Response: resp,
+ }
+ if err != nil {
+ e.Message = fmt.Sprintf("could not get access_tokens from GitHub API for installation ID %v: %v", t.installationID, err)
+ return e
+ }
+
+ if resp.StatusCode/100 != 2 {
+ e.Message = fmt.Sprintf("received non 2xx response status %q when fetching %v", resp.Status, req.URL)
+ return e
+ }
+ // Close the body late, to give the caller a chance to inspect it in an error / non-2xx response situation
+ defer resp.Body.Close()
+
+ return json.NewDecoder(resp.Body).Decode(&t.token)
+}
+
+// GetReadWriter converts a body interface into an io.ReadWriter object.
+func GetReadWriter(i interface{}) (io.ReadWriter, error) {
+ var buf io.ReadWriter
+ if i != nil {
+ buf = new(bytes.Buffer)
+ enc := json.NewEncoder(buf)
+ err := enc.Encode(i)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return buf, nil
+}
+
+// cloneRequest returns a clone of the provided *http.Request.
+// The clone is a shallow copy of the struct and its Header map.
+func cloneRequest(r *http.Request) *http.Request {
+ // shallow copy of the struct
+ r2 := new(http.Request)
+ *r2 = *r
+ // deep copy of the Header
+ r2.Header = make(http.Header, len(r.Header))
+ for k, s := range r.Header {
+ r2.Header[k] = append([]string(nil), s...)
+ }
+ return r2
+}
diff --git a/vendor/github.com/cespare/xxhash/v2/README.md b/vendor/github.com/cespare/xxhash/v2/README.md
index 8bf0e5b7..33c88305 100644
--- a/vendor/github.com/cespare/xxhash/v2/README.md
+++ b/vendor/github.com/cespare/xxhash/v2/README.md
@@ -70,3 +70,5 @@ benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$')
- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics)
- [FreeCache](https://github.com/coocood/freecache)
- [FastCache](https://github.com/VictoriaMetrics/fastcache)
+- [Ristretto](https://github.com/dgraph-io/ristretto)
+- [Badger](https://github.com/dgraph-io/badger)
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash.go b/vendor/github.com/cespare/xxhash/v2/xxhash.go
index a9e0d45c..78bddf1c 100644
--- a/vendor/github.com/cespare/xxhash/v2/xxhash.go
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash.go
@@ -19,10 +19,13 @@ const (
// Store the primes in an array as well.
//
// The consts are used when possible in Go code to avoid MOVs but we need a
-// contiguous array of the assembly code.
+// contiguous array for the assembly code.
var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5}
// Digest implements hash.Hash64.
+//
+// Note that a zero-valued Digest is not ready to receive writes.
+// Call Reset or create a Digest using New before calling other methods.
type Digest struct {
v1 uint64
v2 uint64
@@ -33,19 +36,31 @@ type Digest struct {
n int // how much of mem is used
}
-// New creates a new Digest that computes the 64-bit xxHash algorithm.
+// New creates a new Digest with a zero seed.
func New() *Digest {
+ return NewWithSeed(0)
+}
+
+// NewWithSeed creates a new Digest with the given seed.
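+//
+// A usage sketch (data being the []byte to hash):
+//
+//	d := NewWithSeed(42)
+//	d.Write(data)
+//	sum := d.Sum64()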
+func NewWithSeed(seed uint64) *Digest {
var d Digest
- d.Reset()
+ d.ResetWithSeed(seed)
return &d
}
// Reset clears the Digest's state so that it can be reused.
+// It uses a seed value of zero.
func (d *Digest) Reset() {
- d.v1 = primes[0] + prime2
- d.v2 = prime2
- d.v3 = 0
- d.v4 = -primes[0]
+ d.ResetWithSeed(0)
+}
+
+// ResetWithSeed clears the Digest's state so that it can be reused.
+// It uses the given seed to initialize the state.
+func (d *Digest) ResetWithSeed(seed uint64) {
+ d.v1 = seed + prime1 + prime2
+ d.v2 = seed + prime2
+ d.v3 = seed
+ d.v4 = seed - prime1
d.total = 0
d.n = 0
}
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go
index 9216e0a4..78f95f25 100644
--- a/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go
@@ -6,7 +6,7 @@
package xxhash
-// Sum64 computes the 64-bit xxHash digest of b.
+// Sum64 computes the 64-bit xxHash digest of b with a zero seed.
//
//go:noescape
func Sum64(b []byte) uint64
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go
index 26df13bb..118e49e8 100644
--- a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go
@@ -3,7 +3,7 @@
package xxhash
-// Sum64 computes the 64-bit xxHash digest of b.
+// Sum64 computes the 64-bit xxHash digest of b with a zero seed.
func Sum64(b []byte) uint64 {
// A simpler version would be
// d := New()
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
index e86f1b5f..05f5e7df 100644
--- a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
@@ -5,7 +5,7 @@
package xxhash
-// Sum64String computes the 64-bit xxHash digest of s.
+// Sum64String computes the 64-bit xxHash digest of s with a zero seed.
func Sum64String(s string) uint64 {
return Sum64([]byte(s))
}
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
index 1c1638fd..cf9d42ae 100644
--- a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
@@ -33,7 +33,7 @@ import (
//
// See https://github.com/golang/go/issues/42739 for discussion.
-// Sum64String computes the 64-bit xxHash digest of s.
+// Sum64String computes the 64-bit xxHash digest of s with a zero seed.
// It may be faster than Sum64([]byte(s)) by avoiding a copy.
func Sum64String(s string) uint64 {
b := *(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)}))
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE b/vendor/github.com/cloudbase/garm-provider-common/LICENSE
similarity index 99%
rename from vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE
rename to vendor/github.com/cloudbase/garm-provider-common/LICENSE
index 8dada3ed..56ceea9b 100644
--- a/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE
+++ b/vendor/github.com/cloudbase/garm-provider-common/LICENSE
@@ -178,7 +178,7 @@
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "{}"
+ boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
@@ -186,7 +186,7 @@
same "printed page" as the copyright notice for easier
identification within third-party archives.
- Copyright {yyyy} {name of copyright owner}
+ Copyright 2023 Cloudbase Solutions SRL
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/vendor/github.com/cloudbase/garm-provider-common/cloudconfig/cloudconfig.go b/vendor/github.com/cloudbase/garm-provider-common/cloudconfig/cloudconfig.go
deleted file mode 100644
index fe468ec6..00000000
--- a/vendor/github.com/cloudbase/garm-provider-common/cloudconfig/cloudconfig.go
+++ /dev/null
@@ -1,182 +0,0 @@
-// Copyright 2022 Cloudbase Solutions SRL
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package cloudconfig
-
-import (
- "crypto/x509"
- "encoding/base64"
- "fmt"
- "strings"
- "sync"
-
- "github.com/cloudbase/garm-provider-common/defaults"
-
- "github.com/pkg/errors"
- "gopkg.in/yaml.v3"
-)
-
-func NewDefaultCloudInitConfig() *CloudInit {
- return &CloudInit{
- PackageUpgrade: true,
- Packages: []string{
- "curl",
- "tar",
- },
- SystemInfo: &SystemInfo{
- DefaultUser: DefaultUser{
- Name: defaults.DefaultUser,
- Home: fmt.Sprintf("/home/%s", defaults.DefaultUser),
- Shell: defaults.DefaultUserShell,
- Groups: defaults.DefaultUserGroups,
- Sudo: "ALL=(ALL) NOPASSWD:ALL",
- },
- },
- }
-}
-
-type DefaultUser struct {
- Name string `yaml:"name"`
- Home string `yaml:"home"`
- Shell string `yaml:"shell"`
- Groups []string `yaml:"groups,omitempty"`
- Sudo string `yaml:"sudo"`
-}
-
-type SystemInfo struct {
- DefaultUser DefaultUser `yaml:"default_user"`
-}
-
-type File struct {
- Encoding string `yaml:"encoding"`
- Content string `yaml:"content"`
- Owner string `yaml:"owner"`
- Path string `yaml:"path"`
- Permissions string `yaml:"permissions"`
-}
-
-type CloudInit struct {
- mux sync.Mutex
-
- PackageUpgrade bool `yaml:"package_upgrade"`
- Packages []string `yaml:"packages,omitempty"`
- SSHAuthorizedKeys []string `yaml:"ssh_authorized_keys,omitempty"`
- SystemInfo *SystemInfo `yaml:"system_info,omitempty"`
- RunCmd []string `yaml:"runcmd,omitempty"`
- WriteFiles []File `yaml:"write_files,omitempty"`
- CACerts CACerts `yaml:"ca-certs,omitempty"`
-}
-
-type CACerts struct {
- RemoveDefaults bool `yaml:"remove-defaults"`
- Trusted []string `yaml:"trusted"`
-}
-
-func (c *CloudInit) AddCACert(cert []byte) error {
- c.mux.Lock()
- defer c.mux.Unlock()
-
- if cert == nil {
- return nil
- }
-
- roots := x509.NewCertPool()
- if ok := roots.AppendCertsFromPEM(cert); !ok {
- return fmt.Errorf("failed to parse CA cert bundle")
- }
- c.CACerts.Trusted = append(c.CACerts.Trusted, string(cert))
-
- return nil
-}
-
-func (c *CloudInit) AddSSHKey(keys ...string) {
- c.mux.Lock()
- defer c.mux.Unlock()
-
- // TODO(gabriel-samfira): Validate the SSH public key.
- for _, key := range keys {
- found := false
- for _, val := range c.SSHAuthorizedKeys {
- if val == key {
- found = true
- break
- }
- }
- if !found {
- c.SSHAuthorizedKeys = append(c.SSHAuthorizedKeys, key)
- }
- }
-}
-
-func (c *CloudInit) AddPackage(pkgs ...string) {
- c.mux.Lock()
- defer c.mux.Unlock()
-
- for _, pkg := range pkgs {
- found := false
- for _, val := range c.Packages {
- if val == pkg {
- found = true
- break
- }
- }
- if !found {
- c.Packages = append(c.Packages, pkg)
- }
- }
-}
-
-func (c *CloudInit) AddRunCmd(cmd string) {
- c.mux.Lock()
- defer c.mux.Unlock()
-
- c.RunCmd = append(c.RunCmd, cmd)
-}
-
-func (c *CloudInit) AddFile(contents []byte, path, owner, permissions string) {
- c.mux.Lock()
- defer c.mux.Unlock()
-
- for _, val := range c.WriteFiles {
- if val.Path == path {
- return
- }
- }
-
- file := File{
- Encoding: "b64",
- Content: base64.StdEncoding.EncodeToString(contents),
- Owner: owner,
- Permissions: permissions,
- Path: path,
- }
- c.WriteFiles = append(c.WriteFiles, file)
-}
-
-func (c *CloudInit) Serialize() (string, error) {
- c.mux.Lock()
- defer c.mux.Unlock()
-
- ret := []string{
- "#cloud-config",
- }
-
- asYaml, err := yaml.Marshal(c)
- if err != nil {
- return "", errors.Wrap(err, "marshaling to yaml")
- }
-
- ret = append(ret, string(asYaml))
- return strings.Join(ret, "\n"), nil
-}
diff --git a/vendor/github.com/cloudbase/garm-provider-common/cloudconfig/templates.go b/vendor/github.com/cloudbase/garm-provider-common/cloudconfig/templates.go
deleted file mode 100644
index b27f5cb8..00000000
--- a/vendor/github.com/cloudbase/garm-provider-common/cloudconfig/templates.go
+++ /dev/null
@@ -1,483 +0,0 @@
-// Copyright 2022 Cloudbase Solutions SRL
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package cloudconfig
-
-import (
- "bytes"
- "fmt"
- "text/template"
-
- "github.com/cloudbase/garm-provider-common/params"
- "github.com/pkg/errors"
-)
-
-var CloudConfigTemplate = `#!/bin/bash
-
-set -e
-set -o pipefail
-
-{{- if .EnableBootDebug }}
-set -x
-{{- end }}
-
-CALLBACK_URL="{{ .CallbackURL }}"
-METADATA_URL="{{ .MetadataURL }}"
-BEARER_TOKEN="{{ .CallbackToken }}"
-
-if [ -z "$METADATA_URL" ];then
- echo "no token is available and METADATA_URL is not set"
- exit 1
-fi
-GITHUB_TOKEN=$(curl --retry 5 --retry-delay 5 --retry-connrefused --fail -s -X GET -H 'Accept: application/json' -H "Authorization: Bearer ${BEARER_TOKEN}" "${METADATA_URL}/runner-registration-token/")
-
-function call() {
- PAYLOAD="$1"
- [[ $CALLBACK_URL =~ ^(.*)/status$ ]]
- if [ -z "$BASH_REMATCH" ];then
- CALLBACK_URL="${CALLBACK_URL}/status"
- fi
- curl --retry 5 --retry-delay 5 --retry-connrefused --fail -s -X POST -d "${PAYLOAD}" -H 'Accept: application/json' -H "Authorization: Bearer ${BEARER_TOKEN}" "${CALLBACK_URL}" || echo "failed to call home: exit code ($?)"
-}
-
-function sendStatus() {
- MSG="$1"
- call "{\"status\": \"installing\", \"message\": \"$MSG\"}"
-}
-
-function success() {
- MSG="$1"
- ID=$2
- call "{\"status\": \"idle\", \"message\": \"$MSG\", \"agent_id\": $ID}"
-}
-
-function fail() {
- MSG="$1"
- call "{\"status\": \"failed\", \"message\": \"$MSG\"}"
- exit 1
-}
-
-# This will echo the version number in the filename. Given a file name like: actions-runner-osx-x64-2.299.1.tar.gz
-# this will output: 2.299.1
-function getRunnerVersion() {
- FILENAME="{{ .FileName }}"
- [[ $FILENAME =~ ([0-9]+\.[0-9]+\.[0-9+]) ]]
- echo $BASH_REMATCH
-}
-
-function getCachedToolsPath() {
- CACHED_RUNNER="/opt/cache/actions-runner/latest"
- if [ -d "$CACHED_RUNNER" ];then
- echo "$CACHED_RUNNER"
- return 0
- fi
-
- VERSION=$(getRunnerVersion)
- if [ -z "$VERSION" ]; then
- return 0
- fi
-
- CACHED_RUNNER="/opt/cache/actions-runner/$VERSION"
- if [ -d "$CACHED_RUNNER" ];then
- echo "$CACHED_RUNNER"
- return 0
- fi
- return 0
-}
-
-function downloadAndExtractRunner() {
- sendStatus "downloading tools from {{ .DownloadURL }}"
- if [ ! -z "{{ .TempDownloadToken }}" ]; then
- TEMP_TOKEN="Authorization: Bearer {{ .TempDownloadToken }}"
- fi
- curl --retry 5 --retry-delay 5 --retry-connrefused --fail -L -H "${TEMP_TOKEN}" -o "/home/{{ .RunnerUsername }}/{{ .FileName }}" "{{ .DownloadURL }}" || fail "failed to download tools"
- mkdir -p /home/{{ .RunnerUsername }}/actions-runner || fail "failed to create actions-runner folder"
- sendStatus "extracting runner"
- tar xf "/home/{{ .RunnerUsername }}/{{ .FileName }}" -C /home/{{ .RunnerUsername }}/actions-runner/ || fail "failed to extract runner"
- # chown {{ .RunnerUsername }}:{{ .RunnerGroup }} -R /home/{{ .RunnerUsername }}/actions-runner/ || fail "failed to change owner"
-}
-
-TEMP_TOKEN=""
-GH_RUNNER_GROUP="{{.GitHubRunnerGroup}}"
-
-# $RUNNER_GROUP_OPT will be added to the config.sh line. If it's empty, nothing happens
-# if it holds a value, it will be part of the command.
-RUNNER_GROUP_OPT=""
-if [ ! -z $GH_RUNNER_GROUP ];then
- RUNNER_GROUP_OPT="--runnergroup=$GH_RUNNER_GROUP"
-fi
-
-CACHED_RUNNER=$(getCachedToolsPath)
-if [ -z "$CACHED_RUNNER" ];then
- downloadAndExtractRunner
- sendStatus "installing dependencies"
- cd /home/{{ .RunnerUsername }}/actions-runner
- sudo ./bin/installdependencies.sh || fail "failed to install dependencies"
-else
- sendStatus "using cached runner found in $CACHED_RUNNER"
- sudo cp -a "$CACHED_RUNNER" "/home/{{ .RunnerUsername }}/actions-runner"
- sudo chown {{ .RunnerUsername }}:{{ .RunnerGroup }} -R "/home/{{ .RunnerUsername }}/actions-runner" || fail "failed to change owner"
- cd /home/{{ .RunnerUsername }}/actions-runner
-fi
-
-
-sendStatus "configuring runner"
-set +e
-attempt=1
-while true; do
- ERROUT=$(mktemp)
- ./config.sh --unattended --url "{{ .RepoURL }}" --token "$GITHUB_TOKEN" $RUNNER_GROUP_OPT --name "{{ .RunnerName }}" --labels "{{ .RunnerLabels }}" --ephemeral 2>$ERROUT
- if [ $? -eq 0 ]; then
- rm $ERROUT || true
- sendStatus "runner successfully configured after $attempt attempt(s)"
- break
- fi
- LAST_ERR=$(cat $ERROUT)
- echo "$LAST_ERR"
-
- # if the runner is already configured, remove it and try again. In the past configuring a runner
- # managed to register it but timed out later, resulting in an error.
- ./config.sh remove --token "$GITHUB_TOKEN" || true
-
- if [ $attempt -gt 5 ];then
- rm $ERROUT || true
- fail "failed to configure runner: $LAST_ERR"
- fi
-
- sendStatus "failed to configure runner (attempt $attempt): $LAST_ERR (retrying in 5 seconds)"
- attempt=$((attempt+1))
- rm $ERROUT || true
- sleep 5
-done
-set -e
-
-sendStatus "installing runner service"
-sudo ./svc.sh install {{ .RunnerUsername }} || fail "failed to install service"
-
-if [ -e "/sys/fs/selinux" ];then
- sudo chcon -h user_u:object_r:bin_t /home/runner/ || fail "failed to change selinux context"
- sudo chcon -R -h {{ .RunnerUsername }}:object_r:bin_t /home/runner/* || fail "failed to change selinux context"
-fi
-
-sendStatus "starting service"
-sudo ./svc.sh start || fail "failed to start service"
-
-set +e
-AGENT_ID=$(grep "agentId" /home/{{ .RunnerUsername }}/actions-runner/.runner | tr -d -c 0-9)
-if [ $? -ne 0 ];then
- fail "failed to get agent ID"
-fi
-set -e
-
-success "runner successfully installed" $AGENT_ID
-`
-
-var WindowsSetupScriptTemplate = `#ps1_sysnative
-Param(
- [Parameter(Mandatory=$false)]
- [string]$Token="{{.CallbackToken}}"
-)
-
-$ErrorActionPreference="Stop"
-
-function Invoke-FastWebRequest {
- [CmdletBinding()]
- Param(
- [Parameter(Mandatory=$True,ValueFromPipeline=$true,Position=0)]
- [System.Uri]$Uri,
- [Parameter(Position=1)]
- [string]$OutFile,
- [Hashtable]$Headers=@{},
- [switch]$SkipIntegrityCheck=$false
- )
- PROCESS
- {
- if(!([System.Management.Automation.PSTypeName]'System.Net.Http.HttpClient').Type)
- {
- $assembly = [System.Reflection.Assembly]::LoadWithPartialName("System.Net.Http")
- }
-
- if(!$OutFile) {
- $OutFile = $Uri.PathAndQuery.Substring($Uri.PathAndQuery.LastIndexOf("/") + 1)
- if(!$OutFile) {
- throw "The ""OutFile"" parameter needs to be specified"
- }
- }
-
- $fragment = $Uri.Fragment.Trim('#')
- if ($fragment) {
- $details = $fragment.Split("=")
- $algorithm = $details[0]
- $hash = $details[1]
- }
-
- if (!$SkipIntegrityCheck -and $fragment -and (Test-Path $OutFile)) {
- try {
- return (Test-FileIntegrity -File $OutFile -Algorithm $algorithm -ExpectedHash $hash)
- } catch {
- Remove-Item $OutFile
- }
- }
-
- $client = new-object System.Net.Http.HttpClient
- foreach ($k in $Headers.Keys){
- $client.DefaultRequestHeaders.Add($k, $Headers[$k])
- }
- $task = $client.GetStreamAsync($Uri)
- $response = $task.Result
- if($task.IsFaulted) {
- $msg = "Request for URL '{0}' is faulted. Task status: {1}." -f @($Uri, $task.Status)
- if($task.Exception) {
- $msg += "Exception details: {0}" -f @($task.Exception)
- }
- Throw $msg
- }
- $outStream = New-Object IO.FileStream $OutFile, Create, Write, None
-
- try {
- $totRead = 0
- $buffer = New-Object Byte[] 1MB
- while (($read = $response.Read($buffer, 0, $buffer.Length)) -gt 0) {
- $totRead += $read
- $outStream.Write($buffer, 0, $read);
- }
- }
- finally {
- $outStream.Close()
- }
- if(!$SkipIntegrityCheck -and $fragment) {
- Test-FileIntegrity -File $OutFile -Algorithm $algorithm -ExpectedHash $hash
- }
- }
-}
-
-function Import-Certificate() {
- [CmdletBinding()]
- param (
- [parameter(Mandatory=$true)]
- [string]$CertificatePath,
- [parameter(Mandatory=$true)]
- [System.Security.Cryptography.X509Certificates.StoreLocation]$StoreLocation="LocalMachine",
- [parameter(Mandatory=$true)]
- [System.Security.Cryptography.X509Certificates.StoreName]$StoreName="TrustedPublisher"
- )
- PROCESS
- {
- $store = New-Object System.Security.Cryptography.X509Certificates.X509Store(
- $StoreName, $StoreLocation)
- $store.Open([System.Security.Cryptography.X509Certificates.OpenFlags]::ReadWrite)
- $cert = New-Object System.Security.Cryptography.X509Certificates.X509Certificate2(
- $CertificatePath)
- $store.Add($cert)
- }
-}
-
-function Invoke-APICall() {
- [CmdletBinding()]
- param (
- [parameter(Mandatory=$true)]
- [object]$Payload,
- [parameter(Mandatory=$true)]
- [string]$CallbackURL
- )
- PROCESS{
- Invoke-WebRequest -UseBasicParsing -Method Post -Headers @{"Accept"="application/json"; "Authorization"="Bearer $Token"} -Uri $CallbackURL -Body (ConvertTo-Json $Payload) | Out-Null
- }
-}
-
-function Update-GarmStatus() {
- [CmdletBinding()]
- param (
- [parameter(Mandatory=$true)]
- [string]$Message,
- [parameter(Mandatory=$true)]
- [string]$CallbackURL
- )
- PROCESS{
- $body = @{
- "status"="installing"
- "message"=$Message
- }
- Invoke-APICall -Payload $body -CallbackURL $CallbackURL | Out-Null
- }
-}
-
-function Invoke-GarmSuccess() {
- [CmdletBinding()]
- param (
- [parameter(Mandatory=$true)]
- [string]$Message,
- [parameter(Mandatory=$true)]
- [int64]$AgentID,
- [parameter(Mandatory=$true)]
- [string]$CallbackURL
- )
- PROCESS{
- $body = @{
- "status"="idle"
- "message"=$Message
- "agent_id"=$AgentID
- }
- Invoke-APICall -Payload $body -CallbackURL $CallbackURL | Out-Null
- }
-}
-
-function Invoke-GarmFailure() {
- [CmdletBinding()]
- param (
- [parameter(Mandatory=$true)]
- [string]$Message,
- [parameter(Mandatory=$true)]
- [string]$CallbackURL
- )
- PROCESS{
- $body = @{
- "status"="failed"
- "message"=$Message
- }
- Invoke-APICall -Payload $body -CallbackURL $CallbackURL | Out-Null
- Throw $Message
- }
-}
-
-$PEMData = @"
-{{.CABundle}}
-"@
-$GHRunnerGroup = "{{.GitHubRunnerGroup}}"
-
-function Install-Runner() {
- $CallbackURL="{{.CallbackURL}}"
- if (!$CallbackURL.EndsWith("/status")) {
- $CallbackURL = "$CallbackURL/status"
- }
-
- if ($Token.Length -eq 0) {
- Throw "missing callback authentication token"
- }
- try {
- $MetadataURL="{{.MetadataURL}}"
- $DownloadURL="{{.DownloadURL}}"
- if($MetadataURL -eq ""){
- Throw "missing metadata URL"
- }
-
- if($PEMData.Trim().Length -gt 0){
- Set-Content $env:TMP\garm-ca.pem $PEMData
- Import-Certificate -CertificatePath $env:TMP\garm-ca.pem
- }
-
- $GithubRegistrationToken = Invoke-WebRequest -UseBasicParsing -Headers @{"Accept"="application/json"; "Authorization"="Bearer $Token"} -Uri $MetadataURL/runner-registration-token/
- Update-GarmStatus -CallbackURL $CallbackURL -Message "downloading tools from $DownloadURL"
-
- $downloadToken="{{.TempDownloadToken}}"
- $DownloadTokenHeaders=@{}
- if ($downloadToken.Length -gt 0) {
- $DownloadTokenHeaders=@{
- "Authorization"="Bearer $downloadToken"
- }
- }
- $downloadPath = Join-Path $env:TMP {{.FileName}}
- Invoke-FastWebRequest -Uri $DownloadURL -OutFile $downloadPath -Headers $DownloadTokenHeaders
-
- $runnerDir = "C:\runner"
- mkdir $runnerDir
-
- Update-GarmStatus -CallbackURL $CallbackURL -Message "extracting runner"
- Add-Type -AssemblyName System.IO.Compression.FileSystem
- [System.IO.Compression.ZipFile]::ExtractToDirectory($downloadPath, "$runnerDir")
- $runnerGroupOpt = ""
- if ($GHRunnerGroup.Length -gt 0){
- $runnerGroupOpt = "--runnergroup $GHRunnerGroup"
- }
- Update-GarmStatus -CallbackURL $CallbackURL -Message "configuring and starting runner"
- cd $runnerDir
- ./config.cmd --unattended --url "{{ .RepoURL }}" --token $GithubRegistrationToken $runnerGroupOpt --name "{{ .RunnerName }}" --labels "{{ .RunnerLabels }}" --ephemeral --runasservice
-
- $agentInfoFile = Join-Path $runnerDir ".runner"
- $agentInfo = ConvertFrom-Json (gc -raw $agentInfoFile)
- Invoke-GarmSuccess -CallbackURL $CallbackURL -Message "runner successfully installed" -AgentID $agentInfo.agentId
- } catch {
- Invoke-GarmFailure -CallbackURL $CallbackURL -Message $_
- }
-}
-Install-Runner
-`
-
-// InstallRunnerParams holds the parameters needed to render the runner install script.
-type InstallRunnerParams struct {
- // FileName is the name of the file that will be downloaded from the download URL.
- // This will be the runner archive downloaded from GitHub.
- FileName string
- // DownloadURL is the URL from which the runner archive will be downloaded.
- DownloadURL string
- // RunnerUsername is the username of the user that will run the runner service.
- RunnerUsername string
- // RunnerGroup is the group of the user that will run the runner service.
- RunnerGroup string
- // RepoURL is the URL or the github repo the github runner agent needs to configure itself.
- RepoURL string
- // MetadataURL is the URL where instances can fetch information needed to set themselves up.
- // This URL is set in the GARM config file.
- MetadataURL string
- // RunnerName is the name of the runner. GARM will use this to register the runner with GitHub.
- RunnerName string
- // RunnerLabels is a comma separated list of labels that will be added to the runner.
- RunnerLabels string
- // CallbackURL is the URL where the instance can send a post, signaling progress or status.
- // This URL is set in the GARM config file.
- CallbackURL string
- // CallbackToken is the token that needs to be set by the instance in the headers in order to call
- // the CallbackURL.
- CallbackToken string
- // TempDownloadToken is the token that needs to be set by the instance in the headers in order to download
- // the githun runner. This is usually needed when using garm against a GHES instance.
- TempDownloadToken string
- // CABundle is a CA certificate bundle which will be sent to instances and which will tipically be installed
- // as a system wide trusted root CA by either cloud-init or whatever mechanism the provider will use to set
- // up the runner.
- CABundle string
- // GitHubRunnerGroup is the github runner group in which the newly installed runner should be added to.
- GitHubRunnerGroup string
- // EnableBootDebug will enable bash debug mode.
- EnableBootDebug bool
- // ExtraContext is a map of extra context that will be passed to the runner install template.
- // This option is useful for situations in which you're supplying your own template and you need
- // to pass in information that is not available in the default template.
- ExtraContext map[string]string
-}
-
-func InstallRunnerScript(installParams InstallRunnerParams, osType params.OSType, tpl string) ([]byte, error) {
- if tpl == "" {
- switch osType {
- case params.Linux:
- tpl = CloudConfigTemplate
- case params.Windows:
- tpl = WindowsSetupScriptTemplate
- default:
- return nil, fmt.Errorf("unsupported os type: %s", osType)
- }
- }
-
- t, err := template.New("").Parse(tpl)
- if err != nil {
- return nil, errors.Wrap(err, "parsing template")
- }
-
- var buf bytes.Buffer
- if err := t.Execute(&buf, installParams); err != nil {
- return nil, errors.Wrap(err, "rendering template")
- }
-
- return buf.Bytes(), nil
-}
diff --git a/vendor/github.com/cloudbase/garm-provider-common/cloudconfig/util.go b/vendor/github.com/cloudbase/garm-provider-common/cloudconfig/util.go
deleted file mode 100644
index d9a69a16..00000000
--- a/vendor/github.com/cloudbase/garm-provider-common/cloudconfig/util.go
+++ /dev/null
@@ -1,197 +0,0 @@
-package cloudconfig
-
-import (
- "encoding/json"
- "fmt"
- "sort"
- "strings"
-
- "github.com/cloudbase/garm-provider-common/defaults"
- commonParams "github.com/cloudbase/garm-provider-common/params"
- "github.com/google/go-github/v53/github"
- "github.com/pkg/errors"
-)
-
-// CloudConfigSpec is a struct that holds extra specs that can be used to customize user data.
-type CloudConfigSpec struct {
- // RunnerInstallTemplate can be used to override the default runner install template.
- // If used, the caller is responsible for the correctness of the template as well as the
- // suitability of the template for the target OS.
- RunnerInstallTemplate []byte `json:"runner_install_template"`
- // PreInstallScripts is a map of pre-install scripts that will be run before the
- // runner install script. These will run as root and can be used to prep a generic image
- // before we attempt to install the runner. The key of the map is the name of the script
- // as it will be written to disk. The value is a byte array with the contents of the script.
- //
- // These scripts will be added and run in alphabetical order.
- //
- // On Linux, we will set the executable flag. On Windows, the name matters as Windows looks for an
- // extension to determine if the file is an executable or not. In theory this can hold binaries,
- // but in most cases this will most likely hold scripts. We do not currenly validate the payload,
- // so it's up to the user what they upload here.
- // Caution needs to be exercised when using this feature, as the total size of userdata is limited
- // on most providers.
- PreInstallScripts map[string][]byte `json:"pre_install_scripts"`
- // ExtraContext is a map of extra context that will be passed to the runner install template.
- ExtraContext map[string]string `json:"extra_context"`
-}
-
-func sortMapKeys(m map[string][]byte) []string {
- var keys []string
- for k := range m {
- keys = append(keys, k)
- }
- sort.Strings(keys)
-
- return keys
-}
-
-// GetSpecs returns the cloud config specific extra specs from the bootstrap params.
-func GetSpecs(bootstrapParams commonParams.BootstrapInstance) (CloudConfigSpec, error) {
- var extraSpecs CloudConfigSpec
- if len(bootstrapParams.ExtraSpecs) == 0 {
- return extraSpecs, nil
- }
-
- if err := json.Unmarshal(bootstrapParams.ExtraSpecs, &extraSpecs); err != nil {
- return CloudConfigSpec{}, errors.Wrap(err, "unmarshaling extra specs")
- }
-
- if extraSpecs.ExtraContext == nil {
- extraSpecs.ExtraContext = map[string]string{}
- }
-
- if extraSpecs.PreInstallScripts == nil {
- extraSpecs.PreInstallScripts = map[string][]byte{}
- }
-
- return extraSpecs, nil
-}
-
-// GetRunnerInstallScript returns the runner install script for the given bootstrap params.
-// This function will return either the default script for the given OS type or will use the supplied template
-// if one is provided.
-func GetRunnerInstallScript(bootstrapParams commonParams.BootstrapInstance, tools github.RunnerApplicationDownload, runnerName string) ([]byte, error) {
- if tools.Filename == nil {
- return nil, fmt.Errorf("missing tools filename")
- }
-
- if tools.DownloadURL == nil {
- return nil, fmt.Errorf("missing tools download URL")
- }
-
- var tempToken string
- if tools.TempDownloadToken != nil {
- tempToken = *tools.TempDownloadToken
- }
-
- extraSpecs, err := GetSpecs(bootstrapParams)
- if err != nil {
- return nil, errors.Wrap(err, "getting specs")
- }
-
- installRunnerParams := InstallRunnerParams{
- FileName: *tools.Filename,
- DownloadURL: *tools.DownloadURL,
- TempDownloadToken: tempToken,
- MetadataURL: bootstrapParams.MetadataURL,
- RunnerUsername: defaults.DefaultUser,
- RunnerGroup: defaults.DefaultUser,
- RepoURL: bootstrapParams.RepoURL,
- RunnerName: runnerName,
- RunnerLabels: strings.Join(bootstrapParams.Labels, ","),
- CallbackURL: bootstrapParams.CallbackURL,
- CallbackToken: bootstrapParams.InstanceToken,
- GitHubRunnerGroup: bootstrapParams.GitHubRunnerGroup,
- ExtraContext: extraSpecs.ExtraContext,
- EnableBootDebug: bootstrapParams.UserDataOptions.EnableBootDebug,
- }
-
- if bootstrapParams.CACertBundle != nil && len(bootstrapParams.CACertBundle) > 0 {
- installRunnerParams.CABundle = string(bootstrapParams.CACertBundle)
- }
-
- installScript, err := InstallRunnerScript(installRunnerParams, bootstrapParams.OSType, string(extraSpecs.RunnerInstallTemplate))
- if err != nil {
- return nil, errors.Wrap(err, "generating script")
- }
-
- return installScript, nil
-}
-
-// GetCloudInitConfig returns the cloud-init specific userdata config. This config can be used on most clouds
-// for most Linux machines. The install runner script must be generated separately either by GetRunnerInstallScript()
-// or some other means.
-func GetCloudInitConfig(bootstrapParams commonParams.BootstrapInstance, installScript []byte) (string, error) {
- extraSpecs, err := GetSpecs(bootstrapParams)
- if err != nil {
- return "", errors.Wrap(err, "getting specs")
- }
-
- cloudCfg := NewDefaultCloudInitConfig()
-
- if bootstrapParams.UserDataOptions.DisableUpdatesOnBoot {
- cloudCfg.PackageUpgrade = false
- cloudCfg.Packages = []string{}
- }
- for _, pkg := range bootstrapParams.UserDataOptions.ExtraPackages {
- cloudCfg.AddPackage(pkg)
- }
-
- if len(extraSpecs.PreInstallScripts) > 0 {
- names := sortMapKeys(extraSpecs.PreInstallScripts)
- for _, name := range names {
- script := extraSpecs.PreInstallScripts[name]
- cloudCfg.AddFile(script, fmt.Sprintf("/garm-pre-install/%s", name), "root:root", "755")
- cloudCfg.AddRunCmd(fmt.Sprintf("/garm-pre-install/%s", name))
- }
- }
- cloudCfg.AddRunCmd("rm -rf /garm-pre-install")
-
- cloudCfg.AddSSHKey(bootstrapParams.SSHKeys...)
- cloudCfg.AddFile(installScript, "/install_runner.sh", "root:root", "755")
- cloudCfg.AddRunCmd(fmt.Sprintf("su -l -c /install_runner.sh %s", defaults.DefaultUser))
- cloudCfg.AddRunCmd("rm -f /install_runner.sh")
- if bootstrapParams.CACertBundle != nil && len(bootstrapParams.CACertBundle) > 0 {
- if err := cloudCfg.AddCACert(bootstrapParams.CACertBundle); err != nil {
- return "", errors.Wrap(err, "adding CA cert bundle")
- }
- }
-
- asStr, err := cloudCfg.Serialize()
- if err != nil {
- return "", errors.Wrap(err, "creating cloud config")
- }
-
- return asStr, nil
-}
-
-// GetCloudConfig is a helper function that generates a cloud-init config for Linux and a powershell script for Windows.
-// In most cases this function should do, but in situations where a more custom approach is needed, you may need to call
-// GetCloudInitConfig() or GetRunnerInstallScript() directly and compose the final userdata in a different way.
-// The extra specs PreInstallScripts is only supported on Linux via cloud-init by this function. On some providers, like Azure
-// Windows initialization scripts are run by creating a separate CustomScriptExtension resource for each individual script.
-// On other clouds it may be different. This function aims to be generic, which is why it only supports the PreInstallScripts
-// via cloud-init.
-func GetCloudConfig(bootstrapParams commonParams.BootstrapInstance, tools github.RunnerApplicationDownload, runnerName string) (string, error) {
- installScript, err := GetRunnerInstallScript(bootstrapParams, tools, runnerName)
- if err != nil {
- return "", errors.Wrap(err, "generating script")
- }
-
- var asStr string
- switch bootstrapParams.OSType {
- case commonParams.Linux:
- cloudCfg, err := GetCloudInitConfig(bootstrapParams, installScript)
- if err != nil {
- return "", errors.Wrap(err, "getting cloud init config")
- }
- return cloudCfg, nil
- case commonParams.Windows:
- asStr = string(installScript)
- default:
- return "", fmt.Errorf("unknown os type: %s", bootstrapParams.OSType)
- }
-
- return asStr, nil
-}
diff --git a/vendor/github.com/cloudbase/garm-provider-common/defaults/defaults.go b/vendor/github.com/cloudbase/garm-provider-common/defaults/defaults.go
index e461d10a..0feda5e3 100644
--- a/vendor/github.com/cloudbase/garm-provider-common/defaults/defaults.go
+++ b/vendor/github.com/cloudbase/garm-provider-common/defaults/defaults.go
@@ -1,3 +1,17 @@
+// Copyright 2023 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
package defaults
const (
diff --git a/vendor/github.com/cloudbase/garm-provider-common/errors/errors.go b/vendor/github.com/cloudbase/garm-provider-common/errors/errors.go
index 9f98c33a..76e85d9c 100644
--- a/vendor/github.com/cloudbase/garm-provider-common/errors/errors.go
+++ b/vendor/github.com/cloudbase/garm-provider-common/errors/errors.go
@@ -29,9 +29,9 @@ var (
 	// ErrBadRequest is returned if a malformed request is sent
ErrBadRequest = NewBadRequestError("invalid request")
// ErrTimeout is returned when a timeout occurs.
- ErrTimeout = fmt.Errorf("timed out")
- ErrUnprocessable = fmt.Errorf("cannot process request")
- ErrNoPoolsAvailable = fmt.Errorf("no pools available")
+ ErrTimeout = NewTimeoutError("timed out")
+ ErrUnprocessable = NewUnprocessableError("cannot process request")
+ ErrNoPoolsAvailable = NewNoPoolsAvailableError("no pools available")
)
type baseError struct {
@@ -56,6 +56,15 @@ type ProviderError struct {
baseError
}
+func (p *ProviderError) Is(target error) bool {
+ if target == nil {
+ return false
+ }
+
+ _, ok := target.(*ProviderError)
+ return ok
+}
+
// NewMissingSecretError returns a new MissingSecretError
func NewMissingSecretError(msg string, a ...interface{}) error {
return &MissingSecretError{
@@ -70,6 +79,15 @@ type MissingSecretError struct {
baseError
}
+func (p *MissingSecretError) Is(target error) bool {
+ if target == nil {
+ return false
+ }
+
+ _, ok := target.(*MissingSecretError)
+ return ok
+}
+
// NewUnauthorizedError returns a new UnauthorizedError
func NewUnauthorizedError(msg string) error {
return &UnauthorizedError{
@@ -84,6 +102,15 @@ type UnauthorizedError struct {
baseError
}
+func (p *UnauthorizedError) Is(target error) bool {
+ if target == nil {
+ return false
+ }
+
+ _, ok := target.(*UnauthorizedError)
+ return ok
+}
+
// NewNotFoundError returns a new NotFoundError
func NewNotFoundError(msg string, a ...interface{}) error {
return &NotFoundError{
@@ -98,6 +125,15 @@ type NotFoundError struct {
baseError
}
+func (p *NotFoundError) Is(target error) bool {
+ if target == nil {
+ return false
+ }
+
+ _, ok := target.(*NotFoundError)
+ return ok
+}
+
// NewDuplicateUserError returns a new DuplicateUserError
func NewDuplicateUserError(msg string) error {
return &DuplicateUserError{
@@ -112,6 +148,15 @@ type DuplicateUserError struct {
baseError
}
+func (p *DuplicateUserError) Is(target error) bool {
+ if target == nil {
+ return false
+ }
+
+ _, ok := target.(*DuplicateUserError)
+ return ok
+}
+
// NewBadRequestError returns a new BadRequestError
func NewBadRequestError(msg string, a ...interface{}) error {
return &BadRequestError{
@@ -126,6 +171,15 @@ type BadRequestError struct {
baseError
}
+func (p *BadRequestError) Is(target error) bool {
+ if target == nil {
+ return false
+ }
+
+ _, ok := target.(*BadRequestError)
+ return ok
+}
+
// NewConflictError returns a new ConflictError
func NewConflictError(msg string, a ...interface{}) error {
return &ConflictError{
@@ -139,3 +193,81 @@ func NewConflictError(msg string, a ...interface{}) error {
type ConflictError struct {
baseError
}
+
+func (p *ConflictError) Is(target error) bool {
+ if target == nil {
+ return false
+ }
+
+ _, ok := target.(*ConflictError)
+ return ok
+}
+
+// NewTimeoutError returns a new TimoutError
+func NewTimeoutError(msg string, a ...interface{}) error {
+ return &TimoutError{
+ baseError{
+ msg: fmt.Sprintf(msg, a...),
+ },
+ }
+}
+
+// TimoutError is returned when an operation times out.
+type TimoutError struct {
+ baseError
+}
+
+func (p *TimoutError) Is(target error) bool {
+ if target == nil {
+ return false
+ }
+
+ _, ok := target.(*TimoutError)
+ return ok
+}
+
+// NewUnprocessableError returns a new UnprocessableError
+func NewUnprocessableError(msg string, a ...interface{}) error {
+ return &UnprocessableError{
+ baseError{
+ msg: fmt.Sprintf(msg, a...),
+ },
+ }
+}
+
+// UnprocessableError is returned when a request cannot be processed.
+type UnprocessableError struct {
+ baseError
+}
+
+func (p *UnprocessableError) Is(target error) bool {
+ if target == nil {
+ return false
+ }
+
+ _, ok := target.(*UnprocessableError)
+ return ok
+}
+
+// NewNoPoolsAvailableError returns a new NoPoolsAvailableError
+func NewNoPoolsAvailableError(msg string, a ...interface{}) error {
+ return &NoPoolsAvailableError{
+ baseError{
+ msg: fmt.Sprintf(msg, a...),
+ },
+ }
+}
+
+// NoPoolsAvailableError is returned when there are no pools available.
+type NoPoolsAvailableError struct {
+ baseError
+}
+
+func (p *NoPoolsAvailableError) Is(target error) bool {
+ if target == nil {
+ return false
+ }
+
+ _, ok := target.(*NoPoolsAvailableError)
+ return ok
+}
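
The `Is` methods added above are what make the package's sentinel errors usable with `errors.Is` across wrapping boundaries: any two instances of the same concrete error type now match, regardless of message. A minimal sketch of the caller side, with an illustrative wrapping call:

```go
package main

import (
	"errors"
	"fmt"

	gErrors "github.com/cloudbase/garm-provider-common/errors"
)

func main() {
	// An error bubbling up through several layers, wrapped along the way.
	err := fmt.Errorf("fetching pool: %w", gErrors.NewNotFoundError("pool %s not found", "example-pool"))

	// Because NotFoundError implements Is(), any *NotFoundError target matches,
	// no matter the message or how deeply the error is wrapped.
	if errors.Is(err, gErrors.ErrNotFound) {
		fmt.Println("pool does not exist")
	}
}
```
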
diff --git a/vendor/github.com/cloudbase/garm-provider-common/execution/commands.go b/vendor/github.com/cloudbase/garm-provider-common/execution/commands.go
deleted file mode 100644
index 4d718a65..00000000
--- a/vendor/github.com/cloudbase/garm-provider-common/execution/commands.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package execution
-
-type ExecutionCommand string
-
-const (
- CreateInstanceCommand ExecutionCommand = "CreateInstance"
- DeleteInstanceCommand ExecutionCommand = "DeleteInstance"
- GetInstanceCommand ExecutionCommand = "GetInstance"
- ListInstancesCommand ExecutionCommand = "ListInstances"
- StartInstanceCommand ExecutionCommand = "StartInstance"
- StopInstanceCommand ExecutionCommand = "StopInstance"
- RemoveAllInstancesCommand ExecutionCommand = "RemoveAllInstances"
-)
diff --git a/vendor/github.com/cloudbase/garm-provider-common/execution/common/commands.go b/vendor/github.com/cloudbase/garm-provider-common/execution/common/commands.go
new file mode 100644
index 00000000..c0e79805
--- /dev/null
+++ b/vendor/github.com/cloudbase/garm-provider-common/execution/common/commands.go
@@ -0,0 +1,99 @@
+// Copyright 2023 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package common
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+
+ gErrors "github.com/cloudbase/garm-provider-common/errors"
+ "github.com/cloudbase/garm-provider-common/params"
+ "github.com/mattn/go-isatty"
+)
+
+type ExecutionCommand string
+
+const (
+ CreateInstanceCommand ExecutionCommand = "CreateInstance"
+ DeleteInstanceCommand ExecutionCommand = "DeleteInstance"
+ GetInstanceCommand ExecutionCommand = "GetInstance"
+ ListInstancesCommand ExecutionCommand = "ListInstances"
+ StartInstanceCommand ExecutionCommand = "StartInstance"
+ StopInstanceCommand ExecutionCommand = "StopInstance"
+ RemoveAllInstancesCommand ExecutionCommand = "RemoveAllInstances"
+ GetVersionCommand ExecutionCommand = "GetVersion"
+)
+
+// V0.1.1 commands
+const (
+ GetSupportedInterfaceVersionsCommand ExecutionCommand = "GetSupportedInterfaceVersions"
+ ValidatePoolInfoCommand ExecutionCommand = "ValidatePoolInfo"
+ GetConfigJSONSchemaCommand ExecutionCommand = "GetConfigJSONSchema"
+ GetExtraSpecsJSONSchemaCommand ExecutionCommand = "GetExtraSpecsJSONSchema"
+)
+
+const (
+ // ExitCodeNotFound is an exit code that indicates a Not Found error
+ ExitCodeNotFound int = 30
+ // ExitCodeDuplicate is an exit code that indicates a duplicate error
+ ExitCodeDuplicate int = 31
+)
+
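+// GetBoostrapParamsFromStdin reads the bootstrap parameters from stdin when the
+// command is CreateInstance. For any other command it returns an empty
+// params.BootstrapInstance and no error.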
+func GetBoostrapParamsFromStdin(c ExecutionCommand) (params.BootstrapInstance, error) {
+ var bootstrapParams params.BootstrapInstance
+ if c == CreateInstanceCommand {
+ if isatty.IsTerminal(os.Stdin.Fd()) || isatty.IsCygwinTerminal(os.Stdin.Fd()) {
+ return params.BootstrapInstance{}, fmt.Errorf("%s requires data passed into stdin", CreateInstanceCommand)
+ }
+
+ var data bytes.Buffer
+ if _, err := io.Copy(&data, os.Stdin); err != nil {
+ return params.BootstrapInstance{}, fmt.Errorf("failed to copy bootstrap params: %w", err)
+ }
+
+ if data.Len() == 0 {
+ return params.BootstrapInstance{}, fmt.Errorf("%s requires data passed into stdin", CreateInstanceCommand)
+ }
+
+ if err := json.Unmarshal(data.Bytes(), &bootstrapParams); err != nil {
+ return params.BootstrapInstance{}, fmt.Errorf("failed to decode instance params: %w", err)
+ }
+ if bootstrapParams.ExtraSpecs == nil {
+ // Initialize ExtraSpecs as an empty JSON object
+ bootstrapParams.ExtraSpecs = json.RawMessage([]byte("{}"))
+ }
+
+ return bootstrapParams, nil
+ }
+
+ // If the command is not CreateInstance, we don't need to read from stdin
+ return params.BootstrapInstance{}, nil
+}
+
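+// ResolveErrorToExitCode maps well known provider errors to the exit codes GARM
+// expects from external providers. Unknown errors map to 1 and a nil error to 0.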
+func ResolveErrorToExitCode(err error) int {
+ if err != nil {
+ if errors.Is(err, gErrors.ErrNotFound) {
+ return ExitCodeNotFound
+ } else if errors.Is(err, gErrors.ErrDuplicateEntity) {
+ return ExitCodeDuplicate
+ }
+ return 1
+ }
+ return 0
+}
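
Taken together, the helpers in this new file give an external provider executable its entrypoint plumbing: read the command from the `GARM_COMMAND` environment variable, fetch bootstrap params from stdin only for `CreateInstance`, and translate typed errors into the agreed exit codes. A hedged sketch; the provider logic itself is elided:

```go
package main

import (
	"fmt"
	"os"

	"github.com/cloudbase/garm-provider-common/execution/common"
)

func main() {
	cmd := common.ExecutionCommand(os.Getenv("GARM_COMMAND"))

	// Only CreateInstance expects bootstrap params on stdin; for any other
	// command this returns an empty params.BootstrapInstance.
	bootstrapParams, err := common.GetBoostrapParamsFromStdin(cmd)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(common.ResolveErrorToExitCode(err))
	}

	_ = bootstrapParams // hand off to the provider implementation here
}
```
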
diff --git a/vendor/github.com/cloudbase/garm-provider-common/execution/interface.go b/vendor/github.com/cloudbase/garm-provider-common/execution/common/interface.go
similarity index 58%
rename from vendor/github.com/cloudbase/garm-provider-common/execution/interface.go
rename to vendor/github.com/cloudbase/garm-provider-common/execution/common/interface.go
index 20188368..d00afe92 100644
--- a/vendor/github.com/cloudbase/garm-provider-common/execution/interface.go
+++ b/vendor/github.com/cloudbase/garm-provider-common/execution/common/interface.go
@@ -1,4 +1,18 @@
-package execution
+// Copyright 2023 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package common
import (
"context"
@@ -6,7 +20,7 @@ import (
"github.com/cloudbase/garm-provider-common/params"
)
-// ExternalProvider defines an interface that external providers need to implement.
+// ExternalProvider defines a common interface that external providers need to implement.
// This is very similar to the common.Provider interface, and was redefined here to
// decouple it, in case it may diverge from native providers.
type ExternalProvider interface {
@@ -24,4 +38,6 @@ type ExternalProvider interface {
Stop(ctx context.Context, instance string, force bool) error
// Start boots up an instance.
Start(ctx context.Context, instance string) error
+ // GetVersion returns the version of the provider.
+ GetVersion(ctx context.Context) string
}
diff --git a/vendor/github.com/cloudbase/garm-provider-common/execution/common/versions.go b/vendor/github.com/cloudbase/garm-provider-common/execution/common/versions.go
new file mode 100644
index 00000000..ebdbbb8c
--- /dev/null
+++ b/vendor/github.com/cloudbase/garm-provider-common/execution/common/versions.go
@@ -0,0 +1,22 @@
+// Copyright 2023 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package common
+
+const (
+ // Version v0.1.0
+ Version010 = "v0.1.0"
+ // Version v0.1.1
+ Version011 = "v0.1.1"
+)
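
With `GetVersion` now part of the `ExternalProvider` interface and the interface versions defined above, a provider can advertise which contract revision it implements. A minimal sketch; `dummyProvider` is a hypothetical type and only the new method is shown:

```go
package main

import (
	"context"
	"fmt"

	"github.com/cloudbase/garm-provider-common/execution/common"
)

// dummyProvider stands in for a real provider implementation.
type dummyProvider struct{}

// GetVersion reports the interface version this provider implements, so GARM
// knows whether the v0.1.1 commands may safely be issued against it.
func (d *dummyProvider) GetVersion(_ context.Context) string {
	return common.Version011
}

func main() {
	p := &dummyProvider{}
	fmt.Println(p.GetVersion(context.Background()))
}
```
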
diff --git a/vendor/github.com/cloudbase/garm-provider-common/execution/execution.go b/vendor/github.com/cloudbase/garm-provider-common/execution/execution.go
deleted file mode 100644
index 448ea84e..00000000
--- a/vendor/github.com/cloudbase/garm-provider-common/execution/execution.go
+++ /dev/null
@@ -1,186 +0,0 @@
-package execution
-
-import (
- "bytes"
- "context"
- "encoding/json"
- "errors"
- "fmt"
- "io"
- "os"
-
- gErrors "github.com/cloudbase/garm-provider-common/errors"
- "github.com/cloudbase/garm-provider-common/params"
-
- "github.com/mattn/go-isatty"
-)
-
-const (
- // ExitCodeNotFound is an exit code that indicates a Not Found error
- ExitCodeNotFound int = 30
- // ExitCodeDuplicate is an exit code that indicates a duplicate error
- ExitCodeDuplicate int = 31
-)
-
-func ResolveErrorToExitCode(err error) int {
- if err != nil {
- if errors.Is(err, gErrors.ErrNotFound) {
- return ExitCodeNotFound
- } else if errors.Is(err, gErrors.ErrDuplicateEntity) {
- return ExitCodeDuplicate
- }
- return 1
- }
- return 0
-}
-
-func GetEnvironment() (Environment, error) {
- env := Environment{
- Command: ExecutionCommand(os.Getenv("GARM_COMMAND")),
- ControllerID: os.Getenv("GARM_CONTROLLER_ID"),
- PoolID: os.Getenv("GARM_POOL_ID"),
- ProviderConfigFile: os.Getenv("GARM_PROVIDER_CONFIG_FILE"),
- InstanceID: os.Getenv("GARM_INSTANCE_ID"),
- }
-
- // If this is a CreateInstance command, we need to get the bootstrap params
- // from stdin
- if env.Command == CreateInstanceCommand {
- if isatty.IsTerminal(os.Stdin.Fd()) || isatty.IsCygwinTerminal(os.Stdin.Fd()) {
- return Environment{}, fmt.Errorf("%s requires data passed into stdin", CreateInstanceCommand)
- }
-
- var data bytes.Buffer
- if _, err := io.Copy(&data, os.Stdin); err != nil {
- return Environment{}, fmt.Errorf("failed to copy bootstrap params")
- }
-
- if data.Len() == 0 {
- return Environment{}, fmt.Errorf("%s requires data passed into stdin", CreateInstanceCommand)
- }
-
- var bootstrapParams params.BootstrapInstance
- if err := json.Unmarshal(data.Bytes(), &bootstrapParams); err != nil {
- return Environment{}, fmt.Errorf("failed to decode instance params: %w", err)
- }
- env.BootstrapParams = bootstrapParams
- }
-
- if err := env.Validate(); err != nil {
- return Environment{}, fmt.Errorf("failed to validate execution environment: %w", err)
- }
-
- return env, nil
-}
-
-type Environment struct {
- Command ExecutionCommand
- ControllerID string
- PoolID string
- ProviderConfigFile string
- InstanceID string
- BootstrapParams params.BootstrapInstance
-}
-
-func (e Environment) Validate() error {
- if e.Command == "" {
- return fmt.Errorf("missing GARM_COMMAND")
- }
-
- if e.ProviderConfigFile == "" {
- return fmt.Errorf("missing GARM_PROVIDER_CONFIG_FILE")
- }
-
- if _, err := os.Lstat(e.ProviderConfigFile); err != nil {
- return fmt.Errorf("error accessing config file: %w", err)
- }
-
- if e.ControllerID == "" {
- return fmt.Errorf("missing GARM_CONTROLLER_ID")
- }
-
- switch e.Command {
- case CreateInstanceCommand:
- if e.BootstrapParams.Name == "" {
- return fmt.Errorf("missing bootstrap params")
- }
- if e.ControllerID == "" {
- return fmt.Errorf("missing controller ID")
- }
- if e.PoolID == "" {
- return fmt.Errorf("missing pool ID")
- }
- case DeleteInstanceCommand, GetInstanceCommand,
- StartInstanceCommand, StopInstanceCommand:
- if e.InstanceID == "" {
- return fmt.Errorf("missing instance ID")
- }
- case ListInstancesCommand:
- if e.PoolID == "" {
- return fmt.Errorf("missing pool ID")
- }
- case RemoveAllInstancesCommand:
- if e.ControllerID == "" {
- return fmt.Errorf("missing controller ID")
- }
- default:
- return fmt.Errorf("unknown GARM_COMMAND: %s", e.Command)
- }
- return nil
-}
-
-func Run(ctx context.Context, provider ExternalProvider, env Environment) (string, error) {
- var ret string
- switch env.Command {
- case CreateInstanceCommand:
- instance, err := provider.CreateInstance(ctx, env.BootstrapParams)
- if err != nil {
- return "", fmt.Errorf("failed to create instance in provider: %w", err)
- }
-
- asJs, err := json.Marshal(instance)
- if err != nil {
- return "", fmt.Errorf("failed to marshal response: %w", err)
- }
- ret = string(asJs)
- case GetInstanceCommand:
- instance, err := provider.GetInstance(ctx, env.InstanceID)
- if err != nil {
- return "", fmt.Errorf("failed to get instance from provider: %w", err)
- }
- asJs, err := json.Marshal(instance)
- if err != nil {
- return "", fmt.Errorf("failed to marshal response: %w", err)
- }
- ret = string(asJs)
- case ListInstancesCommand:
- instances, err := provider.ListInstances(ctx, env.PoolID)
- if err != nil {
- return "", fmt.Errorf("failed to list instances from provider: %w", err)
- }
- asJs, err := json.Marshal(instances)
- if err != nil {
- return "", fmt.Errorf("failed to marshal response: %w", err)
- }
- ret = string(asJs)
- case DeleteInstanceCommand:
- if err := provider.DeleteInstance(ctx, env.InstanceID); err != nil {
- return "", fmt.Errorf("failed to delete instance from provider: %w", err)
- }
- case RemoveAllInstancesCommand:
- if err := provider.RemoveAllInstances(ctx); err != nil {
- return "", fmt.Errorf("failed to destroy environment: %w", err)
- }
- case StartInstanceCommand:
- if err := provider.Start(ctx, env.InstanceID); err != nil {
- return "", fmt.Errorf("failed to start instance: %w", err)
- }
- case StopInstanceCommand:
- if err := provider.Stop(ctx, env.InstanceID, true); err != nil {
- return "", fmt.Errorf("failed to stop instance: %w", err)
- }
- default:
- return "", fmt.Errorf("invalid command: %s", env.Command)
- }
- return ret, nil
-}
diff --git a/vendor/github.com/cloudbase/garm-provider-common/params/github.go b/vendor/github.com/cloudbase/garm-provider-common/params/github.go
new file mode 100644
index 00000000..c3e9a0a4
--- /dev/null
+++ b/vendor/github.com/cloudbase/garm-provider-common/params/github.go
@@ -0,0 +1,75 @@
+// Copyright 2023 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package params
+
+// RunnerApplicationDownload represents a binary for the self-hosted runner application that can be downloaded.
+// This is copied from the go-github package. It does not make sense to create a dependency on go-github just
+// for this struct.
+type RunnerApplicationDownload struct {
+ OS *string `json:"os,omitempty"`
+ Architecture *string `json:"architecture,omitempty"`
+ DownloadURL *string `json:"download_url,omitempty"`
+ Filename *string `json:"filename,omitempty"`
+ TempDownloadToken *string `json:"temp_download_token,omitempty"`
+ SHA256Checksum *string `json:"sha256_checksum,omitempty"`
+}
+
+// GetArchitecture returns the Architecture field if it's non-nil, zero value otherwise.
+func (r *RunnerApplicationDownload) GetArchitecture() string {
+ if r == nil || r.Architecture == nil {
+ return ""
+ }
+ return *r.Architecture
+}
+
+// GetDownloadURL returns the DownloadURL field if it's non-nil, zero value otherwise.
+func (r *RunnerApplicationDownload) GetDownloadURL() string {
+ if r == nil || r.DownloadURL == nil {
+ return ""
+ }
+ return *r.DownloadURL
+}
+
+// GetFilename returns the Filename field if it's non-nil, zero value otherwise.
+func (r *RunnerApplicationDownload) GetFilename() string {
+ if r == nil || r.Filename == nil {
+ return ""
+ }
+ return *r.Filename
+}
+
+// GetOS returns the OS field if it's non-nil, zero value otherwise.
+func (r *RunnerApplicationDownload) GetOS() string {
+ if r == nil || r.OS == nil {
+ return ""
+ }
+ return *r.OS
+}
+
+// GetSHA256Checksum returns the SHA256Checksum field if it's non-nil, zero value otherwise.
+func (r *RunnerApplicationDownload) GetSHA256Checksum() string {
+ if r == nil || r.SHA256Checksum == nil {
+ return ""
+ }
+ return *r.SHA256Checksum
+}
+
+// GetTempDownloadToken returns the TempDownloadToken field if it's non-nil, zero value otherwise.
+func (r *RunnerApplicationDownload) GetTempDownloadToken() string {
+ if r == nil || r.TempDownloadToken == nil {
+ return ""
+ }
+ return *r.TempDownloadToken
+}
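
The pointer fields mirror go-github's JSON shape, while the getters make nil handling uniform: both a nil receiver and a nil field collapse to the zero value. A short sketch:

```go
package main

import (
	"fmt"

	"github.com/cloudbase/garm-provider-common/params"
)

func main() {
	osName := "linux"
	tool := params.RunnerApplicationDownload{OS: &osName}

	fmt.Println(tool.GetOS())       // "linux"
	fmt.Println(tool.GetFilename()) // "" (field is nil)

	// Even a nil *RunnerApplicationDownload is safe to query.
	var missing *params.RunnerApplicationDownload
	fmt.Println(missing.GetDownloadURL()) // ""
}
```
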
diff --git a/vendor/github.com/cloudbase/garm-provider-common/params/params.go b/vendor/github.com/cloudbase/garm-provider-common/params/params.go
index ccee9859..0a63f709 100644
--- a/vendor/github.com/cloudbase/garm-provider-common/params/params.go
+++ b/vendor/github.com/cloudbase/garm-provider-common/params/params.go
@@ -1,9 +1,21 @@
+// Copyright 2023 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
package params
import (
"encoding/json"
-
- "github.com/google/go-github/v53/github"
)
type (
@@ -27,14 +39,16 @@ const (
)
const (
- InstanceRunning InstanceStatus = "running"
- InstanceStopped InstanceStatus = "stopped"
- InstanceError InstanceStatus = "error"
- InstancePendingDelete InstanceStatus = "pending_delete"
- InstanceDeleting InstanceStatus = "deleting"
- InstancePendingCreate InstanceStatus = "pending_create"
- InstanceCreating InstanceStatus = "creating"
- InstanceStatusUnknown InstanceStatus = "unknown"
+ InstanceRunning InstanceStatus = "running"
+ InstanceStopped InstanceStatus = "stopped"
+ InstanceError InstanceStatus = "error"
+ InstancePendingDelete InstanceStatus = "pending_delete"
+ InstancePendingForceDelete InstanceStatus = "pending_force_delete"
+ InstanceDeleting InstanceStatus = "deleting"
+ InstanceDeleted InstanceStatus = "deleted"
+ InstancePendingCreate InstanceStatus = "pending_create"
+ InstanceCreating InstanceStatus = "creating"
+ InstanceStatusUnknown InstanceStatus = "unknown"
)
const (
@@ -49,8 +63,8 @@ type UserDataOptions struct {
}
type BootstrapInstance struct {
- Name string `json:"name"`
- Tools []*github.RunnerApplicationDownload `json:"tools"`
+ Name string `json:"name"`
+ Tools []RunnerApplicationDownload `json:"tools"`
// RepoURL is the URL the github runner agent needs to configure itself.
RepoURL string `json:"repo_url"`
// CallbackUrl is the URL where the instance can send a post, signaling
@@ -104,6 +118,12 @@ type BootstrapInstance struct {
// UserDataOptions are the options for the user data generation.
UserDataOptions UserDataOptions `json:"user_data_options"`
+
+ // JitConfigEnabled is a flag that indicates if the runner should be configured to use
+ // just-in-time configuration. If set to true, providers must attempt to fetch the JIT configuration
+ // from the metadata service instead of the runner registration token. The runner registration token
+ // is not available if the runner is configured to use JIT.
+ JitConfigEnabled bool `json:"jit_config_enabled"`
}
type Address struct {
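
The new `JitConfigEnabled` flag rides along in the bootstrap JSON, letting a provider decide up front whether to fetch a JIT config or a registration token from the metadata service. A hedged sketch of the decode side; the payload is illustrative:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/cloudbase/garm-provider-common/params"
)

func main() {
	raw := []byte(`{"name": "garm-runner-01", "jit_config_enabled": true}`)

	var bootstrap params.BootstrapInstance
	if err := json.Unmarshal(raw, &bootstrap); err != nil {
		panic(err)
	}

	if bootstrap.JitConfigEnabled {
		fmt.Println("fetch the JIT config from the metadata service")
	} else {
		fmt.Println("fetch a runner registration token")
	}
}
```
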
diff --git a/vendor/github.com/cloudbase/garm-provider-common/util/exec/exec.go b/vendor/github.com/cloudbase/garm-provider-common/util/exec/exec.go
index 654b0955..cc417f6d 100644
--- a/vendor/github.com/cloudbase/garm-provider-common/util/exec/exec.go
+++ b/vendor/github.com/cloudbase/garm-provider-common/util/exec/exec.go
@@ -1,3 +1,17 @@
+// Copyright 2023 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
package exec
import (
diff --git a/vendor/github.com/cloudbase/garm-provider-common/util/exec/exec_nix.go b/vendor/github.com/cloudbase/garm-provider-common/util/exec/exec_nix.go
index 1525eca6..4aaea613 100644
--- a/vendor/github.com/cloudbase/garm-provider-common/util/exec/exec_nix.go
+++ b/vendor/github.com/cloudbase/garm-provider-common/util/exec/exec_nix.go
@@ -1,6 +1,20 @@
//go:build !windows
// +build !windows
+// Copyright 2023 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
package exec
import (
diff --git a/vendor/github.com/cloudbase/garm-provider-common/util/exec/exec_windows.go b/vendor/github.com/cloudbase/garm-provider-common/util/exec/exec_windows.go
index 0c17839c..dfcc6225 100644
--- a/vendor/github.com/cloudbase/garm-provider-common/util/exec/exec_windows.go
+++ b/vendor/github.com/cloudbase/garm-provider-common/util/exec/exec_windows.go
@@ -1,3 +1,17 @@
+// Copyright 2023 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
package exec
import (
diff --git a/vendor/github.com/cloudbase/garm-provider-common/util/seal.go b/vendor/github.com/cloudbase/garm-provider-common/util/seal.go
new file mode 100644
index 00000000..0033ce47
--- /dev/null
+++ b/vendor/github.com/cloudbase/garm-provider-common/util/seal.go
@@ -0,0 +1,170 @@
+// Copyright 2023 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package util
+
+import (
+ "bytes"
+ "crypto/aes"
+ "crypto/cipher"
+ "crypto/rand"
+ "crypto/sha256"
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/minio/sio"
+ "github.com/pkg/errors"
+ "golang.org/x/crypto/hkdf"
+)
+
+type Envelope struct {
+ Nonce [32]byte `json:"nonce"`
+ Data []byte `json:"data"`
+}
+
+// Seal will encrypt the given data using a derived key from the given passphrase.
+// This function is meant to be used with small datasets like passwords, keys and
+// secrets of any type, before they are saved to disk.
+func Seal(data []byte, passphrase []byte) ([]byte, error) {
+ if len(passphrase) != 32 {
+ return nil, fmt.Errorf("invalid passphrase length (expected length 32 characters)")
+ }
+
+ var nonce [32]byte
+ if _, err := io.ReadFull(rand.Reader, nonce[:]); err != nil {
+ return nil, fmt.Errorf("failed to read random data: %w", err)
+ }
+
+ // derive an encryption key from the master key and the nonce
+ var key [32]byte
+ kdf := hkdf.New(sha256.New, passphrase, nonce[:], nil)
+ if _, err := io.ReadFull(kdf, key[:]); err != nil {
+ return nil, fmt.Errorf("failed to derive encryption key: %w", err)
+ }
+
+ input := bytes.NewReader(data)
+ output := bytes.NewBuffer(nil)
+
+ if _, err := sio.Encrypt(output, input, sio.Config{Key: key[:]}); err != nil {
+ return nil, fmt.Errorf("failed to encrypt data: %w", err)
+ }
+ envelope := Envelope{
+ Data: output.Bytes(),
+ Nonce: nonce,
+ }
+ asJs, err := json.Marshal(envelope)
+ if err != nil {
+ return nil, fmt.Errorf("failed to marshal envelope: %w", err)
+ }
+ return asJs, nil
+}
+
+// Unseal will decrypt the given data using a derived key from the given passphrase.
+// This function is meant to be used with small datasets like passwords, keys and
+// secrets of any type, after they are read from disk.
+func Unseal(data []byte, passphrase []byte) ([]byte, error) {
+ if len(passphrase) != 32 {
+ return nil, fmt.Errorf("invalid passphrase length (expected length 32 characters)")
+ }
+
+ var envelope Envelope
+ if err := json.Unmarshal(data, &envelope); err != nil {
+ return Aes256Decode(data, string(passphrase))
+ }
+
+ // derive an encryption key from the master key and the nonce
+ var key [32]byte
+ kdf := hkdf.New(sha256.New, passphrase, envelope.Nonce[:], nil)
+ if _, err := io.ReadFull(kdf, key[:]); err != nil {
+ return nil, fmt.Errorf("failed to derive encryption key: %w", err)
+ }
+
+ input := bytes.NewReader(envelope.Data)
+ output := bytes.NewBuffer(nil)
+
+ if _, err := sio.Decrypt(output, input, sio.Config{Key: key[:]}); err != nil {
+ return nil, fmt.Errorf("failed to decrypt data: %w", err)
+ }
+
+ return output.Bytes(), nil
+}
+
+func Aes256Encode(target []byte, passphrase string) ([]byte, error) {
+ if len(passphrase) != 32 {
+ return nil, fmt.Errorf("invalid passphrase length (expected length 32 characters)")
+ }
+
+ block, err := aes.NewCipher([]byte(passphrase))
+ if err != nil {
+ return nil, errors.Wrap(err, "creating cipher")
+ }
+
+ aesgcm, err := cipher.NewGCM(block)
+ if err != nil {
+ return nil, errors.Wrap(err, "creating new aead")
+ }
+
+ nonce := make([]byte, aesgcm.NonceSize())
+ if _, err := io.ReadFull(rand.Reader, nonce); err != nil {
+ return nil, errors.Wrap(err, "creating nonce")
+ }
+
+ ciphertext := aesgcm.Seal(nonce, nonce, target, nil)
+ return ciphertext, nil
+}
+
+func Aes256EncodeString(target string, passphrase string) ([]byte, error) {
+ if len(passphrase) != 32 {
+ return nil, fmt.Errorf("invalid passphrase length (expected length 32 characters)")
+ }
+
+ return Aes256Encode([]byte(target), passphrase)
+}
+
+func Aes256Decode(target []byte, passphrase string) ([]byte, error) {
+ if len(passphrase) != 32 {
+ return nil, fmt.Errorf("invalid passphrase length (expected length 32 characters)")
+ }
+
+ block, err := aes.NewCipher([]byte(passphrase))
+ if err != nil {
+ return nil, errors.Wrap(err, "creating cipher")
+ }
+
+ aesgcm, err := cipher.NewGCM(block)
+ if err != nil {
+ return nil, errors.Wrap(err, "creating new aead")
+ }
+
+ nonceSize := aesgcm.NonceSize()
+ if len(target) < nonceSize {
+ return nil, fmt.Errorf("failed to decrypt text")
+ }
+
+ nonce, ciphertext := target[:nonceSize], target[nonceSize:]
+ plaintext, err := aesgcm.Open(nil, nonce, ciphertext, nil)
+ if err != nil {
+ return nil, fmt.Errorf("failed to decrypt text")
+ }
+ return plaintext, nil
+}
+
+func Aes256DecodeString(target []byte, passphrase string) (string, error) {
+ data, err := Aes256Decode(target, passphrase)
+ if err != nil {
+ return "", err
+ }
+ return string(data), nil
+}
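
Seal and Unseal are symmetric, and Unseal falls back to Aes256Decode whenever the payload is not a JSON envelope, so secrets written by the legacy format stay readable. A round-trip sketch with an example 32-byte passphrase:

```go
package main

import (
	"fmt"

	"github.com/cloudbase/garm-provider-common/util"
)

func main() {
	// The passphrase must be exactly 32 bytes; this value is only an example.
	passphrase := []byte("0123456789abcdef0123456789abcdef")

	sealed, err := util.Seal([]byte("a runner registration token"), passphrase)
	if err != nil {
		panic(err)
	}

	plain, err := util.Unseal(sealed, passphrase)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(plain)) // "a runner registration token"
}
```
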
diff --git a/vendor/github.com/cloudbase/garm-provider-common/util/util.go b/vendor/github.com/cloudbase/garm-provider-common/util/util.go
index 1f9e9415..36ce09ba 100644
--- a/vendor/github.com/cloudbase/garm-provider-common/util/util.go
+++ b/vendor/github.com/cloudbase/garm-provider-common/util/util.go
@@ -17,8 +17,6 @@ package util
import (
"bytes"
"compress/gzip"
- "crypto/aes"
- "crypto/cipher"
"crypto/rand"
"encoding/base64"
"encoding/binary"
@@ -34,10 +32,8 @@ import (
"unicode/utf16"
runnerErrors "github.com/cloudbase/garm-provider-common/errors"
+ "github.com/cloudbase/garm-provider-common/params"
- commonParams "github.com/cloudbase/garm-provider-common/params"
-
- "github.com/google/go-github/v53/github"
"github.com/google/uuid"
gorillaHandlers "github.com/gorilla/handlers"
"github.com/pkg/errors"
@@ -52,24 +48,24 @@ const alphanumeric = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuv
var rxEmail = regexp.MustCompile("^[a-zA-Z0-9.!#$%&'*+\\/=?^_`{|}~-]+@[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*$")
var (
- OSToOSTypeMap map[string]commonParams.OSType = map[string]commonParams.OSType{
- "almalinux": commonParams.Linux,
- "alma": commonParams.Linux,
- "alpine": commonParams.Linux,
- "archlinux": commonParams.Linux,
- "arch": commonParams.Linux,
- "centos": commonParams.Linux,
- "ubuntu": commonParams.Linux,
- "rhel": commonParams.Linux,
- "suse": commonParams.Linux,
- "opensuse": commonParams.Linux,
- "fedora": commonParams.Linux,
- "debian": commonParams.Linux,
- "flatcar": commonParams.Linux,
- "gentoo": commonParams.Linux,
- "rockylinux": commonParams.Linux,
- "rocky": commonParams.Linux,
- "windows": commonParams.Windows,
+ OSToOSTypeMap map[string]params.OSType = map[string]params.OSType{
+ "almalinux": params.Linux,
+ "alma": params.Linux,
+ "alpine": params.Linux,
+ "archlinux": params.Linux,
+ "arch": params.Linux,
+ "centos": params.Linux,
+ "ubuntu": params.Linux,
+ "rhel": params.Linux,
+ "suse": params.Linux,
+ "opensuse": params.Linux,
+ "fedora": params.Linux,
+ "debian": params.Linux,
+ "flatcar": params.Linux,
+ "gentoo": params.Linux,
+ "rockylinux": params.Linux,
+ "rocky": params.Linux,
+ "windows": params.Windows,
}
githubArchMapping map[string]string = map[string]string{
@@ -88,9 +84,9 @@ var (
}
//
- githubOSTag = map[commonParams.OSType]string{
- commonParams.Linux: "Linux",
- commonParams.Windows: "Windows",
+ githubOSTag = map[params.OSType]string{
+ params.Linux: "Linux",
+ params.Windows: "Windows",
}
)
@@ -121,7 +117,7 @@ func ResolveToGithubOSType(osType string) (string, error) {
// ResolveToGithubTag returns the default OS tag that self hosted runners automatically
// (and forcefully) adds to every runner that gets deployed. We need to keep track of those
// tags internally as well.
-func ResolveToGithubTag(os commonParams.OSType) (string, error) {
+func ResolveToGithubTag(os params.OSType) (string, error) {
ghOS, ok := githubOSTag[os]
if !ok {
return "", runnerErrors.NewNotFoundError("os %s is unknown", os)
@@ -180,37 +176,34 @@ func ConvertFileToBase64(file string) (string, error) {
return base64.StdEncoding.EncodeToString(bytes), nil
}
-func OSToOSType(os string) (commonParams.OSType, error) {
+func OSToOSType(os string) (params.OSType, error) {
osType, ok := OSToOSTypeMap[strings.ToLower(os)]
if !ok {
- return commonParams.Unknown, fmt.Errorf("no OS to OS type mapping for %s", os)
+ return params.Unknown, fmt.Errorf("no OS to OS type mapping for %s", os)
}
return osType, nil
}
-func GetTools(osType commonParams.OSType, osArch commonParams.OSArch, tools []*github.RunnerApplicationDownload) (github.RunnerApplicationDownload, error) {
+func GetTools(osType params.OSType, osArch params.OSArch, tools []params.RunnerApplicationDownload) (params.RunnerApplicationDownload, error) {
// Validate image OS. Linux only for now.
switch osType {
- case commonParams.Linux:
- case commonParams.Windows:
+ case params.Linux:
+ case params.Windows:
default:
- return github.RunnerApplicationDownload{}, fmt.Errorf("unsupported OS type: %s", osType)
+ return params.RunnerApplicationDownload{}, fmt.Errorf("unsupported OS type: %s", osType)
}
switch osArch {
- case commonParams.Amd64:
- case commonParams.Arm:
- case commonParams.Arm64:
+ case params.Amd64:
+ case params.Arm:
+ case params.Arm64:
default:
- return github.RunnerApplicationDownload{}, fmt.Errorf("unsupported OS arch: %s", osArch)
+ return params.RunnerApplicationDownload{}, fmt.Errorf("unsupported OS arch: %s", osArch)
}
// Find tools for OS/Arch.
for _, tool := range tools {
- if tool == nil {
- continue
- }
- if tool.OS == nil || tool.Architecture == nil {
+ if tool.GetOS() == "" || tool.GetArchitecture() == "" {
continue
}
@@ -223,11 +216,11 @@ func GetTools(osType commonParams.OSType, osArch commonParams.OSArch, tools []*g
if err != nil {
continue
}
- if *tool.Architecture == ghArch && *tool.OS == ghOS {
- return *tool, nil
+ if tool.GetArchitecture() == ghArch && tool.GetOS() == ghOS {
+ return tool, nil
}
}
- return github.RunnerApplicationDownload{}, fmt.Errorf("failed to find tools for OS %s and arch %s", osType, osArch)
+ return params.RunnerApplicationDownload{}, fmt.Errorf("failed to find tools for OS %s and arch %s", osType, osArch)
}
// GetRandomString returns a secure random string
@@ -244,59 +237,6 @@ func GetRandomString(n int) (string, error) {
return string(data), nil
}
-func Aes256EncodeString(target string, passphrase string) ([]byte, error) {
- if len(passphrase) != 32 {
- return nil, fmt.Errorf("invalid passphrase length (expected length 32 characters)")
- }
-
- toEncrypt := []byte(target)
- block, err := aes.NewCipher([]byte(passphrase))
- if err != nil {
- return nil, errors.Wrap(err, "creating cipher")
- }
-
- aesgcm, err := cipher.NewGCM(block)
- if err != nil {
- return nil, errors.Wrap(err, "creating new aead")
- }
-
- nonce := make([]byte, aesgcm.NonceSize())
- if _, err := io.ReadFull(rand.Reader, nonce); err != nil {
- return nil, errors.Wrap(err, "creating nonce")
- }
-
- ciphertext := aesgcm.Seal(nonce, nonce, toEncrypt, nil)
- return ciphertext, nil
-}
-
-func Aes256DecodeString(target []byte, passphrase string) (string, error) {
- if len(passphrase) != 32 {
- return "", fmt.Errorf("invalid passphrase length (expected length 32 characters)")
- }
-
- block, err := aes.NewCipher([]byte(passphrase))
- if err != nil {
- return "", errors.Wrap(err, "creating cipher")
- }
-
- aesgcm, err := cipher.NewGCM(block)
- if err != nil {
- return "", errors.Wrap(err, "creating new aead")
- }
-
- nonceSize := aesgcm.NonceSize()
- if len(target) < nonceSize {
- return "", fmt.Errorf("failed to decrypt text")
- }
-
- nonce, ciphertext := target[:nonceSize], target[nonceSize:]
- plaintext, err := aesgcm.Open(nil, nonce, ciphertext, nil)
- if err != nil {
- return "", fmt.Errorf("failed to decrypt text")
- }
- return string(plaintext), nil
-}
-
// PaswsordToBcrypt returns a bcrypt hash of the specified password using the default cost
func PaswsordToBcrypt(password string) (string, error) {
hashedPassword, err := bcrypt.GenerateFromPassword([]byte(password), bcrypt.DefaultCost)
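
Dropping go-github means GetTools now consumes the package's own RunnerApplicationDownload slice, and the old nil-pointer guard becomes a zero-value check through the getters. A sketch of the call; the tool values are illustrative:

```go
package main

import (
	"fmt"

	"github.com/cloudbase/garm-provider-common/params"
	"github.com/cloudbase/garm-provider-common/util"
)

func strPtr(s string) *string { return &s }

func main() {
	tools := []params.RunnerApplicationDownload{
		{
			OS:           strPtr("linux"),
			Architecture: strPtr("x64"),
			Filename:     strPtr("actions-runner-linux-x64.tar.gz"),
			DownloadURL:  strPtr("https://example.com/actions-runner-linux-x64.tar.gz"),
		},
	}

	tool, err := util.GetTools(params.Linux, params.Amd64, tools)
	if err != nil {
		fmt.Println("no matching tools:", err)
		return
	}
	fmt.Println(tool.GetFilename())
}
```
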
diff --git a/vendor/github.com/cloudbase/garm-provider-common/util/websocket/reader.go b/vendor/github.com/cloudbase/garm-provider-common/util/websocket/reader.go
new file mode 100644
index 00000000..92ed2edf
--- /dev/null
+++ b/vendor/github.com/cloudbase/garm-provider-common/util/websocket/reader.go
@@ -0,0 +1,184 @@
+package websocket
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "log/slog"
+ "net/http"
+ "net/url"
+ "sync"
+ "time"
+
+ "github.com/gorilla/websocket"
+)
+
+const (
+ // Time allowed to write a message to the peer.
+ writeWait = 10 * time.Second
+
+ // Time allowed to read the next pong message from the peer.
+ pongWait = 60 * time.Second
+
+ // Send pings to peer with this period. Must be less than pongWait.
+ pingPeriod = (pongWait * 9) / 10
+
+ // Maximum message size allowed from peer.
+ maxMessageSize = 16384 // 16 KB
+)
+
+// MessageHandler is a function that processes a message received from a websocket connection.
+type MessageHandler func(msgType int, msg []byte) error
+
+type APIErrorResponse struct {
+ Error string `json:"error"`
+ Details string `json:"details"`
+}
+
+// NewReader creates a new websocket reader. The reader will pass on any message it receives to the
+// handler function. The handler function should return an error if it fails to process the message.
+func NewReader(ctx context.Context, baseURL, pth, token string, handler MessageHandler) (*Reader, error) {
+ parsedURL, err := url.Parse(baseURL)
+ if err != nil {
+ return nil, err
+ }
+
+ wsScheme := "ws"
+ if parsedURL.Scheme == "https" {
+ wsScheme = "wss"
+ }
+ u := url.URL{Scheme: wsScheme, Host: parsedURL.Host, Path: pth}
+ header := http.Header{}
+ header.Add("Authorization", fmt.Sprintf("Bearer %s", token))
+
+ return &Reader{
+ ctx: ctx,
+ url: u,
+ header: header,
+ handler: handler,
+ done: make(chan struct{}),
+ }, nil
+}
+
+type Reader struct {
+ ctx context.Context
+ url url.URL
+ header http.Header
+
+ done chan struct{}
+ running bool
+
+ handler MessageHandler
+
+ conn *websocket.Conn
+ mux sync.Mutex
+ writeMux sync.Mutex
+}
+
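+// Stop closes the websocket connection and signals Done. Calling Stop on a
+// reader that is not running is a no-op.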
+func (w *Reader) Stop() {
+ w.mux.Lock()
+ defer w.mux.Unlock()
+ if !w.running {
+ return
+ }
+ w.conn.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, ""))
+ w.conn.Close()
+ close(w.done)
+ w.running = false
+}
+
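+// Done returns a channel that is closed once the reader shuts down.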
+func (w *Reader) Done() <-chan struct{} {
+ return w.done
+}
+
+func (w *Reader) WriteMessage(messageType int, data []byte) error {
+ // The websocket package does not support concurrent writes and panics if it
+ // detects that one has occurred, so we need to lock the writeMux to prevent
+ // concurrent writes to the same connection.
+ w.writeMux.Lock()
+ defer w.writeMux.Unlock()
+ if !w.running {
+ return fmt.Errorf("websocket is not running")
+ }
+ if err := w.conn.SetWriteDeadline(time.Now().Add(writeWait)); err != nil {
+ return err
+ }
+ return w.conn.WriteMessage(messageType, data)
+}
+
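+// Start dials the websocket endpoint and spawns the ping and read loops.
+// Calling Start on a running reader is a no-op.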
+func (w *Reader) Start() error {
+ w.mux.Lock()
+ defer w.mux.Unlock()
+ if w.running {
+ return nil
+ }
+
+ c, response, err := websocket.DefaultDialer.Dial(w.url.String(), w.header)
+ if err != nil {
+ var resp APIErrorResponse
+ var msg string
+ var status string
+ if response != nil {
+ if response.Body != nil {
+ if err := json.NewDecoder(response.Body).Decode(&resp); err == nil {
+ msg = resp.Details
+ }
+ }
+ status = response.Status
+ }
+ return fmt.Errorf("failed to connect to websocket: %q %s (%s)", err, msg, status)
+ }
+ w.conn = c
+ w.running = true
+ go w.loop()
+ go w.handlerReader()
+ return nil
+}
+
+func (w *Reader) handlerReader() {
+ defer w.Stop()
+ w.writeMux.Lock()
+ w.conn.SetReadLimit(maxMessageSize)
+ w.conn.SetReadDeadline(time.Now().Add(pongWait))
+ w.conn.SetPongHandler(func(string) error { w.conn.SetReadDeadline(time.Now().Add(pongWait)); return nil })
+ w.writeMux.Unlock()
+ for {
+ msgType, message, err := w.conn.ReadMessage()
+ if err != nil {
+ if IsErrorOfInterest(err) {
+ // TODO(gabriel-samfira): we should allow for an error channel that can be used to signal
+ // the caller that the connection has been closed.
+ slog.With(slog.Any("error", err)).Error("reading log message")
+ }
+ return
+ }
+ if w.handler != nil {
+ if err := w.handler(msgType, message); err != nil {
+ slog.With(slog.Any("error", err)).Error("handling log message")
+ }
+ }
+ }
+}
+
+func (w *Reader) loop() {
+ defer w.Stop()
+ ticker := time.NewTicker(pingPeriod)
+ defer ticker.Stop()
+ for {
+ select {
+ case <-w.ctx.Done():
+ return
+ case <-w.Done():
+ return
+ case <-ticker.C:
+ w.writeMux.Lock()
+ w.conn.SetWriteDeadline(time.Now().Add(writeWait))
+ err := w.conn.WriteMessage(websocket.PingMessage, nil)
+ if err != nil {
+ w.writeMux.Unlock()
+ return
+ }
+ w.writeMux.Unlock()
+ }
+ }
+}
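
A typical consumer wires NewReader to a handler and lets the ping loop and pong deadlines keep the connection alive. A hedged sketch; the base URL, path and token are placeholders:

```go
package main

import (
	"context"
	"fmt"

	"github.com/cloudbase/garm-provider-common/util/websocket"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Print every message the server pushes.
	handler := func(_ int, msg []byte) error {
		fmt.Println(string(msg))
		return nil
	}

	reader, err := websocket.NewReader(ctx, "https://garm.example.com", "/api/v1/ws", "<token>", handler)
	if err != nil {
		panic(err)
	}
	if err := reader.Start(); err != nil {
		panic(err)
	}
	defer reader.Stop()

	<-reader.Done() // block until the connection closes
}
```
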
diff --git a/vendor/github.com/cloudbase/garm-provider-common/util/websocket/util.go b/vendor/github.com/cloudbase/garm-provider-common/util/websocket/util.go
new file mode 100644
index 00000000..88c02fa5
--- /dev/null
+++ b/vendor/github.com/cloudbase/garm-provider-common/util/websocket/util.go
@@ -0,0 +1,37 @@
+package websocket
+
+import (
+ "errors"
+ "net"
+
+ "github.com/gorilla/websocket"
+)
+
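+// IsErrorOfInterest reports whether a websocket error is unexpected and worth
+// logging. Normal closures, close-sent errors and already closed network
+// connections are filtered out.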
+func IsErrorOfInterest(err error) bool {
+ if err == nil {
+ return false
+ }
+
+ if errors.Is(err, websocket.ErrCloseSent) {
+ return false
+ }
+
+ if errors.Is(err, websocket.ErrBadHandshake) {
+ return false
+ }
+
+ if errors.Is(err, net.ErrClosed) {
+ return false
+ }
+
+ asCloseErr, ok := err.(*websocket.CloseError)
+ if ok {
+ switch asCloseErr.Code {
+ case websocket.CloseNormalClosure, websocket.CloseGoingAway,
+ websocket.CloseNoStatusReceived, websocket.CloseAbnormalClosure:
+ return false
+ }
+ }
+
+ return true
+}
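
IsErrorOfInterest filters out the expected shutdown noise (normal closures, close-sent errors, closed network connections) so callers only log genuinely unexpected failures. A small sketch:

```go
package main

import (
	"fmt"

	gws "github.com/cloudbase/garm-provider-common/util/websocket"
	"github.com/gorilla/websocket"
)

func main() {
	// A normal closure is expected shutdown noise, not worth logging.
	closeErr := &websocket.CloseError{Code: websocket.CloseNormalClosure}
	fmt.Println(gws.IsErrorOfInterest(closeErr)) // false

	// An arbitrary error, on the other hand, should be surfaced.
	fmt.Println(gws.IsErrorOfInterest(fmt.Errorf("connection reset"))) // true
}
```
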
diff --git a/vendor/github.com/cloudflare/circl/LICENSE b/vendor/github.com/cloudflare/circl/LICENSE
deleted file mode 100644
index 67edaa90..00000000
--- a/vendor/github.com/cloudflare/circl/LICENSE
+++ /dev/null
@@ -1,57 +0,0 @@
-Copyright (c) 2019 Cloudflare. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Cloudflare nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-========================================================================
-
-Copyright (c) 2009 The Go Authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/cloudflare/circl/dh/x25519/curve.go b/vendor/github.com/cloudflare/circl/dh/x25519/curve.go
deleted file mode 100644
index f9057c2b..00000000
--- a/vendor/github.com/cloudflare/circl/dh/x25519/curve.go
+++ /dev/null
@@ -1,96 +0,0 @@
-package x25519
-
-import (
- fp "github.com/cloudflare/circl/math/fp25519"
-)
-
-// ladderJoye calculates a fixed-point multiplication with the generator point.
-// The algorithm is the right-to-left Joye's ladder as described
-// in "How to precompute a ladder" in SAC'2017.
-func ladderJoye(k *Key) {
- w := [5]fp.Elt{} // [mu,x1,z1,x2,z2] order must be preserved.
- fp.SetOne(&w[1]) // x1 = 1
- fp.SetOne(&w[2]) // z1 = 1
- w[3] = fp.Elt{ // x2 = G-S
- 0xbd, 0xaa, 0x2f, 0xc8, 0xfe, 0xe1, 0x94, 0x7e,
- 0xf8, 0xed, 0xb2, 0x14, 0xae, 0x95, 0xf0, 0xbb,
- 0xe2, 0x48, 0x5d, 0x23, 0xb9, 0xa0, 0xc7, 0xad,
- 0x34, 0xab, 0x7c, 0xe2, 0xee, 0xcd, 0xae, 0x1e,
- }
- fp.SetOne(&w[4]) // z2 = 1
-
- const n = 255
- const h = 3
- swap := uint(1)
- for s := 0; s < n-h; s++ {
- i := (s + h) / 8
- j := (s + h) % 8
- bit := uint((k[i] >> uint(j)) & 1)
- copy(w[0][:], tableGenerator[s*Size:(s+1)*Size])
- diffAdd(&w, swap^bit)
- swap = bit
- }
- for s := 0; s < h; s++ {
- double(&w[1], &w[2])
- }
- toAffine((*[fp.Size]byte)(k), &w[1], &w[2])
-}
-
-// ladderMontgomery calculates a generic scalar point multiplication
-// The algorithm implemented is the left-to-right Montgomery's ladder.
-func ladderMontgomery(k, xP *Key) {
- w := [5]fp.Elt{} // [x1, x2, z2, x3, z3] order must be preserved.
- w[0] = *(*fp.Elt)(xP) // x1 = xP
- fp.SetOne(&w[1]) // x2 = 1
- w[3] = *(*fp.Elt)(xP) // x3 = xP
- fp.SetOne(&w[4]) // z3 = 1
-
- move := uint(0)
- for s := 255 - 1; s >= 0; s-- {
- i := s / 8
- j := s % 8
- bit := uint((k[i] >> uint(j)) & 1)
- ladderStep(&w, move^bit)
- move = bit
- }
- toAffine((*[fp.Size]byte)(k), &w[1], &w[2])
-}
-
-func toAffine(k *[fp.Size]byte, x, z *fp.Elt) {
- fp.Inv(z, z)
- fp.Mul(x, x, z)
- _ = fp.ToBytes(k[:], x)
-}
-
-var lowOrderPoints = [5]fp.Elt{
- { /* (0,_,1) point of order 2 on Curve25519 */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- },
- { /* (1,_,1) point of order 4 on Curve25519 */
- 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- },
- { /* (x,_,1) first point of order 8 on Curve25519 */
- 0xe0, 0xeb, 0x7a, 0x7c, 0x3b, 0x41, 0xb8, 0xae,
- 0x16, 0x56, 0xe3, 0xfa, 0xf1, 0x9f, 0xc4, 0x6a,
- 0xda, 0x09, 0x8d, 0xeb, 0x9c, 0x32, 0xb1, 0xfd,
- 0x86, 0x62, 0x05, 0x16, 0x5f, 0x49, 0xb8, 0x00,
- },
- { /* (x,_,1) second point of order 8 on Curve25519 */
- 0x5f, 0x9c, 0x95, 0xbc, 0xa3, 0x50, 0x8c, 0x24,
- 0xb1, 0xd0, 0xb1, 0x55, 0x9c, 0x83, 0xef, 0x5b,
- 0x04, 0x44, 0x5c, 0xc4, 0x58, 0x1c, 0x8e, 0x86,
- 0xd8, 0x22, 0x4e, 0xdd, 0xd0, 0x9f, 0x11, 0x57,
- },
- { /* (-1,_,1) a point of order 4 on the twist of Curve25519 */
- 0xec, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f,
- },
-}
diff --git a/vendor/github.com/cloudflare/circl/dh/x25519/curve_amd64.go b/vendor/github.com/cloudflare/circl/dh/x25519/curve_amd64.go
deleted file mode 100644
index 8a3d54c5..00000000
--- a/vendor/github.com/cloudflare/circl/dh/x25519/curve_amd64.go
+++ /dev/null
@@ -1,30 +0,0 @@
-//go:build amd64 && !purego
-// +build amd64,!purego
-
-package x25519
-
-import (
- fp "github.com/cloudflare/circl/math/fp25519"
- "golang.org/x/sys/cpu"
-)
-
-var hasBmi2Adx = cpu.X86.HasBMI2 && cpu.X86.HasADX
-
-var _ = hasBmi2Adx
-
-func double(x, z *fp.Elt) { doubleAmd64(x, z) }
-func diffAdd(w *[5]fp.Elt, b uint) { diffAddAmd64(w, b) }
-func ladderStep(w *[5]fp.Elt, b uint) { ladderStepAmd64(w, b) }
-func mulA24(z, x *fp.Elt) { mulA24Amd64(z, x) }
-
-//go:noescape
-func ladderStepAmd64(w *[5]fp.Elt, b uint)
-
-//go:noescape
-func diffAddAmd64(w *[5]fp.Elt, b uint)
-
-//go:noescape
-func doubleAmd64(x, z *fp.Elt)
-
-//go:noescape
-func mulA24Amd64(z, x *fp.Elt)
diff --git a/vendor/github.com/cloudflare/circl/dh/x25519/curve_amd64.h b/vendor/github.com/cloudflare/circl/dh/x25519/curve_amd64.h
deleted file mode 100644
index 8c1ae4d0..00000000
--- a/vendor/github.com/cloudflare/circl/dh/x25519/curve_amd64.h
+++ /dev/null
@@ -1,111 +0,0 @@
-#define ladderStepLeg \
- addSub(x2,z2) \
- addSub(x3,z3) \
- integerMulLeg(b0,x2,z3) \
- integerMulLeg(b1,x3,z2) \
- reduceFromDoubleLeg(t0,b0) \
- reduceFromDoubleLeg(t1,b1) \
- addSub(t0,t1) \
- cselect(x2,x3,regMove) \
- cselect(z2,z3,regMove) \
- integerSqrLeg(b0,t0) \
- integerSqrLeg(b1,t1) \
- reduceFromDoubleLeg(x3,b0) \
- reduceFromDoubleLeg(z3,b1) \
- integerMulLeg(b0,x1,z3) \
- reduceFromDoubleLeg(z3,b0) \
- integerSqrLeg(b0,x2) \
- integerSqrLeg(b1,z2) \
- reduceFromDoubleLeg(x2,b0) \
- reduceFromDoubleLeg(z2,b1) \
- subtraction(t0,x2,z2) \
- multiplyA24Leg(t1,t0) \
- additionLeg(t1,t1,z2) \
- integerMulLeg(b0,x2,z2) \
- integerMulLeg(b1,t0,t1) \
- reduceFromDoubleLeg(x2,b0) \
- reduceFromDoubleLeg(z2,b1)
-
-#define ladderStepBmi2Adx \
- addSub(x2,z2) \
- addSub(x3,z3) \
- integerMulAdx(b0,x2,z3) \
- integerMulAdx(b1,x3,z2) \
- reduceFromDoubleAdx(t0,b0) \
- reduceFromDoubleAdx(t1,b1) \
- addSub(t0,t1) \
- cselect(x2,x3,regMove) \
- cselect(z2,z3,regMove) \
- integerSqrAdx(b0,t0) \
- integerSqrAdx(b1,t1) \
- reduceFromDoubleAdx(x3,b0) \
- reduceFromDoubleAdx(z3,b1) \
- integerMulAdx(b0,x1,z3) \
- reduceFromDoubleAdx(z3,b0) \
- integerSqrAdx(b0,x2) \
- integerSqrAdx(b1,z2) \
- reduceFromDoubleAdx(x2,b0) \
- reduceFromDoubleAdx(z2,b1) \
- subtraction(t0,x2,z2) \
- multiplyA24Adx(t1,t0) \
- additionAdx(t1,t1,z2) \
- integerMulAdx(b0,x2,z2) \
- integerMulAdx(b1,t0,t1) \
- reduceFromDoubleAdx(x2,b0) \
- reduceFromDoubleAdx(z2,b1)
-
-#define difAddLeg \
- addSub(x1,z1) \
- integerMulLeg(b0,z1,ui) \
- reduceFromDoubleLeg(z1,b0) \
- addSub(x1,z1) \
- integerSqrLeg(b0,x1) \
- integerSqrLeg(b1,z1) \
- reduceFromDoubleLeg(x1,b0) \
- reduceFromDoubleLeg(z1,b1) \
- integerMulLeg(b0,x1,z2) \
- integerMulLeg(b1,z1,x2) \
- reduceFromDoubleLeg(x1,b0) \
- reduceFromDoubleLeg(z1,b1)
-
-#define difAddBmi2Adx \
- addSub(x1,z1) \
- integerMulAdx(b0,z1,ui) \
- reduceFromDoubleAdx(z1,b0) \
- addSub(x1,z1) \
- integerSqrAdx(b0,x1) \
- integerSqrAdx(b1,z1) \
- reduceFromDoubleAdx(x1,b0) \
- reduceFromDoubleAdx(z1,b1) \
- integerMulAdx(b0,x1,z2) \
- integerMulAdx(b1,z1,x2) \
- reduceFromDoubleAdx(x1,b0) \
- reduceFromDoubleAdx(z1,b1)
-
-#define doubleLeg \
- addSub(x1,z1) \
- integerSqrLeg(b0,x1) \
- integerSqrLeg(b1,z1) \
- reduceFromDoubleLeg(x1,b0) \
- reduceFromDoubleLeg(z1,b1) \
- subtraction(t0,x1,z1) \
- multiplyA24Leg(t1,t0) \
- additionLeg(t1,t1,z1) \
- integerMulLeg(b0,x1,z1) \
- integerMulLeg(b1,t0,t1) \
- reduceFromDoubleLeg(x1,b0) \
- reduceFromDoubleLeg(z1,b1)
-
-#define doubleBmi2Adx \
- addSub(x1,z1) \
- integerSqrAdx(b0,x1) \
- integerSqrAdx(b1,z1) \
- reduceFromDoubleAdx(x1,b0) \
- reduceFromDoubleAdx(z1,b1) \
- subtraction(t0,x1,z1) \
- multiplyA24Adx(t1,t0) \
- additionAdx(t1,t1,z1) \
- integerMulAdx(b0,x1,z1) \
- integerMulAdx(b1,t0,t1) \
- reduceFromDoubleAdx(x1,b0) \
- reduceFromDoubleAdx(z1,b1)
diff --git a/vendor/github.com/cloudflare/circl/dh/x25519/curve_amd64.s b/vendor/github.com/cloudflare/circl/dh/x25519/curve_amd64.s
deleted file mode 100644
index b7723185..00000000
--- a/vendor/github.com/cloudflare/circl/dh/x25519/curve_amd64.s
+++ /dev/null
@@ -1,156 +0,0 @@
-// +build amd64
-
-#include "textflag.h"
-
-// Depends on circl/math/fp25519 package
-#include "../../math/fp25519/fp_amd64.h"
-#include "curve_amd64.h"
-
-// CTE_A24 is (A+2)/4 from Curve25519
-#define CTE_A24 121666
-
-#define Size 32
-
-// multiplyA24Leg multiplies x times CTE_A24 and stores in z
-// Uses: AX, DX, R8-R13, FLAGS
-// Instr: x86_64, cmov
-#define multiplyA24Leg(z,x) \
- MOVL $CTE_A24, AX; MULQ 0+x; MOVQ AX, R8; MOVQ DX, R9; \
- MOVL $CTE_A24, AX; MULQ 8+x; MOVQ AX, R12; MOVQ DX, R10; \
- MOVL $CTE_A24, AX; MULQ 16+x; MOVQ AX, R13; MOVQ DX, R11; \
- MOVL $CTE_A24, AX; MULQ 24+x; \
- ADDQ R12, R9; \
- ADCQ R13, R10; \
- ADCQ AX, R11; \
- ADCQ $0, DX; \
- MOVL $38, AX; /* 2*C = 38 = 2^256 MOD 2^255-19*/ \
- IMULQ AX, DX; \
- ADDQ DX, R8; \
- ADCQ $0, R9; MOVQ R9, 8+z; \
- ADCQ $0, R10; MOVQ R10, 16+z; \
- ADCQ $0, R11; MOVQ R11, 24+z; \
- MOVQ $0, DX; \
- CMOVQCS AX, DX; \
- ADDQ DX, R8; MOVQ R8, 0+z;
-
-// multiplyA24Adx multiplies x times CTE_A24 and stores in z
-// Uses: AX, DX, R8-R12, FLAGS
-// Instr: x86_64, cmov, bmi2
-#define multiplyA24Adx(z,x) \
- MOVQ $CTE_A24, DX; \
- MULXQ 0+x, R8, R10; \
- MULXQ 8+x, R9, R11; ADDQ R10, R9; \
- MULXQ 16+x, R10, AX; ADCQ R11, R10; \
- MULXQ 24+x, R11, R12; ADCQ AX, R11; \
- ;;;;;;;;;;;;;;;;;;;;; ADCQ $0, R12; \
- MOVL $38, DX; /* 2*C = 38 = 2^256 MOD 2^255-19*/ \
- IMULQ DX, R12; \
- ADDQ R12, R8; \
- ADCQ $0, R9; MOVQ R9, 8+z; \
- ADCQ $0, R10; MOVQ R10, 16+z; \
- ADCQ $0, R11; MOVQ R11, 24+z; \
- MOVQ $0, R12; \
- CMOVQCS DX, R12; \
- ADDQ R12, R8; MOVQ R8, 0+z;
-
-#define mulA24Legacy \
- multiplyA24Leg(0(DI),0(SI))
-#define mulA24Bmi2Adx \
- multiplyA24Adx(0(DI),0(SI))
-
-// func mulA24Amd64(z, x *fp255.Elt)
-TEXT ·mulA24Amd64(SB),NOSPLIT,$0-16
- MOVQ z+0(FP), DI
- MOVQ x+8(FP), SI
- CHECK_BMI2ADX(LMA24, mulA24Legacy, mulA24Bmi2Adx)
-
-
-// func ladderStepAmd64(w *[5]fp255.Elt, b uint)
-// ladderStepAmd64 calculates a point addition and doubling as follows:
-// (x2,z2) = 2*(x2,z2) and (x3,z3) = (x2,z2)+(x3,z3) using as a difference (x1,-).
-// work = (x1,x2,z2,x3,z3) are five fp255.Elt of 32 bytes.
-// stack = (t0,t1) are two fp.Elt of fp.Size bytes, and
-// (b0,b1) are two-double precision fp.Elt of 2*fp.Size bytes.
-TEXT ·ladderStepAmd64(SB),NOSPLIT,$192-16
- // Parameters
- #define regWork DI
- #define regMove SI
- #define x1 0*Size(regWork)
- #define x2 1*Size(regWork)
- #define z2 2*Size(regWork)
- #define x3 3*Size(regWork)
- #define z3 4*Size(regWork)
- // Local variables
- #define t0 0*Size(SP)
- #define t1 1*Size(SP)
- #define b0 2*Size(SP)
- #define b1 4*Size(SP)
- MOVQ w+0(FP), regWork
- MOVQ b+8(FP), regMove
- CHECK_BMI2ADX(LLADSTEP, ladderStepLeg, ladderStepBmi2Adx)
- #undef regWork
- #undef regMove
- #undef x1
- #undef x2
- #undef z2
- #undef x3
- #undef z3
- #undef t0
- #undef t1
- #undef b0
- #undef b1
-
-// func diffAddAmd64(w *[5]fp255.Elt, b uint)
-// diffAddAmd64 calculates a differential point addition using a precomputed point.
-// (x1,z1) = (x1,z1)+(mu) using a difference point (x2,z2)
-// w = (mu,x1,z1,x2,z2) are five fp.Elt, and
-// stack = (b0,b1) are two-double precision fp.Elt of 2*fp.Size bytes.
-TEXT ·diffAddAmd64(SB),NOSPLIT,$128-16
- // Parameters
- #define regWork DI
- #define regSwap SI
- #define ui 0*Size(regWork)
- #define x1 1*Size(regWork)
- #define z1 2*Size(regWork)
- #define x2 3*Size(regWork)
- #define z2 4*Size(regWork)
- // Local variables
- #define b0 0*Size(SP)
- #define b1 2*Size(SP)
- MOVQ w+0(FP), regWork
- MOVQ b+8(FP), regSwap
- cswap(x1,x2,regSwap)
- cswap(z1,z2,regSwap)
- CHECK_BMI2ADX(LDIFADD, difAddLeg, difAddBmi2Adx)
- #undef regWork
- #undef regSwap
- #undef ui
- #undef x1
- #undef z1
- #undef x2
- #undef z2
- #undef b0
- #undef b1
-
-// func doubleAmd64(x, z *fp255.Elt)
-// doubleAmd64 calculates a point doubling (x1,z1) = 2*(x1,z1).
-// stack = (t0,t1) are two fp.Elt of fp.Size bytes, and
-// (b0,b1) are two-double precision fp.Elt of 2*fp.Size bytes.
-TEXT ·doubleAmd64(SB),NOSPLIT,$192-16
- // Parameters
- #define x1 0(DI)
- #define z1 0(SI)
- // Local variables
- #define t0 0*Size(SP)
- #define t1 1*Size(SP)
- #define b0 2*Size(SP)
- #define b1 4*Size(SP)
- MOVQ x+0(FP), DI
- MOVQ z+8(FP), SI
- CHECK_BMI2ADX(LDOUB,doubleLeg,doubleBmi2Adx)
- #undef x1
- #undef z1
- #undef t0
- #undef t1
- #undef b0
- #undef b1
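
The reduction macros above fold the high limb using the identity 2^256 ≡ 38 (mod 2^255-19), since 2^256 = 2(p+19) = 2p+38; this is the "2*C = 38" noted in the assembly comments. A one-line verification with math/big:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// p = 2^255 - 19; the macros reduce with 2^256 mod p, which must be 38.
	p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 255), big.NewInt(19))
	r := new(big.Int).Mod(new(big.Int).Lsh(big.NewInt(1), 256), p)
	fmt.Println(r) // 38
}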
diff --git a/vendor/github.com/cloudflare/circl/dh/x25519/curve_generic.go b/vendor/github.com/cloudflare/circl/dh/x25519/curve_generic.go
deleted file mode 100644
index dae67ea3..00000000
--- a/vendor/github.com/cloudflare/circl/dh/x25519/curve_generic.go
+++ /dev/null
@@ -1,85 +0,0 @@
-package x25519
-
-import (
- "encoding/binary"
- "math/bits"
-
- fp "github.com/cloudflare/circl/math/fp25519"
-)
-
-func doubleGeneric(x, z *fp.Elt) {
- t0, t1 := &fp.Elt{}, &fp.Elt{}
- fp.AddSub(x, z)
- fp.Sqr(x, x)
- fp.Sqr(z, z)
- fp.Sub(t0, x, z)
- mulA24Generic(t1, t0)
- fp.Add(t1, t1, z)
- fp.Mul(x, x, z)
- fp.Mul(z, t0, t1)
-}
-
-func diffAddGeneric(w *[5]fp.Elt, b uint) {
- mu, x1, z1, x2, z2 := &w[0], &w[1], &w[2], &w[3], &w[4]
- fp.Cswap(x1, x2, b)
- fp.Cswap(z1, z2, b)
- fp.AddSub(x1, z1)
- fp.Mul(z1, z1, mu)
- fp.AddSub(x1, z1)
- fp.Sqr(x1, x1)
- fp.Sqr(z1, z1)
- fp.Mul(x1, x1, z2)
- fp.Mul(z1, z1, x2)
-}
-
-func ladderStepGeneric(w *[5]fp.Elt, b uint) {
- x1, x2, z2, x3, z3 := &w[0], &w[1], &w[2], &w[3], &w[4]
- t0 := &fp.Elt{}
- t1 := &fp.Elt{}
- fp.AddSub(x2, z2)
- fp.AddSub(x3, z3)
- fp.Mul(t0, x2, z3)
- fp.Mul(t1, x3, z2)
- fp.AddSub(t0, t1)
- fp.Cmov(x2, x3, b)
- fp.Cmov(z2, z3, b)
- fp.Sqr(x3, t0)
- fp.Sqr(z3, t1)
- fp.Mul(z3, x1, z3)
- fp.Sqr(x2, x2)
- fp.Sqr(z2, z2)
- fp.Sub(t0, x2, z2)
- mulA24Generic(t1, t0)
- fp.Add(t1, t1, z2)
- fp.Mul(x2, x2, z2)
- fp.Mul(z2, t0, t1)
-}
-
-func mulA24Generic(z, x *fp.Elt) {
- const A24 = 121666
- const n = 8
- var xx [4]uint64
- for i := range xx {
- xx[i] = binary.LittleEndian.Uint64(x[i*n : (i+1)*n])
- }
-
- h0, l0 := bits.Mul64(xx[0], A24)
- h1, l1 := bits.Mul64(xx[1], A24)
- h2, l2 := bits.Mul64(xx[2], A24)
- h3, l3 := bits.Mul64(xx[3], A24)
-
- var c3 uint64
- l1, c0 := bits.Add64(h0, l1, 0)
- l2, c1 := bits.Add64(h1, l2, c0)
- l3, c2 := bits.Add64(h2, l3, c1)
- l4, _ := bits.Add64(h3, 0, c2)
- _, l4 = bits.Mul64(l4, 38)
- l0, c0 = bits.Add64(l0, l4, 0)
- xx[1], c1 = bits.Add64(l1, 0, c0)
- xx[2], c2 = bits.Add64(l2, 0, c1)
- xx[3], c3 = bits.Add64(l3, 0, c2)
- xx[0], _ = bits.Add64(l0, (-c3)&38, 0)
- for i := range xx {
- binary.LittleEndian.PutUint64(z[i*n:(i+1)*n], xx[i])
- }
-}
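
mulA24Generic multiplies a field element by the curve constant (A+2)/4 = 121666 and folds the 320-bit product back below 2^256 using the same 2^256 ≡ 38 identity; canonical reduction below p is deferred to fp.Modp. A cross-check of the arithmetic against math/big on an arbitrary sample input (a hypothetical test program, not from the vendored tree):

package main

import (
	"fmt"
	"math/big"
)

func main() {
	p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 255), big.NewInt(19))

	// Arbitrary sample input; mulA24Generic computes 121666*x limb by limb
	// and leaves a value congruent to this mod p.
	x, _ := new(big.Int).SetString("1122334455667788aabbccddeeff0099", 16)

	z := new(big.Int).Mod(new(big.Int).Mul(x, big.NewInt(121666)), p)
	fmt.Printf("121666*x mod p = %x\n", z)
}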
diff --git a/vendor/github.com/cloudflare/circl/dh/x25519/curve_noasm.go b/vendor/github.com/cloudflare/circl/dh/x25519/curve_noasm.go
deleted file mode 100644
index 07fab97d..00000000
--- a/vendor/github.com/cloudflare/circl/dh/x25519/curve_noasm.go
+++ /dev/null
@@ -1,11 +0,0 @@
-//go:build !amd64 || purego
-// +build !amd64 purego
-
-package x25519
-
-import fp "github.com/cloudflare/circl/math/fp25519"
-
-func double(x, z *fp.Elt) { doubleGeneric(x, z) }
-func diffAdd(w *[5]fp.Elt, b uint) { diffAddGeneric(w, b) }
-func ladderStep(w *[5]fp.Elt, b uint) { ladderStepGeneric(w, b) }
-func mulA24(z, x *fp.Elt) { mulA24Generic(z, x) }
diff --git a/vendor/github.com/cloudflare/circl/dh/x25519/doc.go b/vendor/github.com/cloudflare/circl/dh/x25519/doc.go
deleted file mode 100644
index 3ce102d1..00000000
--- a/vendor/github.com/cloudflare/circl/dh/x25519/doc.go
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
-Package x25519 provides Diffie-Hellman functions as specified in RFC-7748.
-
-Validation of public keys.
-
-The Diffie-Hellman function, as described in RFC-7748 [1], works for any
-public key. However, if a different protocol requires contributory
-behaviour [2,3], then the public keys must be validated against low-order
-points [3,4]. To do that, the Shared function performs this validation
-internally and returns false when the public key is invalid (i.e., it
-is a low-order point).
-
-References:
- - [1] RFC7748 by Langley, Hamburg, Turner (https://rfc-editor.org/rfc/rfc7748.txt)
- - [2] Curve25519 by Bernstein (https://cr.yp.to/ecdh.html)
- - [3] Bernstein (https://cr.yp.to/ecdh.html#validate)
- - [4] Cremers&Jackson (https://eprint.iacr.org/2019/526)
-*/
-package x25519
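
The validation described in the package doc is visible from the public API: Shared reports false when the peer's key is a low-order point, for instance the all-zero encoding that appears in the lowOrderPoints table. A usage sketch (assuming the package is importable at its usual path; signatures as defined in key.go below):

package main

import (
	"crypto/rand"
	"fmt"

	"github.com/cloudflare/circl/dh/x25519"
)

func main() {
	var secret, shared x25519.Key
	_, _ = rand.Read(secret[:])

	// All zeros is a low-order encoding (it appears in the lowOrderPoints
	// table), so the handshake must be rejected.
	var zeroPub x25519.Key
	ok := x25519.Shared(&shared, &secret, &zeroPub)
	fmt.Println(ok) // false: the shared key must not be used
}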
diff --git a/vendor/github.com/cloudflare/circl/dh/x25519/key.go b/vendor/github.com/cloudflare/circl/dh/x25519/key.go
deleted file mode 100644
index c76f72ac..00000000
--- a/vendor/github.com/cloudflare/circl/dh/x25519/key.go
+++ /dev/null
@@ -1,47 +0,0 @@
-package x25519
-
-import (
- "crypto/subtle"
-
- fp "github.com/cloudflare/circl/math/fp25519"
-)
-
-// Size is the length in bytes of a X25519 key.
-const Size = 32
-
-// Key represents a X25519 key.
-type Key [Size]byte
-
-func (k *Key) clamp(in *Key) *Key {
- *k = *in
- k[0] &= 248
- k[31] = (k[31] & 127) | 64
- return k
-}
-
-// isValidPubKey verifies if the public key is not a low-order point.
-func (k *Key) isValidPubKey() bool {
- fp.Modp((*fp.Elt)(k))
- var isLowOrder int
- for _, P := range lowOrderPoints {
- isLowOrder |= subtle.ConstantTimeCompare(P[:], k[:])
- }
- return isLowOrder == 0
-}
-
-// KeyGen obtains a public key given a secret key.
-func KeyGen(public, secret *Key) {
- ladderJoye(public.clamp(secret))
-}
-
-// Shared calculates Alice's shared key from Alice's secret key and Bob's
-// public key returning true on success. A failure case happens when the public
-// key is a low-order point, thus the shared key is all-zeros and the function
-// returns false.
-func Shared(shared, secret, public *Key) bool {
- validPk := *public
- validPk[31] &= (1 << (255 % 8)) - 1
- ok := validPk.isValidPubKey()
- ladderMontgomery(shared.clamp(secret), &validPk)
- return ok
-}
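
For completeness, a round-trip sketch of the KeyGen/Shared API defined above; with honestly generated keys both calls should return true and both sides should derive the same 32-byte key:

package main

import (
	"crypto/rand"
	"fmt"

	"github.com/cloudflare/circl/dh/x25519"
)

func main() {
	var aSec, aPub, bSec, bPub x25519.Key
	_, _ = rand.Read(aSec[:])
	_, _ = rand.Read(bSec[:])

	// KeyGen clamps the secret per RFC 7748 before running the fixed-base ladder.
	x25519.KeyGen(&aPub, &aSec)
	x25519.KeyGen(&bPub, &bSec)

	var k1, k2 x25519.Key
	ok1 := x25519.Shared(&k1, &aSec, &bPub)
	ok2 := x25519.Shared(&k2, &bSec, &aPub)
	fmt.Println(ok1 && ok2, k1 == k2) // true true
}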
diff --git a/vendor/github.com/cloudflare/circl/dh/x25519/table.go b/vendor/github.com/cloudflare/circl/dh/x25519/table.go
deleted file mode 100644
index 28c8c4ac..00000000
--- a/vendor/github.com/cloudflare/circl/dh/x25519/table.go
+++ /dev/null
@@ -1,268 +0,0 @@
-package x25519
-
-import "github.com/cloudflare/circl/math/fp25519"
-
-// tableGenerator contains the set of points:
-//
-// t[i] = (xi+1)/(xi-1),
-//
-// where (xi,yi) = 2^iG and G is the generator point
-// Size = (256)*(256/8) = 8192 bytes.
-var tableGenerator = [256 * fp25519.Size]byte{
- /* (2^ 0)P */ 0xf3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x5f,
- /* (2^ 1)P */ 0x96, 0xfe, 0xaa, 0x16, 0xf4, 0x20, 0x82, 0x6b, 0x34, 0x6a, 0x56, 0x4f, 0x2b, 0xeb, 0xeb, 0x82, 0x0f, 0x95, 0xa5, 0x75, 0xb0, 0xa5, 0xa9, 0xd5, 0xf4, 0x88, 0x24, 0x4b, 0xcf, 0xb2, 0x42, 0x51,
- /* (2^ 2)P */ 0x0c, 0x68, 0x69, 0x00, 0x75, 0xbc, 0xae, 0x6a, 0x41, 0x9c, 0xf9, 0xa0, 0x20, 0x78, 0xcf, 0x89, 0xf4, 0xd0, 0x56, 0x3b, 0x18, 0xd9, 0x58, 0x2a, 0xa4, 0x11, 0x60, 0xe3, 0x80, 0xca, 0x5a, 0x4b,
- /* (2^ 3)P */ 0x5d, 0x74, 0x29, 0x8c, 0x34, 0x32, 0x91, 0x32, 0xd7, 0x2f, 0x64, 0xe1, 0x16, 0xe6, 0xa2, 0xf4, 0x34, 0xbc, 0x67, 0xff, 0x03, 0xbb, 0x45, 0x1e, 0x4a, 0x9b, 0x2a, 0xf4, 0xd0, 0x12, 0x69, 0x30,
- /* (2^ 4)P */ 0x54, 0x71, 0xaf, 0xe6, 0x07, 0x65, 0x88, 0xff, 0x2f, 0xc8, 0xee, 0xdf, 0x13, 0x0e, 0xf5, 0x04, 0xce, 0xb5, 0xba, 0x2a, 0xe8, 0x2f, 0x51, 0xaa, 0x22, 0xf2, 0xd5, 0x68, 0x1a, 0x25, 0x4e, 0x17,
- /* (2^ 5)P */ 0x98, 0x88, 0x02, 0x82, 0x0d, 0x70, 0x96, 0xcf, 0xc5, 0x02, 0x2c, 0x0a, 0x37, 0xe3, 0x43, 0x17, 0xaa, 0x6e, 0xe8, 0xb4, 0x98, 0xec, 0x9e, 0x37, 0x2e, 0x48, 0xe0, 0x51, 0x8a, 0x88, 0x59, 0x0c,
- /* (2^ 6)P */ 0x89, 0xd1, 0xb5, 0x99, 0xd6, 0xf1, 0xcb, 0xfb, 0x84, 0xdc, 0x9f, 0x8e, 0xd5, 0xf0, 0xae, 0xac, 0x14, 0x76, 0x1f, 0x23, 0x06, 0x0d, 0xc2, 0xc1, 0x72, 0xf9, 0x74, 0xa2, 0x8d, 0x21, 0x38, 0x29,
- /* (2^ 7)P */ 0x18, 0x7f, 0x1d, 0xff, 0xbe, 0x49, 0xaf, 0xf6, 0xc2, 0xc9, 0x7a, 0x38, 0x22, 0x1c, 0x54, 0xcc, 0x6b, 0xc5, 0x15, 0x40, 0xef, 0xc9, 0xfc, 0x96, 0xa9, 0x13, 0x09, 0x69, 0x7c, 0x62, 0xc1, 0x69,
- /* (2^ 8)P */ 0x0e, 0xdb, 0x33, 0x47, 0x2f, 0xfd, 0x86, 0x7a, 0xe9, 0x7d, 0x08, 0x9e, 0xf2, 0xc4, 0xb8, 0xfd, 0x29, 0xa2, 0xa2, 0x8e, 0x1a, 0x4b, 0x5e, 0x09, 0x79, 0x7a, 0xb3, 0x29, 0xc8, 0xa7, 0xd7, 0x1a,
- /* (2^ 9)P */ 0xc0, 0xa0, 0x7e, 0xd1, 0xca, 0x89, 0x2d, 0x34, 0x51, 0x20, 0xed, 0xcc, 0xa6, 0xdd, 0xbe, 0x67, 0x74, 0x2f, 0xb4, 0x2b, 0xbf, 0x31, 0xca, 0x19, 0xbb, 0xac, 0x80, 0x49, 0xc8, 0xb4, 0xf7, 0x3d,
- /* (2^ 10)P */ 0x83, 0xd8, 0x0a, 0xc8, 0x4d, 0x44, 0xc6, 0xa8, 0x85, 0xab, 0xe3, 0x66, 0x03, 0x44, 0x1e, 0xb9, 0xd8, 0xf6, 0x64, 0x01, 0xa0, 0xcd, 0x15, 0xc2, 0x68, 0xe6, 0x47, 0xf2, 0x6e, 0x7c, 0x86, 0x3d,
- /* (2^ 11)P */ 0x8c, 0x65, 0x3e, 0xcc, 0x2b, 0x58, 0xdd, 0xc7, 0x28, 0x55, 0x0e, 0xee, 0x48, 0x47, 0x2c, 0xfd, 0x71, 0x4f, 0x9f, 0xcc, 0x95, 0x9b, 0xfd, 0xa0, 0xdf, 0x5d, 0x67, 0xb0, 0x71, 0xd8, 0x29, 0x75,
- /* (2^ 12)P */ 0x78, 0xbd, 0x3c, 0x2d, 0xb4, 0x68, 0xf5, 0xb8, 0x82, 0xda, 0xf3, 0x91, 0x1b, 0x01, 0x33, 0x12, 0x62, 0x3b, 0x7c, 0x4a, 0xcd, 0x6c, 0xce, 0x2d, 0x03, 0x86, 0x49, 0x9e, 0x8e, 0xfc, 0xe7, 0x75,
- /* (2^ 13)P */ 0xec, 0xb6, 0xd0, 0xfc, 0xf1, 0x13, 0x4f, 0x2f, 0x45, 0x7a, 0xff, 0x29, 0x1f, 0xca, 0xa8, 0xf1, 0x9b, 0xe2, 0x81, 0x29, 0xa7, 0xc1, 0x49, 0xc2, 0x6a, 0xb5, 0x83, 0x8c, 0xbb, 0x0d, 0xbe, 0x6e,
- /* (2^ 14)P */ 0x22, 0xb2, 0x0b, 0x17, 0x8d, 0xfa, 0x14, 0x71, 0x5f, 0x93, 0x93, 0xbf, 0xd5, 0xdc, 0xa2, 0x65, 0x9a, 0x97, 0x9c, 0xb5, 0x68, 0x1f, 0xc4, 0xbd, 0x89, 0x92, 0xce, 0xa2, 0x79, 0xef, 0x0e, 0x2f,
- /* (2^ 15)P */ 0xce, 0x37, 0x3c, 0x08, 0x0c, 0xbf, 0xec, 0x42, 0x22, 0x63, 0x49, 0xec, 0x09, 0xbc, 0x30, 0x29, 0x0d, 0xac, 0xfe, 0x9c, 0xc1, 0xb0, 0x94, 0xf2, 0x80, 0xbb, 0xfa, 0xed, 0x4b, 0xaa, 0x80, 0x37,
- /* (2^ 16)P */ 0x29, 0xd9, 0xea, 0x7c, 0x3e, 0x7d, 0xc1, 0x56, 0xc5, 0x22, 0x57, 0x2e, 0xeb, 0x4b, 0xcb, 0xe7, 0x5a, 0xe1, 0xbf, 0x2d, 0x73, 0x31, 0xe9, 0x0c, 0xf8, 0x52, 0x10, 0x62, 0xc7, 0x83, 0xb8, 0x41,
- /* (2^ 17)P */ 0x50, 0x53, 0xd2, 0xc3, 0xa0, 0x5c, 0xf7, 0xdb, 0x51, 0xe3, 0xb1, 0x6e, 0x08, 0xbe, 0x36, 0x29, 0x12, 0xb2, 0xa9, 0xb4, 0x3c, 0xe0, 0x36, 0xc9, 0xaa, 0x25, 0x22, 0x32, 0x82, 0xbf, 0x45, 0x1d,
- /* (2^ 18)P */ 0xc5, 0x4c, 0x02, 0x6a, 0x03, 0xb1, 0x1a, 0xe8, 0x72, 0x9a, 0x4c, 0x30, 0x1c, 0x20, 0x12, 0xe2, 0xfc, 0xb1, 0x32, 0x68, 0xba, 0x3f, 0xd7, 0xc5, 0x81, 0x95, 0x83, 0x4d, 0x5a, 0xdb, 0xff, 0x20,
- /* (2^ 19)P */ 0xad, 0x0f, 0x5d, 0xbe, 0x67, 0xd3, 0x83, 0xa2, 0x75, 0x44, 0x16, 0x8b, 0xca, 0x25, 0x2b, 0x6c, 0x2e, 0xf2, 0xaa, 0x7c, 0x46, 0x35, 0x49, 0x9d, 0x49, 0xff, 0x85, 0xee, 0x8e, 0x40, 0x66, 0x51,
- /* (2^ 20)P */ 0x61, 0xe3, 0xb4, 0xfa, 0xa2, 0xba, 0x67, 0x3c, 0xef, 0x5c, 0xf3, 0x7e, 0xc6, 0x33, 0xe4, 0xb3, 0x1c, 0x9b, 0x15, 0x41, 0x92, 0x72, 0x59, 0x52, 0x33, 0xab, 0xb0, 0xd5, 0x92, 0x18, 0x62, 0x6a,
- /* (2^ 21)P */ 0xcb, 0xcd, 0x55, 0x75, 0x38, 0x4a, 0xb7, 0x20, 0x3f, 0x92, 0x08, 0x12, 0x0e, 0xa1, 0x2a, 0x53, 0xd1, 0x1d, 0x28, 0x62, 0x77, 0x7b, 0xa1, 0xea, 0xbf, 0x44, 0x5c, 0xf0, 0x43, 0x34, 0xab, 0x61,
- /* (2^ 22)P */ 0xf8, 0xde, 0x24, 0x23, 0x42, 0x6c, 0x7a, 0x25, 0x7f, 0xcf, 0xe3, 0x17, 0x10, 0x6c, 0x1c, 0x13, 0x57, 0xa2, 0x30, 0xf6, 0x39, 0x87, 0x75, 0x23, 0x80, 0x85, 0xa7, 0x01, 0x7a, 0x40, 0x5a, 0x29,
- /* (2^ 23)P */ 0xd9, 0xa8, 0x5d, 0x6d, 0x24, 0x43, 0xc4, 0xf8, 0x5d, 0xfa, 0x52, 0x0c, 0x45, 0x75, 0xd7, 0x19, 0x3d, 0xf8, 0x1b, 0x73, 0x92, 0xfc, 0xfc, 0x2a, 0x00, 0x47, 0x2b, 0x1b, 0xe8, 0xc8, 0x10, 0x7d,
- /* (2^ 24)P */ 0x0b, 0xa2, 0xba, 0x70, 0x1f, 0x27, 0xe0, 0xc8, 0x57, 0x39, 0xa6, 0x7c, 0x86, 0x48, 0x37, 0x99, 0xbb, 0xd4, 0x7e, 0xcb, 0xb3, 0xef, 0x12, 0x54, 0x75, 0x29, 0xe6, 0x73, 0x61, 0xd3, 0x96, 0x31,
- /* (2^ 25)P */ 0xfc, 0xdf, 0xc7, 0x41, 0xd1, 0xca, 0x5b, 0xde, 0x48, 0xc8, 0x95, 0xb3, 0xd2, 0x8c, 0xcc, 0x47, 0xcb, 0xf3, 0x1a, 0xe1, 0x42, 0xd9, 0x4c, 0xa3, 0xc2, 0xce, 0x4e, 0xd0, 0xf2, 0xdb, 0x56, 0x02,
- /* (2^ 26)P */ 0x7f, 0x66, 0x0e, 0x4b, 0xe9, 0xb7, 0x5a, 0x87, 0x10, 0x0d, 0x85, 0xc0, 0x83, 0xdd, 0xd4, 0xca, 0x9f, 0xc7, 0x72, 0x4e, 0x8f, 0x2e, 0xf1, 0x47, 0x9b, 0xb1, 0x85, 0x8c, 0xbb, 0x87, 0x1a, 0x5f,
- /* (2^ 27)P */ 0xb8, 0x51, 0x7f, 0x43, 0xb6, 0xd0, 0xe9, 0x7a, 0x65, 0x90, 0x87, 0x18, 0x55, 0xce, 0xc7, 0x12, 0xee, 0x7a, 0xf7, 0x5c, 0xfe, 0x09, 0xde, 0x2a, 0x27, 0x56, 0x2c, 0x7d, 0x2f, 0x5a, 0xa0, 0x23,
- /* (2^ 28)P */ 0x9a, 0x16, 0x7c, 0xf1, 0x28, 0xe1, 0x08, 0x59, 0x2d, 0x85, 0xd0, 0x8a, 0xdd, 0x98, 0x74, 0xf7, 0x64, 0x2f, 0x10, 0xab, 0xce, 0xc4, 0xb4, 0x74, 0x45, 0x98, 0x13, 0x10, 0xdd, 0xba, 0x3a, 0x18,
- /* (2^ 29)P */ 0xac, 0xaa, 0x92, 0xaa, 0x8d, 0xba, 0x65, 0xb1, 0x05, 0x67, 0x38, 0x99, 0x95, 0xef, 0xc5, 0xd5, 0xd1, 0x40, 0xfc, 0xf8, 0x0c, 0x8f, 0x2f, 0xbe, 0x14, 0x45, 0x20, 0xee, 0x35, 0xe6, 0x01, 0x27,
- /* (2^ 30)P */ 0x14, 0x65, 0x15, 0x20, 0x00, 0xa8, 0x9f, 0x62, 0xce, 0xc1, 0xa8, 0x64, 0x87, 0x86, 0x23, 0xf2, 0x0e, 0x06, 0x3f, 0x0b, 0xff, 0x4f, 0x89, 0x5b, 0xfa, 0xa3, 0x08, 0xf7, 0x4c, 0x94, 0xd9, 0x60,
- /* (2^ 31)P */ 0x1f, 0x20, 0x7a, 0x1c, 0x1a, 0x00, 0xea, 0xae, 0x63, 0xce, 0xe2, 0x3e, 0x63, 0x6a, 0xf1, 0xeb, 0xe1, 0x07, 0x7a, 0x4c, 0x59, 0x09, 0x77, 0x6f, 0xcb, 0x08, 0x02, 0x0d, 0x15, 0x58, 0xb9, 0x79,
- /* (2^ 32)P */ 0xe7, 0x10, 0xd4, 0x01, 0x53, 0x5e, 0xb5, 0x24, 0x4d, 0xc8, 0xfd, 0xf3, 0xdf, 0x4e, 0xa3, 0xe3, 0xd8, 0x32, 0x40, 0x90, 0xe4, 0x68, 0x87, 0xd8, 0xec, 0xae, 0x3a, 0x7b, 0x42, 0x84, 0x13, 0x13,
- /* (2^ 33)P */ 0x14, 0x4f, 0x23, 0x86, 0x12, 0xe5, 0x05, 0x84, 0x29, 0xc5, 0xb4, 0xad, 0x39, 0x47, 0xdc, 0x14, 0xfd, 0x4f, 0x63, 0x50, 0xb2, 0xb5, 0xa2, 0xb8, 0x93, 0xff, 0xa7, 0xd8, 0x4a, 0xa9, 0xe2, 0x2f,
- /* (2^ 34)P */ 0xdd, 0xfa, 0x43, 0xe8, 0xef, 0x57, 0x5c, 0xec, 0x18, 0x99, 0xbb, 0xf0, 0x40, 0xce, 0x43, 0x28, 0x05, 0x63, 0x3d, 0xcf, 0xd6, 0x61, 0xb5, 0xa4, 0x7e, 0x77, 0xfb, 0xe8, 0xbd, 0x29, 0x36, 0x74,
- /* (2^ 35)P */ 0x8f, 0x73, 0xaf, 0xbb, 0x46, 0xdd, 0x3e, 0x34, 0x51, 0xa6, 0x01, 0xb1, 0x28, 0x18, 0x98, 0xed, 0x7a, 0x79, 0x2c, 0x88, 0x0b, 0x76, 0x01, 0xa4, 0x30, 0x87, 0xc8, 0x8d, 0xe2, 0x23, 0xc2, 0x1f,
- /* (2^ 36)P */ 0x0e, 0xba, 0x0f, 0xfc, 0x91, 0x4e, 0x60, 0x48, 0xa4, 0x6f, 0x2c, 0x05, 0x8f, 0xf7, 0x37, 0xb6, 0x9c, 0x23, 0xe9, 0x09, 0x3d, 0xac, 0xcc, 0x91, 0x7c, 0x68, 0x7a, 0x43, 0xd4, 0xee, 0xf7, 0x23,
- /* (2^ 37)P */ 0x00, 0xd8, 0x9b, 0x8d, 0x11, 0xb1, 0x73, 0x51, 0xa7, 0xd4, 0x89, 0x31, 0xb6, 0x41, 0xd6, 0x29, 0x86, 0xc5, 0xbb, 0x88, 0x79, 0x17, 0xbf, 0xfd, 0xf5, 0x1d, 0xd8, 0xca, 0x4f, 0x89, 0x59, 0x29,
- /* (2^ 38)P */ 0x99, 0xc8, 0xbb, 0xb4, 0xf3, 0x8e, 0xbc, 0xae, 0xb9, 0x92, 0x69, 0xb2, 0x5a, 0x99, 0x48, 0x41, 0xfb, 0x2c, 0xf9, 0x34, 0x01, 0x0b, 0xe2, 0x24, 0xe8, 0xde, 0x05, 0x4a, 0x89, 0x58, 0xd1, 0x40,
- /* (2^ 39)P */ 0xf6, 0x76, 0xaf, 0x85, 0x11, 0x0b, 0xb0, 0x46, 0x79, 0x7a, 0x18, 0x73, 0x78, 0xc7, 0xba, 0x26, 0x5f, 0xff, 0x8f, 0xab, 0x95, 0xbf, 0xc0, 0x3d, 0xd7, 0x24, 0x55, 0x94, 0xd8, 0x8b, 0x60, 0x2a,
- /* (2^ 40)P */ 0x02, 0x63, 0x44, 0xbd, 0x88, 0x95, 0x44, 0x26, 0x9c, 0x43, 0x88, 0x03, 0x1c, 0xc2, 0x4b, 0x7c, 0xb2, 0x11, 0xbd, 0x83, 0xf3, 0xa4, 0x98, 0x8e, 0xb9, 0x76, 0xd8, 0xc9, 0x7b, 0x8d, 0x21, 0x26,
- /* (2^ 41)P */ 0x8a, 0x17, 0x7c, 0x99, 0x42, 0x15, 0x08, 0xe3, 0x6f, 0x60, 0xb6, 0x6f, 0xa8, 0x29, 0x2d, 0x3c, 0x74, 0x93, 0x27, 0xfa, 0x36, 0x77, 0x21, 0x5c, 0xfa, 0xb1, 0xfe, 0x4a, 0x73, 0x05, 0xde, 0x7d,
- /* (2^ 42)P */ 0xab, 0x2b, 0xd4, 0x06, 0x39, 0x0e, 0xf1, 0x3b, 0x9c, 0x64, 0x80, 0x19, 0x3e, 0x80, 0xf7, 0xe4, 0x7a, 0xbf, 0x95, 0x95, 0xf8, 0x3b, 0x05, 0xe6, 0x30, 0x55, 0x24, 0xda, 0x38, 0xaf, 0x4f, 0x39,
- /* (2^ 43)P */ 0xf4, 0x28, 0x69, 0x89, 0x58, 0xfb, 0x8e, 0x7a, 0x3c, 0x11, 0x6a, 0xcc, 0xe9, 0x78, 0xc7, 0xfb, 0x6f, 0x59, 0xaf, 0x30, 0xe3, 0x0c, 0x67, 0x72, 0xf7, 0x6c, 0x3d, 0x1d, 0xa8, 0x22, 0xf2, 0x48,
- /* (2^ 44)P */ 0xa7, 0xca, 0x72, 0x0d, 0x41, 0xce, 0x1f, 0xf0, 0x95, 0x55, 0x3b, 0x21, 0xc7, 0xec, 0x20, 0x5a, 0x83, 0x14, 0xfa, 0xc1, 0x65, 0x11, 0xc2, 0x7b, 0x41, 0xa7, 0xa8, 0x1d, 0xe3, 0x9a, 0xf8, 0x07,
- /* (2^ 45)P */ 0xf9, 0x0f, 0x83, 0xc6, 0xb4, 0xc2, 0xd2, 0x05, 0x93, 0x62, 0x31, 0xc6, 0x0f, 0x33, 0x3e, 0xd4, 0x04, 0xa9, 0xd3, 0x96, 0x0a, 0x59, 0xa5, 0xa5, 0xb6, 0x33, 0x53, 0xa6, 0x91, 0xdb, 0x5e, 0x70,
- /* (2^ 46)P */ 0xf7, 0xa5, 0xb9, 0x0b, 0x5e, 0xe1, 0x8e, 0x04, 0x5d, 0xaf, 0x0a, 0x9e, 0xca, 0xcf, 0x40, 0x32, 0x0b, 0xa4, 0xc4, 0xed, 0xce, 0x71, 0x4b, 0x8f, 0x6d, 0x4a, 0x54, 0xde, 0xa3, 0x0d, 0x1c, 0x62,
- /* (2^ 47)P */ 0x91, 0x40, 0x8c, 0xa0, 0x36, 0x28, 0x87, 0x92, 0x45, 0x14, 0xc9, 0x10, 0xb0, 0x75, 0x83, 0xce, 0x94, 0x63, 0x27, 0x4f, 0x52, 0xeb, 0x72, 0x8a, 0x35, 0x36, 0xc8, 0x7e, 0xfa, 0xfc, 0x67, 0x26,
- /* (2^ 48)P */ 0x2a, 0x75, 0xe8, 0x45, 0x33, 0x17, 0x4c, 0x7f, 0xa5, 0x79, 0x70, 0xee, 0xfe, 0x47, 0x1b, 0x06, 0x34, 0xff, 0x86, 0x9f, 0xfa, 0x9a, 0xdd, 0x25, 0x9c, 0xc8, 0x5d, 0x42, 0xf5, 0xce, 0x80, 0x37,
- /* (2^ 49)P */ 0xe9, 0xb4, 0x3b, 0x51, 0x5a, 0x03, 0x46, 0x1a, 0xda, 0x5a, 0x57, 0xac, 0x79, 0xf3, 0x1e, 0x3e, 0x50, 0x4b, 0xa2, 0x5f, 0x1c, 0x5f, 0x8c, 0xc7, 0x22, 0x9f, 0xfd, 0x34, 0x76, 0x96, 0x1a, 0x32,
- /* (2^ 50)P */ 0xfa, 0x27, 0x6e, 0x82, 0xb8, 0x07, 0x67, 0x94, 0xd0, 0x6f, 0x50, 0x4c, 0xd6, 0x84, 0xca, 0x3d, 0x36, 0x14, 0xe9, 0x75, 0x80, 0x21, 0x89, 0xc1, 0x84, 0x84, 0x3b, 0x9b, 0x16, 0x84, 0x92, 0x6d,
- /* (2^ 51)P */ 0xdf, 0x2d, 0x3f, 0x38, 0x40, 0xe8, 0x67, 0x3a, 0x75, 0x9b, 0x4f, 0x0c, 0xa3, 0xc9, 0xee, 0x33, 0x47, 0xef, 0x83, 0xa7, 0x6f, 0xc8, 0xc7, 0x3e, 0xc4, 0xfb, 0xc9, 0xba, 0x9f, 0x44, 0xec, 0x26,
- /* (2^ 52)P */ 0x7d, 0x9e, 0x9b, 0xa0, 0xcb, 0x38, 0x0f, 0x5c, 0x8c, 0x47, 0xa3, 0x62, 0xc7, 0x8c, 0x16, 0x81, 0x1c, 0x12, 0xfc, 0x06, 0xd3, 0xb0, 0x23, 0x3e, 0xdd, 0xdc, 0xef, 0xa5, 0xa0, 0x8a, 0x23, 0x5a,
- /* (2^ 53)P */ 0xff, 0x43, 0xea, 0xc4, 0x21, 0x61, 0xa2, 0x1b, 0xb5, 0x32, 0x88, 0x7c, 0x7f, 0xc7, 0xf8, 0x36, 0x9a, 0xf9, 0xdc, 0x0a, 0x0b, 0xea, 0xfb, 0x88, 0xf9, 0xeb, 0x5b, 0xc2, 0x8e, 0x93, 0xa9, 0x5c,
- /* (2^ 54)P */ 0xa0, 0xcd, 0xfc, 0x51, 0x5e, 0x6a, 0x43, 0xd5, 0x3b, 0x89, 0xcd, 0xc2, 0x97, 0x47, 0xbc, 0x1d, 0x08, 0x4a, 0x22, 0xd3, 0x65, 0x6a, 0x34, 0x19, 0x66, 0xf4, 0x9a, 0x9b, 0xe4, 0x34, 0x50, 0x0f,
- /* (2^ 55)P */ 0x6e, 0xb9, 0xe0, 0xa1, 0x67, 0x39, 0x3c, 0xf2, 0x88, 0x4d, 0x7a, 0x86, 0xfa, 0x08, 0x8b, 0xe5, 0x79, 0x16, 0x34, 0xa7, 0xc6, 0xab, 0x2f, 0xfb, 0x46, 0x69, 0x02, 0xb6, 0x1e, 0x38, 0x75, 0x2a,
- /* (2^ 56)P */ 0xac, 0x20, 0x94, 0xc1, 0xe4, 0x3b, 0x0a, 0xc8, 0xdc, 0xb6, 0xf2, 0x81, 0xc6, 0xf6, 0xb1, 0x66, 0x88, 0x33, 0xe9, 0x61, 0x67, 0x03, 0xf7, 0x7c, 0xc4, 0xa4, 0x60, 0xa6, 0xd8, 0xbb, 0xab, 0x25,
- /* (2^ 57)P */ 0x98, 0x51, 0xfd, 0x14, 0xba, 0x12, 0xea, 0x91, 0xa9, 0xff, 0x3c, 0x4a, 0xfc, 0x50, 0x49, 0x68, 0x28, 0xad, 0xf5, 0x30, 0x21, 0x84, 0x26, 0xf8, 0x41, 0xa4, 0x01, 0x53, 0xf7, 0x88, 0xa9, 0x3e,
- /* (2^ 58)P */ 0x6f, 0x8c, 0x5f, 0x69, 0x9a, 0x10, 0x78, 0xc9, 0xf3, 0xc3, 0x30, 0x05, 0x4a, 0xeb, 0x46, 0x17, 0x95, 0x99, 0x45, 0xb4, 0x77, 0x6d, 0x4d, 0x44, 0xc7, 0x5c, 0x4e, 0x05, 0x8c, 0x2b, 0x95, 0x75,
- /* (2^ 59)P */ 0xaa, 0xd6, 0xf4, 0x15, 0x79, 0x3f, 0x70, 0xa3, 0xd8, 0x47, 0x26, 0x2f, 0x20, 0x46, 0xc3, 0x66, 0x4b, 0x64, 0x1d, 0x81, 0xdf, 0x69, 0x14, 0xd0, 0x1f, 0xd7, 0xa5, 0x81, 0x7d, 0xa4, 0xfe, 0x77,
- /* (2^ 60)P */ 0x81, 0xa3, 0x7c, 0xf5, 0x9e, 0x52, 0xe9, 0xc5, 0x1a, 0x88, 0x2f, 0xce, 0xb9, 0xb4, 0xee, 0x6e, 0xd6, 0x9b, 0x00, 0xe8, 0x28, 0x1a, 0xe9, 0xb6, 0xec, 0x3f, 0xfc, 0x9a, 0x3e, 0xbe, 0x80, 0x4b,
- /* (2^ 61)P */ 0xc5, 0xd2, 0xae, 0x26, 0xc5, 0x73, 0x37, 0x7e, 0x9d, 0xa4, 0xc9, 0x53, 0xb4, 0xfc, 0x4a, 0x1b, 0x4d, 0xb2, 0xff, 0xba, 0xd7, 0xbd, 0x20, 0xa9, 0x0e, 0x40, 0x2d, 0x12, 0x9f, 0x69, 0x54, 0x7c,
- /* (2^ 62)P */ 0xc8, 0x4b, 0xa9, 0x4f, 0xe1, 0xc8, 0x46, 0xef, 0x5e, 0xed, 0x52, 0x29, 0xce, 0x74, 0xb0, 0xe0, 0xd5, 0x85, 0xd8, 0xdb, 0xe1, 0x50, 0xa4, 0xbe, 0x2c, 0x71, 0x0f, 0x32, 0x49, 0x86, 0xb6, 0x61,
- /* (2^ 63)P */ 0xd1, 0xbd, 0xcc, 0x09, 0x73, 0x5f, 0x48, 0x8a, 0x2d, 0x1a, 0x4d, 0x7d, 0x0d, 0x32, 0x06, 0xbd, 0xf4, 0xbe, 0x2d, 0x32, 0x73, 0x29, 0x23, 0x25, 0x70, 0xf7, 0x17, 0x8c, 0x75, 0xc4, 0x5d, 0x44,
- /* (2^ 64)P */ 0x3c, 0x93, 0xc8, 0x7c, 0x17, 0x34, 0x04, 0xdb, 0x9f, 0x05, 0xea, 0x75, 0x21, 0xe8, 0x6f, 0xed, 0x34, 0xdb, 0x53, 0xc0, 0xfd, 0xbe, 0xfe, 0x1e, 0x99, 0xaf, 0x5d, 0xc6, 0x67, 0xe8, 0xdb, 0x4a,
- /* (2^ 65)P */ 0xdf, 0x09, 0x06, 0xa9, 0xa2, 0x71, 0xcd, 0x3a, 0x50, 0x40, 0xd0, 0x6d, 0x85, 0x91, 0xe9, 0xe5, 0x3c, 0xc2, 0x57, 0x81, 0x68, 0x9b, 0xc6, 0x1e, 0x4d, 0xfe, 0x5c, 0x88, 0xf6, 0x27, 0x74, 0x69,
- /* (2^ 66)P */ 0x51, 0xa8, 0xe1, 0x65, 0x9b, 0x7b, 0xbe, 0xd7, 0xdd, 0x36, 0xc5, 0x22, 0xd5, 0x28, 0x3d, 0xa0, 0x45, 0xb6, 0xd2, 0x8f, 0x65, 0x9d, 0x39, 0x28, 0xe1, 0x41, 0x26, 0x7c, 0xe1, 0xb7, 0xe5, 0x49,
- /* (2^ 67)P */ 0xa4, 0x57, 0x04, 0x70, 0x98, 0x3a, 0x8c, 0x6f, 0x78, 0x67, 0xbb, 0x5e, 0xa2, 0xf0, 0x78, 0x50, 0x0f, 0x96, 0x82, 0xc3, 0xcb, 0x3c, 0x3c, 0xd1, 0xb1, 0x84, 0xdf, 0xa7, 0x58, 0x32, 0x00, 0x2e,
- /* (2^ 68)P */ 0x1c, 0x6a, 0x29, 0xe6, 0x9b, 0xf3, 0xd1, 0x8a, 0xb2, 0xbf, 0x5f, 0x2a, 0x65, 0xaa, 0xee, 0xc1, 0xcb, 0xf3, 0x26, 0xfd, 0x73, 0x06, 0xee, 0x33, 0xcc, 0x2c, 0x9d, 0xa6, 0x73, 0x61, 0x25, 0x59,
- /* (2^ 69)P */ 0x41, 0xfc, 0x18, 0x4e, 0xaa, 0x07, 0xea, 0x41, 0x1e, 0xa5, 0x87, 0x7c, 0x52, 0x19, 0xfc, 0xd9, 0x6f, 0xca, 0x31, 0x58, 0x80, 0xcb, 0xaa, 0xbd, 0x4f, 0x69, 0x16, 0xc9, 0x2d, 0x65, 0x5b, 0x44,
- /* (2^ 70)P */ 0x15, 0x23, 0x17, 0xf2, 0xa7, 0xa3, 0x92, 0xce, 0x64, 0x99, 0x1b, 0xe1, 0x2d, 0x28, 0xdc, 0x1e, 0x4a, 0x31, 0x4c, 0xe0, 0xaf, 0x3a, 0x82, 0xa1, 0x86, 0xf5, 0x7c, 0x43, 0x94, 0x2d, 0x0a, 0x79,
- /* (2^ 71)P */ 0x09, 0xe0, 0xf6, 0x93, 0xfb, 0x47, 0xc4, 0x71, 0x76, 0x52, 0x84, 0x22, 0x67, 0xa5, 0x22, 0x89, 0x69, 0x51, 0x4f, 0x20, 0x3b, 0x90, 0x70, 0xbf, 0xfe, 0x19, 0xa3, 0x1b, 0x89, 0x89, 0x7a, 0x2f,
- /* (2^ 72)P */ 0x0c, 0x14, 0xe2, 0x77, 0xb5, 0x8e, 0xa0, 0x02, 0xf4, 0xdc, 0x7b, 0x42, 0xd4, 0x4e, 0x9a, 0xed, 0xd1, 0x3c, 0x32, 0xe4, 0x44, 0xec, 0x53, 0x52, 0x5b, 0x35, 0xe9, 0x14, 0x3c, 0x36, 0x88, 0x3e,
- /* (2^ 73)P */ 0x8c, 0x0b, 0x11, 0x77, 0x42, 0xc1, 0x66, 0xaa, 0x90, 0x33, 0xa2, 0x10, 0x16, 0x39, 0xe0, 0x1a, 0xa2, 0xc2, 0x3f, 0xc9, 0x12, 0xbd, 0x30, 0x20, 0xab, 0xc7, 0x55, 0x95, 0x57, 0x41, 0xe1, 0x3e,
- /* (2^ 74)P */ 0x41, 0x7d, 0x6e, 0x6d, 0x3a, 0xde, 0x14, 0x92, 0xfe, 0x7e, 0xf1, 0x07, 0x86, 0xd8, 0xcd, 0x3c, 0x17, 0x12, 0xe1, 0xf8, 0x88, 0x12, 0x4f, 0x67, 0xd0, 0x93, 0x9f, 0x32, 0x0f, 0x25, 0x82, 0x56,
- /* (2^ 75)P */ 0x6e, 0x39, 0x2e, 0x6d, 0x13, 0x0b, 0xf0, 0x6c, 0xbf, 0xde, 0x14, 0x10, 0x6f, 0xf8, 0x4c, 0x6e, 0x83, 0x4e, 0xcc, 0xbf, 0xb5, 0xb1, 0x30, 0x59, 0xb6, 0x16, 0xba, 0x8a, 0xb4, 0x69, 0x70, 0x04,
- /* (2^ 76)P */ 0x93, 0x07, 0xb2, 0x69, 0xab, 0xe4, 0x4c, 0x0d, 0x9e, 0xfb, 0xd0, 0x97, 0x1a, 0xb9, 0x4d, 0xb2, 0x1d, 0xd0, 0x00, 0x4e, 0xf5, 0x50, 0xfa, 0xcd, 0xb5, 0xdd, 0x8b, 0x36, 0x85, 0x10, 0x1b, 0x22,
- /* (2^ 77)P */ 0xd2, 0xd8, 0xe3, 0xb1, 0x68, 0x94, 0xe5, 0xe7, 0x93, 0x2f, 0x12, 0xbd, 0x63, 0x65, 0xc5, 0x53, 0x09, 0x3f, 0x66, 0xe0, 0x03, 0xa9, 0xe8, 0xee, 0x42, 0x3d, 0xbe, 0xcb, 0x62, 0xa6, 0xef, 0x61,
- /* (2^ 78)P */ 0x2a, 0xab, 0x6e, 0xde, 0xdd, 0xdd, 0xf8, 0x2c, 0x31, 0xf2, 0x35, 0x14, 0xd5, 0x0a, 0xf8, 0x9b, 0x73, 0x49, 0xf0, 0xc9, 0xce, 0xda, 0xea, 0x5d, 0x27, 0x9b, 0xd2, 0x41, 0x5d, 0x5b, 0x27, 0x29,
- /* (2^ 79)P */ 0x4f, 0xf1, 0xeb, 0x95, 0x08, 0x0f, 0xde, 0xcf, 0xa7, 0x05, 0x49, 0x05, 0x6b, 0xb9, 0xaa, 0xb9, 0xfd, 0x20, 0xc4, 0xa1, 0xd9, 0x0d, 0xe8, 0xca, 0xc7, 0xbb, 0x73, 0x16, 0x2f, 0xbf, 0x63, 0x0a,
- /* (2^ 80)P */ 0x8c, 0xbc, 0x8f, 0x95, 0x11, 0x6e, 0x2f, 0x09, 0xad, 0x2f, 0x82, 0x04, 0xe8, 0x81, 0x2a, 0x67, 0x17, 0x25, 0xd5, 0x60, 0x15, 0x35, 0xc8, 0xca, 0xf8, 0x92, 0xf1, 0xc8, 0x22, 0x77, 0x3f, 0x6f,
- /* (2^ 81)P */ 0xb7, 0x94, 0xe8, 0xc2, 0xcc, 0x90, 0xba, 0xf8, 0x0d, 0x9f, 0xff, 0x38, 0xa4, 0x57, 0x75, 0x2c, 0x59, 0x23, 0xe5, 0x5a, 0x85, 0x1d, 0x4d, 0x89, 0x69, 0x3d, 0x74, 0x7b, 0x15, 0x22, 0xe1, 0x68,
- /* (2^ 82)P */ 0xf3, 0x19, 0xb9, 0xcf, 0x70, 0x55, 0x7e, 0xd8, 0xb9, 0x8d, 0x79, 0x95, 0xcd, 0xde, 0x2c, 0x3f, 0xce, 0xa2, 0xc0, 0x10, 0x47, 0x15, 0x21, 0x21, 0xb2, 0xc5, 0x6d, 0x24, 0x15, 0xa1, 0x66, 0x3c,
- /* (2^ 83)P */ 0x72, 0xcb, 0x4e, 0x29, 0x62, 0xc5, 0xed, 0xcb, 0x16, 0x0b, 0x28, 0x6a, 0xc3, 0x43, 0x71, 0xba, 0x67, 0x8b, 0x07, 0xd4, 0xef, 0xc2, 0x10, 0x96, 0x1e, 0x4b, 0x6a, 0x94, 0x5d, 0x73, 0x44, 0x61,
- /* (2^ 84)P */ 0x50, 0x33, 0x5b, 0xd7, 0x1e, 0x11, 0x6f, 0x53, 0x1b, 0xd8, 0x41, 0x20, 0x8c, 0xdb, 0x11, 0x02, 0x3c, 0x41, 0x10, 0x0e, 0x00, 0xb1, 0x3c, 0xf9, 0x76, 0x88, 0x9e, 0x03, 0x3c, 0xfd, 0x9d, 0x14,
- /* (2^ 85)P */ 0x5b, 0x15, 0x63, 0x6b, 0xe4, 0xdd, 0x79, 0xd4, 0x76, 0x79, 0x83, 0x3c, 0xe9, 0x15, 0x6e, 0xb6, 0x38, 0xe0, 0x13, 0x1f, 0x3b, 0xe4, 0xfd, 0xda, 0x35, 0x0b, 0x4b, 0x2e, 0x1a, 0xda, 0xaf, 0x5f,
- /* (2^ 86)P */ 0x81, 0x75, 0x19, 0x17, 0xdf, 0xbb, 0x00, 0x36, 0xc2, 0xd2, 0x3c, 0xbe, 0x0b, 0x05, 0x72, 0x39, 0x86, 0xbe, 0xd5, 0xbd, 0x6d, 0x90, 0x38, 0x59, 0x0f, 0x86, 0x9b, 0x3f, 0xe4, 0xe5, 0xfc, 0x34,
- /* (2^ 87)P */ 0x02, 0x4d, 0xd1, 0x42, 0xcd, 0xa4, 0xa8, 0x75, 0x65, 0xdf, 0x41, 0x34, 0xc5, 0xab, 0x8d, 0x82, 0xd3, 0x31, 0xe1, 0xd2, 0xed, 0xab, 0xdc, 0x33, 0x5f, 0xd2, 0x14, 0xb8, 0x6f, 0xd7, 0xba, 0x3e,
- /* (2^ 88)P */ 0x0f, 0xe1, 0x70, 0x6f, 0x56, 0x6f, 0x90, 0xd4, 0x5a, 0x0f, 0x69, 0x51, 0xaa, 0xf7, 0x12, 0x5d, 0xf2, 0xfc, 0xce, 0x76, 0x6e, 0xb1, 0xad, 0x45, 0x99, 0x29, 0x23, 0xad, 0xae, 0x68, 0xf7, 0x01,
- /* (2^ 89)P */ 0xbd, 0xfe, 0x48, 0x62, 0x7b, 0xc7, 0x6c, 0x2b, 0xfd, 0xaf, 0x3a, 0xec, 0x28, 0x06, 0xd3, 0x3c, 0x6a, 0x48, 0xef, 0xd4, 0x80, 0x0b, 0x1c, 0xce, 0x23, 0x6c, 0xf6, 0xa6, 0x2e, 0xff, 0x3b, 0x4c,
- /* (2^ 90)P */ 0x5f, 0xeb, 0xea, 0x4a, 0x09, 0xc4, 0x2e, 0x3f, 0xa7, 0x2c, 0x37, 0x6e, 0x28, 0x9b, 0xb1, 0x61, 0x1d, 0x70, 0x2a, 0xde, 0x66, 0xa9, 0xef, 0x5e, 0xef, 0xe3, 0x55, 0xde, 0x65, 0x05, 0xb2, 0x23,
- /* (2^ 91)P */ 0x57, 0x85, 0xd5, 0x79, 0x52, 0xca, 0x01, 0xe3, 0x4f, 0x87, 0xc2, 0x27, 0xce, 0xd4, 0xb2, 0x07, 0x67, 0x1d, 0xcf, 0x9d, 0x8a, 0xcd, 0x32, 0xa5, 0x56, 0xff, 0x2b, 0x3f, 0xe2, 0xfe, 0x52, 0x2a,
- /* (2^ 92)P */ 0x3d, 0x66, 0xd8, 0x7c, 0xb3, 0xef, 0x24, 0x86, 0x94, 0x75, 0xbd, 0xff, 0x20, 0xac, 0xc7, 0xbb, 0x45, 0x74, 0xd3, 0x82, 0x9c, 0x5e, 0xb8, 0x57, 0x66, 0xec, 0xa6, 0x86, 0xcb, 0x52, 0x30, 0x7b,
- /* (2^ 93)P */ 0x1e, 0xe9, 0x25, 0x25, 0xad, 0xf0, 0x82, 0x34, 0xa0, 0xdc, 0x8e, 0xd2, 0x43, 0x80, 0xb6, 0x2c, 0x3a, 0x00, 0x1b, 0x2e, 0x05, 0x6d, 0x4f, 0xaf, 0x0a, 0x1b, 0x78, 0x29, 0x25, 0x8c, 0x5f, 0x18,
- /* (2^ 94)P */ 0xd6, 0xe0, 0x0c, 0xd8, 0x5b, 0xde, 0x41, 0xaa, 0xd6, 0xe9, 0x53, 0x68, 0x41, 0xb2, 0x07, 0x94, 0x3a, 0x4c, 0x7f, 0x35, 0x6e, 0xc3, 0x3e, 0x56, 0xce, 0x7b, 0x29, 0x0e, 0xdd, 0xb8, 0xc4, 0x4c,
- /* (2^ 95)P */ 0x0e, 0x73, 0xb8, 0xff, 0x52, 0x1a, 0xfc, 0xa2, 0x37, 0x8e, 0x05, 0x67, 0x6e, 0xf1, 0x11, 0x18, 0xe1, 0x4e, 0xdf, 0xcd, 0x66, 0xa3, 0xf9, 0x10, 0x99, 0xf0, 0xb9, 0xa0, 0xc4, 0xa0, 0xf4, 0x72,
- /* (2^ 96)P */ 0xa7, 0x4e, 0x3f, 0x66, 0x6f, 0xc0, 0x16, 0x8c, 0xba, 0x0f, 0x97, 0x4e, 0xf7, 0x3a, 0x3b, 0x69, 0x45, 0xc3, 0x9e, 0xd6, 0xf1, 0xe7, 0x02, 0x21, 0x89, 0x80, 0x8a, 0x96, 0xbc, 0x3c, 0xa5, 0x0b,
- /* (2^ 97)P */ 0x37, 0x55, 0xa1, 0xfe, 0xc7, 0x9d, 0x3d, 0xca, 0x93, 0x64, 0x53, 0x51, 0xbb, 0x24, 0x68, 0x4c, 0xb1, 0x06, 0x40, 0x84, 0x14, 0x63, 0x88, 0xb9, 0x60, 0xcc, 0x54, 0xb4, 0x2a, 0xa7, 0xd2, 0x40,
- /* (2^ 98)P */ 0x75, 0x09, 0x57, 0x12, 0xb7, 0xa1, 0x36, 0x59, 0x57, 0xa6, 0xbd, 0xde, 0x48, 0xd6, 0xb9, 0x91, 0xea, 0x30, 0x43, 0xb6, 0x4b, 0x09, 0x44, 0x33, 0xd0, 0x51, 0xee, 0x12, 0x0d, 0xa1, 0x6b, 0x00,
- /* (2^ 99)P */ 0x58, 0x5d, 0xde, 0xf5, 0x68, 0x84, 0x22, 0x19, 0xb0, 0x05, 0xcc, 0x38, 0x4c, 0x2f, 0xb1, 0x0e, 0x90, 0x19, 0x60, 0xd5, 0x9d, 0x9f, 0x03, 0xa1, 0x0b, 0x0e, 0xff, 0x4f, 0xce, 0xd4, 0x02, 0x45,
- /* (2^100)P */ 0x89, 0xc1, 0x37, 0x68, 0x10, 0x54, 0x20, 0xeb, 0x3c, 0xb9, 0xd3, 0x6d, 0x4c, 0x54, 0xf6, 0xd0, 0x4f, 0xd7, 0x16, 0xc4, 0x64, 0x70, 0x72, 0x40, 0xf0, 0x2e, 0x50, 0x4b, 0x11, 0xc6, 0x15, 0x6e,
- /* (2^101)P */ 0x6b, 0xa7, 0xb1, 0xcf, 0x98, 0xa3, 0xf2, 0x4d, 0xb1, 0xf6, 0xf2, 0x19, 0x74, 0x6c, 0x25, 0x11, 0x43, 0x60, 0x6e, 0x06, 0x62, 0x79, 0x49, 0x4a, 0x44, 0x5b, 0x35, 0x41, 0xab, 0x3a, 0x5b, 0x70,
- /* (2^102)P */ 0xd8, 0xb1, 0x97, 0xd7, 0x36, 0xf5, 0x5e, 0x36, 0xdb, 0xf0, 0xdd, 0x22, 0xd6, 0x6b, 0x07, 0x00, 0x88, 0x5a, 0x57, 0xe0, 0xb0, 0x33, 0xbf, 0x3b, 0x4d, 0xca, 0xe4, 0xc8, 0x05, 0xaa, 0x77, 0x37,
- /* (2^103)P */ 0x5f, 0xdb, 0x78, 0x55, 0xc8, 0x45, 0x27, 0x39, 0xe2, 0x5a, 0xae, 0xdb, 0x49, 0x41, 0xda, 0x6f, 0x67, 0x98, 0xdc, 0x8a, 0x0b, 0xb0, 0xf0, 0xb1, 0xa3, 0x1d, 0x6f, 0xd3, 0x37, 0x34, 0x96, 0x09,
- /* (2^104)P */ 0x53, 0x38, 0xdc, 0xa5, 0x90, 0x4e, 0x82, 0x7e, 0xbd, 0x5c, 0x13, 0x1f, 0x64, 0xf6, 0xb5, 0xcc, 0xcc, 0x8f, 0xce, 0x87, 0x6c, 0xd8, 0x36, 0x67, 0x9f, 0x24, 0x04, 0x66, 0xe2, 0x3c, 0x5f, 0x62,
- /* (2^105)P */ 0x3f, 0xf6, 0x02, 0x95, 0x05, 0xc8, 0x8a, 0xaf, 0x69, 0x14, 0x35, 0x2e, 0x0a, 0xe7, 0x05, 0x0c, 0x05, 0x63, 0x4b, 0x76, 0x9c, 0x2e, 0x29, 0x35, 0xc3, 0x3a, 0xe2, 0xc7, 0x60, 0x43, 0x39, 0x1a,
- /* (2^106)P */ 0x64, 0x32, 0x18, 0x51, 0x32, 0xd5, 0xc6, 0xd5, 0x4f, 0xb7, 0xc2, 0x43, 0xbd, 0x5a, 0x06, 0x62, 0x9b, 0x3f, 0x97, 0x3b, 0xd0, 0xf5, 0xfb, 0xb5, 0x5e, 0x6e, 0x20, 0x61, 0x36, 0xda, 0xa3, 0x13,
- /* (2^107)P */ 0xe5, 0x94, 0x5d, 0x72, 0x37, 0x58, 0xbd, 0xc6, 0xc5, 0x16, 0x50, 0x20, 0x12, 0x09, 0xe3, 0x18, 0x68, 0x3c, 0x03, 0x70, 0x15, 0xce, 0x88, 0x20, 0x87, 0x79, 0x83, 0x5c, 0x49, 0x1f, 0xba, 0x7f,
- /* (2^108)P */ 0x9d, 0x07, 0xf9, 0xf2, 0x23, 0x74, 0x8c, 0x5a, 0xc5, 0x3f, 0x02, 0x34, 0x7b, 0x15, 0x35, 0x17, 0x51, 0xb3, 0xfa, 0xd2, 0x9a, 0xb4, 0xf9, 0xe4, 0x3c, 0xe3, 0x78, 0xc8, 0x72, 0xff, 0x91, 0x66,
- /* (2^109)P */ 0x3e, 0xff, 0x5e, 0xdc, 0xde, 0x2a, 0x2c, 0x12, 0xf4, 0x6c, 0x95, 0xd8, 0xf1, 0x4b, 0xdd, 0xf8, 0xda, 0x5b, 0x9e, 0x9e, 0x5d, 0x20, 0x86, 0xeb, 0x43, 0xc7, 0x75, 0xd9, 0xb9, 0x92, 0x9b, 0x04,
- /* (2^110)P */ 0x5a, 0xc0, 0xf6, 0xb0, 0x30, 0x97, 0x37, 0xa5, 0x53, 0xa5, 0xf3, 0xc6, 0xac, 0xff, 0xa0, 0x72, 0x6d, 0xcd, 0x0d, 0xb2, 0x34, 0x2c, 0x03, 0xb0, 0x4a, 0x16, 0xd5, 0x88, 0xbc, 0x9d, 0x0e, 0x47,
- /* (2^111)P */ 0x47, 0xc0, 0x37, 0xa2, 0x0c, 0xf1, 0x9c, 0xb1, 0xa2, 0x81, 0x6c, 0x1f, 0x71, 0x66, 0x54, 0xb6, 0x43, 0x0b, 0xd8, 0x6d, 0xd1, 0x1b, 0x32, 0xb3, 0x8e, 0xbe, 0x5f, 0x0c, 0x60, 0x4f, 0xc1, 0x48,
- /* (2^112)P */ 0x03, 0xc8, 0xa6, 0x4a, 0x26, 0x1c, 0x45, 0x66, 0xa6, 0x7d, 0xfa, 0xa4, 0x04, 0x39, 0x6e, 0xb6, 0x95, 0x83, 0x12, 0xb3, 0xb0, 0x19, 0x5f, 0xd4, 0x10, 0xbc, 0xc9, 0xc3, 0x27, 0x26, 0x60, 0x31,
- /* (2^113)P */ 0x0d, 0xe1, 0xe4, 0x32, 0x48, 0xdc, 0x20, 0x31, 0xf7, 0x17, 0xc7, 0x56, 0x67, 0xc4, 0x20, 0xeb, 0x94, 0x02, 0x28, 0x67, 0x3f, 0x2e, 0xf5, 0x00, 0x09, 0xc5, 0x30, 0x47, 0xc1, 0x4f, 0x6d, 0x56,
- /* (2^114)P */ 0x06, 0x72, 0x83, 0xfd, 0x40, 0x5d, 0x3a, 0x7e, 0x7a, 0x54, 0x59, 0x71, 0xdc, 0x26, 0xe9, 0xc1, 0x95, 0x60, 0x8d, 0xa6, 0xfb, 0x30, 0x67, 0x21, 0xa7, 0xce, 0x69, 0x3f, 0x84, 0xc3, 0xe8, 0x22,
- /* (2^115)P */ 0x2b, 0x4b, 0x0e, 0x93, 0xe8, 0x74, 0xd0, 0x33, 0x16, 0x58, 0xd1, 0x84, 0x0e, 0x35, 0xe4, 0xb6, 0x65, 0x23, 0xba, 0xd6, 0x6a, 0xc2, 0x34, 0x55, 0xf3, 0xf3, 0xf1, 0x89, 0x2f, 0xc1, 0x73, 0x77,
- /* (2^116)P */ 0xaa, 0x62, 0x79, 0xa5, 0x4d, 0x40, 0xba, 0x8c, 0x56, 0xce, 0x99, 0x19, 0xa8, 0x97, 0x98, 0x5b, 0xfc, 0x92, 0x16, 0x12, 0x2f, 0x86, 0x8e, 0x50, 0x91, 0xc2, 0x93, 0xa0, 0x7f, 0x90, 0x81, 0x3a,
- /* (2^117)P */ 0x10, 0xa5, 0x25, 0x47, 0xff, 0xd0, 0xde, 0x0d, 0x03, 0xc5, 0x3f, 0x67, 0x10, 0xcc, 0xd8, 0x10, 0x89, 0x4e, 0x1f, 0x9f, 0x1c, 0x15, 0x9d, 0x5b, 0x4c, 0xa4, 0x09, 0xcb, 0xd5, 0xc1, 0xa5, 0x32,
- /* (2^118)P */ 0xfb, 0x41, 0x05, 0xb9, 0x42, 0xa4, 0x0a, 0x1e, 0xdb, 0x85, 0xb4, 0xc1, 0x7c, 0xeb, 0x85, 0x5f, 0xe5, 0xf2, 0x9d, 0x8a, 0xce, 0x95, 0xe5, 0xbe, 0x36, 0x22, 0x42, 0x22, 0xc7, 0x96, 0xe4, 0x25,
- /* (2^119)P */ 0xb9, 0xe5, 0x0f, 0xcd, 0x46, 0x3c, 0xdf, 0x5e, 0x88, 0x33, 0xa4, 0xd2, 0x7e, 0x5a, 0xe7, 0x34, 0x52, 0xe3, 0x61, 0xd7, 0x11, 0xde, 0x88, 0xe4, 0x5c, 0x54, 0x85, 0xa0, 0x01, 0x8a, 0x87, 0x0e,
- /* (2^120)P */ 0x04, 0xbb, 0x21, 0xe0, 0x77, 0x3c, 0x49, 0xba, 0x9a, 0x89, 0xdf, 0xc7, 0x43, 0x18, 0x4d, 0x2b, 0x67, 0x0d, 0xe8, 0x7a, 0x48, 0x7a, 0xa3, 0x9e, 0x94, 0x17, 0xe4, 0x11, 0x80, 0x95, 0xa9, 0x67,
- /* (2^121)P */ 0x65, 0xb0, 0x97, 0x66, 0x1a, 0x05, 0x58, 0x4b, 0xd4, 0xa6, 0x6b, 0x8d, 0x7d, 0x3f, 0xe3, 0x47, 0xc1, 0x46, 0xca, 0x83, 0xd4, 0xa8, 0x4d, 0xbb, 0x0d, 0xdb, 0xc2, 0x81, 0xa1, 0xca, 0xbe, 0x68,
- /* (2^122)P */ 0xa5, 0x9a, 0x98, 0x0b, 0xe9, 0x80, 0x89, 0x8d, 0x9b, 0xc9, 0x93, 0x2c, 0x4a, 0xb1, 0x5e, 0xf9, 0xa2, 0x73, 0x6e, 0x79, 0xc4, 0xc7, 0xc6, 0x51, 0x69, 0xb5, 0xef, 0xb5, 0x63, 0x83, 0x22, 0x6e,
- /* (2^123)P */ 0xc8, 0x24, 0xd6, 0x2d, 0xb0, 0xc0, 0xbb, 0xc6, 0xee, 0x70, 0x81, 0xec, 0x7d, 0xb4, 0x7e, 0x77, 0xa9, 0xaf, 0xcf, 0x04, 0xa0, 0x15, 0xde, 0x3c, 0x9b, 0xbf, 0x60, 0x71, 0x08, 0xbc, 0xc6, 0x1d,
- /* (2^124)P */ 0x02, 0x40, 0xc3, 0xee, 0x43, 0xe0, 0x07, 0x2e, 0x7f, 0xdc, 0x68, 0x7a, 0x67, 0xfc, 0xe9, 0x18, 0x9a, 0x5b, 0xd1, 0x8b, 0x18, 0x03, 0xda, 0xd8, 0x53, 0x82, 0x56, 0x00, 0xbb, 0xc3, 0xfb, 0x48,
- /* (2^125)P */ 0xe1, 0x4c, 0x65, 0xfb, 0x4c, 0x7d, 0x54, 0x57, 0xad, 0xe2, 0x58, 0xa0, 0x82, 0x5b, 0x56, 0xd3, 0x78, 0x44, 0x15, 0xbf, 0x0b, 0xaf, 0x3e, 0xf6, 0x18, 0xbb, 0xdf, 0x14, 0xf1, 0x1e, 0x53, 0x47,
- /* (2^126)P */ 0x87, 0xc5, 0x78, 0x42, 0x0a, 0x63, 0xec, 0xe1, 0xf3, 0x83, 0x8e, 0xca, 0x46, 0xd5, 0x07, 0x55, 0x2b, 0x0c, 0xdc, 0x3a, 0xc6, 0x35, 0xe1, 0x85, 0x4e, 0x84, 0x82, 0x56, 0xa8, 0xef, 0xa7, 0x0a,
- /* (2^127)P */ 0x15, 0xf6, 0xe1, 0xb3, 0xa8, 0x1b, 0x69, 0x72, 0xfa, 0x3f, 0xbe, 0x1f, 0x70, 0xe9, 0xb4, 0x32, 0x68, 0x78, 0xbb, 0x39, 0x2e, 0xd9, 0xb6, 0x97, 0xe8, 0x39, 0x2e, 0xa0, 0xde, 0x53, 0xfe, 0x2c,
- /* (2^128)P */ 0xb0, 0x52, 0xcd, 0x85, 0xcd, 0x92, 0x73, 0x68, 0x31, 0x98, 0xe2, 0x10, 0xc9, 0x66, 0xff, 0x27, 0x06, 0x2d, 0x83, 0xa9, 0x56, 0x45, 0x13, 0x97, 0xa0, 0xf8, 0x84, 0x0a, 0x36, 0xb0, 0x9b, 0x26,
- /* (2^129)P */ 0x5c, 0xf8, 0x43, 0x76, 0x45, 0x55, 0x6e, 0x70, 0x1b, 0x7d, 0x59, 0x9b, 0x8c, 0xa4, 0x34, 0x37, 0x72, 0xa4, 0xef, 0xc6, 0xe8, 0x91, 0xee, 0x7a, 0xe0, 0xd9, 0xa9, 0x98, 0xc1, 0xab, 0xd6, 0x5c,
- /* (2^130)P */ 0x1a, 0xe4, 0x3c, 0xcb, 0x06, 0xde, 0x04, 0x0e, 0x38, 0xe1, 0x02, 0x34, 0x89, 0xeb, 0xc6, 0xd8, 0x72, 0x37, 0x6e, 0x68, 0xbb, 0x59, 0x46, 0x90, 0xc8, 0xa8, 0x6b, 0x74, 0x71, 0xc3, 0x15, 0x72,
- /* (2^131)P */ 0xd9, 0xa2, 0xe4, 0xea, 0x7e, 0xa9, 0x12, 0xfd, 0xc5, 0xf2, 0x94, 0x63, 0x51, 0xb7, 0x14, 0x95, 0x94, 0xf2, 0x08, 0x92, 0x80, 0xd5, 0x6f, 0x26, 0xb9, 0x26, 0x9a, 0x61, 0x85, 0x70, 0x84, 0x5c,
- /* (2^132)P */ 0xea, 0x94, 0xd6, 0xfe, 0x10, 0x54, 0x98, 0x52, 0x54, 0xd2, 0x2e, 0x4a, 0x93, 0x5b, 0x90, 0x3c, 0x67, 0xe4, 0x3b, 0x2d, 0x69, 0x47, 0xbb, 0x10, 0xe1, 0xe9, 0xe5, 0x69, 0x2d, 0x3d, 0x3b, 0x06,
- /* (2^133)P */ 0xeb, 0x7d, 0xa5, 0xdd, 0xee, 0x26, 0x27, 0x47, 0x91, 0x18, 0xf4, 0x10, 0xae, 0xc4, 0xb6, 0xef, 0x14, 0x76, 0x30, 0x7b, 0x91, 0x41, 0x16, 0x2b, 0x7c, 0x5b, 0xf4, 0xc4, 0x4f, 0x55, 0x7c, 0x11,
- /* (2^134)P */ 0x12, 0x88, 0x9d, 0x8f, 0x11, 0xf3, 0x7c, 0xc0, 0x39, 0x79, 0x01, 0x50, 0x20, 0xd8, 0xdb, 0x01, 0x27, 0x28, 0x1b, 0x17, 0xf4, 0x03, 0xe8, 0xd7, 0xea, 0x25, 0xd2, 0x87, 0x74, 0xe8, 0x15, 0x10,
- /* (2^135)P */ 0x4d, 0xcc, 0x3a, 0xd2, 0xfe, 0xe3, 0x8d, 0xc5, 0x2d, 0xbe, 0xa7, 0x94, 0xc2, 0x91, 0xdb, 0x50, 0x57, 0xf4, 0x9c, 0x1c, 0x3d, 0xd4, 0x94, 0x0b, 0x4a, 0x52, 0x37, 0x6e, 0xfa, 0x40, 0x16, 0x6b,
- /* (2^136)P */ 0x09, 0x0d, 0xda, 0x5f, 0x6c, 0x34, 0x2f, 0x69, 0x51, 0x31, 0x4d, 0xfa, 0x59, 0x1c, 0x0b, 0x20, 0x96, 0xa2, 0x77, 0x07, 0x76, 0x6f, 0xc4, 0xb8, 0xcf, 0xfb, 0xfd, 0x3f, 0x5f, 0x39, 0x38, 0x4b,
- /* (2^137)P */ 0x71, 0xd6, 0x54, 0xbe, 0x00, 0x5e, 0xd2, 0x18, 0xa6, 0xab, 0xc8, 0xbe, 0x82, 0x05, 0xd5, 0x60, 0x82, 0xb9, 0x78, 0x3b, 0x26, 0x8f, 0xad, 0x87, 0x32, 0x04, 0xda, 0x9c, 0x4e, 0xf6, 0xfd, 0x50,
- /* (2^138)P */ 0xf0, 0xdc, 0x78, 0xc5, 0xaa, 0x67, 0xf5, 0x90, 0x3b, 0x13, 0xa3, 0xf2, 0x0e, 0x9b, 0x1e, 0xef, 0x71, 0xde, 0xd9, 0x42, 0x92, 0xba, 0xeb, 0x0e, 0xc7, 0x01, 0x31, 0xf0, 0x9b, 0x3c, 0x47, 0x15,
- /* (2^139)P */ 0x95, 0x80, 0xb7, 0x56, 0xae, 0xe8, 0x77, 0x7c, 0x8e, 0x07, 0x6f, 0x6e, 0x66, 0xe7, 0x78, 0xb6, 0x1f, 0xba, 0x48, 0x53, 0x61, 0xb9, 0xa0, 0x2d, 0x0b, 0x3f, 0x73, 0xff, 0xc1, 0x31, 0xf9, 0x7c,
- /* (2^140)P */ 0x6c, 0x36, 0x0a, 0x0a, 0xf5, 0x57, 0xb3, 0x26, 0x32, 0xd7, 0x87, 0x2b, 0xf4, 0x8c, 0x70, 0xe9, 0xc0, 0xb2, 0x1c, 0xf9, 0xa5, 0xee, 0x3a, 0xc1, 0x4c, 0xbb, 0x43, 0x11, 0x99, 0x0c, 0xd9, 0x35,
- /* (2^141)P */ 0xdc, 0xd9, 0xa0, 0xa9, 0x04, 0xc4, 0xc1, 0x47, 0x51, 0xd2, 0x72, 0x19, 0x45, 0x58, 0x9e, 0x65, 0x31, 0x8c, 0xb3, 0x73, 0xc4, 0xa8, 0x75, 0x38, 0x24, 0x1f, 0x56, 0x79, 0xd3, 0x9e, 0xbd, 0x1f,
- /* (2^142)P */ 0x8d, 0xc2, 0x1e, 0xd4, 0x6f, 0xbc, 0xfa, 0x11, 0xca, 0x2d, 0x2a, 0xcd, 0xe3, 0xdf, 0xf8, 0x7e, 0x95, 0x45, 0x40, 0x8c, 0x5d, 0x3b, 0xe7, 0x72, 0x27, 0x2f, 0xb7, 0x54, 0x49, 0xfa, 0x35, 0x61,
- /* (2^143)P */ 0x9c, 0xb6, 0x24, 0xde, 0xa2, 0x32, 0xfc, 0xcc, 0x88, 0x5d, 0x09, 0x1f, 0x8c, 0x69, 0x55, 0x3f, 0x29, 0xf9, 0xc3, 0x5a, 0xed, 0x50, 0x33, 0xbe, 0xeb, 0x7e, 0x47, 0xca, 0x06, 0xf8, 0x9b, 0x5e,
- /* (2^144)P */ 0x68, 0x9f, 0x30, 0x3c, 0xb6, 0x8f, 0xce, 0xe9, 0xf4, 0xf9, 0xe1, 0x65, 0x35, 0xf6, 0x76, 0x53, 0xf1, 0x93, 0x63, 0x5a, 0xb3, 0xcf, 0xaf, 0xd1, 0x06, 0x35, 0x62, 0xe5, 0xed, 0xa1, 0x32, 0x66,
- /* (2^145)P */ 0x4c, 0xed, 0x2d, 0x0c, 0x39, 0x6c, 0x7d, 0x0b, 0x1f, 0xcb, 0x04, 0xdf, 0x81, 0x32, 0xcb, 0x56, 0xc7, 0xc3, 0xec, 0x49, 0x12, 0x5a, 0x30, 0x66, 0x2a, 0xa7, 0x8c, 0xa3, 0x60, 0x8b, 0x58, 0x5d,
- /* (2^146)P */ 0x2d, 0xf4, 0xe5, 0xe8, 0x78, 0xbf, 0xec, 0xa6, 0xec, 0x3e, 0x8a, 0x3c, 0x4b, 0xb4, 0xee, 0x86, 0x04, 0x16, 0xd2, 0xfb, 0x48, 0x9c, 0x21, 0xec, 0x31, 0x67, 0xc3, 0x17, 0xf5, 0x1a, 0xaf, 0x1a,
- /* (2^147)P */ 0xe7, 0xbd, 0x69, 0x67, 0x83, 0xa2, 0x06, 0xc3, 0xdb, 0x2a, 0x1e, 0x2b, 0x62, 0x80, 0x82, 0x20, 0xa6, 0x94, 0xff, 0xfb, 0x1f, 0xf5, 0x27, 0x80, 0x6b, 0xf2, 0x24, 0x11, 0xce, 0xa1, 0xcf, 0x76,
- /* (2^148)P */ 0xb6, 0xab, 0x22, 0x24, 0x56, 0x00, 0xeb, 0x18, 0xc3, 0x29, 0x8c, 0x8f, 0xd5, 0xc4, 0x77, 0xf3, 0x1a, 0x56, 0x31, 0xf5, 0x07, 0xc2, 0xbb, 0x4d, 0x27, 0x8a, 0x12, 0x82, 0xf0, 0xb7, 0x53, 0x02,
- /* (2^149)P */ 0xe0, 0x17, 0x2c, 0xb6, 0x1c, 0x09, 0x1f, 0x3d, 0xa9, 0x28, 0x46, 0xd6, 0xab, 0xe1, 0x60, 0x48, 0x53, 0x42, 0x9d, 0x30, 0x36, 0x74, 0xd1, 0x52, 0x76, 0xe5, 0xfa, 0x3e, 0xe1, 0x97, 0x6f, 0x35,
- /* (2^150)P */ 0x5b, 0x53, 0x50, 0xa1, 0x1a, 0xe1, 0x51, 0xd3, 0xcc, 0x78, 0xd8, 0x1d, 0xbb, 0x45, 0x6b, 0x3e, 0x98, 0x2c, 0xd9, 0xbe, 0x28, 0x61, 0x77, 0x0c, 0xb8, 0x85, 0x28, 0x03, 0x93, 0xae, 0x34, 0x1d,
- /* (2^151)P */ 0xc3, 0xa4, 0x5b, 0xa8, 0x8c, 0x48, 0xa0, 0x4b, 0xce, 0xe6, 0x9c, 0x3c, 0xc3, 0x48, 0x53, 0x98, 0x70, 0xa7, 0xbd, 0x97, 0x6f, 0x4c, 0x12, 0x66, 0x4a, 0x12, 0x54, 0x06, 0x29, 0xa0, 0x81, 0x0f,
- /* (2^152)P */ 0xfd, 0x86, 0x9b, 0x56, 0xa6, 0x9c, 0xd0, 0x9e, 0x2d, 0x9a, 0xaf, 0x18, 0xfd, 0x09, 0x10, 0x81, 0x0a, 0xc2, 0xd8, 0x93, 0x3f, 0xd0, 0x08, 0xff, 0x6b, 0xf2, 0xae, 0x9f, 0x19, 0x48, 0xa1, 0x52,
- /* (2^153)P */ 0x73, 0x1b, 0x8d, 0x2d, 0xdc, 0xf9, 0x03, 0x3e, 0x70, 0x1a, 0x96, 0x73, 0x18, 0x80, 0x05, 0x42, 0x70, 0x59, 0xa3, 0x41, 0xf0, 0x87, 0xd9, 0xc0, 0x49, 0xd5, 0xc0, 0xa1, 0x15, 0x1f, 0xaa, 0x07,
- /* (2^154)P */ 0x24, 0x72, 0xd2, 0x8c, 0xe0, 0x6c, 0xd4, 0xdf, 0x39, 0x42, 0x4e, 0x93, 0x4f, 0x02, 0x0a, 0x6d, 0x59, 0x7b, 0x89, 0x99, 0x63, 0x7a, 0x8a, 0x80, 0xa2, 0x95, 0x3d, 0xe1, 0xe9, 0x56, 0x45, 0x0a,
- /* (2^155)P */ 0x45, 0x30, 0xc1, 0xe9, 0x1f, 0x99, 0x1a, 0xd2, 0xb8, 0x51, 0x77, 0xfe, 0x48, 0x85, 0x0e, 0x9b, 0x35, 0x00, 0xf3, 0x4b, 0xcb, 0x43, 0xa6, 0x5d, 0x21, 0xf7, 0x40, 0x39, 0xd6, 0x28, 0xdb, 0x77,
- /* (2^156)P */ 0x11, 0x90, 0xdc, 0x4a, 0x61, 0xeb, 0x5e, 0xfc, 0xeb, 0x11, 0xc4, 0xe8, 0x9a, 0x41, 0x29, 0x52, 0x74, 0xcf, 0x1d, 0x7d, 0x78, 0xe7, 0xc3, 0x9e, 0xb5, 0x4c, 0x6e, 0x21, 0x3e, 0x05, 0x0d, 0x34,
- /* (2^157)P */ 0xb4, 0xf2, 0x8d, 0xb4, 0x39, 0xaf, 0xc7, 0xca, 0x94, 0x0a, 0xa1, 0x71, 0x28, 0xec, 0xfa, 0xc0, 0xed, 0x75, 0xa5, 0x5c, 0x24, 0x69, 0x0a, 0x14, 0x4c, 0x3a, 0x27, 0x34, 0x71, 0xc3, 0xf1, 0x0c,
- /* (2^158)P */ 0xa5, 0xb8, 0x24, 0xc2, 0x6a, 0x30, 0xee, 0xc8, 0xb0, 0x30, 0x49, 0xcb, 0x7c, 0xee, 0xea, 0x57, 0x4f, 0xe7, 0xcb, 0xaa, 0xbd, 0x06, 0xe8, 0xa1, 0x7d, 0x65, 0xeb, 0x2e, 0x74, 0x62, 0x9a, 0x7d,
- /* (2^159)P */ 0x30, 0x48, 0x6c, 0x54, 0xef, 0xb6, 0xb6, 0x9e, 0x2e, 0x6e, 0xb3, 0xdd, 0x1f, 0xca, 0x5c, 0x88, 0x05, 0x71, 0x0d, 0xef, 0x83, 0xf3, 0xb9, 0xe6, 0x12, 0x04, 0x2e, 0x9d, 0xef, 0x4f, 0x65, 0x58,
- /* (2^160)P */ 0x26, 0x8e, 0x0e, 0xbe, 0xff, 0xc4, 0x05, 0xa9, 0x6e, 0x81, 0x31, 0x9b, 0xdf, 0xe5, 0x2d, 0x94, 0xe1, 0x88, 0x2e, 0x80, 0x3f, 0x72, 0x7d, 0x49, 0x8d, 0x40, 0x2f, 0x60, 0xea, 0x4d, 0x68, 0x30,
- /* (2^161)P */ 0x34, 0xcb, 0xe6, 0xa3, 0x78, 0xa2, 0xe5, 0x21, 0xc4, 0x1d, 0x15, 0x5b, 0x6f, 0x6e, 0xfb, 0xae, 0x15, 0xca, 0x77, 0x9d, 0x04, 0x8e, 0x0b, 0xb3, 0x81, 0x89, 0xb9, 0x53, 0xcf, 0xc9, 0xc3, 0x28,
- /* (2^162)P */ 0x2a, 0xdd, 0x6c, 0x55, 0x21, 0xb7, 0x7f, 0x28, 0x74, 0x22, 0x02, 0x97, 0xa8, 0x7c, 0x31, 0x0d, 0x58, 0x32, 0x54, 0x3a, 0x42, 0xc7, 0x68, 0x74, 0x2f, 0x64, 0xb5, 0x4e, 0x46, 0x11, 0x7f, 0x4a,
- /* (2^163)P */ 0xa6, 0x3a, 0x19, 0x4d, 0x77, 0xa4, 0x37, 0xa2, 0xa1, 0x29, 0x21, 0xa9, 0x6e, 0x98, 0x65, 0xd8, 0x88, 0x1a, 0x7c, 0xf8, 0xec, 0x15, 0xc5, 0x24, 0xeb, 0xf5, 0x39, 0x5f, 0x57, 0x03, 0x40, 0x60,
- /* (2^164)P */ 0x27, 0x9b, 0x0a, 0x57, 0x89, 0xf1, 0xb9, 0x47, 0x78, 0x4b, 0x5e, 0x46, 0xde, 0xce, 0x98, 0x2b, 0x20, 0x5c, 0xb8, 0xdb, 0x51, 0xf5, 0x6d, 0x02, 0x01, 0x19, 0xe2, 0x47, 0x10, 0xd9, 0xfc, 0x74,
- /* (2^165)P */ 0xa3, 0xbf, 0xc1, 0x23, 0x0a, 0xa9, 0xe2, 0x13, 0xf6, 0x19, 0x85, 0x47, 0x4e, 0x07, 0xb0, 0x0c, 0x44, 0xcf, 0xf6, 0x3a, 0xbe, 0xcb, 0xf1, 0x5f, 0xbe, 0x2d, 0x81, 0xbe, 0x38, 0x54, 0xfe, 0x67,
- /* (2^166)P */ 0xb0, 0x05, 0x0f, 0xa4, 0x4f, 0xf6, 0x3c, 0xd1, 0x87, 0x37, 0x28, 0x32, 0x2f, 0xfb, 0x4d, 0x05, 0xea, 0x2a, 0x0d, 0x7f, 0x5b, 0x91, 0x73, 0x41, 0x4e, 0x0d, 0x61, 0x1f, 0x4f, 0x14, 0x2f, 0x48,
- /* (2^167)P */ 0x34, 0x82, 0x7f, 0xb4, 0x01, 0x02, 0x21, 0xf6, 0x90, 0xb9, 0x70, 0x9e, 0x92, 0xe1, 0x0a, 0x5d, 0x7c, 0x56, 0x49, 0xb0, 0x55, 0xf4, 0xd7, 0xdc, 0x01, 0x6f, 0x91, 0xf0, 0xf1, 0xd0, 0x93, 0x7e,
- /* (2^168)P */ 0xfa, 0xb4, 0x7d, 0x8a, 0xf1, 0xcb, 0x79, 0xdd, 0x2f, 0xc6, 0x74, 0x6f, 0xbf, 0x91, 0x83, 0xbe, 0xbd, 0x91, 0x82, 0x4b, 0xd1, 0x45, 0x71, 0x02, 0x05, 0x17, 0xbf, 0x2c, 0xea, 0x73, 0x5a, 0x58,
- /* (2^169)P */ 0xb2, 0x0d, 0x8a, 0x92, 0x3e, 0xa0, 0x5c, 0x48, 0xe7, 0x57, 0x28, 0x74, 0xa5, 0x01, 0xfc, 0x10, 0xa7, 0x51, 0xd5, 0xd6, 0xdb, 0x2e, 0x48, 0x2f, 0x8a, 0xdb, 0x8f, 0x04, 0xb5, 0x33, 0x04, 0x0f,
- /* (2^170)P */ 0x47, 0x62, 0xdc, 0xd7, 0x8d, 0x2e, 0xda, 0x60, 0x9a, 0x81, 0xd4, 0x8c, 0xd3, 0xc9, 0xb4, 0x88, 0x97, 0x66, 0xf6, 0x01, 0xc0, 0x3a, 0x03, 0x13, 0x75, 0x7d, 0x36, 0x3b, 0xfe, 0x24, 0x3b, 0x27,
- /* (2^171)P */ 0xd4, 0xb9, 0xb3, 0x31, 0x6a, 0xf6, 0xe8, 0xc6, 0xd5, 0x49, 0xdf, 0x94, 0xa4, 0x14, 0x15, 0x28, 0xa7, 0x3d, 0xb2, 0xc8, 0xdf, 0x6f, 0x72, 0xd1, 0x48, 0xe5, 0xde, 0x03, 0xd1, 0xe7, 0x3a, 0x4b,
- /* (2^172)P */ 0x7e, 0x9d, 0x4b, 0xce, 0x19, 0x6e, 0x25, 0xc6, 0x1c, 0xc6, 0xe3, 0x86, 0xf1, 0x5c, 0x5c, 0xff, 0x45, 0xc1, 0x8e, 0x4b, 0xa3, 0x3c, 0xc6, 0xac, 0x74, 0x65, 0xe6, 0xfe, 0x88, 0x18, 0x62, 0x74,
- /* (2^173)P */ 0x1e, 0x0a, 0x29, 0x45, 0x96, 0x40, 0x6f, 0x95, 0x2e, 0x96, 0x3a, 0x26, 0xe3, 0xf8, 0x0b, 0xef, 0x7b, 0x64, 0xc2, 0x5e, 0xeb, 0x50, 0x6a, 0xed, 0x02, 0x75, 0xca, 0x9d, 0x3a, 0x28, 0x94, 0x06,
- /* (2^174)P */ 0xd1, 0xdc, 0xa2, 0x43, 0x36, 0x96, 0x9b, 0x76, 0x53, 0x53, 0xfc, 0x09, 0xea, 0xc8, 0xb7, 0x42, 0xab, 0x7e, 0x39, 0x13, 0xee, 0x2a, 0x00, 0x4f, 0x3a, 0xd6, 0xb7, 0x19, 0x2c, 0x5e, 0x00, 0x63,
- /* (2^175)P */ 0xea, 0x3b, 0x02, 0x63, 0xda, 0x36, 0x67, 0xca, 0xb7, 0x99, 0x2a, 0xb1, 0x6d, 0x7f, 0x6c, 0x96, 0xe1, 0xc5, 0x37, 0xc5, 0x90, 0x93, 0xe0, 0xac, 0xee, 0x89, 0xaa, 0xa1, 0x63, 0x60, 0x69, 0x0b,
- /* (2^176)P */ 0xe5, 0x56, 0x8c, 0x28, 0x97, 0x3e, 0xb0, 0xeb, 0xe8, 0x8b, 0x8c, 0x93, 0x9f, 0x9f, 0x2a, 0x43, 0x71, 0x7f, 0x71, 0x5b, 0x3d, 0xa9, 0xa5, 0xa6, 0x97, 0x9d, 0x8f, 0xe1, 0xc3, 0xb4, 0x5f, 0x1a,
- /* (2^177)P */ 0xce, 0xcd, 0x60, 0x1c, 0xad, 0xe7, 0x94, 0x1c, 0xa0, 0xc4, 0x02, 0xfc, 0x43, 0x2a, 0x20, 0xee, 0x20, 0x6a, 0xc4, 0x67, 0xd8, 0xe4, 0xaf, 0x8d, 0x58, 0x7b, 0xc2, 0x8a, 0x3c, 0x26, 0x10, 0x0a,
- /* (2^178)P */ 0x4a, 0x2a, 0x43, 0xe4, 0xdf, 0xa9, 0xde, 0xd0, 0xc5, 0x77, 0x92, 0xbe, 0x7b, 0xf8, 0x6a, 0x85, 0x1a, 0xc7, 0x12, 0xc2, 0xac, 0x72, 0x84, 0xce, 0x91, 0x1e, 0xbb, 0x9b, 0x6d, 0x1b, 0x15, 0x6f,
- /* (2^179)P */ 0x6a, 0xd5, 0xee, 0x7c, 0x52, 0x6c, 0x77, 0x26, 0xec, 0xfa, 0xf8, 0xfb, 0xb7, 0x1c, 0x21, 0x7d, 0xcc, 0x09, 0x46, 0xfd, 0xa6, 0x66, 0xae, 0x37, 0x42, 0x0c, 0x77, 0xd2, 0x02, 0xb7, 0x81, 0x1f,
- /* (2^180)P */ 0x92, 0x83, 0xc5, 0xea, 0x57, 0xb0, 0xb0, 0x2f, 0x9d, 0x4e, 0x74, 0x29, 0xfe, 0x89, 0xdd, 0xe1, 0xf8, 0xb4, 0xbe, 0x17, 0xeb, 0xf8, 0x64, 0xc9, 0x1e, 0xd4, 0xa2, 0xc9, 0x73, 0x10, 0x57, 0x29,
- /* (2^181)P */ 0x54, 0xe2, 0xc0, 0x81, 0x89, 0xa1, 0x48, 0xa9, 0x30, 0x28, 0xb2, 0x65, 0x9b, 0x36, 0xf6, 0x2d, 0xc6, 0xd3, 0xcf, 0x5f, 0xd7, 0xb2, 0x3e, 0xa3, 0x1f, 0xa0, 0x99, 0x41, 0xec, 0xd6, 0x8c, 0x07,
- /* (2^182)P */ 0x2f, 0x0d, 0x90, 0xad, 0x41, 0x4a, 0x58, 0x4a, 0x52, 0x4c, 0xc7, 0xe2, 0x78, 0x2b, 0x14, 0x32, 0x78, 0xc9, 0x31, 0x84, 0x33, 0xe8, 0xc4, 0x68, 0xc2, 0x9f, 0x68, 0x08, 0x90, 0xea, 0x69, 0x7f,
- /* (2^183)P */ 0x65, 0x82, 0xa3, 0x46, 0x1e, 0xc8, 0xf2, 0x52, 0xfd, 0x32, 0xa8, 0x04, 0x2d, 0x07, 0x78, 0xfd, 0x94, 0x9e, 0x35, 0x25, 0xfa, 0xd5, 0xd7, 0x8c, 0xd2, 0x29, 0xcc, 0x54, 0x74, 0x1b, 0xe7, 0x4d,
- /* (2^184)P */ 0xc9, 0x6a, 0xda, 0x1e, 0xad, 0x60, 0xeb, 0x42, 0x3a, 0x9c, 0xc0, 0xdb, 0xdf, 0x37, 0xad, 0x0a, 0x91, 0xc1, 0x3c, 0xe3, 0x71, 0x4b, 0x00, 0x81, 0x3c, 0x80, 0x22, 0x51, 0x34, 0xbe, 0xe6, 0x44,
- /* (2^185)P */ 0xdb, 0x20, 0x19, 0xba, 0x88, 0x83, 0xfe, 0x03, 0x08, 0xb0, 0x0d, 0x15, 0x32, 0x7c, 0xd5, 0xf5, 0x29, 0x0c, 0xf6, 0x1a, 0x28, 0xc4, 0xc8, 0x49, 0xee, 0x1a, 0x70, 0xde, 0x18, 0xb5, 0xed, 0x21,
- /* (2^186)P */ 0x99, 0xdc, 0x06, 0x8f, 0x41, 0x3e, 0xb6, 0x7f, 0xb8, 0xd7, 0x66, 0xc1, 0x99, 0x0d, 0x46, 0xa4, 0x83, 0x0a, 0x52, 0xce, 0x48, 0x52, 0xdd, 0x24, 0x58, 0x83, 0x92, 0x2b, 0x71, 0xad, 0xc3, 0x5e,
- /* (2^187)P */ 0x0f, 0x93, 0x17, 0xbd, 0x5f, 0x2a, 0x02, 0x15, 0xe3, 0x70, 0x25, 0xd8, 0x77, 0x4a, 0xf6, 0xa4, 0x12, 0x37, 0x78, 0x15, 0x69, 0x8d, 0xbc, 0x12, 0xbb, 0x0a, 0x62, 0xfc, 0xc0, 0x94, 0x81, 0x49,
- /* (2^188)P */ 0x82, 0x6c, 0x68, 0x55, 0xd2, 0xd9, 0xa2, 0x38, 0xf0, 0x21, 0x3e, 0x19, 0xd9, 0x6b, 0x5c, 0x78, 0x84, 0x54, 0x4a, 0xb2, 0x1a, 0xc8, 0xd5, 0xe4, 0x89, 0x09, 0xe2, 0xb2, 0x60, 0x78, 0x30, 0x56,
- /* (2^189)P */ 0xc4, 0x74, 0x4d, 0x8b, 0xf7, 0x55, 0x9d, 0x42, 0x31, 0x01, 0x35, 0x43, 0x46, 0x83, 0xf1, 0x22, 0xff, 0x1f, 0xc7, 0x98, 0x45, 0xc2, 0x60, 0x1e, 0xef, 0x83, 0x99, 0x97, 0x14, 0xf0, 0xf2, 0x59,
- /* (2^190)P */ 0x44, 0x4a, 0x49, 0xeb, 0x56, 0x7d, 0xa4, 0x46, 0x8e, 0xa1, 0x36, 0xd6, 0x54, 0xa8, 0x22, 0x3e, 0x3b, 0x1c, 0x49, 0x74, 0x52, 0xe1, 0x46, 0xb3, 0xe7, 0xcd, 0x90, 0x53, 0x4e, 0xfd, 0xea, 0x2c,
- /* (2^191)P */ 0x75, 0x66, 0x0d, 0xbe, 0x38, 0x85, 0x8a, 0xba, 0x23, 0x8e, 0x81, 0x50, 0xbb, 0x74, 0x90, 0x4b, 0xc3, 0x04, 0xd3, 0x85, 0x90, 0xb8, 0xda, 0xcb, 0xc4, 0x92, 0x61, 0xe5, 0xe0, 0x4f, 0xa2, 0x61,
- /* (2^192)P */ 0xcb, 0x5b, 0x52, 0xdb, 0xe6, 0x15, 0x76, 0xcb, 0xca, 0xe4, 0x67, 0xa5, 0x35, 0x8c, 0x7d, 0xdd, 0x69, 0xdd, 0xfc, 0xca, 0x3a, 0x15, 0xb4, 0xe6, 0x66, 0x97, 0x3c, 0x7f, 0x09, 0x8e, 0x66, 0x2d,
- /* (2^193)P */ 0xf0, 0x5e, 0xe5, 0x5c, 0x26, 0x7e, 0x7e, 0xa5, 0x67, 0xb9, 0xd4, 0x7c, 0x52, 0x4e, 0x9f, 0x5d, 0xe5, 0xd1, 0x2f, 0x49, 0x06, 0x36, 0xc8, 0xfb, 0xae, 0xf7, 0xc3, 0xb7, 0xbe, 0x52, 0x0d, 0x09,
- /* (2^194)P */ 0x7c, 0x4d, 0x7b, 0x1e, 0x5a, 0x51, 0xb9, 0x09, 0xc0, 0x44, 0xda, 0x99, 0x25, 0x6a, 0x26, 0x1f, 0x04, 0x55, 0xc5, 0xe2, 0x48, 0x95, 0xc4, 0xa1, 0xcc, 0x15, 0x6f, 0x12, 0x87, 0x42, 0xf0, 0x7e,
- /* (2^195)P */ 0x15, 0xef, 0x30, 0xbd, 0x9d, 0x65, 0xd1, 0xfe, 0x7b, 0x27, 0xe0, 0xc4, 0xee, 0xb9, 0x4a, 0x8b, 0x91, 0x32, 0xdf, 0xa5, 0x36, 0x62, 0x4d, 0x88, 0x88, 0xf7, 0x5c, 0xbf, 0xa6, 0x6e, 0xd9, 0x1f,
- /* (2^196)P */ 0x9a, 0x0d, 0x19, 0x1f, 0x98, 0x61, 0xa1, 0x42, 0xc1, 0x52, 0x60, 0x7e, 0x50, 0x49, 0xd8, 0x61, 0xd5, 0x2c, 0x5a, 0x28, 0xbf, 0x13, 0xe1, 0x9f, 0xd8, 0x85, 0xad, 0xdb, 0x76, 0xd6, 0x22, 0x7c,
- /* (2^197)P */ 0x7d, 0xd2, 0xfb, 0x2b, 0xed, 0x70, 0xe7, 0x82, 0xa5, 0xf5, 0x96, 0xe9, 0xec, 0xb2, 0x05, 0x4c, 0x50, 0x01, 0x90, 0xb0, 0xc2, 0xa9, 0x40, 0xcd, 0x64, 0xbf, 0xd9, 0x13, 0x92, 0x31, 0x95, 0x58,
- /* (2^198)P */ 0x08, 0x2e, 0xea, 0x3f, 0x70, 0x5d, 0xcc, 0xe7, 0x8c, 0x18, 0xe2, 0x58, 0x12, 0x49, 0x0c, 0xb5, 0xf0, 0x5b, 0x20, 0x48, 0xaa, 0x0b, 0xe3, 0xcc, 0x62, 0x2d, 0xa3, 0xcf, 0x9c, 0x65, 0x7c, 0x53,
- /* (2^199)P */ 0x88, 0xc0, 0xcf, 0x98, 0x3a, 0x62, 0xb6, 0x37, 0xa4, 0xac, 0xd6, 0xa4, 0x1f, 0xed, 0x9b, 0xfe, 0xb0, 0xd1, 0xa8, 0x56, 0x8e, 0x9b, 0xd2, 0x04, 0x75, 0x95, 0x51, 0x0b, 0xc4, 0x71, 0x5f, 0x72,
- /* (2^200)P */ 0xe6, 0x9c, 0x33, 0xd0, 0x9c, 0xf8, 0xc7, 0x28, 0x8b, 0xc1, 0xdd, 0x69, 0x44, 0xb1, 0x67, 0x83, 0x2c, 0x65, 0xa1, 0xa6, 0x83, 0xda, 0x3a, 0x88, 0x17, 0x6c, 0x4d, 0x03, 0x74, 0x19, 0x5f, 0x58,
- /* (2^201)P */ 0x88, 0x91, 0xb1, 0xf1, 0x66, 0xb2, 0xcf, 0x89, 0x17, 0x52, 0xc3, 0xe7, 0x63, 0x48, 0x3b, 0xe6, 0x6a, 0x52, 0xc0, 0xb4, 0xa6, 0x9d, 0x8c, 0xd8, 0x35, 0x46, 0x95, 0xf0, 0x9d, 0x5c, 0x03, 0x3e,
- /* (2^202)P */ 0x9d, 0xde, 0x45, 0xfb, 0x12, 0x54, 0x9d, 0xdd, 0x0d, 0xf4, 0xcf, 0xe4, 0x32, 0x45, 0x68, 0xdd, 0x1c, 0x67, 0x1d, 0x15, 0x9b, 0x99, 0x5c, 0x4b, 0x90, 0xf6, 0xe7, 0x11, 0xc8, 0x2c, 0x8c, 0x2d,
- /* (2^203)P */ 0x40, 0x5d, 0x05, 0x90, 0x1d, 0xbe, 0x54, 0x7f, 0x40, 0xaf, 0x4a, 0x46, 0xdf, 0xc5, 0x64, 0xa4, 0xbe, 0x17, 0xe9, 0xf0, 0x24, 0x96, 0x97, 0x33, 0x30, 0x6b, 0x35, 0x27, 0xc5, 0x8d, 0x01, 0x2c,
- /* (2^204)P */ 0xd4, 0xb3, 0x30, 0xe3, 0x24, 0x50, 0x41, 0xa5, 0xd3, 0x52, 0x16, 0x69, 0x96, 0x3d, 0xff, 0x73, 0xf1, 0x59, 0x9b, 0xef, 0xc4, 0x42, 0xec, 0x94, 0x5a, 0x8e, 0xd0, 0x18, 0x16, 0x20, 0x47, 0x07,
- /* (2^205)P */ 0x53, 0x1c, 0x41, 0xca, 0x8a, 0xa4, 0x6c, 0x4d, 0x19, 0x61, 0xa6, 0xcf, 0x2f, 0x5f, 0x41, 0x66, 0xff, 0x27, 0xe2, 0x51, 0x00, 0xd4, 0x4d, 0x9c, 0xeb, 0xf7, 0x02, 0x9a, 0xc0, 0x0b, 0x81, 0x59,
- /* (2^206)P */ 0x1d, 0x10, 0xdc, 0xb3, 0x71, 0xb1, 0x7e, 0x2a, 0x8e, 0xf6, 0xfe, 0x9f, 0xb9, 0x5a, 0x1c, 0x44, 0xea, 0x59, 0xb3, 0x93, 0x9b, 0x5c, 0x02, 0x32, 0x2f, 0x11, 0x9d, 0x1e, 0xa7, 0xe0, 0x8c, 0x5e,
- /* (2^207)P */ 0xfd, 0x03, 0x95, 0x42, 0x92, 0xcb, 0xcc, 0xbf, 0x55, 0x5d, 0x09, 0x2f, 0x75, 0xba, 0x71, 0xd2, 0x1e, 0x09, 0x2d, 0x97, 0x5e, 0xad, 0x5e, 0x34, 0xba, 0x03, 0x31, 0xa8, 0x11, 0xdf, 0xc8, 0x18,
- /* (2^208)P */ 0x4c, 0x0f, 0xed, 0x9a, 0x9a, 0x94, 0xcd, 0x90, 0x7e, 0xe3, 0x60, 0x66, 0xcb, 0xf4, 0xd1, 0xc5, 0x0b, 0x2e, 0xc5, 0x56, 0x2d, 0xc5, 0xca, 0xb8, 0x0d, 0x8e, 0x80, 0xc5, 0x00, 0xe4, 0x42, 0x6e,
- /* (2^209)P */ 0x23, 0xfd, 0xae, 0xee, 0x66, 0x69, 0xb4, 0xa3, 0xca, 0xcd, 0x9e, 0xe3, 0x0b, 0x1f, 0x4f, 0x0c, 0x1d, 0xa5, 0x83, 0xd6, 0xc9, 0xc8, 0x9d, 0x18, 0x1b, 0x35, 0x09, 0x4c, 0x05, 0x7f, 0xf2, 0x51,
- /* (2^210)P */ 0x82, 0x06, 0x32, 0x2a, 0xcd, 0x7c, 0x48, 0x4c, 0x96, 0x1c, 0xdf, 0xb3, 0x5b, 0xa9, 0x7e, 0x58, 0xe8, 0xb8, 0x5c, 0x55, 0x9e, 0xf7, 0xcc, 0xc8, 0x3d, 0xd7, 0x06, 0xa2, 0x29, 0xc8, 0x7d, 0x54,
- /* (2^211)P */ 0x06, 0x9b, 0xc3, 0x80, 0xcd, 0xa6, 0x22, 0xb8, 0xc6, 0xd4, 0x00, 0x20, 0x73, 0x54, 0x6d, 0xe9, 0x4d, 0x3b, 0x46, 0x91, 0x6f, 0x5b, 0x53, 0x28, 0x1d, 0x6e, 0x48, 0xe2, 0x60, 0x46, 0x8f, 0x22,
- /* (2^212)P */ 0xbf, 0x3a, 0x8d, 0xde, 0x38, 0x95, 0x79, 0x98, 0x6e, 0xca, 0xeb, 0x45, 0x00, 0x33, 0xd8, 0x8c, 0x38, 0xe7, 0x21, 0x82, 0x00, 0x2a, 0x95, 0x79, 0xbb, 0xd2, 0x5c, 0x53, 0xa7, 0xe1, 0x22, 0x43,
- /* (2^213)P */ 0x1c, 0x80, 0xd1, 0x19, 0x18, 0xc1, 0x14, 0xb1, 0xc7, 0x5e, 0x3f, 0x4f, 0xd8, 0xe4, 0x16, 0x20, 0x4c, 0x0f, 0x26, 0x09, 0xf4, 0x2d, 0x0e, 0xdd, 0x66, 0x72, 0x5f, 0xae, 0xc0, 0x62, 0xc3, 0x5e,
- /* (2^214)P */ 0xee, 0xb4, 0xb2, 0xb8, 0x18, 0x2b, 0x46, 0xc0, 0xfb, 0x1a, 0x4d, 0x27, 0x50, 0xd9, 0xc8, 0x7c, 0xd2, 0x02, 0x6b, 0x43, 0x05, 0x71, 0x5f, 0xf2, 0xd3, 0xcc, 0xf9, 0xbf, 0xdc, 0xf8, 0xbb, 0x43,
- /* (2^215)P */ 0xdf, 0xe9, 0x39, 0xa0, 0x67, 0x17, 0xad, 0xb6, 0x83, 0x35, 0x9d, 0xf6, 0xa8, 0x4d, 0x71, 0xb0, 0xf5, 0x31, 0x29, 0xb4, 0x18, 0xfa, 0x55, 0x5e, 0x61, 0x09, 0xc6, 0x33, 0x8f, 0x55, 0xd5, 0x4e,
- /* (2^216)P */ 0xdd, 0xa5, 0x47, 0xc6, 0x01, 0x79, 0xe3, 0x1f, 0x57, 0xd3, 0x81, 0x80, 0x1f, 0xdf, 0x3d, 0x59, 0xa6, 0xd7, 0x3f, 0x81, 0xfd, 0xa4, 0x49, 0x02, 0x61, 0xaf, 0x9c, 0x4e, 0x27, 0xca, 0xac, 0x69,
- /* (2^217)P */ 0xc9, 0x21, 0x07, 0x33, 0xea, 0xa3, 0x7b, 0x04, 0xa0, 0x1e, 0x7e, 0x0e, 0xc2, 0x3f, 0x42, 0x83, 0x60, 0x4a, 0x31, 0x01, 0xaf, 0xc0, 0xf4, 0x1d, 0x27, 0x95, 0x28, 0x89, 0xab, 0x2d, 0xa6, 0x09,
- /* (2^218)P */ 0x00, 0xcb, 0xc6, 0x9c, 0xa4, 0x25, 0xb3, 0xa5, 0xb6, 0x6c, 0xb5, 0x54, 0xc6, 0x5d, 0x4b, 0xe9, 0xa0, 0x94, 0xc9, 0xad, 0x79, 0x87, 0xe2, 0x3b, 0xad, 0x4a, 0x3a, 0xba, 0xf8, 0xe8, 0x96, 0x42,
- /* (2^219)P */ 0xab, 0x1e, 0x45, 0x1e, 0x76, 0x89, 0x86, 0x32, 0x4a, 0x59, 0x59, 0xff, 0x8b, 0x59, 0x4d, 0x2e, 0x4a, 0x08, 0xa7, 0xd7, 0x53, 0x68, 0xb9, 0x49, 0xa8, 0x20, 0x14, 0x60, 0x19, 0xa3, 0x80, 0x49,
- /* (2^220)P */ 0x42, 0x2c, 0x55, 0x2f, 0xe1, 0xb9, 0x65, 0x95, 0x96, 0xfe, 0x00, 0x71, 0xdb, 0x18, 0x53, 0x8a, 0xd7, 0xd0, 0xad, 0x43, 0x4d, 0x0b, 0xc9, 0x05, 0xda, 0x4e, 0x5d, 0x6a, 0xd6, 0x4c, 0x8b, 0x53,
- /* (2^221)P */ 0x9f, 0x03, 0x9f, 0xe8, 0xc3, 0x4f, 0xe9, 0xf4, 0x45, 0x80, 0x61, 0x6f, 0xf2, 0x9a, 0x2c, 0x59, 0x50, 0x95, 0x4b, 0xfd, 0xb5, 0x6e, 0xa3, 0x08, 0x19, 0x14, 0xed, 0xc2, 0xf6, 0xfa, 0xff, 0x25,
- /* (2^222)P */ 0x54, 0xd3, 0x79, 0xcc, 0x59, 0x44, 0x43, 0x34, 0x6b, 0x47, 0xd5, 0xb1, 0xb4, 0xbf, 0xec, 0xee, 0x99, 0x5d, 0x61, 0x61, 0xa0, 0x34, 0xeb, 0xdd, 0x73, 0xb7, 0x64, 0xeb, 0xcc, 0xce, 0x29, 0x51,
- /* (2^223)P */ 0x20, 0x35, 0x99, 0x94, 0x58, 0x21, 0x43, 0xee, 0x3b, 0x0b, 0x4c, 0xf1, 0x7c, 0x9c, 0x2f, 0x77, 0xd5, 0xda, 0xbe, 0x06, 0xe3, 0xfc, 0xe2, 0xd2, 0x97, 0x6a, 0xf0, 0x46, 0xb5, 0x42, 0x5f, 0x71,
- /* (2^224)P */ 0x1a, 0x5f, 0x5b, 0xda, 0xce, 0xcd, 0x4e, 0x43, 0xa9, 0x41, 0x97, 0xa4, 0x15, 0x71, 0xa1, 0x0d, 0x2e, 0xad, 0xed, 0x73, 0x7c, 0xd7, 0x0b, 0x68, 0x41, 0x90, 0xdd, 0x4e, 0x35, 0x02, 0x7c, 0x48,
- /* (2^225)P */ 0xc4, 0xd9, 0x0e, 0xa7, 0xf3, 0xef, 0xef, 0xb8, 0x02, 0xe3, 0x57, 0xe8, 0xa3, 0x2a, 0xa3, 0x56, 0xa0, 0xa5, 0xa2, 0x48, 0xbd, 0x68, 0x3a, 0xdf, 0x44, 0xc4, 0x76, 0x31, 0xb7, 0x50, 0xf6, 0x07,
- /* (2^226)P */ 0xb1, 0xcc, 0xe0, 0x26, 0x16, 0x9b, 0x8b, 0xe3, 0x36, 0xfb, 0x09, 0x8b, 0xc1, 0x53, 0xe0, 0x79, 0x64, 0x49, 0xf9, 0xc9, 0x19, 0x03, 0xd9, 0x56, 0xc4, 0xf5, 0x9f, 0xac, 0xe7, 0x41, 0xa9, 0x1c,
- /* (2^227)P */ 0xbb, 0xa0, 0x2f, 0x16, 0x29, 0xdf, 0xc4, 0x49, 0x05, 0x33, 0xb3, 0x82, 0x32, 0xcf, 0x88, 0x84, 0x7d, 0x43, 0xbb, 0xca, 0x14, 0xda, 0xdf, 0x95, 0x86, 0xad, 0xd5, 0x64, 0x82, 0xf7, 0x91, 0x33,
- /* (2^228)P */ 0x5d, 0x09, 0xb5, 0xe2, 0x6a, 0xe0, 0x9a, 0x72, 0x46, 0xa9, 0x59, 0x32, 0xd7, 0x58, 0x8a, 0xd5, 0xed, 0x21, 0x39, 0xd1, 0x62, 0x42, 0x83, 0xe9, 0x92, 0xb5, 0x4b, 0xa5, 0xfa, 0xda, 0xfe, 0x27,
- /* (2^229)P */ 0xbb, 0x48, 0xad, 0x29, 0xb8, 0xc5, 0x9d, 0xa9, 0x60, 0xe2, 0x9e, 0x49, 0x42, 0x57, 0x02, 0x5f, 0xfd, 0x13, 0x75, 0x5d, 0xcd, 0x8e, 0x2c, 0x80, 0x38, 0xd9, 0x6d, 0x3f, 0xef, 0xb3, 0xce, 0x78,
- /* (2^230)P */ 0x94, 0x5d, 0x13, 0x8a, 0x4f, 0xf4, 0x42, 0xc3, 0xa3, 0xdd, 0x8c, 0x82, 0x44, 0xdb, 0x9e, 0x7b, 0xe7, 0xcf, 0x37, 0x05, 0x1a, 0xd1, 0x36, 0x94, 0xc8, 0xb4, 0x1a, 0xec, 0x64, 0xb1, 0x64, 0x50,
- /* (2^231)P */ 0xfc, 0xb2, 0x7e, 0xd3, 0xcf, 0xec, 0x20, 0x70, 0xfc, 0x25, 0x0d, 0xd9, 0x3e, 0xea, 0x31, 0x1f, 0x34, 0xbb, 0xa1, 0xdf, 0x7b, 0x0d, 0x93, 0x1b, 0x44, 0x30, 0x11, 0x48, 0x7a, 0x46, 0x44, 0x53,
- /* (2^232)P */ 0xfb, 0x6d, 0x5e, 0xf2, 0x70, 0x31, 0x07, 0x70, 0xc8, 0x4c, 0x11, 0x50, 0x1a, 0xdc, 0x85, 0xe3, 0x00, 0x4f, 0xfc, 0xc8, 0x8a, 0x69, 0x48, 0x23, 0xd8, 0x40, 0xdd, 0x84, 0x52, 0xa5, 0x77, 0x2a,
- /* (2^233)P */ 0xe4, 0x6c, 0x8c, 0xc9, 0xe0, 0xaf, 0x06, 0xfe, 0xe4, 0xd6, 0xdf, 0xdd, 0x96, 0xdf, 0x35, 0xc2, 0xd3, 0x1e, 0xbf, 0x33, 0x1e, 0xd0, 0x28, 0x14, 0xaf, 0xbd, 0x00, 0x93, 0xec, 0x68, 0x57, 0x78,
- /* (2^234)P */ 0x3b, 0xb6, 0xde, 0x91, 0x7a, 0xe5, 0x02, 0x97, 0x80, 0x8b, 0xce, 0xe5, 0xbf, 0xb8, 0xbd, 0x61, 0xac, 0x58, 0x1d, 0x3d, 0x6f, 0x42, 0x5b, 0x64, 0xbc, 0x57, 0xa5, 0x27, 0x22, 0xa8, 0x04, 0x48,
- /* (2^235)P */ 0x01, 0x26, 0x4d, 0xb4, 0x8a, 0x04, 0x57, 0x8e, 0x35, 0x69, 0x3a, 0x4b, 0x1a, 0x50, 0xd6, 0x68, 0x93, 0xc2, 0xe1, 0xf9, 0xc3, 0x9e, 0x9c, 0xc3, 0xe2, 0x63, 0xde, 0xd4, 0x57, 0xf2, 0x72, 0x41,
- /* (2^236)P */ 0x01, 0x64, 0x0c, 0x33, 0x50, 0xb4, 0x68, 0xd3, 0x91, 0x23, 0x8f, 0x41, 0x17, 0x30, 0x0d, 0x04, 0x0d, 0xd9, 0xb7, 0x90, 0x60, 0xbb, 0x34, 0x2c, 0x1f, 0xd5, 0xdf, 0x8f, 0x22, 0x49, 0xf6, 0x16,
- /* (2^237)P */ 0xf5, 0x8e, 0x92, 0x2b, 0x8e, 0x81, 0xa6, 0xbe, 0x72, 0x1e, 0xc1, 0xcd, 0x91, 0xcf, 0x8c, 0xe2, 0xcd, 0x36, 0x7a, 0xe7, 0x68, 0xaa, 0x4a, 0x59, 0x0f, 0xfd, 0x7f, 0x6c, 0x80, 0x34, 0x30, 0x31,
- /* (2^238)P */ 0x65, 0xbd, 0x49, 0x22, 0xac, 0x27, 0x9d, 0x8a, 0x12, 0x95, 0x8e, 0x01, 0x64, 0xb4, 0xa3, 0x19, 0xc7, 0x7e, 0xb3, 0x52, 0xf3, 0xcf, 0x6c, 0xc2, 0x21, 0x7b, 0x79, 0x1d, 0x34, 0x68, 0x6f, 0x05,
- /* (2^239)P */ 0x27, 0x23, 0xfd, 0x7e, 0x75, 0xd6, 0x79, 0x5e, 0x15, 0xfe, 0x3a, 0x55, 0xb6, 0xbc, 0xbd, 0xfa, 0x60, 0x5a, 0xaf, 0x6e, 0x2c, 0x22, 0xe7, 0xd3, 0x3b, 0x74, 0xae, 0x4d, 0x6d, 0xc7, 0x46, 0x70,
- /* (2^240)P */ 0x55, 0x4a, 0x8d, 0xb1, 0x72, 0xe8, 0x0b, 0x66, 0x96, 0x14, 0x4e, 0x57, 0x18, 0x25, 0x99, 0x19, 0xbb, 0xdc, 0x2b, 0x30, 0x3a, 0x05, 0x03, 0xc1, 0x8e, 0x8e, 0x21, 0x0b, 0x80, 0xe9, 0xd8, 0x3e,
- /* (2^241)P */ 0x3e, 0xe0, 0x75, 0xfa, 0x39, 0x92, 0x0b, 0x7b, 0x83, 0xc0, 0x33, 0x46, 0x68, 0xfb, 0xe9, 0xef, 0x93, 0x77, 0x1a, 0x39, 0xbe, 0x5f, 0xa3, 0x98, 0x34, 0xfe, 0xd0, 0xe2, 0x0f, 0x51, 0x65, 0x60,
- /* (2^242)P */ 0x0c, 0xad, 0xab, 0x48, 0x85, 0x66, 0xcb, 0x55, 0x27, 0xe5, 0x87, 0xda, 0x48, 0x45, 0x58, 0xb4, 0xdd, 0xc1, 0x07, 0x01, 0xea, 0xec, 0x43, 0x2c, 0x35, 0xde, 0x72, 0x93, 0x80, 0x28, 0x60, 0x52,
- /* (2^243)P */ 0x1f, 0x3b, 0x21, 0xf9, 0x6a, 0xc5, 0x15, 0x34, 0xdb, 0x98, 0x7e, 0x01, 0x4d, 0x1a, 0xee, 0x5b, 0x9b, 0x70, 0xcf, 0xb5, 0x05, 0xb1, 0xf6, 0x13, 0xb6, 0x9a, 0xb2, 0x82, 0x34, 0x0e, 0xf2, 0x5f,
- /* (2^244)P */ 0x90, 0x6c, 0x2e, 0xcc, 0x75, 0x9c, 0xa2, 0x0a, 0x06, 0xe2, 0x70, 0x3a, 0xca, 0x73, 0x7d, 0xfc, 0x15, 0xc5, 0xb5, 0xc4, 0x8f, 0xc3, 0x9f, 0x89, 0x07, 0xc2, 0xff, 0x24, 0xb1, 0x86, 0x03, 0x25,
- /* (2^245)P */ 0x56, 0x2b, 0x3d, 0xae, 0xd5, 0x28, 0xea, 0x54, 0xce, 0x60, 0xde, 0xd6, 0x9d, 0x14, 0x13, 0x99, 0xc1, 0xd6, 0x06, 0x8f, 0xc5, 0x4f, 0x69, 0x16, 0xc7, 0x8f, 0x01, 0xeb, 0x75, 0x39, 0xb2, 0x46,
- /* (2^246)P */ 0xe2, 0xb4, 0xb7, 0xb4, 0x0f, 0x6a, 0x0a, 0x47, 0xde, 0x53, 0x72, 0x8f, 0x5a, 0x47, 0x92, 0x5d, 0xdb, 0x3a, 0xbd, 0x2f, 0xb5, 0xe5, 0xee, 0xab, 0x68, 0x69, 0x80, 0xa0, 0x01, 0x08, 0xa2, 0x7f,
- /* (2^247)P */ 0xd2, 0x14, 0x77, 0x9f, 0xf1, 0xfa, 0xf3, 0x76, 0xc3, 0x60, 0x46, 0x2f, 0xc1, 0x40, 0xe8, 0xb3, 0x4e, 0x74, 0x12, 0xf2, 0x8d, 0xcd, 0xb4, 0x0f, 0xd2, 0x2d, 0x3a, 0x1d, 0x25, 0x5a, 0x06, 0x4b,
- /* (2^248)P */ 0x4a, 0xcd, 0x77, 0x3d, 0x38, 0xde, 0xeb, 0x5c, 0xb1, 0x9c, 0x2c, 0x88, 0xdf, 0x39, 0xdf, 0x6a, 0x59, 0xf7, 0x9a, 0xb0, 0x2e, 0x24, 0xdd, 0xa2, 0x22, 0x64, 0x5f, 0x0e, 0xe5, 0xc0, 0x47, 0x31,
- /* (2^249)P */ 0xdb, 0x50, 0x13, 0x1d, 0x10, 0xa5, 0x4c, 0x16, 0x62, 0xc9, 0x3f, 0xc3, 0x79, 0x34, 0xd1, 0xf8, 0x08, 0xda, 0xe5, 0x13, 0x4d, 0xce, 0x40, 0xe6, 0xba, 0xf8, 0x61, 0x50, 0xc4, 0xe0, 0xde, 0x4b,
- /* (2^250)P */ 0xc9, 0xb1, 0xed, 0xa4, 0xc1, 0x6d, 0xc4, 0xd7, 0x8a, 0xd9, 0x7f, 0x43, 0xb6, 0xd7, 0x14, 0x55, 0x0b, 0xc0, 0xa1, 0xb2, 0x6b, 0x2f, 0x94, 0x58, 0x0e, 0x71, 0x70, 0x1d, 0xab, 0xb2, 0xff, 0x2d,
- /* (2^251)P */ 0x68, 0x6d, 0x8b, 0xc1, 0x2f, 0xcf, 0xdf, 0xcc, 0x67, 0x61, 0x80, 0xb7, 0xa8, 0xcb, 0xeb, 0xa8, 0xe3, 0x37, 0x29, 0x5e, 0xf9, 0x97, 0x06, 0x98, 0x8c, 0x6e, 0x12, 0xd0, 0x1c, 0xba, 0xfb, 0x02,
- /* (2^252)P */ 0x65, 0x45, 0xff, 0xad, 0x60, 0xc3, 0x98, 0xcb, 0x19, 0x15, 0xdb, 0x4b, 0xd2, 0x01, 0x71, 0x44, 0xd5, 0x15, 0xfb, 0x75, 0x74, 0xc8, 0xc4, 0x98, 0x7d, 0xa2, 0x22, 0x6e, 0x6d, 0xc7, 0xf8, 0x05,
- /* (2^253)P */ 0x94, 0xf4, 0xb9, 0xfe, 0xdf, 0xe5, 0x69, 0xab, 0x75, 0x6b, 0x40, 0x18, 0x9d, 0xc7, 0x09, 0xae, 0x1d, 0x2d, 0xa4, 0x94, 0xfb, 0x45, 0x9b, 0x19, 0x84, 0xfa, 0x2a, 0xae, 0xeb, 0x0a, 0x71, 0x79,
- /* (2^254)P */ 0xdf, 0xd2, 0x34, 0xf3, 0xa7, 0xed, 0xad, 0xa6, 0xb4, 0x57, 0x2a, 0xaf, 0x51, 0x9c, 0xde, 0x7b, 0xa8, 0xea, 0xdc, 0x86, 0x4f, 0xc6, 0x8f, 0xa9, 0x7b, 0xd0, 0x0e, 0xc2, 0x35, 0x03, 0xbe, 0x6b,
- /* (2^255)P */ 0x44, 0x43, 0x98, 0x53, 0xbe, 0xdc, 0x7f, 0x66, 0xa8, 0x49, 0x59, 0x00, 0x1c, 0xbc, 0x72, 0x07, 0x8e, 0xd6, 0xbe, 0x4e, 0x9f, 0xa4, 0x07, 0xba, 0xbf, 0x30, 0xdf, 0xba, 0x85, 0xb0, 0xa7, 0x1f,
-}
diff --git a/vendor/github.com/cloudflare/circl/dh/x448/curve.go b/vendor/github.com/cloudflare/circl/dh/x448/curve.go
deleted file mode 100644
index d59564e4..00000000
--- a/vendor/github.com/cloudflare/circl/dh/x448/curve.go
+++ /dev/null
@@ -1,104 +0,0 @@
-package x448
-
-import (
- fp "github.com/cloudflare/circl/math/fp448"
-)
-
-// ladderJoye calculates a fixed-point multiplication with the generator point.
-// The algorithm is Joye's right-to-left ladder, as described
-// in "How to precompute a ladder" (SAC 2017).
-func ladderJoye(k *Key) {
- w := [5]fp.Elt{} // [mu,x1,z1,x2,z2] order must be preserved.
- w[1] = fp.Elt{ // x1 = S
- 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- }
- fp.SetOne(&w[2]) // z1 = 1
- w[3] = fp.Elt{ // x2 = G-S
- 0x20, 0x27, 0x9d, 0xc9, 0x7d, 0x19, 0xb1, 0xac,
- 0xf8, 0xba, 0x69, 0x1c, 0xff, 0x33, 0xac, 0x23,
- 0x51, 0x1b, 0xce, 0x3a, 0x64, 0x65, 0xbd, 0xf1,
- 0x23, 0xf8, 0xc1, 0x84, 0x9d, 0x45, 0x54, 0x29,
- 0x67, 0xb9, 0x81, 0x1c, 0x03, 0xd1, 0xcd, 0xda,
- 0x7b, 0xeb, 0xff, 0x1a, 0x88, 0x03, 0xcf, 0x3a,
- 0x42, 0x44, 0x32, 0x01, 0x25, 0xb7, 0xfa, 0xf0,
- }
- fp.SetOne(&w[4]) // z2 = 1
-
- const n = 448
- const h = 2
- swap := uint(1)
- for s := 0; s < n-h; s++ {
- i := (s + h) / 8
- j := (s + h) % 8
- bit := uint((k[i] >> uint(j)) & 1)
- copy(w[0][:], tableGenerator[s*Size:(s+1)*Size])
- diffAdd(&w, swap^bit)
- swap = bit
- }
- for s := 0; s < h; s++ {
- double(&w[1], &w[2])
- }
- toAffine((*[fp.Size]byte)(k), &w[1], &w[2])
-}
-
-// ladderMontgomery calculates a generic scalar point multiplication.
-// The algorithm implemented is the left-to-right Montgomery ladder.
-func ladderMontgomery(k, xP *Key) {
- w := [5]fp.Elt{} // [x1, x2, z2, x3, z3] order must be preserved.
- w[0] = *(*fp.Elt)(xP) // x1 = xP
- fp.SetOne(&w[1]) // x2 = 1
- w[3] = *(*fp.Elt)(xP) // x3 = xP
- fp.SetOne(&w[4]) // z3 = 1
-
- move := uint(0)
- for s := 448 - 1; s >= 0; s-- {
- i := s / 8
- j := s % 8
- bit := uint((k[i] >> uint(j)) & 1)
- ladderStep(&w, move^bit)
- move = bit
- }
- toAffine((*[fp.Size]byte)(k), &w[1], &w[2])
-}
-
-func toAffine(k *[fp.Size]byte, x, z *fp.Elt) {
- fp.Inv(z, z)
- fp.Mul(x, x, z)
- _ = fp.ToBytes(k[:], x)
-}
-
-var lowOrderPoints = [3]fp.Elt{
- { /* (0,_,1) point of order 2 on Curve448 */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- },
- { /* (1,_,1) a point of order 4 on the twist of Curve448 */
- 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- },
- { /* (-1,_,1) point of order 4 on Curve448 */
- 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- },
-}
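
The two ladders deleted above differ only in scheduling: ladderJoye scans the scalar right-to-left against the precomputed tableGenerator, while ladderMontgomery is the classic left-to-right ladder over an arbitrary point. For orientation, here is a minimal, variable-time big.Int sketch of the latter, following the RFC 7748 pseudocode; every name in it is illustrative rather than part of the deleted package, and a real implementation must be constant-time like the code above.

package main

import (
	"fmt"
	"math/big"
)

// p448 = 2^448 - 2^224 - 1, the field prime used by fp448.
var p448 = func() *big.Int {
	p := new(big.Int).Lsh(big.NewInt(1), 448)
	p.Sub(p, new(big.Int).Lsh(big.NewInt(1), 224))
	return p.Sub(p, big.NewInt(1))
}()

var a24 = big.NewInt(39082) // (A+2)/4 for Curve448, the CTE_A24 constant

func add(x, y *big.Int) *big.Int { return new(big.Int).Mod(new(big.Int).Add(x, y), p448) }
func sub(x, y *big.Int) *big.Int { return new(big.Int).Mod(new(big.Int).Sub(x, y), p448) }
func mul(x, y *big.Int) *big.Int { return new(big.Int).Mod(new(big.Int).Mul(x, y), p448) }

// x448 returns the affine x-coordinate of k*P given P's x-coordinate u.
func x448(k [56]byte, u *big.Int) *big.Int {
	k[0] &= 252 // clamp, mirroring Key.clamp in key.go
	k[55] |= 128
	x1 := new(big.Int).Mod(u, p448)
	x2, z2 := big.NewInt(1), big.NewInt(0)
	x3, z3 := new(big.Int).Set(x1), big.NewInt(1)
	for t := 447; t >= 0; t-- {
		bit := (k[t/8] >> uint(t%8)) & 1
		if bit == 1 { // variable-time swap; the deleted code uses fp448.Cmov/Cswap
			x2, x3, z2, z3 = x3, x2, z3, z2
		}
		a, b := add(x2, z2), sub(x2, z2)
		c, d := add(x3, z3), sub(x3, z3)
		aa, bb := mul(a, a), mul(b, b)
		e := sub(aa, bb)
		da, cb := mul(d, a), mul(c, b)
		s, m := add(da, cb), sub(da, cb)
		x3, z3 = mul(s, s), mul(x1, mul(m, m))
		x2, z2 = mul(aa, bb), mul(e, add(aa, mul(a24, e)))
		if bit == 1 {
			x2, x3, z2, z3 = x3, x2, z3, z2
		}
	}
	zinv := new(big.Int).Exp(z2, new(big.Int).Sub(p448, big.NewInt(2)), p448)
	return mul(x2, zinv)
}

func main() {
	var k [56]byte
	k[0] = 3
	fmt.Printf("%x\n", x448(k, big.NewInt(5))) // the Curve448 base point has u = 5
}
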
diff --git a/vendor/github.com/cloudflare/circl/dh/x448/curve_amd64.go b/vendor/github.com/cloudflare/circl/dh/x448/curve_amd64.go
deleted file mode 100644
index a0622666..00000000
--- a/vendor/github.com/cloudflare/circl/dh/x448/curve_amd64.go
+++ /dev/null
@@ -1,30 +0,0 @@
-//go:build amd64 && !purego
-// +build amd64,!purego
-
-package x448
-
-import (
- fp "github.com/cloudflare/circl/math/fp448"
- "golang.org/x/sys/cpu"
-)
-
-var hasBmi2Adx = cpu.X86.HasBMI2 && cpu.X86.HasADX
-
-var _ = hasBmi2Adx
-
-func double(x, z *fp.Elt) { doubleAmd64(x, z) }
-func diffAdd(w *[5]fp.Elt, b uint) { diffAddAmd64(w, b) }
-func ladderStep(w *[5]fp.Elt, b uint) { ladderStepAmd64(w, b) }
-func mulA24(z, x *fp.Elt) { mulA24Amd64(z, x) }
-
-//go:noescape
-func doubleAmd64(x, z *fp.Elt)
-
-//go:noescape
-func diffAddAmd64(w *[5]fp.Elt, b uint)
-
-//go:noescape
-func ladderStepAmd64(w *[5]fp.Elt, b uint)
-
-//go:noescape
-func mulA24Amd64(z, x *fp.Elt)
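
hasBmi2Adx is evaluated once at package initialization and then consulted by the CHECK_BMI2ADX macro in the assembly: on CPUs with both BMI2 and ADX the MULX/ADCX-style path is taken, otherwise the legacy MULQ path. A standalone sketch of the same detection (the printed strings are illustrative only):

package main

import (
	"fmt"

	"golang.org/x/sys/cpu"
)

// Detected once at startup, the same condition the deleted curve_amd64.go uses.
var hasBmi2Adx = cpu.X86.HasBMI2 && cpu.X86.HasADX

func main() {
	if hasBmi2Adx {
		fmt.Println("BMI2+ADX available: fast carry-chain path")
	} else {
		fmt.Println("falling back to the legacy MULQ/ADCQ path")
	}
}
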
diff --git a/vendor/github.com/cloudflare/circl/dh/x448/curve_amd64.h b/vendor/github.com/cloudflare/circl/dh/x448/curve_amd64.h
deleted file mode 100644
index 8c1ae4d0..00000000
--- a/vendor/github.com/cloudflare/circl/dh/x448/curve_amd64.h
+++ /dev/null
@@ -1,111 +0,0 @@
-#define ladderStepLeg \
- addSub(x2,z2) \
- addSub(x3,z3) \
- integerMulLeg(b0,x2,z3) \
- integerMulLeg(b1,x3,z2) \
- reduceFromDoubleLeg(t0,b0) \
- reduceFromDoubleLeg(t1,b1) \
- addSub(t0,t1) \
- cselect(x2,x3,regMove) \
- cselect(z2,z3,regMove) \
- integerSqrLeg(b0,t0) \
- integerSqrLeg(b1,t1) \
- reduceFromDoubleLeg(x3,b0) \
- reduceFromDoubleLeg(z3,b1) \
- integerMulLeg(b0,x1,z3) \
- reduceFromDoubleLeg(z3,b0) \
- integerSqrLeg(b0,x2) \
- integerSqrLeg(b1,z2) \
- reduceFromDoubleLeg(x2,b0) \
- reduceFromDoubleLeg(z2,b1) \
- subtraction(t0,x2,z2) \
- multiplyA24Leg(t1,t0) \
- additionLeg(t1,t1,z2) \
- integerMulLeg(b0,x2,z2) \
- integerMulLeg(b1,t0,t1) \
- reduceFromDoubleLeg(x2,b0) \
- reduceFromDoubleLeg(z2,b1)
-
-#define ladderStepBmi2Adx \
- addSub(x2,z2) \
- addSub(x3,z3) \
- integerMulAdx(b0,x2,z3) \
- integerMulAdx(b1,x3,z2) \
- reduceFromDoubleAdx(t0,b0) \
- reduceFromDoubleAdx(t1,b1) \
- addSub(t0,t1) \
- cselect(x2,x3,regMove) \
- cselect(z2,z3,regMove) \
- integerSqrAdx(b0,t0) \
- integerSqrAdx(b1,t1) \
- reduceFromDoubleAdx(x3,b0) \
- reduceFromDoubleAdx(z3,b1) \
- integerMulAdx(b0,x1,z3) \
- reduceFromDoubleAdx(z3,b0) \
- integerSqrAdx(b0,x2) \
- integerSqrAdx(b1,z2) \
- reduceFromDoubleAdx(x2,b0) \
- reduceFromDoubleAdx(z2,b1) \
- subtraction(t0,x2,z2) \
- multiplyA24Adx(t1,t0) \
- additionAdx(t1,t1,z2) \
- integerMulAdx(b0,x2,z2) \
- integerMulAdx(b1,t0,t1) \
- reduceFromDoubleAdx(x2,b0) \
- reduceFromDoubleAdx(z2,b1)
-
-#define difAddLeg \
- addSub(x1,z1) \
- integerMulLeg(b0,z1,ui) \
- reduceFromDoubleLeg(z1,b0) \
- addSub(x1,z1) \
- integerSqrLeg(b0,x1) \
- integerSqrLeg(b1,z1) \
- reduceFromDoubleLeg(x1,b0) \
- reduceFromDoubleLeg(z1,b1) \
- integerMulLeg(b0,x1,z2) \
- integerMulLeg(b1,z1,x2) \
- reduceFromDoubleLeg(x1,b0) \
- reduceFromDoubleLeg(z1,b1)
-
-#define difAddBmi2Adx \
- addSub(x1,z1) \
- integerMulAdx(b0,z1,ui) \
- reduceFromDoubleAdx(z1,b0) \
- addSub(x1,z1) \
- integerSqrAdx(b0,x1) \
- integerSqrAdx(b1,z1) \
- reduceFromDoubleAdx(x1,b0) \
- reduceFromDoubleAdx(z1,b1) \
- integerMulAdx(b0,x1,z2) \
- integerMulAdx(b1,z1,x2) \
- reduceFromDoubleAdx(x1,b0) \
- reduceFromDoubleAdx(z1,b1)
-
-#define doubleLeg \
- addSub(x1,z1) \
- integerSqrLeg(b0,x1) \
- integerSqrLeg(b1,z1) \
- reduceFromDoubleLeg(x1,b0) \
- reduceFromDoubleLeg(z1,b1) \
- subtraction(t0,x1,z1) \
- multiplyA24Leg(t1,t0) \
- additionLeg(t1,t1,z1) \
- integerMulLeg(b0,x1,z1) \
- integerMulLeg(b1,t0,t1) \
- reduceFromDoubleLeg(x1,b0) \
- reduceFromDoubleLeg(z1,b1)
-
-#define doubleBmi2Adx \
- addSub(x1,z1) \
- integerSqrAdx(b0,x1) \
- integerSqrAdx(b1,z1) \
- reduceFromDoubleAdx(x1,b0) \
- reduceFromDoubleAdx(z1,b1) \
- subtraction(t0,x1,z1) \
- multiplyA24Adx(t1,t0) \
- additionAdx(t1,t1,z1) \
- integerMulAdx(b0,x1,z1) \
- integerMulAdx(b1,t0,t1) \
- reduceFromDoubleAdx(x1,b0) \
- reduceFromDoubleAdx(z1,b1)
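
Every macro above is built from a small vocabulary: addSub(x,z) is an in-place butterfly that replaces (x, z) with (x+z, x-z), the integerMul*/integerSqr* macros produce double-width products into b0/b1, and reduceFromDouble* folds those back to 56 bytes. The butterfly is the step easiest to misread, so here is a field-agnostic sketch of it (hypothetical helper, big.Int used purely for clarity):

package main

import (
	"fmt"
	"math/big"
)

// addSub replaces (x, z) with (x+z, x-z) mod p, the butterfly that the
// ladder-step, differential-addition, and doubling macros all start with.
func addSub(x, z, p *big.Int) {
	t := new(big.Int).Add(x, z)
	z.Sub(x, z).Mod(z, p)
	x.Mod(t, p)
}

func main() {
	p := big.NewInt(23)
	x, z := big.NewInt(7), big.NewInt(9)
	addSub(x, z, p)
	fmt.Println(x, z) // 16 21 (since 7+9 = 16 and 7-9 = -2 = 21 mod 23)
}
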
diff --git a/vendor/github.com/cloudflare/circl/dh/x448/curve_amd64.s b/vendor/github.com/cloudflare/circl/dh/x448/curve_amd64.s
deleted file mode 100644
index 810aa9e6..00000000
--- a/vendor/github.com/cloudflare/circl/dh/x448/curve_amd64.s
+++ /dev/null
@@ -1,193 +0,0 @@
-// +build amd64
-
-#include "textflag.h"
-
-// Depends on circl/math/fp448 package
-#include "../../math/fp448/fp_amd64.h"
-#include "curve_amd64.h"
-
-// CTE_A24 is (A+2)/4 from Curve448
-#define CTE_A24 39082
-
-#define Size 56
-
-// multiplyA24Leg multiplies x by CTE_A24 and stores the result in z
-// Uses: AX, DX, R8-R15, FLAGS
-// Instr: x86_64
-#define multiplyA24Leg(z,x) \
- MOVQ $CTE_A24, R15; \
- MOVQ 0+x, AX; MULQ R15; MOVQ AX, R8; ;;;;;;;;;;;; MOVQ DX, R9; \
- MOVQ 8+x, AX; MULQ R15; ADDQ AX, R9; ADCQ $0, DX; MOVQ DX, R10; \
- MOVQ 16+x, AX; MULQ R15; ADDQ AX, R10; ADCQ $0, DX; MOVQ DX, R11; \
- MOVQ 24+x, AX; MULQ R15; ADDQ AX, R11; ADCQ $0, DX; MOVQ DX, R12; \
- MOVQ 32+x, AX; MULQ R15; ADDQ AX, R12; ADCQ $0, DX; MOVQ DX, R13; \
- MOVQ 40+x, AX; MULQ R15; ADDQ AX, R13; ADCQ $0, DX; MOVQ DX, R14; \
- MOVQ 48+x, AX; MULQ R15; ADDQ AX, R14; ADCQ $0, DX; \
- MOVQ DX, AX; \
- SHLQ $32, AX; \
- ADDQ DX, R8; MOVQ $0, DX; \
- ADCQ $0, R9; \
- ADCQ $0, R10; \
- ADCQ AX, R11; \
- ADCQ $0, R12; \
- ADCQ $0, R13; \
- ADCQ $0, R14; \
- ADCQ $0, DX; \
- MOVQ DX, AX; \
- SHLQ $32, AX; \
- ADDQ DX, R8; \
- ADCQ $0, R9; \
- ADCQ $0, R10; \
- ADCQ AX, R11; \
- ADCQ $0, R12; \
- ADCQ $0, R13; \
- ADCQ $0, R14; \
- MOVQ R8, 0+z; \
- MOVQ R9, 8+z; \
- MOVQ R10, 16+z; \
- MOVQ R11, 24+z; \
- MOVQ R12, 32+z; \
- MOVQ R13, 40+z; \
- MOVQ R14, 48+z;
-
-// multiplyA24Adx multiplies x by CTE_A24 and stores the result in z
-// Uses: AX, DX, R8-R14, FLAGS
-// Instr: x86_64, bmi2
-#define multiplyA24Adx(z,x) \
- MOVQ $CTE_A24, DX; \
- MULXQ 0+x, R8, R9; \
- MULXQ 8+x, AX, R10; ADDQ AX, R9; \
- MULXQ 16+x, AX, R11; ADCQ AX, R10; \
- MULXQ 24+x, AX, R12; ADCQ AX, R11; \
- MULXQ 32+x, AX, R13; ADCQ AX, R12; \
- MULXQ 40+x, AX, R14; ADCQ AX, R13; \
- MULXQ 48+x, AX, DX; ADCQ AX, R14; \
- ;;;;;;;;;;;;;;;;;;;; ADCQ $0, DX; \
- MOVQ DX, AX; \
- SHLQ $32, AX; \
- ADDQ DX, R8; MOVQ $0, DX; \
- ADCQ $0, R9; \
- ADCQ $0, R10; \
- ADCQ AX, R11; \
- ADCQ $0, R12; \
- ADCQ $0, R13; \
- ADCQ $0, R14; \
- ADCQ $0, DX; \
- MOVQ DX, AX; \
- SHLQ $32, AX; \
- ADDQ DX, R8; \
- ADCQ $0, R9; \
- ADCQ $0, R10; \
- ADCQ AX, R11; \
- ADCQ $0, R12; \
- ADCQ $0, R13; \
- ADCQ $0, R14; \
- MOVQ R8, 0+z; \
- MOVQ R9, 8+z; \
- MOVQ R10, 16+z; \
- MOVQ R11, 24+z; \
- MOVQ R12, 32+z; \
- MOVQ R13, 40+z; \
- MOVQ R14, 48+z;
-
-#define mulA24Legacy \
- multiplyA24Leg(0(DI),0(SI))
-#define mulA24Bmi2Adx \
- multiplyA24Adx(0(DI),0(SI))
-
-// func mulA24Amd64(z, x *fp448.Elt)
-TEXT ·mulA24Amd64(SB),NOSPLIT,$0-16
- MOVQ z+0(FP), DI
- MOVQ x+8(FP), SI
- CHECK_BMI2ADX(LMA24, mulA24Legacy, mulA24Bmi2Adx)
-
-// func ladderStepAmd64(w *[5]fp448.Elt, b uint)
-// ladderStepAmd64 calculates a point addition and doubling as follows:
-// (x2,z2) = 2*(x2,z2) and (x3,z3) = (x2,z2)+(x3,z3), using (x1,-) as the difference.
-// w = {x1,x2,z2,x3,z3} are five fp448.Elt of 56 bytes.
-// stack = (t0,t1) are two fp.Elt of fp.Size bytes, and
-// (b0,b1) are two double-precision fp.Elt of 2*fp.Size bytes.
-TEXT ·ladderStepAmd64(SB),NOSPLIT,$336-16
- // Parameters
- #define regWork DI
- #define regMove SI
- #define x1 0*Size(regWork)
- #define x2 1*Size(regWork)
- #define z2 2*Size(regWork)
- #define x3 3*Size(regWork)
- #define z3 4*Size(regWork)
- // Local variables
- #define t0 0*Size(SP)
- #define t1 1*Size(SP)
- #define b0 2*Size(SP)
- #define b1 4*Size(SP)
- MOVQ w+0(FP), regWork
- MOVQ b+8(FP), regMove
- CHECK_BMI2ADX(LLADSTEP, ladderStepLeg, ladderStepBmi2Adx)
- #undef regWork
- #undef regMove
- #undef x1
- #undef x2
- #undef z2
- #undef x3
- #undef z3
- #undef t0
- #undef t1
- #undef b0
- #undef b1
-
-// func diffAddAmd64(work *[5]fp.Elt, swap uint)
-// diffAddAmd64 calculates a differential point addition using a precomputed point.
-// (x1,z1) = (x1,z1)+(mu), using (x2,z2) as the difference point.
-// work = {mu,x1,z1,x2,z2} are five fp448.Elt of 56 bytes, and
-// stack = (b0,b1) are two double-precision fp.Elt of 2*fp.Size bytes.
-// This is Equation 7 at https://eprint.iacr.org/2017/264.
-TEXT ·diffAddAmd64(SB),NOSPLIT,$224-16
- // Parameters
- #define regWork DI
- #define regSwap SI
- #define ui 0*Size(regWork)
- #define x1 1*Size(regWork)
- #define z1 2*Size(regWork)
- #define x2 3*Size(regWork)
- #define z2 4*Size(regWork)
- // Local variables
- #define b0 0*Size(SP)
- #define b1 2*Size(SP)
- MOVQ w+0(FP), regWork
- MOVQ b+8(FP), regSwap
- cswap(x1,x2,regSwap)
- cswap(z1,z2,regSwap)
- CHECK_BMI2ADX(LDIFADD, difAddLeg, difAddBmi2Adx)
- #undef regWork
- #undef regSwap
- #undef ui
- #undef x1
- #undef z1
- #undef x2
- #undef z2
- #undef b0
- #undef b1
-
-// func doubleAmd64(x, z *fp448.Elt)
-// doubleAmd64 calculates a point doubling (x1,z1) = 2*(x1,z1).
-// stack = (t0,t1) are two fp.Elt of fp.Size bytes, and
-// (b0,b1) are two double-precision fp.Elt of 2*fp.Size bytes.
-TEXT ·doubleAmd64(SB),NOSPLIT,$336-16
- // Parameters
- #define x1 0(DI)
- #define z1 0(SI)
- // Local variables
- #define t0 0*Size(SP)
- #define t1 1*Size(SP)
- #define b0 2*Size(SP)
- #define b1 4*Size(SP)
- MOVQ x+0(FP), DI
- MOVQ z+8(FP), SI
- CHECK_BMI2ADX(LDOUB,doubleLeg,doubleBmi2Adx)
- #undef x1
- #undef z1
- #undef t0
- #undef t1
- #undef b0
- #undef b1
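
The frame sizes in the TEXT directives follow from the doc comments: an fp.Elt scratch slot is 56 bytes and a double-precision slot is 2*56 = 112, so ladderStepAmd64 and doubleAmd64 reserve 2*56 + 2*112 = 336 bytes (t0, t1, b0, b1) while diffAddAmd64 reserves 2*112 = 224 (b0, b1). The trailing 16 in each directive is the argument size, two 8-byte pointers. A sanity check of that arithmetic:

package main

import "fmt"

const (
	fpSize          = 56                      // fp448.Size
	doubleSize      = 2 * fpSize              // unreduced double-precision element
	ladderStepFrame = 2*fpSize + 2*doubleSize // t0, t1, b0, b1
	diffAddFrame    = 2 * doubleSize          // b0, b1
)

func main() {
	fmt.Println(ladderStepFrame, diffAddFrame) // 336 224
}
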
diff --git a/vendor/github.com/cloudflare/circl/dh/x448/curve_generic.go b/vendor/github.com/cloudflare/circl/dh/x448/curve_generic.go
deleted file mode 100644
index b0b65ccf..00000000
--- a/vendor/github.com/cloudflare/circl/dh/x448/curve_generic.go
+++ /dev/null
@@ -1,100 +0,0 @@
-package x448
-
-import (
- "encoding/binary"
- "math/bits"
-
- "github.com/cloudflare/circl/math/fp448"
-)
-
-func doubleGeneric(x, z *fp448.Elt) {
- t0, t1 := &fp448.Elt{}, &fp448.Elt{}
- fp448.AddSub(x, z)
- fp448.Sqr(x, x)
- fp448.Sqr(z, z)
- fp448.Sub(t0, x, z)
- mulA24Generic(t1, t0)
- fp448.Add(t1, t1, z)
- fp448.Mul(x, x, z)
- fp448.Mul(z, t0, t1)
-}
-
-func diffAddGeneric(w *[5]fp448.Elt, b uint) {
- mu, x1, z1, x2, z2 := &w[0], &w[1], &w[2], &w[3], &w[4]
- fp448.Cswap(x1, x2, b)
- fp448.Cswap(z1, z2, b)
- fp448.AddSub(x1, z1)
- fp448.Mul(z1, z1, mu)
- fp448.AddSub(x1, z1)
- fp448.Sqr(x1, x1)
- fp448.Sqr(z1, z1)
- fp448.Mul(x1, x1, z2)
- fp448.Mul(z1, z1, x2)
-}
-
-func ladderStepGeneric(w *[5]fp448.Elt, b uint) {
- x1, x2, z2, x3, z3 := &w[0], &w[1], &w[2], &w[3], &w[4]
- t0 := &fp448.Elt{}
- t1 := &fp448.Elt{}
- fp448.AddSub(x2, z2)
- fp448.AddSub(x3, z3)
- fp448.Mul(t0, x2, z3)
- fp448.Mul(t1, x3, z2)
- fp448.AddSub(t0, t1)
- fp448.Cmov(x2, x3, b)
- fp448.Cmov(z2, z3, b)
- fp448.Sqr(x3, t0)
- fp448.Sqr(z3, t1)
- fp448.Mul(z3, x1, z3)
- fp448.Sqr(x2, x2)
- fp448.Sqr(z2, z2)
- fp448.Sub(t0, x2, z2)
- mulA24Generic(t1, t0)
- fp448.Add(t1, t1, z2)
- fp448.Mul(x2, x2, z2)
- fp448.Mul(z2, t0, t1)
-}
-
-func mulA24Generic(z, x *fp448.Elt) {
- const A24 = 39082
- const n = 8
- var xx [7]uint64
- for i := range xx {
- xx[i] = binary.LittleEndian.Uint64(x[i*n : (i+1)*n])
- }
- h0, l0 := bits.Mul64(xx[0], A24)
- h1, l1 := bits.Mul64(xx[1], A24)
- h2, l2 := bits.Mul64(xx[2], A24)
- h3, l3 := bits.Mul64(xx[3], A24)
- h4, l4 := bits.Mul64(xx[4], A24)
- h5, l5 := bits.Mul64(xx[5], A24)
- h6, l6 := bits.Mul64(xx[6], A24)
-
- l1, c0 := bits.Add64(h0, l1, 0)
- l2, c1 := bits.Add64(h1, l2, c0)
- l3, c2 := bits.Add64(h2, l3, c1)
- l4, c3 := bits.Add64(h3, l4, c2)
- l5, c4 := bits.Add64(h4, l5, c3)
- l6, c5 := bits.Add64(h5, l6, c4)
- l7, _ := bits.Add64(h6, 0, c5)
-
- l0, c0 = bits.Add64(l0, l7, 0)
- l1, c1 = bits.Add64(l1, 0, c0)
- l2, c2 = bits.Add64(l2, 0, c1)
- l3, c3 = bits.Add64(l3, l7<<32, c2)
- l4, c4 = bits.Add64(l4, 0, c3)
- l5, c5 = bits.Add64(l5, 0, c4)
- l6, l7 = bits.Add64(l6, 0, c5)
-
- xx[0], c0 = bits.Add64(l0, l7, 0)
- xx[1], c1 = bits.Add64(l1, 0, c0)
- xx[2], c2 = bits.Add64(l2, 0, c1)
- xx[3], c3 = bits.Add64(l3, l7<<32, c2)
- xx[4], c4 = bits.Add64(l4, 0, c3)
- xx[5], c5 = bits.Add64(l5, 0, c4)
- xx[6], _ = bits.Add64(l6, 0, c5)
-
- for i := range xx {
- binary.LittleEndian.PutUint64(z[i*n:(i+1)*n], xx[i])
- }
-}
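
mulA24Generic is worth unpacking: it multiplies seven little-endian 64-bit limbs by the constant 39082 with bits.Mul64, chains the cross terms with bits.Add64, and then folds the top carry l7 twice using p = 2^448 - 2^224 - 1, i.e. 2^448 = 2^224 + 1 (mod p): l7 is added at limb 0 and l7<<32 into limb 3, since bit 224 sits 32 bits into the fourth limb. A two-limb toy version of the same multiply-and-chain primitive (names hypothetical):

package main

import (
	"fmt"
	"math/bits"
)

// mulSmall multiplies a two-limb little-endian value by a small constant,
// returning three limbs, using the same Mul64/Add64 chaining as mulA24Generic.
func mulSmall(x [2]uint64, c uint64) [3]uint64 {
	h0, l0 := bits.Mul64(x[0], c)
	h1, l1 := bits.Mul64(x[1], c)
	var out [3]uint64
	var carry uint64
	out[0] = l0
	out[1], carry = bits.Add64(h0, l1, 0)
	out[2], _ = bits.Add64(h1, 0, carry) // h1 + carry cannot overflow 64 bits
	return out
}

func main() {
	fmt.Println(mulSmall([2]uint64{^uint64(0), ^uint64(0)}, 39082))
}
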
diff --git a/vendor/github.com/cloudflare/circl/dh/x448/curve_noasm.go b/vendor/github.com/cloudflare/circl/dh/x448/curve_noasm.go
deleted file mode 100644
index 3755b7c8..00000000
--- a/vendor/github.com/cloudflare/circl/dh/x448/curve_noasm.go
+++ /dev/null
@@ -1,11 +0,0 @@
-//go:build !amd64 || purego
-// +build !amd64 purego
-
-package x448
-
-import fp "github.com/cloudflare/circl/math/fp448"
-
-func double(x, z *fp.Elt) { doubleGeneric(x, z) }
-func diffAdd(w *[5]fp.Elt, b uint) { diffAddGeneric(w, b) }
-func ladderStep(w *[5]fp.Elt, b uint) { ladderStepGeneric(w, b) }
-func mulA24(z, x *fp.Elt) { mulA24Generic(z, x) }
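
These stubs are what the build uses whenever the assembly backend is excluded. Note the constraint "!amd64 || purego": the generic path can also be forced on amd64 by building with "go build -tags purego", so the pure-Go and assembly implementations are interchangeable behind the same four function names.
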
diff --git a/vendor/github.com/cloudflare/circl/dh/x448/doc.go b/vendor/github.com/cloudflare/circl/dh/x448/doc.go
deleted file mode 100644
index c02904fe..00000000
--- a/vendor/github.com/cloudflare/circl/dh/x448/doc.go
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
-Package x448 provides Diffie-Hellman functions as specified in RFC-7748.
-
-Validation of public keys.
-
-The Diffie-Hellman function, as described in RFC-7748 [1], works for any
-public key. However, if a different protocol requires contributory
-behaviour [2,3], then the public keys must be validated against low-order
-points [3,4]. To that end, the Shared function performs this validation
-internally and returns false when the public key is invalid (i.e., it
-is a low-order point).
-
-References:
- - [1] RFC7748 by Langley, Hamburg, Turner (https://rfc-editor.org/rfc/rfc7748.txt)
- - [2] Curve25519 by Bernstein (https://cr.yp.to/ecdh.html)
- - [3] Bernstein (https://cr.yp.to/ecdh.html#validate)
- - [4] Cremers&Jackson (https://eprint.iacr.org/2019/526)
-*/
-package x448
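
The contract described here is visible in key.go below: KeyGen derives a public key from a secret key, and Shared returns false for a low-order peer key. A minimal usage sketch against the API being removed from the vendor tree (error strings illustrative):

package main

import (
	"crypto/rand"
	"fmt"

	"github.com/cloudflare/circl/dh/x448"
)

func mustRead(b []byte) {
	if _, err := rand.Read(b); err != nil {
		panic(err)
	}
}

func main() {
	var aliceSecret, alicePublic, bobSecret, bobPublic x448.Key
	mustRead(aliceSecret[:])
	mustRead(bobSecret[:])
	x448.KeyGen(&alicePublic, &aliceSecret)
	x448.KeyGen(&bobPublic, &bobSecret)

	var sharedA, sharedB x448.Key
	okA := x448.Shared(&sharedA, &aliceSecret, &bobPublic)
	okB := x448.Shared(&sharedB, &bobSecret, &alicePublic)
	if !okA || !okB {
		panic("peer sent a low-order public key") // enforce contributory behaviour
	}
	fmt.Println(sharedA == sharedB) // true
}
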
diff --git a/vendor/github.com/cloudflare/circl/dh/x448/key.go b/vendor/github.com/cloudflare/circl/dh/x448/key.go
deleted file mode 100644
index 2fdde511..00000000
--- a/vendor/github.com/cloudflare/circl/dh/x448/key.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package x448
-
-import (
- "crypto/subtle"
-
- fp "github.com/cloudflare/circl/math/fp448"
-)
-
-// Size is the length in bytes of an X448 key.
-const Size = 56
-
-// Key represents an X448 key.
-type Key [Size]byte
-
-func (k *Key) clamp(in *Key) *Key {
- *k = *in
- k[0] &= 252
- k[55] |= 128
- return k
-}
-
-// isValidPubKey verifies that the public key is not a low-order point.
-func (k *Key) isValidPubKey() bool {
- fp.Modp((*fp.Elt)(k))
- var isLowOrder int
- for _, P := range lowOrderPoints {
- isLowOrder |= subtle.ConstantTimeCompare(P[:], k[:])
- }
- return isLowOrder == 0
-}
-
-// KeyGen obtains a public key given a secret key.
-func KeyGen(public, secret *Key) {
- ladderJoye(public.clamp(secret))
-}
-
-// Shared calculates Alice's shared key from Alice's secret key and Bob's
-// public key, returning true on success. Failure occurs when the public
-// key is a low-order point; in that case the shared key is all-zeros and
-// the function returns false.
-func Shared(shared, secret, public *Key) bool {
- validPk := *public
- ok := validPk.isValidPubKey()
- ladderMontgomery(shared.clamp(secret), &validPk)
- return ok
-}
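
Two details above are easy to miss: clamp forces the scalar into the shape RFC 7748 expects (low two bits cleared, top bit set), and isValidPubKey accumulates comparison results with OR instead of returning early, so the check takes the same time whichever low-order point, if any, matches. The accumulation pattern in isolation (hypothetical standalone sketch):

package main

import (
	"crypto/subtle"
	"fmt"
)

// notInList reports whether k matches none of the entries, examining
// every entry so timing does not reveal which one (if any) matched.
func notInList(k []byte, list [][]byte) bool {
	match := 0
	for _, e := range list {
		match |= subtle.ConstantTimeCompare(e, k) // 1 if equal, else 0
	}
	return match == 0
}

func main() {
	bad := [][]byte{{0, 0}, {1, 0}}
	fmt.Println(notInList([]byte{2, 0}, bad)) // true
	fmt.Println(notInList([]byte{1, 0}, bad)) // false
}
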
diff --git a/vendor/github.com/cloudflare/circl/dh/x448/table.go b/vendor/github.com/cloudflare/circl/dh/x448/table.go
deleted file mode 100644
index eef53c30..00000000
--- a/vendor/github.com/cloudflare/circl/dh/x448/table.go
+++ /dev/null
@@ -1,460 +0,0 @@
-package x448
-
-import fp "github.com/cloudflare/circl/math/fp448"
-
-// tableGenerator contains the set of points:
-//
-// t[i] = (xi+1)/(xi-1),
-//
-// where (xi,yi) = (2^i)G and G is the generator point.
-// Size = (448)*(448/8) = 25088 bytes.
-var tableGenerator = [448 * fp.Size]byte{
- /* (2^ 0)P */ 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f,
- /* (2^ 1)P */ 0x37, 0xfa, 0xaa, 0x0d, 0x86, 0xa6, 0x24, 0xe9, 0x6c, 0x95, 0x08, 0x34, 0xba, 0x1a, 0x81, 0x3a, 0xae, 0x01, 0xa5, 0xa7, 0x05, 0x85, 0x96, 0x00, 0x06, 0x5a, 0xd7, 0xff, 0xee, 0x8e, 0x8f, 0x94, 0xd2, 0xdc, 0xd7, 0xfc, 0xe7, 0xe5, 0x99, 0x1d, 0x05, 0x46, 0x43, 0xe8, 0xbc, 0x12, 0xb7, 0xeb, 0x30, 0x5e, 0x7a, 0x85, 0x68, 0xed, 0x9d, 0x28,
- /* (2^ 2)P */ 0xf1, 0x7d, 0x08, 0x2b, 0x32, 0x4a, 0x62, 0x80, 0x36, 0xe7, 0xa4, 0x76, 0x5a, 0x2a, 0x1e, 0xf7, 0x9e, 0x3c, 0x40, 0x46, 0x9a, 0x1b, 0x61, 0xc1, 0xbf, 0x1a, 0x1b, 0xae, 0x91, 0x80, 0xa3, 0x76, 0x6c, 0xd4, 0x8f, 0xa4, 0xee, 0x26, 0x39, 0x23, 0xa4, 0x80, 0xf4, 0x66, 0x92, 0xe4, 0xe1, 0x18, 0x76, 0xc5, 0xe2, 0x19, 0x87, 0xd5, 0xc3, 0xe8,
- /* (2^ 3)P */ 0xfb, 0xc9, 0xf0, 0x07, 0xf2, 0x93, 0xd8, 0x50, 0x36, 0xed, 0xfb, 0xbd, 0xb2, 0xd3, 0xfc, 0xdf, 0xd5, 0x2a, 0x6e, 0x26, 0x09, 0xce, 0xd4, 0x07, 0x64, 0x9f, 0x40, 0x74, 0xad, 0x98, 0x2f, 0x1c, 0xb6, 0xdc, 0x2d, 0x42, 0xff, 0xbf, 0x97, 0xd8, 0xdb, 0xef, 0x99, 0xca, 0x73, 0x99, 0x1a, 0x04, 0x3b, 0x56, 0x2c, 0x1f, 0x87, 0x9d, 0x9f, 0x03,
- /* (2^ 4)P */ 0x4c, 0x35, 0x97, 0xf7, 0x81, 0x2c, 0x84, 0xa6, 0xe0, 0xcb, 0xce, 0x37, 0x4c, 0x21, 0x1c, 0x67, 0xfa, 0xab, 0x18, 0x4d, 0xef, 0xd0, 0xf0, 0x44, 0xa9, 0xfb, 0xc0, 0x8e, 0xda, 0x57, 0xa1, 0xd8, 0xeb, 0x87, 0xf4, 0x17, 0xea, 0x66, 0x0f, 0x16, 0xea, 0xcd, 0x5f, 0x3e, 0x88, 0xea, 0x09, 0x68, 0x40, 0xdf, 0x43, 0xcc, 0x54, 0x61, 0x58, 0xaa,
- /* (2^ 5)P */ 0x8d, 0xe7, 0x59, 0xd7, 0x5e, 0x63, 0x37, 0xa7, 0x3f, 0xd1, 0x49, 0x85, 0x01, 0xdd, 0x5e, 0xb3, 0xe6, 0x29, 0xcb, 0x25, 0x93, 0xdd, 0x08, 0x96, 0x83, 0x52, 0x76, 0x85, 0xf5, 0x5d, 0x02, 0xbf, 0xe9, 0x6d, 0x15, 0x27, 0xc1, 0x09, 0xd1, 0x14, 0x4d, 0x6e, 0xe8, 0xaf, 0x59, 0x58, 0x34, 0x9d, 0x2a, 0x99, 0x85, 0x26, 0xbe, 0x4b, 0x1e, 0xb9,
- /* (2^ 6)P */ 0x8d, 0xce, 0x94, 0xe2, 0x18, 0x56, 0x0d, 0x82, 0x8e, 0xdf, 0x85, 0x01, 0x8f, 0x93, 0x3c, 0xc6, 0xbd, 0x61, 0xfb, 0xf4, 0x22, 0xc5, 0x16, 0x87, 0xd1, 0xb1, 0x9e, 0x09, 0xc5, 0x83, 0x2e, 0x4a, 0x07, 0x88, 0xee, 0xe0, 0x29, 0x8d, 0x2e, 0x1f, 0x88, 0xad, 0xfd, 0x18, 0x93, 0xb7, 0xed, 0x42, 0x86, 0x78, 0xf0, 0xb8, 0x70, 0xbe, 0x01, 0x67,
- /* (2^ 7)P */ 0xdf, 0x62, 0x2d, 0x94, 0xc7, 0x35, 0x23, 0xda, 0x27, 0xbb, 0x2b, 0xdb, 0x30, 0x80, 0x68, 0x16, 0xa3, 0xae, 0xd7, 0xd2, 0xa7, 0x7c, 0xbf, 0x6a, 0x1d, 0x83, 0xde, 0x96, 0x0a, 0x43, 0xb6, 0x30, 0x37, 0xd6, 0xee, 0x63, 0x59, 0x9a, 0xbf, 0xa3, 0x30, 0x6c, 0xaf, 0x0c, 0xee, 0x3d, 0xcb, 0x35, 0x4b, 0x55, 0x5f, 0x84, 0x85, 0xcb, 0x4f, 0x1e,
- /* (2^ 8)P */ 0x9d, 0x04, 0x68, 0x89, 0xa4, 0xa9, 0x0d, 0x87, 0xc1, 0x70, 0xf1, 0xeb, 0xfb, 0x47, 0x0a, 0xf0, 0xde, 0x67, 0xb7, 0x94, 0xcd, 0x36, 0x43, 0xa5, 0x49, 0x43, 0x67, 0xc3, 0xee, 0x3c, 0x6b, 0xec, 0xd0, 0x1a, 0xf4, 0xad, 0xef, 0x06, 0x4a, 0xe8, 0x46, 0x24, 0xd7, 0x93, 0xbf, 0xf0, 0xe3, 0x81, 0x61, 0xec, 0xea, 0x64, 0xfe, 0x67, 0xeb, 0xc7,
- /* (2^ 9)P */ 0x95, 0x45, 0x79, 0xcf, 0x2c, 0xfd, 0x9b, 0xfe, 0x84, 0x46, 0x4b, 0x8f, 0xa1, 0xcf, 0xc3, 0x04, 0x94, 0x78, 0xdb, 0xc9, 0xa6, 0x01, 0x75, 0xa4, 0xb4, 0x93, 0x72, 0x43, 0xa7, 0x7d, 0xda, 0x31, 0x38, 0x54, 0xab, 0x4e, 0x3f, 0x89, 0xa6, 0xab, 0x57, 0xc0, 0x16, 0x65, 0xdb, 0x92, 0x96, 0xe4, 0xc8, 0xae, 0xe7, 0x4c, 0x7a, 0xeb, 0xbb, 0x5a,
- /* (2^ 10)P */ 0xbe, 0xfe, 0x86, 0xc3, 0x97, 0xe0, 0x6a, 0x18, 0x20, 0x21, 0xca, 0x22, 0x55, 0xa1, 0xeb, 0xf5, 0x74, 0xe5, 0xc9, 0x59, 0xa7, 0x92, 0x65, 0x15, 0x08, 0x71, 0xd1, 0x09, 0x7e, 0x83, 0xfc, 0xbc, 0x5a, 0x93, 0x38, 0x0d, 0x43, 0x42, 0xfd, 0x76, 0x30, 0xe8, 0x63, 0x60, 0x09, 0x8d, 0x6c, 0xd3, 0xf8, 0x56, 0x3d, 0x68, 0x47, 0xab, 0xa0, 0x1d,
- /* (2^ 11)P */ 0x38, 0x50, 0x1c, 0xb1, 0xac, 0x88, 0x8f, 0x38, 0xe3, 0x69, 0xe6, 0xfc, 0x4f, 0x8f, 0xe1, 0x9b, 0xb1, 0x1a, 0x09, 0x39, 0x19, 0xdf, 0xcd, 0x98, 0x7b, 0x64, 0x42, 0xf6, 0x11, 0xea, 0xc7, 0xe8, 0x92, 0x65, 0x00, 0x2c, 0x75, 0xb5, 0x94, 0x1e, 0x5b, 0xa6, 0x66, 0x81, 0x77, 0xf3, 0x39, 0x94, 0xac, 0xbd, 0xe4, 0x2a, 0x66, 0x84, 0x9c, 0x60,
- /* (2^ 12)P */ 0xb5, 0xb6, 0xd9, 0x03, 0x67, 0xa4, 0xa8, 0x0a, 0x4a, 0x2b, 0x9d, 0xfa, 0x13, 0xe1, 0x99, 0x25, 0x4a, 0x5c, 0x67, 0xb9, 0xb2, 0xb7, 0xdd, 0x1e, 0xaf, 0xeb, 0x63, 0x41, 0xb6, 0xb9, 0xa0, 0x87, 0x0a, 0xe0, 0x06, 0x07, 0xaa, 0x97, 0xf8, 0xf9, 0x38, 0x4f, 0xdf, 0x0c, 0x40, 0x7c, 0xc3, 0x98, 0xa9, 0x74, 0xf1, 0x5d, 0xda, 0xd1, 0xc0, 0x0a,
- /* (2^ 13)P */ 0xf2, 0x0a, 0xab, 0xab, 0x94, 0x50, 0xf0, 0xa3, 0x6f, 0xc6, 0x66, 0xba, 0xa6, 0xdc, 0x44, 0xdd, 0xd6, 0x08, 0xf4, 0xd3, 0xed, 0xb1, 0x40, 0x93, 0xee, 0xf6, 0xb8, 0x8e, 0xb4, 0x7c, 0xb9, 0x82, 0xc9, 0x9d, 0x45, 0x3b, 0x8e, 0x10, 0xcb, 0x70, 0x1e, 0xba, 0x3c, 0x62, 0x50, 0xda, 0xa9, 0x93, 0xb5, 0xd7, 0xd0, 0x6f, 0x29, 0x52, 0x95, 0xae,
- /* (2^ 14)P */ 0x14, 0x68, 0x69, 0x23, 0xa8, 0x44, 0x87, 0x9e, 0x22, 0x91, 0xe8, 0x92, 0xdf, 0xf7, 0xae, 0xba, 0x1c, 0x96, 0xe1, 0xc3, 0x94, 0xed, 0x6c, 0x95, 0xae, 0x96, 0xa7, 0x15, 0x9f, 0xf1, 0x17, 0x11, 0x92, 0x42, 0xd5, 0xcd, 0x18, 0xe7, 0xa9, 0xb5, 0x2f, 0xcd, 0xde, 0x6c, 0xc9, 0x7d, 0xfc, 0x7e, 0xbd, 0x7f, 0x10, 0x3d, 0x01, 0x00, 0x8d, 0x95,
- /* (2^ 15)P */ 0x3b, 0x76, 0x72, 0xae, 0xaf, 0x84, 0xf2, 0xf7, 0xd1, 0x6d, 0x13, 0x9c, 0x47, 0xe1, 0xb7, 0xa3, 0x19, 0x16, 0xee, 0x75, 0x45, 0xf6, 0x1a, 0x7b, 0x78, 0x49, 0x79, 0x05, 0x86, 0xf0, 0x7f, 0x9f, 0xfc, 0xc4, 0xbd, 0x86, 0xf3, 0x41, 0xa7, 0xfe, 0x01, 0xd5, 0x67, 0x16, 0x10, 0x5b, 0xa5, 0x16, 0xf3, 0x7f, 0x60, 0xce, 0xd2, 0x0c, 0x8e, 0x4b,
- /* (2^ 16)P */ 0x4a, 0x07, 0x99, 0x4a, 0x0f, 0x74, 0x91, 0x14, 0x68, 0xb9, 0x48, 0xb7, 0x44, 0x77, 0x9b, 0x4a, 0xe0, 0x68, 0x0e, 0x43, 0x4d, 0x98, 0x98, 0xbf, 0xa8, 0x3a, 0xb7, 0x6d, 0x2a, 0x9a, 0x77, 0x5f, 0x62, 0xf5, 0x6b, 0x4a, 0xb7, 0x7d, 0xe5, 0x09, 0x6b, 0xc0, 0x8b, 0x9c, 0x88, 0x37, 0x33, 0xf2, 0x41, 0xac, 0x22, 0x1f, 0xcf, 0x3b, 0x82, 0x34,
- /* (2^ 17)P */ 0x00, 0xc3, 0x78, 0x42, 0x32, 0x2e, 0xdc, 0xda, 0xb1, 0x96, 0x21, 0xa4, 0xe4, 0xbb, 0xe9, 0x9d, 0xbb, 0x0f, 0x93, 0xed, 0x26, 0x3d, 0xb5, 0xdb, 0x94, 0x31, 0x37, 0x07, 0xa2, 0xb2, 0xd5, 0x99, 0x0d, 0x93, 0xe1, 0xce, 0x3f, 0x0b, 0x96, 0x82, 0x47, 0xfe, 0x60, 0x6f, 0x8f, 0x61, 0x88, 0xd7, 0x05, 0x95, 0x0b, 0x46, 0x06, 0xb7, 0x32, 0x06,
- /* (2^ 18)P */ 0x44, 0xf5, 0x34, 0xdf, 0x2f, 0x9c, 0x5d, 0x9f, 0x53, 0x5c, 0x42, 0x8f, 0xc9, 0xdc, 0xd8, 0x40, 0xa2, 0xe7, 0x6a, 0x4a, 0x05, 0xf7, 0x86, 0x77, 0x2b, 0xae, 0x37, 0xed, 0x48, 0xfb, 0xf7, 0x62, 0x7c, 0x17, 0x59, 0x92, 0x41, 0x61, 0x93, 0x38, 0x30, 0xd1, 0xef, 0x54, 0x54, 0x03, 0x17, 0x57, 0x91, 0x15, 0x11, 0x33, 0xb5, 0xfa, 0xfb, 0x17,
- /* (2^ 19)P */ 0x29, 0xbb, 0xd4, 0xb4, 0x9c, 0xf1, 0x72, 0x94, 0xce, 0x6a, 0x29, 0xa8, 0x89, 0x18, 0x19, 0xf7, 0xb7, 0xcc, 0xee, 0x9a, 0x02, 0xe3, 0xc0, 0xb1, 0xe0, 0xee, 0x83, 0x78, 0xb4, 0x9e, 0x07, 0x87, 0xdf, 0xb0, 0x82, 0x26, 0x4e, 0xa4, 0x0c, 0x33, 0xaf, 0x40, 0x59, 0xb6, 0xdd, 0x52, 0x45, 0xf0, 0xb4, 0xf6, 0xe8, 0x4e, 0x4e, 0x79, 0x1a, 0x5d,
- /* (2^ 20)P */ 0x27, 0x33, 0x4d, 0x4c, 0x6b, 0x4f, 0x75, 0xb1, 0xbc, 0x1f, 0xab, 0x5b, 0x2b, 0xf0, 0x1c, 0x57, 0x86, 0xdd, 0xfd, 0x60, 0xb0, 0x8c, 0xe7, 0x9a, 0xe5, 0x5c, 0xeb, 0x11, 0x3a, 0xda, 0x22, 0x25, 0x99, 0x06, 0x8d, 0xf4, 0xaf, 0x29, 0x7a, 0xc9, 0xe5, 0xd2, 0x16, 0x9e, 0xd4, 0x63, 0x1d, 0x64, 0xa6, 0x47, 0x96, 0x37, 0x6f, 0x93, 0x2c, 0xcc,
- /* (2^ 21)P */ 0xc1, 0x94, 0x74, 0x86, 0x75, 0xf2, 0x91, 0x58, 0x23, 0x85, 0x63, 0x76, 0x54, 0xc7, 0xb4, 0x8c, 0xbc, 0x4e, 0xc4, 0xa7, 0xba, 0xa0, 0x55, 0x26, 0x71, 0xd5, 0x33, 0x72, 0xc9, 0xad, 0x1e, 0xf9, 0x5d, 0x78, 0x70, 0x93, 0x4e, 0x85, 0xfc, 0x39, 0x06, 0x73, 0x76, 0xff, 0xe8, 0x64, 0x69, 0x42, 0x45, 0xb2, 0x69, 0xb5, 0x32, 0xe7, 0x2c, 0xde,
- /* (2^ 22)P */ 0xde, 0x16, 0xd8, 0x33, 0x49, 0x32, 0xe9, 0x0e, 0x3a, 0x60, 0xee, 0x2e, 0x24, 0x75, 0xe3, 0x9c, 0x92, 0x07, 0xdb, 0xad, 0x92, 0xf5, 0x11, 0xdf, 0xdb, 0xb0, 0x17, 0x5c, 0xd6, 0x1a, 0x70, 0x00, 0xb7, 0xe2, 0x18, 0xec, 0xdc, 0xc2, 0x02, 0x93, 0xb3, 0xc8, 0x3f, 0x4f, 0x1b, 0x96, 0xe6, 0x33, 0x8c, 0xfb, 0xcc, 0xa5, 0x4e, 0xe8, 0xe7, 0x11,
- /* (2^ 23)P */ 0x05, 0x7a, 0x74, 0x52, 0xf8, 0xdf, 0x0d, 0x7c, 0x6a, 0x1a, 0x4e, 0x9a, 0x02, 0x1d, 0xae, 0x77, 0xf8, 0x8e, 0xf9, 0xa2, 0x38, 0x54, 0x50, 0xb2, 0x2c, 0x08, 0x9d, 0x9b, 0x9f, 0xfb, 0x2b, 0x06, 0xde, 0x9d, 0xc2, 0x03, 0x0b, 0x22, 0x2b, 0x10, 0x5b, 0x3a, 0x73, 0x29, 0x8e, 0x3e, 0x37, 0x08, 0x2c, 0x3b, 0xf8, 0x80, 0xc1, 0x66, 0x1e, 0x98,
- /* (2^ 24)P */ 0xd8, 0xd6, 0x3e, 0xcd, 0x63, 0x8c, 0x2b, 0x41, 0x81, 0xc0, 0x0c, 0x06, 0x87, 0xd6, 0xe7, 0x92, 0xfe, 0xf1, 0x0c, 0x4a, 0x84, 0x5b, 0xaf, 0x40, 0x53, 0x6f, 0x60, 0xd6, 0x6b, 0x76, 0x4b, 0xc2, 0xad, 0xc9, 0xb6, 0xb6, 0x6a, 0xa2, 0xb3, 0xf5, 0xf5, 0xc2, 0x55, 0x83, 0xb2, 0xd3, 0xe9, 0x41, 0x6c, 0x63, 0x51, 0xb8, 0x81, 0x74, 0xc8, 0x2c,
- /* (2^ 25)P */ 0xb2, 0xaf, 0x1c, 0xee, 0x07, 0xb0, 0x58, 0xa8, 0x2c, 0x6a, 0xc9, 0x2d, 0x62, 0x28, 0x75, 0x0c, 0x40, 0xb6, 0x11, 0x33, 0x96, 0x80, 0x28, 0x6d, 0xd5, 0x9e, 0x87, 0x90, 0x01, 0x66, 0x1d, 0x1c, 0xf8, 0xb4, 0x92, 0xac, 0x38, 0x18, 0x05, 0xc2, 0x4c, 0x4b, 0x54, 0x7d, 0x80, 0x46, 0x87, 0x2d, 0x99, 0x8e, 0x70, 0x80, 0x69, 0x71, 0x8b, 0xed,
- /* (2^ 26)P */ 0x37, 0xa7, 0x6b, 0x71, 0x36, 0x75, 0x8e, 0xff, 0x0f, 0x42, 0xda, 0x5a, 0x46, 0xa6, 0x97, 0x79, 0x7e, 0x30, 0xb3, 0x8f, 0xc7, 0x3a, 0xa0, 0xcb, 0x1d, 0x9c, 0x78, 0x77, 0x36, 0xc2, 0xe7, 0xf4, 0x2f, 0x29, 0x07, 0xb1, 0x07, 0xfd, 0xed, 0x1b, 0x39, 0x77, 0x06, 0x38, 0x77, 0x0f, 0x50, 0x31, 0x12, 0xbf, 0x92, 0xbf, 0x72, 0x79, 0x54, 0xa9,
- /* (2^ 27)P */ 0xbd, 0x4d, 0x46, 0x6b, 0x1a, 0x80, 0x46, 0x2d, 0xed, 0xfd, 0x64, 0x6d, 0x94, 0xbc, 0x4a, 0x6e, 0x0c, 0x12, 0xf6, 0x12, 0xab, 0x54, 0x88, 0xd3, 0x85, 0xac, 0x51, 0xae, 0x6f, 0xca, 0xc4, 0xb7, 0xec, 0x22, 0x54, 0x6d, 0x80, 0xb2, 0x1c, 0x63, 0x33, 0x76, 0x6b, 0x8e, 0x6d, 0x59, 0xcd, 0x73, 0x92, 0x5f, 0xff, 0xad, 0x10, 0x35, 0x70, 0x5f,
- /* (2^ 28)P */ 0xb3, 0x84, 0xde, 0xc8, 0x04, 0x43, 0x63, 0xfa, 0x29, 0xd9, 0xf0, 0x69, 0x65, 0x5a, 0x0c, 0xe8, 0x2e, 0x0b, 0xfe, 0xb0, 0x7a, 0x42, 0xb3, 0xc3, 0xfc, 0xe6, 0xb8, 0x92, 0x29, 0xae, 0xed, 0xec, 0xd5, 0xe8, 0x4a, 0xa1, 0xbd, 0x3b, 0xd3, 0xc0, 0x07, 0xab, 0x65, 0x65, 0x35, 0x9a, 0xa6, 0x5e, 0x78, 0x18, 0x76, 0x1c, 0x15, 0x49, 0xe6, 0x75,
- /* (2^ 29)P */ 0x45, 0xb3, 0x92, 0xa9, 0xc3, 0xb8, 0x11, 0x68, 0x64, 0x3a, 0x83, 0x5d, 0xa8, 0x94, 0x6a, 0x9d, 0xaa, 0x27, 0x9f, 0x98, 0x5d, 0xc0, 0x29, 0xf0, 0xc0, 0x4b, 0x14, 0x3c, 0x05, 0xe7, 0xf8, 0xbd, 0x38, 0x22, 0x96, 0x75, 0x65, 0x5e, 0x0d, 0x3f, 0xbb, 0x6f, 0xe8, 0x3f, 0x96, 0x76, 0x9f, 0xba, 0xd9, 0x44, 0x92, 0x96, 0x22, 0xe7, 0x52, 0xe7,
- /* (2^ 30)P */ 0xf4, 0xa3, 0x95, 0x90, 0x47, 0xdf, 0x7d, 0xdc, 0xf4, 0x13, 0x87, 0x67, 0x7d, 0x4f, 0x9d, 0xa0, 0x00, 0x46, 0x72, 0x08, 0xc3, 0xa2, 0x7a, 0x3e, 0xe7, 0x6d, 0x52, 0x7c, 0x11, 0x36, 0x50, 0x83, 0x89, 0x64, 0xcb, 0x1f, 0x08, 0x83, 0x46, 0xcb, 0xac, 0xa6, 0xd8, 0x9c, 0x1b, 0xe8, 0x05, 0x47, 0xc7, 0x26, 0x06, 0x83, 0x39, 0xe9, 0xb1, 0x1c,
- /* (2^ 31)P */ 0x11, 0xe8, 0xc8, 0x42, 0xbf, 0x30, 0x9c, 0xa3, 0xf1, 0x85, 0x96, 0x95, 0x4f, 0x4f, 0x52, 0xa2, 0xf5, 0x8b, 0x68, 0x24, 0x16, 0xac, 0x9b, 0xa9, 0x27, 0x28, 0x0e, 0x84, 0x03, 0x46, 0x22, 0x5f, 0xf7, 0x0d, 0xa6, 0x85, 0x88, 0xc1, 0x45, 0x4b, 0x85, 0x1a, 0x10, 0x7f, 0xc9, 0x94, 0x20, 0xb0, 0x04, 0x28, 0x12, 0x30, 0xb9, 0xe6, 0x40, 0x6b,
- /* (2^ 32)P */ 0xac, 0x1b, 0x57, 0xb6, 0x42, 0xdb, 0x81, 0x8d, 0x76, 0xfd, 0x9b, 0x1c, 0x29, 0x30, 0xd5, 0x3a, 0xcc, 0x53, 0xd9, 0x26, 0x7a, 0x0f, 0x9c, 0x2e, 0x79, 0xf5, 0x62, 0xeb, 0x61, 0x9d, 0x9b, 0x80, 0x39, 0xcd, 0x60, 0x2e, 0x1f, 0x08, 0x22, 0xbc, 0x19, 0xb3, 0x2a, 0x43, 0x44, 0xf2, 0x4e, 0x66, 0xf4, 0x36, 0xa6, 0xa7, 0xbc, 0xa4, 0x15, 0x7e,
- /* (2^ 33)P */ 0xc1, 0x90, 0x8a, 0xde, 0xff, 0x78, 0xc3, 0x73, 0x16, 0xee, 0x76, 0xa0, 0x84, 0x60, 0x8d, 0xe6, 0x82, 0x0f, 0xde, 0x4e, 0xc5, 0x99, 0x34, 0x06, 0x90, 0x44, 0x55, 0xf8, 0x91, 0xd8, 0xe1, 0xe4, 0x2c, 0x8a, 0xde, 0x94, 0x1e, 0x78, 0x25, 0x3d, 0xfd, 0xd8, 0x59, 0x7d, 0xaf, 0x6e, 0xbe, 0x96, 0xbe, 0x3c, 0x16, 0x23, 0x0f, 0x4c, 0xa4, 0x28,
- /* (2^ 34)P */ 0xba, 0x11, 0x35, 0x57, 0x03, 0xb6, 0xf4, 0x24, 0x89, 0xb8, 0x5a, 0x0d, 0x50, 0x9c, 0xaa, 0x51, 0x7f, 0xa4, 0x0e, 0xfc, 0x71, 0xb3, 0x3b, 0xf1, 0x96, 0x50, 0x23, 0x15, 0xf5, 0xf5, 0xd4, 0x23, 0xdc, 0x8b, 0x26, 0x9e, 0xae, 0xb7, 0x50, 0xcd, 0xc4, 0x25, 0xf6, 0x75, 0x40, 0x9c, 0x37, 0x79, 0x33, 0x60, 0xd4, 0x4b, 0x13, 0x32, 0xee, 0xe2,
- /* (2^ 35)P */ 0x43, 0xb8, 0x56, 0x59, 0xf0, 0x68, 0x23, 0xb3, 0xea, 0x70, 0x58, 0x4c, 0x1e, 0x5a, 0x16, 0x54, 0x03, 0xb2, 0xf4, 0x73, 0xb6, 0xd9, 0x5c, 0x9c, 0x6f, 0xcf, 0x82, 0x2e, 0x54, 0x15, 0x46, 0x2c, 0xa3, 0xda, 0x4e, 0x87, 0xf5, 0x2b, 0xba, 0x91, 0xa3, 0xa0, 0x89, 0xba, 0x48, 0x2b, 0xfa, 0x64, 0x02, 0x7f, 0x78, 0x03, 0xd1, 0xe8, 0x3b, 0xe9,
- /* (2^ 36)P */ 0x15, 0xa4, 0x71, 0xd4, 0x0c, 0x24, 0xe9, 0x07, 0xa1, 0x43, 0xf4, 0x7f, 0xbb, 0xa2, 0xa6, 0x6b, 0xfa, 0xb7, 0xea, 0x58, 0xd1, 0x96, 0xb0, 0x24, 0x5c, 0xc7, 0x37, 0x4e, 0x60, 0x0f, 0x40, 0xf2, 0x2f, 0x44, 0x70, 0xea, 0x80, 0x63, 0xfe, 0xfc, 0x46, 0x59, 0x12, 0x27, 0xb5, 0x27, 0xfd, 0xb7, 0x73, 0x0b, 0xca, 0x8b, 0xc2, 0xd3, 0x71, 0x08,
- /* (2^ 37)P */ 0x26, 0x0e, 0xd7, 0x52, 0x6f, 0xf1, 0xf2, 0x9d, 0xb8, 0x3d, 0xbd, 0xd4, 0x75, 0x97, 0xd8, 0xbf, 0xa8, 0x86, 0x96, 0xa5, 0x80, 0xa0, 0x45, 0x75, 0xf6, 0x77, 0x71, 0xdb, 0x77, 0x96, 0x55, 0x99, 0x31, 0xd0, 0x4f, 0x34, 0xf4, 0x35, 0x39, 0x41, 0xd3, 0x7d, 0xf7, 0xe2, 0x74, 0xde, 0xbe, 0x5b, 0x1f, 0x39, 0x10, 0x21, 0xa3, 0x4d, 0x3b, 0xc8,
- /* (2^ 38)P */ 0x04, 0x00, 0x2a, 0x45, 0xb2, 0xaf, 0x9b, 0x18, 0x6a, 0xeb, 0x96, 0x28, 0xa4, 0x77, 0xd0, 0x13, 0xcf, 0x17, 0x65, 0xe8, 0xc5, 0x81, 0x28, 0xad, 0x39, 0x7a, 0x0b, 0xaa, 0x55, 0x2b, 0xf3, 0xfc, 0x86, 0x40, 0xad, 0x0d, 0x1e, 0x28, 0xa2, 0x2d, 0xc5, 0xd6, 0x04, 0x15, 0xa2, 0x30, 0x3d, 0x12, 0x8e, 0xd6, 0xb5, 0xf7, 0x69, 0xbb, 0x84, 0x20,
- /* (2^ 39)P */ 0xd7, 0x7a, 0x77, 0x2c, 0xfb, 0x81, 0x80, 0xe9, 0x1e, 0xc6, 0x36, 0x31, 0x79, 0xc3, 0x7c, 0xa9, 0x57, 0x6b, 0xb5, 0x70, 0xfb, 0xe4, 0xa1, 0xff, 0xfd, 0x21, 0xa5, 0x7c, 0xfa, 0x44, 0xba, 0x0d, 0x96, 0x3d, 0xc4, 0x5c, 0x39, 0x52, 0x87, 0xd7, 0x22, 0x0f, 0x52, 0x88, 0x91, 0x87, 0x96, 0xac, 0xfa, 0x3b, 0xdf, 0xdc, 0x83, 0x8c, 0x99, 0x29,
- /* (2^ 40)P */ 0x98, 0x6b, 0x3a, 0x8d, 0x83, 0x17, 0xe1, 0x62, 0xd8, 0x80, 0x4c, 0x97, 0xce, 0x6b, 0xaa, 0x10, 0xa7, 0xc4, 0xe9, 0xeb, 0xa5, 0xfb, 0xc9, 0xdd, 0x2d, 0xeb, 0xfc, 0x9a, 0x71, 0xcd, 0x68, 0x6e, 0xc0, 0x35, 0x64, 0x62, 0x1b, 0x95, 0x12, 0xe8, 0x53, 0xec, 0xf0, 0xf4, 0x86, 0x86, 0x78, 0x18, 0xc4, 0xc6, 0xbc, 0x5a, 0x59, 0x8f, 0x7c, 0x7e,
- /* (2^ 41)P */ 0x7f, 0xd7, 0x1e, 0xc5, 0x83, 0xdc, 0x1f, 0xbe, 0x0b, 0xcf, 0x2e, 0x01, 0x01, 0xed, 0xac, 0x17, 0x3b, 0xed, 0xa4, 0x30, 0x96, 0x0e, 0x14, 0x7e, 0x19, 0x2b, 0xa5, 0x67, 0x1e, 0xb3, 0x34, 0x03, 0xa8, 0xbb, 0x0a, 0x7d, 0x08, 0x2d, 0xd5, 0x53, 0x19, 0x6f, 0x13, 0xd5, 0xc0, 0x90, 0x8a, 0xcc, 0xc9, 0x5c, 0xab, 0x24, 0xd7, 0x03, 0xf6, 0x57,
- /* (2^ 42)P */ 0x49, 0xcb, 0xb4, 0x96, 0x5f, 0xa6, 0xf8, 0x71, 0x6f, 0x59, 0xad, 0x05, 0x24, 0x2d, 0xaf, 0x67, 0xa8, 0xbe, 0x95, 0xdf, 0x0d, 0x28, 0x5a, 0x7f, 0x6e, 0x87, 0x8c, 0x6e, 0x67, 0x0c, 0xf4, 0xe0, 0x1c, 0x30, 0xc2, 0x66, 0xae, 0x20, 0xa1, 0x34, 0xec, 0x9c, 0xbc, 0xae, 0x3d, 0xa1, 0x28, 0x28, 0x95, 0x1d, 0xc9, 0x3a, 0xa8, 0xfd, 0xfc, 0xa1,
- /* (2^ 43)P */ 0xe2, 0x2b, 0x9d, 0xed, 0x02, 0x99, 0x67, 0xbb, 0x2e, 0x16, 0x62, 0x05, 0x70, 0xc7, 0x27, 0xb9, 0x1c, 0x3f, 0xf2, 0x11, 0x01, 0xd8, 0x51, 0xa4, 0x18, 0x92, 0xa9, 0x5d, 0xfb, 0xa9, 0xe4, 0x42, 0xba, 0x38, 0x34, 0x1a, 0x4a, 0xc5, 0x6a, 0x37, 0xde, 0xa7, 0x0c, 0xb4, 0x7e, 0x7f, 0xde, 0xa6, 0xee, 0xcd, 0x55, 0x57, 0x05, 0x06, 0xfd, 0x5d,
- /* (2^ 44)P */ 0x2f, 0x32, 0xcf, 0x2e, 0x2c, 0x7b, 0xbe, 0x9a, 0x0c, 0x57, 0x35, 0xf8, 0x87, 0xda, 0x9c, 0xec, 0x48, 0xf2, 0xbb, 0xe2, 0xda, 0x10, 0x58, 0x20, 0xc6, 0xd3, 0x87, 0xe9, 0xc7, 0x26, 0xd1, 0x9a, 0x46, 0x87, 0x90, 0xda, 0xdc, 0xde, 0xc3, 0xb3, 0xf2, 0xe8, 0x6f, 0x4a, 0xe6, 0xe8, 0x9d, 0x98, 0x36, 0x20, 0x03, 0x47, 0x15, 0x3f, 0x64, 0x59,
- /* (2^ 45)P */ 0xd4, 0x71, 0x49, 0x0a, 0x67, 0x97, 0xaa, 0x3f, 0xf4, 0x1b, 0x3a, 0x6e, 0x5e, 0x17, 0xcc, 0x0a, 0x8f, 0x81, 0x6a, 0x41, 0x38, 0x77, 0x40, 0x8a, 0x11, 0x42, 0x62, 0xd2, 0x50, 0x32, 0x79, 0x78, 0x28, 0xc2, 0x2e, 0x10, 0x01, 0x94, 0x30, 0x4f, 0x7f, 0x18, 0x17, 0x56, 0x85, 0x4e, 0xad, 0xf7, 0xcb, 0x87, 0x3c, 0x3f, 0x50, 0x2c, 0xc0, 0xba,
- /* (2^ 46)P */ 0xbc, 0x30, 0x8e, 0x65, 0x8e, 0x57, 0x5b, 0x38, 0x7a, 0xd4, 0x95, 0x52, 0x7a, 0x32, 0x59, 0x69, 0xcd, 0x9d, 0x47, 0x34, 0x5b, 0x55, 0xa5, 0x24, 0x60, 0xdd, 0xc0, 0xc1, 0x62, 0x73, 0x44, 0xae, 0x4c, 0x9c, 0x65, 0x55, 0x1b, 0x9d, 0x8a, 0x29, 0xb0, 0x1a, 0x52, 0xa8, 0xf1, 0xe6, 0x9a, 0xb3, 0xf6, 0xa3, 0xc9, 0x0a, 0x70, 0x7d, 0x0f, 0xee,
- /* (2^ 47)P */ 0x77, 0xd3, 0xe5, 0x8e, 0xfa, 0x00, 0xeb, 0x1b, 0x7f, 0xdc, 0x68, 0x3f, 0x92, 0xbd, 0xb7, 0x0b, 0xb7, 0xb5, 0x24, 0xdf, 0xc5, 0x67, 0x53, 0xd4, 0x36, 0x79, 0xc4, 0x7b, 0x57, 0xbc, 0x99, 0x97, 0x60, 0xef, 0xe4, 0x01, 0xa1, 0xa7, 0xaa, 0x12, 0x36, 0x29, 0xb1, 0x03, 0xc2, 0x83, 0x1c, 0x2b, 0x83, 0xef, 0x2e, 0x2c, 0x23, 0x92, 0xfd, 0xd1,
- /* (2^ 48)P */ 0x94, 0xef, 0x03, 0x59, 0xfa, 0x8a, 0x18, 0x76, 0xee, 0x58, 0x08, 0x4d, 0x44, 0xce, 0xf1, 0x52, 0x33, 0x49, 0xf6, 0x69, 0x71, 0xe3, 0xa9, 0xbc, 0x86, 0xe3, 0x43, 0xde, 0x33, 0x7b, 0x90, 0x8b, 0x3e, 0x7d, 0xd5, 0x4a, 0xf0, 0x23, 0x99, 0xa6, 0xea, 0x5f, 0x08, 0xe5, 0xb9, 0x49, 0x8b, 0x0d, 0x6a, 0x21, 0xab, 0x07, 0x62, 0xcd, 0xc4, 0xbe,
- /* (2^ 49)P */ 0x61, 0xbf, 0x70, 0x14, 0xfa, 0x4e, 0x9e, 0x7c, 0x0c, 0xf8, 0xb2, 0x48, 0x71, 0x62, 0x83, 0xd6, 0xd1, 0xdc, 0x9c, 0x29, 0x66, 0xb1, 0x34, 0x9c, 0x8d, 0xe6, 0x88, 0xaf, 0xbe, 0xdc, 0x4d, 0xeb, 0xb0, 0xe7, 0x28, 0xae, 0xb2, 0x05, 0x56, 0xc6, 0x0e, 0x10, 0x26, 0xab, 0x2c, 0x59, 0x72, 0x03, 0x66, 0xfe, 0x8f, 0x2c, 0x51, 0x2d, 0xdc, 0xae,
- /* (2^ 50)P */ 0xdc, 0x63, 0xf1, 0x8b, 0x5c, 0x65, 0x0b, 0xf1, 0xa6, 0x22, 0xe2, 0xd9, 0xdb, 0x49, 0xb1, 0x3c, 0x47, 0xc2, 0xfe, 0xac, 0x86, 0x07, 0x52, 0xec, 0xb0, 0x08, 0x69, 0xfb, 0xd1, 0x06, 0xdc, 0x48, 0x5c, 0x3d, 0xb2, 0x4d, 0xb8, 0x1a, 0x4e, 0xda, 0xb9, 0xc1, 0x2b, 0xab, 0x4b, 0x62, 0x81, 0x21, 0x9a, 0xfc, 0x3d, 0x39, 0x83, 0x11, 0x36, 0xeb,
- /* (2^ 51)P */ 0x94, 0xf3, 0x17, 0xef, 0xf9, 0x60, 0x54, 0xc3, 0xd7, 0x27, 0x35, 0xc5, 0x98, 0x5e, 0xf6, 0x63, 0x6c, 0xa0, 0x4a, 0xd3, 0xa3, 0x98, 0xd9, 0x42, 0xe3, 0xf1, 0xf8, 0x81, 0x96, 0xa9, 0xea, 0x6d, 0x4b, 0x8e, 0x33, 0xca, 0x94, 0x0d, 0xa0, 0xf7, 0xbb, 0x64, 0xa3, 0x36, 0x6f, 0xdc, 0x5a, 0x94, 0x42, 0xca, 0x06, 0xb2, 0x2b, 0x9a, 0x9f, 0x71,
- /* (2^ 52)P */ 0xec, 0xdb, 0xa6, 0x1f, 0xdf, 0x15, 0x36, 0xa3, 0xda, 0x8a, 0x7a, 0xb6, 0xa7, 0xe3, 0xaf, 0x52, 0xe0, 0x8d, 0xe8, 0xf2, 0x44, 0x20, 0xeb, 0xa1, 0x20, 0xc4, 0x65, 0x3c, 0x7c, 0x6c, 0x49, 0xed, 0x2f, 0x66, 0x23, 0x68, 0x61, 0x91, 0x40, 0x9f, 0x50, 0x19, 0xd1, 0x84, 0xa7, 0xe2, 0xed, 0x34, 0x37, 0xe3, 0xe4, 0x11, 0x7f, 0x87, 0x55, 0x0f,
- /* (2^ 53)P */ 0xb3, 0xa1, 0x0f, 0xb0, 0x48, 0xc0, 0x4d, 0x96, 0xa7, 0xcf, 0x5a, 0x81, 0xb8, 0x4a, 0x46, 0xef, 0x0a, 0xd3, 0x40, 0x7e, 0x02, 0xe3, 0x63, 0xaa, 0x50, 0xd1, 0x2a, 0x37, 0x22, 0x4a, 0x7f, 0x4f, 0xb6, 0xf9, 0x01, 0x82, 0x78, 0x3d, 0x93, 0x14, 0x11, 0x8a, 0x90, 0x60, 0xcd, 0x45, 0x4e, 0x7b, 0x42, 0xb9, 0x3e, 0x6e, 0x68, 0x1f, 0x36, 0x41,
- /* (2^ 54)P */ 0x13, 0x73, 0x0e, 0x4f, 0x79, 0x93, 0x9e, 0x29, 0x70, 0x7b, 0x4a, 0x59, 0x1a, 0x9a, 0xf4, 0x55, 0x08, 0xf0, 0xdb, 0x17, 0x58, 0xec, 0x64, 0xad, 0x7f, 0x29, 0xeb, 0x3f, 0x85, 0x4e, 0x60, 0x28, 0x98, 0x1f, 0x73, 0x4e, 0xe6, 0xa8, 0xab, 0xd5, 0xd6, 0xfc, 0xa1, 0x36, 0x6d, 0x15, 0xc6, 0x13, 0x83, 0xa0, 0xc2, 0x6e, 0xd9, 0xdb, 0xc9, 0xcc,
- /* (2^ 55)P */ 0xff, 0xd8, 0x52, 0xa3, 0xdc, 0x99, 0xcf, 0x3e, 0x19, 0xb3, 0x68, 0xd0, 0xb5, 0x0d, 0xb8, 0xee, 0x3f, 0xef, 0x6e, 0xc0, 0x38, 0x28, 0x44, 0x92, 0x78, 0x91, 0x1a, 0x08, 0x78, 0x6c, 0x65, 0x24, 0xf3, 0xa2, 0x3d, 0xf2, 0xe5, 0x79, 0x62, 0x69, 0x29, 0xf4, 0x22, 0xc5, 0xdb, 0x6a, 0xae, 0xf4, 0x44, 0xa3, 0x6f, 0xc7, 0x86, 0xab, 0xef, 0xef,
- /* (2^ 56)P */ 0xbf, 0x54, 0x9a, 0x09, 0x5d, 0x17, 0xd0, 0xde, 0xfb, 0xf5, 0xca, 0xff, 0x13, 0x20, 0x88, 0x82, 0x3a, 0xe2, 0xd0, 0x3b, 0xfb, 0x05, 0x76, 0xd1, 0xc0, 0x02, 0x71, 0x3b, 0x94, 0xe8, 0xc9, 0x84, 0xcf, 0xa4, 0xe9, 0x28, 0x7b, 0xf5, 0x09, 0xc3, 0x2b, 0x22, 0x40, 0xf1, 0x68, 0x24, 0x24, 0x7d, 0x9f, 0x6e, 0xcd, 0xfe, 0xb0, 0x19, 0x61, 0xf5,
- /* (2^ 57)P */ 0xe8, 0x63, 0x51, 0xb3, 0x95, 0x6b, 0x7b, 0x74, 0x92, 0x52, 0x45, 0xa4, 0xed, 0xea, 0x0e, 0x0d, 0x2b, 0x01, 0x1e, 0x2c, 0xbc, 0x91, 0x06, 0x69, 0xdb, 0x1f, 0xb5, 0x77, 0x1d, 0x56, 0xf5, 0xb4, 0x02, 0x80, 0x49, 0x56, 0x12, 0xce, 0x86, 0x05, 0xc9, 0xd9, 0xae, 0xf3, 0x6d, 0xe6, 0x3f, 0x40, 0x52, 0xe9, 0x49, 0x2b, 0x31, 0x06, 0x86, 0x14,
- /* (2^ 58)P */ 0xf5, 0x09, 0x3b, 0xd2, 0xff, 0xdf, 0x11, 0xa5, 0x1c, 0x99, 0xe8, 0x1b, 0xa4, 0x2c, 0x7d, 0x8e, 0xc8, 0xf7, 0x03, 0x46, 0xfa, 0xb6, 0xde, 0x73, 0x91, 0x7e, 0x5a, 0x7a, 0xd7, 0x9a, 0x5b, 0x80, 0x24, 0x62, 0x5e, 0x92, 0xf1, 0xa3, 0x45, 0xa3, 0x43, 0x92, 0x8a, 0x2a, 0x5b, 0x0c, 0xb4, 0xc8, 0xad, 0x1c, 0xb6, 0x6c, 0x5e, 0x81, 0x18, 0x91,
- /* (2^ 59)P */ 0x96, 0xb3, 0xca, 0x2b, 0xe3, 0x7a, 0x59, 0x72, 0x17, 0x74, 0x29, 0x21, 0xe7, 0x78, 0x07, 0xad, 0xda, 0xb6, 0xcd, 0xf9, 0x27, 0x4d, 0xc8, 0xf2, 0x98, 0x22, 0xca, 0xf2, 0x33, 0x74, 0x7a, 0xdd, 0x1e, 0x71, 0xec, 0xe3, 0x3f, 0xe2, 0xa2, 0xd2, 0x38, 0x75, 0xb0, 0xd0, 0x0a, 0xcf, 0x7d, 0x36, 0xdc, 0x49, 0x38, 0x25, 0x34, 0x4f, 0x20, 0x9a,
- /* (2^ 60)P */ 0x2b, 0x6e, 0x04, 0x0d, 0x4f, 0x3d, 0x3b, 0x24, 0xf6, 0x4e, 0x5e, 0x0a, 0xbd, 0x48, 0x96, 0xba, 0x81, 0x8f, 0x39, 0x82, 0x13, 0xe6, 0x72, 0xf3, 0x0f, 0xb6, 0x94, 0xf4, 0xc5, 0x90, 0x74, 0x91, 0xa8, 0xf2, 0xc9, 0xca, 0x9a, 0x4d, 0x98, 0xf2, 0xdf, 0x52, 0x4e, 0x97, 0x2f, 0xeb, 0x84, 0xd3, 0xaf, 0xc2, 0xcc, 0xfb, 0x4c, 0x26, 0x4b, 0xe4,
- /* (2^ 61)P */ 0x12, 0x9e, 0xfb, 0x9d, 0x78, 0x79, 0x99, 0xdd, 0xb3, 0x0b, 0x2e, 0x56, 0x41, 0x8e, 0x3f, 0x39, 0xb8, 0x97, 0x89, 0x53, 0x9b, 0x8a, 0x3c, 0x40, 0x9d, 0xa4, 0x6c, 0x2e, 0x31, 0x71, 0xc6, 0x0a, 0x41, 0xd4, 0x95, 0x06, 0x5e, 0xc1, 0xab, 0xc2, 0x14, 0xc4, 0xc7, 0x15, 0x08, 0x3a, 0xad, 0x7a, 0xb4, 0x62, 0xa3, 0x0c, 0x90, 0xf4, 0x47, 0x08,
- /* (2^ 62)P */ 0x7f, 0xec, 0x09, 0x82, 0xf5, 0x94, 0x09, 0x93, 0x32, 0xd3, 0xdc, 0x56, 0x80, 0x7b, 0x5b, 0x22, 0x80, 0x6a, 0x96, 0x72, 0xb1, 0xc2, 0xd9, 0xa1, 0x8b, 0x66, 0x42, 0x16, 0xe2, 0x07, 0xb3, 0x2d, 0xf1, 0x75, 0x35, 0x72, 0xc7, 0x98, 0xbe, 0x63, 0x3b, 0x20, 0x75, 0x05, 0xc1, 0x3e, 0x31, 0x5a, 0xf7, 0xaa, 0xae, 0x4b, 0xdb, 0x1d, 0xd0, 0x74,
- /* (2^ 63)P */ 0x36, 0x5c, 0x74, 0xe6, 0x5d, 0x59, 0x3f, 0x15, 0x4b, 0x4d, 0x4e, 0x67, 0x41, 0xfe, 0x98, 0x1f, 0x49, 0x76, 0x91, 0x0f, 0x9b, 0xf4, 0xaf, 0x86, 0xaf, 0x66, 0x19, 0xed, 0x46, 0xf1, 0x05, 0x9a, 0xcc, 0xd1, 0x14, 0x1f, 0x82, 0x12, 0x8e, 0xe6, 0xf4, 0xc3, 0x42, 0x5c, 0x4e, 0x33, 0x93, 0xbe, 0x30, 0xe7, 0x64, 0xa9, 0x35, 0x00, 0x4d, 0xf9,
- /* (2^ 64)P */ 0x1f, 0xc1, 0x1e, 0xb7, 0xe3, 0x7c, 0xfa, 0xa3, 0x6b, 0x76, 0xaf, 0x9c, 0x05, 0x85, 0x4a, 0xa9, 0xfb, 0xe3, 0x7e, 0xf2, 0x49, 0x56, 0xdc, 0x2f, 0x57, 0x10, 0xba, 0x37, 0xb2, 0x62, 0xf5, 0x6b, 0xe5, 0x8f, 0x0a, 0x87, 0xd1, 0x6a, 0xcb, 0x9d, 0x07, 0xd0, 0xf6, 0x38, 0x99, 0x2c, 0x61, 0x4a, 0x4e, 0xd8, 0xd2, 0x88, 0x29, 0x99, 0x11, 0x95,
- /* (2^ 65)P */ 0x6f, 0xdc, 0xd5, 0xd6, 0xd6, 0xa7, 0x4c, 0x46, 0x93, 0x65, 0x62, 0x23, 0x95, 0x32, 0x9c, 0xde, 0x40, 0x41, 0x68, 0x2c, 0x18, 0x4e, 0x5a, 0x8c, 0xc0, 0xc5, 0xc5, 0xea, 0x5c, 0x45, 0x0f, 0x60, 0x78, 0x39, 0xb6, 0x36, 0x23, 0x12, 0xbc, 0x21, 0x9a, 0xf8, 0x91, 0xac, 0xc4, 0x70, 0xdf, 0x85, 0x8e, 0x3c, 0xec, 0x22, 0x04, 0x98, 0xa8, 0xaa,
- /* (2^ 66)P */ 0xcc, 0x52, 0x10, 0x5b, 0x4b, 0x6c, 0xc5, 0xfa, 0x3e, 0xd4, 0xf8, 0x1c, 0x04, 0x14, 0x48, 0x33, 0xd9, 0xfc, 0x5f, 0xb0, 0xa5, 0x48, 0x8c, 0x45, 0x8a, 0xee, 0x3e, 0xa7, 0xc1, 0x2e, 0x34, 0xca, 0xf6, 0xc9, 0xeb, 0x10, 0xbb, 0xe1, 0x59, 0x84, 0x25, 0xe8, 0x81, 0x70, 0xc0, 0x09, 0x42, 0xa7, 0x3b, 0x0d, 0x33, 0x00, 0xb5, 0x77, 0xbe, 0x25,
- /* (2^ 67)P */ 0xcd, 0x1f, 0xbc, 0x7d, 0xef, 0xe5, 0xca, 0x91, 0xaf, 0xa9, 0x59, 0x6a, 0x09, 0xca, 0xd6, 0x1b, 0x3d, 0x55, 0xde, 0xa2, 0x6a, 0x80, 0xd6, 0x95, 0x47, 0xe4, 0x5f, 0x68, 0x54, 0x08, 0xdf, 0x29, 0xba, 0x2a, 0x02, 0x84, 0xe8, 0xe9, 0x00, 0x77, 0x99, 0x36, 0x03, 0xf6, 0x4a, 0x3e, 0x21, 0x81, 0x7d, 0xb8, 0xa4, 0x8a, 0xa2, 0x05, 0xef, 0xbc,
- /* (2^ 68)P */ 0x7c, 0x59, 0x5f, 0x66, 0xd9, 0xb7, 0x83, 0x43, 0x8a, 0xa1, 0x8d, 0x51, 0x70, 0xba, 0xf2, 0x9b, 0x95, 0xc0, 0x4b, 0x4c, 0xa0, 0x14, 0xd3, 0xa4, 0x5d, 0x4a, 0x37, 0x36, 0x97, 0x31, 0x1e, 0x12, 0xe7, 0xbb, 0x08, 0x67, 0xa5, 0x23, 0xd7, 0xfb, 0x97, 0xd8, 0x6a, 0x03, 0xb1, 0xf8, 0x7f, 0xda, 0x58, 0xd9, 0x3f, 0x73, 0x4a, 0x53, 0xe1, 0x7b,
- /* (2^ 69)P */ 0x55, 0x83, 0x98, 0x78, 0x6c, 0x56, 0x5e, 0xed, 0xf7, 0x23, 0x3e, 0x4c, 0x7d, 0x09, 0x2d, 0x09, 0x9c, 0x58, 0x8b, 0x32, 0xca, 0xfe, 0xbf, 0x47, 0x03, 0xeb, 0x4d, 0xe7, 0xeb, 0x9c, 0x83, 0x05, 0x68, 0xaa, 0x80, 0x89, 0x44, 0xf9, 0xd4, 0xdc, 0xdb, 0xb1, 0xdb, 0x77, 0xac, 0xf9, 0x2a, 0xae, 0x35, 0xac, 0x74, 0xb5, 0x95, 0x62, 0x18, 0x85,
- /* (2^ 70)P */ 0xab, 0x82, 0x7e, 0x10, 0xd7, 0xe6, 0x57, 0xd1, 0x66, 0x12, 0x31, 0x9c, 0x9c, 0xa6, 0x27, 0x59, 0x71, 0x2e, 0xeb, 0xa0, 0x68, 0xc5, 0x87, 0x51, 0xf4, 0xca, 0x3f, 0x98, 0x56, 0xb0, 0x89, 0xb1, 0xc7, 0x7b, 0x46, 0xb3, 0xae, 0x36, 0xf2, 0xee, 0x15, 0x1a, 0x60, 0xf4, 0x50, 0x76, 0x4f, 0xc4, 0x53, 0x0d, 0x36, 0x4d, 0x31, 0xb1, 0x20, 0x51,
- /* (2^ 71)P */ 0xf7, 0x1d, 0x8c, 0x1b, 0x5e, 0xe5, 0x02, 0x6f, 0xc5, 0xa5, 0xe0, 0x5f, 0xc6, 0xb6, 0x63, 0x43, 0xaf, 0x3c, 0x19, 0x6c, 0xf4, 0xaf, 0xa4, 0x33, 0xb1, 0x0a, 0x37, 0x3d, 0xd9, 0x4d, 0xe2, 0x29, 0x24, 0x26, 0x94, 0x7c, 0x02, 0xe4, 0xe2, 0xf2, 0xbe, 0xbd, 0xac, 0x1b, 0x48, 0xb8, 0xdd, 0xe9, 0x0d, 0x9a, 0x50, 0x1a, 0x98, 0x71, 0x6e, 0xdc,
- /* (2^ 72)P */ 0x9f, 0x40, 0xb1, 0xb3, 0x66, 0x28, 0x6c, 0xfe, 0xa6, 0x7d, 0xf8, 0x3e, 0xb8, 0xf3, 0xde, 0x52, 0x76, 0x52, 0xa3, 0x92, 0x98, 0x23, 0xab, 0x4f, 0x88, 0x97, 0xfc, 0x22, 0xe1, 0x6b, 0x67, 0xcd, 0x13, 0x95, 0xda, 0x65, 0xdd, 0x3b, 0x67, 0x3f, 0x5f, 0x4c, 0xf2, 0x8a, 0xad, 0x98, 0xa7, 0x94, 0x24, 0x45, 0x87, 0x11, 0x7c, 0x75, 0x79, 0x85,
- /* (2^ 73)P */ 0x70, 0xbf, 0xf9, 0x3b, 0xa9, 0x44, 0x57, 0x72, 0x96, 0xc9, 0xa4, 0x98, 0x65, 0xbf, 0x87, 0xb3, 0x3a, 0x39, 0x12, 0xde, 0xe5, 0x39, 0x01, 0x4f, 0xf7, 0xc0, 0x71, 0x52, 0x36, 0x85, 0xb3, 0x18, 0xf8, 0x14, 0xc0, 0x6d, 0xae, 0x9e, 0x4f, 0xb0, 0x72, 0x87, 0xac, 0x5c, 0xd1, 0x6c, 0x41, 0x6c, 0x90, 0x9d, 0x22, 0x81, 0xe4, 0x2b, 0xea, 0xe5,
- /* (2^ 74)P */ 0xfc, 0xea, 0x1a, 0x65, 0xd9, 0x49, 0x6a, 0x39, 0xb5, 0x96, 0x72, 0x7b, 0x32, 0xf1, 0xd0, 0xe9, 0x45, 0xd9, 0x31, 0x55, 0xc7, 0x34, 0xe9, 0x5a, 0xec, 0x73, 0x0b, 0x03, 0xc4, 0xb3, 0xe6, 0xc9, 0x5e, 0x0a, 0x17, 0xfe, 0x53, 0x66, 0x7f, 0x21, 0x18, 0x74, 0x54, 0x1b, 0xc9, 0x49, 0x16, 0xd2, 0x48, 0xaf, 0x5b, 0x47, 0x7b, 0xeb, 0xaa, 0xc9,
- /* (2^ 75)P */ 0x47, 0x04, 0xf5, 0x5a, 0x87, 0x77, 0x9e, 0x21, 0x34, 0x4e, 0x83, 0x88, 0xaf, 0x02, 0x1d, 0xb0, 0x5a, 0x1d, 0x1d, 0x7d, 0x8d, 0x2c, 0xd3, 0x8d, 0x63, 0xa9, 0x45, 0xfb, 0x15, 0x6d, 0x86, 0x45, 0xcd, 0x38, 0x0e, 0xf7, 0x37, 0x79, 0xed, 0x6d, 0x5a, 0xbc, 0x32, 0xcc, 0x66, 0xf1, 0x3a, 0xb2, 0x87, 0x6f, 0x70, 0x71, 0xd9, 0xf2, 0xfa, 0x7b,
- /* (2^ 76)P */ 0x68, 0x07, 0xdc, 0x61, 0x40, 0xe4, 0xec, 0x32, 0xc8, 0xbe, 0x66, 0x30, 0x54, 0x80, 0xfd, 0x13, 0x7a, 0xef, 0xae, 0xed, 0x2e, 0x00, 0x6d, 0x3f, 0xbd, 0xfc, 0x91, 0x24, 0x53, 0x7f, 0x63, 0x9d, 0x2e, 0xe3, 0x76, 0xe0, 0xf3, 0xe1, 0x8f, 0x7a, 0xc4, 0x77, 0x0c, 0x91, 0xc0, 0xc2, 0x18, 0x6b, 0x04, 0xad, 0xb6, 0x70, 0x9a, 0x64, 0xc5, 0x82,
- /* (2^ 77)P */ 0x7f, 0xea, 0x13, 0xd8, 0x9e, 0xfc, 0x5b, 0x06, 0xb5, 0x4f, 0xda, 0x38, 0xe0, 0x9c, 0xd2, 0x3a, 0xc1, 0x1c, 0x62, 0x70, 0x7f, 0xc6, 0x24, 0x0a, 0x47, 0x04, 0x01, 0xc4, 0x55, 0x09, 0xd1, 0x7a, 0x07, 0xba, 0xa3, 0x80, 0x4f, 0xc1, 0x65, 0x36, 0x6d, 0xc0, 0x10, 0xcf, 0x94, 0xa9, 0xa2, 0x01, 0x44, 0xd1, 0xf9, 0x1c, 0x4c, 0xfb, 0xf8, 0x99,
- /* (2^ 78)P */ 0x6c, 0xb9, 0x6b, 0xee, 0x43, 0x5b, 0xb9, 0xbb, 0xee, 0x2e, 0x52, 0xc1, 0xc6, 0xb9, 0x61, 0xd2, 0x93, 0xa5, 0xaf, 0x52, 0xf4, 0xa4, 0x1a, 0x51, 0x61, 0xa7, 0xcb, 0x9e, 0xbb, 0x56, 0x65, 0xe2, 0xbf, 0x75, 0xb9, 0x9c, 0x50, 0x96, 0x60, 0x81, 0x74, 0x47, 0xc0, 0x04, 0x88, 0x71, 0x76, 0x39, 0x9a, 0xa7, 0xb1, 0x4e, 0x43, 0x15, 0xe0, 0xbb,
- /* (2^ 79)P */ 0xbb, 0xce, 0xe2, 0xbb, 0xf9, 0x17, 0x0f, 0x82, 0x40, 0xad, 0x73, 0xe3, 0xeb, 0x3b, 0x06, 0x1a, 0xcf, 0x8e, 0x6e, 0x28, 0xb8, 0x26, 0xd9, 0x5b, 0xb7, 0xb3, 0xcf, 0xb4, 0x6a, 0x1c, 0xbf, 0x7f, 0xb8, 0xb5, 0x79, 0xcf, 0x45, 0x68, 0x7d, 0xc5, 0xeb, 0xf3, 0xbe, 0x39, 0x40, 0xfc, 0x07, 0x90, 0x7a, 0x62, 0xad, 0x86, 0x08, 0x71, 0x25, 0xe1,
- /* (2^ 80)P */ 0x9b, 0x46, 0xac, 0xef, 0xc1, 0x4e, 0xa1, 0x97, 0x95, 0x76, 0xf9, 0x1b, 0xc2, 0xb2, 0x6a, 0x41, 0xea, 0x80, 0x3d, 0xe9, 0x08, 0x52, 0x5a, 0xe3, 0xf2, 0x08, 0xc5, 0xea, 0x39, 0x3f, 0x44, 0x71, 0x4d, 0xea, 0x0d, 0x05, 0x23, 0xe4, 0x2e, 0x3c, 0x89, 0xfe, 0x12, 0x8a, 0x95, 0x42, 0x0a, 0x68, 0xea, 0x5a, 0x28, 0x06, 0x9e, 0xe3, 0x5f, 0xe0,
- /* (2^ 81)P */ 0x00, 0x61, 0x6c, 0x98, 0x9b, 0xe7, 0xb9, 0x06, 0x1c, 0xc5, 0x1b, 0xed, 0xbe, 0xc8, 0xb3, 0xea, 0x87, 0xf0, 0xc4, 0x24, 0x7d, 0xbb, 0x5d, 0xa4, 0x1d, 0x7a, 0x16, 0x00, 0x55, 0x94, 0x67, 0x78, 0xbd, 0x58, 0x02, 0x82, 0x90, 0x53, 0x76, 0xd4, 0x72, 0x99, 0x51, 0x6f, 0x7b, 0xcf, 0x80, 0x30, 0x31, 0x3b, 0x01, 0xc7, 0xc1, 0xef, 0xe6, 0x42,
- /* (2^ 82)P */ 0xe2, 0x35, 0xaf, 0x4b, 0x79, 0xc6, 0x12, 0x24, 0x99, 0xc0, 0x68, 0xb0, 0x43, 0x3e, 0xe5, 0xef, 0xe2, 0x29, 0xea, 0xb8, 0xb3, 0xbc, 0x6a, 0x53, 0x2c, 0x69, 0x18, 0x5a, 0xf9, 0x15, 0xae, 0x66, 0x58, 0x18, 0xd3, 0x2d, 0x4b, 0x00, 0xfd, 0x84, 0xab, 0x4f, 0xae, 0x70, 0x6b, 0x9e, 0x9a, 0xdf, 0x83, 0xfd, 0x2e, 0x3c, 0xcf, 0xf8, 0x88, 0x5b,
- /* (2^ 83)P */ 0xa4, 0x90, 0x31, 0x85, 0x13, 0xcd, 0xdf, 0x64, 0xc9, 0xa1, 0x0b, 0xe7, 0xb6, 0x73, 0x8a, 0x1b, 0x22, 0x78, 0x4c, 0xd4, 0xae, 0x48, 0x18, 0x00, 0x00, 0xa8, 0x9f, 0x06, 0xf9, 0xfb, 0x2d, 0xc3, 0xb1, 0x2a, 0xbc, 0x13, 0x99, 0x57, 0xaf, 0xf0, 0x8d, 0x61, 0x54, 0x29, 0xd5, 0xf2, 0x72, 0x00, 0x96, 0xd1, 0x85, 0x12, 0x8a, 0xf0, 0x23, 0xfb,
- /* (2^ 84)P */ 0x69, 0xc7, 0xdb, 0xd9, 0x92, 0x75, 0x08, 0x9b, 0xeb, 0xa5, 0x93, 0xd1, 0x1a, 0xf4, 0xf5, 0xaf, 0xe6, 0xc4, 0x4a, 0x0d, 0x35, 0x26, 0x39, 0x9d, 0xd3, 0x17, 0x3e, 0xae, 0x2d, 0xbf, 0x73, 0x9f, 0xb7, 0x74, 0x91, 0xd1, 0xd8, 0x5c, 0x14, 0xf9, 0x75, 0xdf, 0xeb, 0xc2, 0x22, 0xd8, 0x14, 0x8d, 0x86, 0x23, 0x4d, 0xd1, 0x2d, 0xdb, 0x6b, 0x42,
- /* (2^ 85)P */ 0x8c, 0xda, 0xc6, 0xf8, 0x71, 0xba, 0x2b, 0x06, 0x78, 0xae, 0xcc, 0x3a, 0xe3, 0xe3, 0xa1, 0x8b, 0xe2, 0x34, 0x6d, 0x28, 0x9e, 0x46, 0x13, 0x4d, 0x9e, 0xa6, 0x73, 0x49, 0x65, 0x79, 0x88, 0xb9, 0x3a, 0xd1, 0x6d, 0x2f, 0x48, 0x2b, 0x0a, 0x7f, 0x58, 0x20, 0x37, 0xf4, 0x0e, 0xbb, 0x4a, 0x95, 0x58, 0x0c, 0x88, 0x30, 0xc4, 0x74, 0xdd, 0xfd,
- /* (2^ 86)P */ 0x6d, 0x13, 0x4e, 0x89, 0x2d, 0xa9, 0xa3, 0xed, 0x09, 0xe3, 0x0e, 0x71, 0x3e, 0x4a, 0xab, 0x90, 0xde, 0x03, 0xeb, 0x56, 0x46, 0x60, 0x06, 0xf5, 0x71, 0xe5, 0xee, 0x9b, 0xef, 0xff, 0xc4, 0x2c, 0x9f, 0x37, 0x48, 0x45, 0x94, 0x12, 0x41, 0x81, 0x15, 0x70, 0x91, 0x99, 0x5e, 0x56, 0x6b, 0xf4, 0xa6, 0xc9, 0xf5, 0x69, 0x9d, 0x78, 0x37, 0x57,
- /* (2^ 87)P */ 0xf3, 0x51, 0x57, 0x7e, 0x43, 0x6f, 0xc6, 0x67, 0x59, 0x0c, 0xcf, 0x94, 0xe6, 0x3d, 0xb5, 0x07, 0xc9, 0x77, 0x48, 0xc9, 0x68, 0x0d, 0x98, 0x36, 0x62, 0x35, 0x38, 0x1c, 0xf5, 0xc5, 0xec, 0x66, 0x78, 0xfe, 0x47, 0xab, 0x26, 0xd6, 0x44, 0xb6, 0x06, 0x0f, 0x89, 0xe3, 0x19, 0x40, 0x1a, 0xe7, 0xd8, 0x65, 0x55, 0xf7, 0x1a, 0xfc, 0xa3, 0x0e,
- /* (2^ 88)P */ 0x0e, 0x30, 0xa6, 0xb7, 0x58, 0x60, 0x62, 0x2a, 0x6c, 0x13, 0xa8, 0x14, 0x9b, 0xb8, 0xf2, 0x70, 0xd8, 0xb1, 0x71, 0x88, 0x8c, 0x18, 0x31, 0x25, 0x93, 0x90, 0xb4, 0xc7, 0x49, 0xd8, 0xd4, 0xdb, 0x1e, 0x1e, 0x7f, 0xaa, 0xba, 0xc9, 0xf2, 0x5d, 0xa9, 0x3a, 0x43, 0xb4, 0x5c, 0xee, 0x7b, 0xc7, 0x97, 0xb7, 0x66, 0xd7, 0x23, 0xd9, 0x22, 0x59,
- /* (2^ 89)P */ 0x28, 0x19, 0xa6, 0xf9, 0x89, 0x20, 0x78, 0xd4, 0x6d, 0xcb, 0x79, 0x8f, 0x61, 0x6f, 0xb2, 0x5c, 0x4f, 0xa6, 0x54, 0x84, 0x95, 0x24, 0x36, 0x64, 0xcb, 0x39, 0xe7, 0x8f, 0x97, 0x9c, 0x5c, 0x3c, 0xfb, 0x51, 0x11, 0x01, 0x17, 0xdb, 0xc9, 0x9b, 0x51, 0x03, 0x9a, 0xe9, 0xe5, 0x24, 0x1e, 0xf5, 0xda, 0xe0, 0x48, 0x02, 0x23, 0xd0, 0x2c, 0x81,
- /* (2^ 90)P */ 0x42, 0x1b, 0xe4, 0x91, 0x85, 0x2a, 0x0c, 0xd2, 0x28, 0x66, 0x57, 0x9e, 0x33, 0x8d, 0x25, 0x71, 0x10, 0x65, 0x76, 0xa2, 0x8c, 0x21, 0x86, 0x81, 0x15, 0xc2, 0x27, 0xeb, 0x54, 0x2d, 0x4f, 0x6c, 0xe6, 0xd6, 0x24, 0x9c, 0x1a, 0x12, 0xb8, 0x81, 0xe2, 0x0a, 0xf3, 0xd3, 0xf0, 0xd3, 0xe1, 0x74, 0x1f, 0x9b, 0x11, 0x47, 0xd0, 0xcf, 0xb6, 0x54,
- /* (2^ 91)P */ 0x26, 0x45, 0xa2, 0x10, 0xd4, 0x2d, 0xae, 0xc0, 0xb0, 0xe8, 0x86, 0xb3, 0xc7, 0xea, 0x70, 0x87, 0x61, 0xb5, 0xa5, 0x55, 0xbe, 0x88, 0x1d, 0x7a, 0xd9, 0x6f, 0xeb, 0x83, 0xe2, 0x44, 0x7f, 0x98, 0x04, 0xd6, 0x50, 0x9d, 0xa7, 0x86, 0x66, 0x09, 0x63, 0xe1, 0xed, 0x72, 0xb1, 0xe4, 0x1d, 0x3a, 0xfd, 0x47, 0xce, 0x1c, 0xaa, 0x3b, 0x8f, 0x1b,
- /* (2^ 92)P */ 0xf4, 0x3c, 0x4a, 0xb6, 0xc2, 0x9c, 0xe0, 0x2e, 0xb7, 0x38, 0xea, 0x61, 0x35, 0x97, 0x10, 0x90, 0xae, 0x22, 0x48, 0xb3, 0xa9, 0xc6, 0x7a, 0xbb, 0x23, 0xf2, 0xf8, 0x1b, 0xa7, 0xa1, 0x79, 0xcc, 0xc4, 0xf8, 0x08, 0x76, 0x8a, 0x5a, 0x1c, 0x1b, 0xc5, 0x33, 0x91, 0xa9, 0xb8, 0xb9, 0xd3, 0xf8, 0x49, 0xcd, 0xe5, 0x82, 0x43, 0xf7, 0xca, 0x68,
- /* (2^ 93)P */ 0x38, 0xba, 0xae, 0x44, 0xfe, 0x57, 0x64, 0x56, 0x7c, 0x0e, 0x9c, 0xca, 0xff, 0xa9, 0x82, 0xbb, 0x38, 0x4a, 0xa7, 0xf7, 0x47, 0xab, 0xbe, 0x6d, 0x23, 0x0b, 0x8a, 0xed, 0xc2, 0xb9, 0x8f, 0xf1, 0xec, 0x91, 0x44, 0x73, 0x64, 0xba, 0xd5, 0x8f, 0x37, 0x38, 0x0d, 0xd5, 0xf8, 0x73, 0x57, 0xb6, 0xc2, 0x45, 0xdc, 0x25, 0xb2, 0xb6, 0xea, 0xd9,
- /* (2^ 94)P */ 0xbf, 0xe9, 0x1a, 0x40, 0x4d, 0xcc, 0xe6, 0x1d, 0x70, 0x1a, 0x65, 0xcc, 0x34, 0x2c, 0x37, 0x2c, 0x2d, 0x6b, 0x6d, 0xe5, 0x2f, 0x19, 0x9e, 0xe4, 0xe1, 0xaa, 0xd4, 0xab, 0x54, 0xf4, 0xa8, 0xe4, 0x69, 0x2d, 0x8e, 0x4d, 0xd7, 0xac, 0xb0, 0x5b, 0xfe, 0xe3, 0x26, 0x07, 0xc3, 0xf8, 0x1b, 0x43, 0xa8, 0x1d, 0x64, 0xa5, 0x25, 0x88, 0xbb, 0x77,
- /* (2^ 95)P */ 0x92, 0xcd, 0x6e, 0xa0, 0x79, 0x04, 0x18, 0xf4, 0x11, 0x58, 0x48, 0xb5, 0x3c, 0x7b, 0xd1, 0xcc, 0xd3, 0x14, 0x2c, 0xa0, 0xdd, 0x04, 0x44, 0x11, 0xb3, 0x6d, 0x2f, 0x0d, 0xf5, 0x2a, 0x75, 0x5d, 0x1d, 0xda, 0x86, 0x8d, 0x7d, 0x6b, 0x32, 0x68, 0xb6, 0x6c, 0x64, 0x9e, 0xde, 0x80, 0x88, 0xce, 0x08, 0xbf, 0x0b, 0xe5, 0x8e, 0x4f, 0x1d, 0xfb,
- /* (2^ 96)P */ 0xaf, 0xe8, 0x85, 0xbf, 0x7f, 0x37, 0x8d, 0x66, 0x7c, 0xd5, 0xd3, 0x96, 0xa5, 0x81, 0x67, 0x95, 0xff, 0x48, 0xde, 0xde, 0xd7, 0x7a, 0x46, 0x34, 0xb1, 0x13, 0x70, 0x29, 0xed, 0x87, 0x90, 0xb0, 0x40, 0x2c, 0xa6, 0x43, 0x6e, 0xb6, 0xbc, 0x48, 0x8a, 0xc1, 0xae, 0xb8, 0xd4, 0xe2, 0xc0, 0x32, 0xb2, 0xa6, 0x2a, 0x8f, 0xb5, 0x16, 0x9e, 0xc3,
- /* (2^ 97)P */ 0xff, 0x4d, 0xd2, 0xd6, 0x74, 0xef, 0x2c, 0x96, 0xc1, 0x11, 0xa8, 0xb8, 0xfe, 0x94, 0x87, 0x3e, 0xa0, 0xfb, 0x57, 0xa3, 0xfc, 0x7a, 0x7e, 0x6a, 0x59, 0x6c, 0x54, 0xbb, 0xbb, 0xa2, 0x25, 0x38, 0x1b, 0xdf, 0x5d, 0x7b, 0x94, 0x14, 0xde, 0x07, 0x6e, 0xd3, 0xab, 0x02, 0x26, 0x74, 0x16, 0x12, 0xdf, 0x2e, 0x2a, 0xa7, 0xb0, 0xe8, 0x29, 0xc0,
- /* (2^ 98)P */ 0x6a, 0x38, 0x0b, 0xd3, 0xba, 0x45, 0x23, 0xe0, 0x04, 0x3b, 0x83, 0x39, 0xc5, 0x11, 0xe6, 0xcf, 0x39, 0x0a, 0xb3, 0xb0, 0x3b, 0x27, 0x29, 0x63, 0x1c, 0xf3, 0x00, 0xe6, 0xd2, 0x55, 0x21, 0x1f, 0x84, 0x97, 0x9f, 0x01, 0x49, 0x43, 0x30, 0x5f, 0xe0, 0x1d, 0x24, 0xc4, 0x4e, 0xa0, 0x2b, 0x0b, 0x12, 0x55, 0xc3, 0x27, 0xae, 0x08, 0x83, 0x7c,
- /* (2^ 99)P */ 0x5d, 0x1a, 0xb7, 0xa9, 0xf5, 0xfd, 0xec, 0xad, 0xb7, 0x87, 0x02, 0x5f, 0x0d, 0x30, 0x4d, 0xe2, 0x65, 0x87, 0xa4, 0x41, 0x45, 0x1d, 0x67, 0xe0, 0x30, 0x5c, 0x13, 0x87, 0xf6, 0x2e, 0x08, 0xc1, 0xc7, 0x12, 0x45, 0xc8, 0x9b, 0xad, 0xb8, 0xd5, 0x57, 0xbb, 0x5c, 0x48, 0x3a, 0xe1, 0x91, 0x5e, 0xf6, 0x4d, 0x8a, 0x63, 0x75, 0x69, 0x0c, 0x01,
- /* (2^100)P */ 0x8f, 0x53, 0x2d, 0xa0, 0x71, 0x3d, 0xfc, 0x45, 0x10, 0x96, 0xcf, 0x56, 0xf9, 0xbb, 0x40, 0x3c, 0x86, 0x52, 0x76, 0xbe, 0x84, 0xf9, 0xa6, 0x9d, 0x3d, 0x27, 0xbe, 0xb4, 0x00, 0x49, 0x94, 0xf5, 0x5d, 0xe1, 0x62, 0x85, 0x66, 0xe5, 0xb8, 0x20, 0x2c, 0x09, 0x7d, 0x9d, 0x3d, 0x6e, 0x74, 0x39, 0xab, 0xad, 0xa0, 0x90, 0x97, 0x5f, 0xbb, 0xa7,
- /* (2^101)P */ 0xdb, 0x2d, 0x99, 0x08, 0x16, 0x46, 0x83, 0x7a, 0xa8, 0xea, 0x3d, 0x28, 0x5b, 0x49, 0xfc, 0xb9, 0x6d, 0x00, 0x9e, 0x54, 0x4f, 0x47, 0x64, 0x9b, 0x58, 0x4d, 0x07, 0x0c, 0x6f, 0x29, 0x56, 0x0b, 0x00, 0x14, 0x85, 0x96, 0x41, 0x04, 0xb9, 0x5c, 0xa4, 0xf6, 0x16, 0x73, 0x6a, 0xc7, 0x62, 0x0c, 0x65, 0x2f, 0x93, 0xbf, 0xf7, 0xb9, 0xb7, 0xf1,
- /* (2^102)P */ 0xeb, 0x6d, 0xb3, 0x46, 0x32, 0xd2, 0xcb, 0x08, 0x94, 0x14, 0xbf, 0x3f, 0xc5, 0xcb, 0x5f, 0x9f, 0x8a, 0x89, 0x0c, 0x1b, 0x45, 0xad, 0x4c, 0x50, 0xb4, 0xe1, 0xa0, 0x6b, 0x11, 0x92, 0xaf, 0x1f, 0x00, 0xcc, 0xe5, 0x13, 0x7e, 0xe4, 0x2e, 0xa0, 0x57, 0xf3, 0xa7, 0x84, 0x79, 0x7a, 0xc2, 0xb7, 0xb7, 0xfc, 0x5d, 0xa5, 0xa9, 0x64, 0xcc, 0xd8,
- /* (2^103)P */ 0xa9, 0xc4, 0x12, 0x8b, 0x34, 0x78, 0x3e, 0x38, 0xfd, 0x3f, 0x87, 0xfa, 0x88, 0x94, 0xd5, 0xd9, 0x7f, 0xeb, 0x58, 0xff, 0xb9, 0x45, 0xdb, 0xa1, 0xed, 0x22, 0x28, 0x1d, 0x00, 0x6d, 0x79, 0x85, 0x7a, 0x75, 0x5d, 0xf0, 0xb1, 0x9e, 0x47, 0x28, 0x8c, 0x62, 0xdf, 0xfb, 0x4c, 0x7b, 0xc5, 0x1a, 0x42, 0x95, 0xef, 0x9a, 0xb7, 0x27, 0x7e, 0xda,
- /* (2^104)P */ 0xca, 0xd5, 0xc0, 0x17, 0xa1, 0x66, 0x79, 0x9c, 0x2a, 0xb7, 0x0a, 0xfe, 0x62, 0xe4, 0x26, 0x78, 0x90, 0xa7, 0xcb, 0xb0, 0x4f, 0x6d, 0xf9, 0x8f, 0xf7, 0x7d, 0xac, 0xb8, 0x78, 0x1f, 0x41, 0xea, 0x97, 0x1e, 0x62, 0x97, 0x43, 0x80, 0x58, 0x80, 0xb6, 0x69, 0x7d, 0xee, 0x16, 0xd2, 0xa1, 0x81, 0xd7, 0xb1, 0x27, 0x03, 0x48, 0xda, 0xab, 0xec,
- /* (2^105)P */ 0x5b, 0xed, 0x40, 0x8e, 0x8c, 0xc1, 0x66, 0x90, 0x7f, 0x0c, 0xb2, 0xfc, 0xbd, 0x16, 0xac, 0x7d, 0x4c, 0x6a, 0xf9, 0xae, 0xe7, 0x4e, 0x11, 0x12, 0xe9, 0xbe, 0x17, 0x09, 0xc6, 0xc1, 0x5e, 0xb5, 0x7b, 0x50, 0x5c, 0x27, 0xfb, 0x80, 0xab, 0x01, 0xfa, 0x5b, 0x9b, 0x75, 0x16, 0x6e, 0xb2, 0x5c, 0x8c, 0x2f, 0xa5, 0x6a, 0x1a, 0x68, 0xa6, 0x90,
- /* (2^106)P */ 0x75, 0xfe, 0xb6, 0x96, 0x96, 0x87, 0x4c, 0x12, 0xa9, 0xd1, 0xd8, 0x03, 0xa3, 0xc1, 0x15, 0x96, 0xe8, 0xa0, 0x75, 0x82, 0xa0, 0x6d, 0xea, 0x54, 0xdc, 0x5f, 0x0d, 0x7e, 0xf6, 0x70, 0xb5, 0xdc, 0x7a, 0xf6, 0xc4, 0xd4, 0x21, 0x49, 0xf5, 0xd4, 0x14, 0x6d, 0x48, 0x1d, 0x7c, 0x99, 0x42, 0xdf, 0x78, 0x6b, 0x9d, 0xb9, 0x30, 0x3c, 0xd0, 0x29,
- /* (2^107)P */ 0x85, 0xd6, 0xd8, 0xf3, 0x91, 0x74, 0xdd, 0xbd, 0x72, 0x96, 0x10, 0xe4, 0x76, 0x02, 0x5a, 0x72, 0x67, 0xd3, 0x17, 0x72, 0x14, 0x9a, 0x20, 0x5b, 0x0f, 0x8d, 0xed, 0x6d, 0x4e, 0xe3, 0xd9, 0x82, 0xc2, 0x99, 0xee, 0x39, 0x61, 0x69, 0x8a, 0x24, 0x01, 0x92, 0x15, 0xe7, 0xfc, 0xf9, 0x4d, 0xac, 0xf1, 0x30, 0x49, 0x01, 0x0b, 0x6e, 0x0f, 0x20,
- /* (2^108)P */ 0xd8, 0x25, 0x94, 0x5e, 0x43, 0x29, 0xf5, 0xcc, 0xe8, 0xe3, 0x55, 0x41, 0x3c, 0x9f, 0x58, 0x5b, 0x00, 0xeb, 0xc5, 0xdf, 0xcf, 0xfb, 0xfd, 0x6e, 0x92, 0xec, 0x99, 0x30, 0xd6, 0x05, 0xdd, 0x80, 0x7a, 0x5d, 0x6d, 0x16, 0x85, 0xd8, 0x9d, 0x43, 0x65, 0xd8, 0x2c, 0x33, 0x2f, 0x5c, 0x41, 0xea, 0xb7, 0x95, 0x77, 0xf2, 0x9e, 0x59, 0x09, 0xe8,
- /* (2^109)P */ 0x00, 0xa0, 0x03, 0x80, 0xcd, 0x60, 0xe5, 0x17, 0xd4, 0x15, 0x99, 0xdd, 0x4f, 0xbf, 0x66, 0xb8, 0xc0, 0xf5, 0xf9, 0xfc, 0x6d, 0x42, 0x18, 0x34, 0x1c, 0x7d, 0x5b, 0xb5, 0x09, 0xd0, 0x99, 0x57, 0x81, 0x0b, 0x62, 0xb3, 0xa2, 0xf9, 0x0b, 0xae, 0x95, 0xb8, 0xc2, 0x3b, 0x0d, 0x5b, 0x00, 0xf1, 0xed, 0xbc, 0x05, 0x9d, 0x61, 0xbc, 0x73, 0x9d,
- /* (2^110)P */ 0xd4, 0xdb, 0x29, 0xe5, 0x85, 0xe9, 0xc6, 0x89, 0x2a, 0xa8, 0x54, 0xab, 0xb3, 0x7f, 0x88, 0xc0, 0x4d, 0xe0, 0xd1, 0x74, 0x6e, 0xa3, 0xa7, 0x39, 0xd5, 0xcc, 0xa1, 0x8a, 0xcb, 0x5b, 0x34, 0xad, 0x92, 0xb4, 0xd8, 0xd5, 0x17, 0xf6, 0x77, 0x18, 0x9e, 0xaf, 0x45, 0x3b, 0x03, 0xe2, 0xf8, 0x52, 0x60, 0xdc, 0x15, 0x20, 0x9e, 0xdf, 0xd8, 0x5d,
- /* (2^111)P */ 0x02, 0xc1, 0xac, 0x1a, 0x15, 0x8e, 0x6c, 0xf5, 0x1e, 0x1e, 0xba, 0x7e, 0xc2, 0xda, 0x7d, 0x02, 0xda, 0x43, 0xae, 0x04, 0x70, 0x28, 0x54, 0x78, 0x94, 0xf5, 0x4f, 0x07, 0x84, 0x8f, 0xed, 0xaa, 0xc0, 0xb8, 0xcd, 0x7f, 0x7e, 0x33, 0xa3, 0xbe, 0x21, 0x29, 0xc8, 0x56, 0x34, 0xc0, 0x76, 0x87, 0x8f, 0xc7, 0x73, 0x58, 0x90, 0x16, 0xfc, 0xd6,
- /* (2^112)P */ 0xb8, 0x3f, 0xe1, 0xdf, 0x3a, 0x91, 0x25, 0x0c, 0xf6, 0x47, 0xa8, 0x89, 0xc4, 0xc6, 0x61, 0xec, 0x86, 0x2c, 0xfd, 0xbe, 0xa4, 0x6f, 0xc2, 0xd4, 0x46, 0x19, 0x70, 0x5d, 0x09, 0x02, 0x86, 0xd3, 0x4b, 0xe9, 0x16, 0x7b, 0xf0, 0x0d, 0x6c, 0xff, 0x91, 0x05, 0xbf, 0x55, 0xb4, 0x00, 0x8d, 0xe5, 0x6d, 0x68, 0x20, 0x90, 0x12, 0xb5, 0x5c, 0x32,
- /* (2^113)P */ 0x80, 0x45, 0xc8, 0x51, 0x87, 0xba, 0x1c, 0x5c, 0xcf, 0x5f, 0x4b, 0x3c, 0x9e, 0x3b, 0x36, 0xd2, 0x26, 0xa2, 0x7f, 0xab, 0xb7, 0xbf, 0xda, 0x68, 0x23, 0x8f, 0xc3, 0xa0, 0xfd, 0xad, 0xf1, 0x56, 0x3b, 0xd0, 0x75, 0x2b, 0x44, 0x61, 0xd8, 0xf4, 0xf1, 0x05, 0x49, 0x53, 0x07, 0xee, 0x47, 0xef, 0xc0, 0x7c, 0x9d, 0xe4, 0x15, 0x88, 0xc5, 0x47,
- /* (2^114)P */ 0x2d, 0xb5, 0x09, 0x80, 0xb9, 0xd3, 0xd8, 0xfe, 0x4c, 0xd2, 0xa6, 0x6e, 0xd3, 0x75, 0xcf, 0xb0, 0x99, 0xcb, 0x50, 0x8d, 0xe9, 0x67, 0x9b, 0x20, 0xe8, 0x57, 0xd8, 0x14, 0x85, 0x73, 0x6a, 0x74, 0xe0, 0x99, 0xf0, 0x6b, 0x6e, 0x59, 0x30, 0x31, 0x33, 0x96, 0x5f, 0xa1, 0x0c, 0x1b, 0xf4, 0xca, 0x09, 0xe1, 0x9b, 0xb5, 0xcf, 0x6d, 0x0b, 0xeb,
- /* (2^115)P */ 0x1a, 0xde, 0x50, 0xa9, 0xac, 0x3e, 0x10, 0x43, 0x4f, 0x82, 0x4f, 0xc0, 0xfe, 0x3f, 0x33, 0xd2, 0x64, 0x86, 0x50, 0xa9, 0x51, 0x76, 0x5e, 0x50, 0x97, 0x6c, 0x73, 0x8d, 0x77, 0xa3, 0x75, 0x03, 0xbc, 0xc9, 0xfb, 0x50, 0xd9, 0x6d, 0x16, 0xad, 0x5d, 0x32, 0x3d, 0xac, 0x44, 0xdf, 0x51, 0xf7, 0x19, 0xd4, 0x0b, 0x57, 0x78, 0x0b, 0x81, 0x4e,
- /* (2^116)P */ 0x32, 0x24, 0xf1, 0x6c, 0x55, 0x62, 0x1d, 0xb3, 0x1f, 0xda, 0xfa, 0x6a, 0x8f, 0x98, 0x01, 0x16, 0xde, 0x44, 0x50, 0x0d, 0x2e, 0x6c, 0x0b, 0xa2, 0xd3, 0x74, 0x0e, 0xa9, 0xbf, 0x8d, 0xa9, 0xc8, 0xc8, 0x2f, 0x62, 0xc1, 0x35, 0x5e, 0xfd, 0x3a, 0xb3, 0x83, 0x2d, 0xee, 0x4e, 0xfd, 0x5c, 0x5e, 0xad, 0x85, 0xa5, 0x10, 0xb5, 0x4f, 0x34, 0xa7,
- /* (2^117)P */ 0xd1, 0x58, 0x6f, 0xe6, 0x54, 0x2c, 0xc2, 0xcd, 0xcf, 0x83, 0xdc, 0x88, 0x0c, 0xb9, 0xb4, 0x62, 0x18, 0x89, 0x65, 0x28, 0xe9, 0x72, 0x4b, 0x65, 0xcf, 0xd6, 0x90, 0x88, 0xd7, 0x76, 0x17, 0x4f, 0x74, 0x64, 0x1e, 0xcb, 0xd3, 0xf5, 0x4b, 0xaa, 0x2e, 0x4d, 0x2d, 0x7c, 0x13, 0x1f, 0xfd, 0xd9, 0x60, 0x83, 0x7e, 0xda, 0x64, 0x1c, 0xdc, 0x9f,
- /* (2^118)P */ 0xad, 0xef, 0xac, 0x1b, 0xc1, 0x30, 0x5a, 0x15, 0xc9, 0x1f, 0xac, 0xf1, 0xca, 0x44, 0x95, 0x95, 0xea, 0xf2, 0x22, 0xe7, 0x8d, 0x25, 0xf0, 0xff, 0xd8, 0x71, 0xf7, 0xf8, 0x8f, 0x8f, 0xcd, 0xf4, 0x1e, 0xfe, 0x6c, 0x68, 0x04, 0xb8, 0x78, 0xa1, 0x5f, 0xa6, 0x5d, 0x5e, 0xf9, 0x8d, 0xea, 0x80, 0xcb, 0xf3, 0x17, 0xa6, 0x03, 0xc9, 0x38, 0xd5,
- /* (2^119)P */ 0x79, 0x14, 0x31, 0xc3, 0x38, 0xe5, 0xaa, 0xbf, 0x17, 0xa3, 0x04, 0x4e, 0x80, 0x59, 0x9c, 0x9f, 0x19, 0x39, 0xe4, 0x2d, 0x23, 0x54, 0x4a, 0x7f, 0x3e, 0xf3, 0xd9, 0xc7, 0xba, 0x6c, 0x8f, 0x6b, 0xfa, 0x34, 0xb5, 0x23, 0x17, 0x1d, 0xff, 0x1d, 0xea, 0x1f, 0xd7, 0xba, 0x61, 0xb2, 0xe0, 0x38, 0x6a, 0xe9, 0xcf, 0x48, 0x5d, 0x6a, 0x10, 0x9c,
- /* (2^120)P */ 0xc8, 0xbb, 0x13, 0x1c, 0x3f, 0x3c, 0x34, 0xfd, 0xac, 0x37, 0x52, 0x44, 0x25, 0xa8, 0xde, 0x1d, 0x63, 0xf4, 0x81, 0x9a, 0xbe, 0x0b, 0x74, 0x2e, 0xc8, 0x51, 0x16, 0xd3, 0xac, 0x4a, 0xaf, 0xe2, 0x5f, 0x3a, 0x89, 0x32, 0xd1, 0x9b, 0x7c, 0x90, 0x0d, 0xac, 0xdc, 0x8b, 0x73, 0x45, 0x45, 0x97, 0xb1, 0x90, 0x2c, 0x1b, 0x31, 0xca, 0xb1, 0x94,
- /* (2^121)P */ 0x07, 0x28, 0xdd, 0x10, 0x14, 0xa5, 0x95, 0x7e, 0xf3, 0xe4, 0xd4, 0x14, 0xb4, 0x7e, 0x76, 0xdb, 0x42, 0xd6, 0x94, 0x3e, 0xeb, 0x44, 0x64, 0x88, 0x0d, 0xec, 0xc1, 0x21, 0xf0, 0x79, 0xe0, 0x83, 0x67, 0x55, 0x53, 0xc2, 0xf6, 0xc5, 0xc5, 0x89, 0x39, 0xe8, 0x42, 0xd0, 0x17, 0xbd, 0xff, 0x35, 0x59, 0x0e, 0xc3, 0x06, 0x86, 0xd4, 0x64, 0xcf,
- /* (2^122)P */ 0x91, 0xa8, 0xdb, 0x57, 0x9b, 0xe2, 0x96, 0x31, 0x10, 0x6e, 0xd7, 0x9a, 0x97, 0xb3, 0xab, 0xb5, 0x15, 0x66, 0xbe, 0xcc, 0x6d, 0x9a, 0xac, 0x06, 0xb3, 0x0d, 0xaa, 0x4b, 0x9c, 0x96, 0x79, 0x6c, 0x34, 0xee, 0x9e, 0x53, 0x4d, 0x6e, 0xbd, 0x88, 0x02, 0xbf, 0x50, 0x54, 0x12, 0x5d, 0x01, 0x02, 0x46, 0xc6, 0x74, 0x02, 0x8c, 0x24, 0xae, 0xb1,
- /* (2^123)P */ 0xf5, 0x22, 0xea, 0xac, 0x7d, 0x9c, 0x33, 0x8a, 0xa5, 0x36, 0x79, 0x6a, 0x4f, 0xa4, 0xdc, 0xa5, 0x73, 0x64, 0xc4, 0x6f, 0x43, 0x02, 0x3b, 0x94, 0x66, 0xd2, 0x4b, 0x4f, 0xf6, 0x45, 0x33, 0x5d, 0x10, 0x33, 0x18, 0x1e, 0xa3, 0xfc, 0xf7, 0xd2, 0xb8, 0xc8, 0xa7, 0xe0, 0x76, 0x8a, 0xcd, 0xff, 0x4f, 0x99, 0x34, 0x47, 0x84, 0x91, 0x96, 0x9f,
- /* (2^124)P */ 0x8a, 0x48, 0x3b, 0x48, 0x4a, 0xbc, 0xac, 0xe2, 0x80, 0xd6, 0xd2, 0x35, 0xde, 0xd0, 0x56, 0x42, 0x33, 0xb3, 0x56, 0x5a, 0xcd, 0xb8, 0x3d, 0xb5, 0x25, 0xc1, 0xed, 0xff, 0x87, 0x0b, 0x79, 0xff, 0xf2, 0x62, 0xe1, 0x76, 0xc6, 0xa2, 0x0f, 0xa8, 0x9b, 0x0d, 0xcc, 0x3f, 0x3d, 0x35, 0x27, 0x8d, 0x0b, 0x74, 0xb0, 0xc3, 0x78, 0x8c, 0xcc, 0xc8,
- /* (2^125)P */ 0xfc, 0x9a, 0x0c, 0xa8, 0x49, 0x42, 0xb8, 0xdf, 0xcf, 0xb3, 0x19, 0xa6, 0x64, 0x57, 0xfe, 0xe8, 0xf8, 0xa6, 0x4b, 0x86, 0xa1, 0xd5, 0x83, 0x7f, 0x14, 0x99, 0x18, 0x0c, 0x7d, 0x5b, 0xf7, 0x3d, 0xf9, 0x4b, 0x79, 0xb1, 0x86, 0x30, 0xb4, 0x5e, 0x6a, 0xe8, 0x9d, 0xfa, 0x8a, 0x41, 0xc4, 0x30, 0xfc, 0x56, 0x74, 0x14, 0x42, 0xc8, 0x96, 0x0e,
- /* (2^126)P */ 0xdf, 0x66, 0xec, 0xbc, 0x44, 0xdb, 0x19, 0xce, 0xd4, 0xb5, 0x49, 0x40, 0x07, 0x49, 0xe0, 0x3a, 0x61, 0x10, 0xfb, 0x7d, 0xba, 0xb1, 0xe0, 0x28, 0x5b, 0x99, 0x59, 0x96, 0xa2, 0xee, 0xe0, 0x23, 0x37, 0x39, 0x1f, 0xe6, 0x57, 0x9f, 0xf8, 0xf8, 0xdc, 0x74, 0xf6, 0x8f, 0x4f, 0x5e, 0x51, 0xa4, 0x12, 0xac, 0xbe, 0xe4, 0xf3, 0xd1, 0xf0, 0x24,
- /* (2^127)P */ 0x1e, 0x3e, 0x9a, 0x5f, 0xdf, 0x9f, 0xd6, 0x4e, 0x8a, 0x28, 0xc3, 0xcd, 0x96, 0x9d, 0x57, 0xc7, 0x61, 0x81, 0x90, 0xff, 0xae, 0xb1, 0x4f, 0xc2, 0x96, 0x8b, 0x1a, 0x18, 0xf4, 0x50, 0xcb, 0x31, 0xe1, 0x57, 0xf4, 0x90, 0xa8, 0xea, 0xac, 0xe7, 0x61, 0x98, 0xb6, 0x15, 0xc1, 0x7b, 0x29, 0xa4, 0xc3, 0x18, 0xef, 0xb9, 0xd8, 0xdf, 0xf6, 0xac,
- /* (2^128)P */ 0xca, 0xa8, 0x6c, 0xf1, 0xb4, 0xca, 0xfe, 0x31, 0xee, 0x48, 0x38, 0x8b, 0x0e, 0xbb, 0x7a, 0x30, 0xaa, 0xf9, 0xee, 0x27, 0x53, 0x24, 0xdc, 0x2e, 0x15, 0xa6, 0x48, 0x8f, 0xa0, 0x7e, 0xf1, 0xdc, 0x93, 0x87, 0x39, 0xeb, 0x7f, 0x38, 0x92, 0x92, 0x4c, 0x29, 0xe9, 0x57, 0xd8, 0x59, 0xfc, 0xe9, 0x9c, 0x44, 0xc0, 0x65, 0xcf, 0xac, 0x4b, 0xdc,
- /* (2^129)P */ 0xa3, 0xd0, 0x37, 0x8f, 0x86, 0x2f, 0xc6, 0x47, 0x55, 0x46, 0x65, 0x26, 0x4b, 0x91, 0xe2, 0x18, 0x5c, 0x4f, 0x23, 0xc1, 0x37, 0x29, 0xb9, 0xc1, 0x27, 0xc5, 0x3c, 0xbf, 0x7e, 0x23, 0xdb, 0x73, 0x99, 0xbd, 0x1b, 0xb2, 0x31, 0x68, 0x3a, 0xad, 0xb7, 0xb0, 0x10, 0xc5, 0xe5, 0x11, 0x51, 0xba, 0xa7, 0x60, 0x66, 0x54, 0xf0, 0x08, 0xd7, 0x69,
- /* (2^130)P */ 0x89, 0x41, 0x79, 0xcc, 0xeb, 0x0a, 0xf5, 0x4b, 0xa3, 0x4c, 0xce, 0x52, 0xb0, 0xa7, 0xe4, 0x41, 0x75, 0x7d, 0x04, 0xbb, 0x09, 0x4c, 0x50, 0x9f, 0xdf, 0xea, 0x74, 0x61, 0x02, 0xad, 0xb4, 0x9d, 0xb7, 0x05, 0xb9, 0xea, 0xeb, 0x91, 0x35, 0xe7, 0x49, 0xea, 0xd3, 0x4f, 0x3c, 0x60, 0x21, 0x7a, 0xde, 0xc7, 0xe2, 0x5a, 0xee, 0x8e, 0x93, 0xc7,
- /* (2^131)P */ 0x00, 0xe8, 0xed, 0xd0, 0xb3, 0x0d, 0xaf, 0xb2, 0xde, 0x2c, 0xf6, 0x00, 0xe2, 0xea, 0x6d, 0xf8, 0x0e, 0xd9, 0x67, 0x59, 0xa9, 0x50, 0xbb, 0x17, 0x8f, 0xff, 0xb1, 0x9f, 0x17, 0xb6, 0xf2, 0xb5, 0xba, 0x80, 0xf7, 0x0f, 0xba, 0xd5, 0x09, 0x43, 0xaa, 0x4e, 0x3a, 0x67, 0x6a, 0x89, 0x9b, 0x18, 0x65, 0x35, 0xf8, 0x3a, 0x49, 0x91, 0x30, 0x51,
- /* (2^132)P */ 0x8d, 0x25, 0xe9, 0x0e, 0x7d, 0x50, 0x76, 0xe4, 0x58, 0x7e, 0xb9, 0x33, 0xe6, 0x65, 0x90, 0xc2, 0x50, 0x9d, 0x50, 0x2e, 0x11, 0xad, 0xd5, 0x43, 0x52, 0x32, 0x41, 0x4f, 0x7b, 0xb6, 0xa0, 0xec, 0x81, 0x75, 0x36, 0x7c, 0x77, 0x85, 0x59, 0x70, 0xe4, 0xf9, 0xef, 0x66, 0x8d, 0x35, 0xc8, 0x2a, 0x6e, 0x5b, 0xc6, 0x0d, 0x0b, 0x29, 0x60, 0x68,
- /* (2^133)P */ 0xf8, 0xce, 0xb0, 0x3a, 0x56, 0x7d, 0x51, 0x9a, 0x25, 0x73, 0xea, 0xdd, 0xe4, 0xe0, 0x0e, 0xf0, 0x07, 0xc0, 0x31, 0x00, 0x73, 0x35, 0xd0, 0x39, 0xc4, 0x9b, 0xb7, 0x95, 0xe0, 0x62, 0x70, 0x36, 0x0b, 0xcb, 0xa0, 0x42, 0xde, 0x51, 0xcf, 0x41, 0xe0, 0xb8, 0xb4, 0xc0, 0xe5, 0x46, 0x99, 0x9f, 0x02, 0x7f, 0x14, 0x8c, 0xc1, 0x4e, 0xef, 0xe8,
- /* (2^134)P */ 0x10, 0x01, 0x57, 0x0a, 0xbe, 0x8b, 0x18, 0xc8, 0xca, 0x00, 0x28, 0x77, 0x4a, 0x9a, 0xc7, 0x55, 0x2a, 0xcc, 0x0c, 0x7b, 0xb9, 0xe9, 0xc8, 0x97, 0x7c, 0x02, 0xe3, 0x09, 0x2f, 0x62, 0x30, 0xb8, 0x40, 0x09, 0x65, 0xe9, 0x55, 0x63, 0xb5, 0x07, 0xca, 0x9f, 0x00, 0xdf, 0x9d, 0x5c, 0xc7, 0xee, 0x57, 0xa5, 0x90, 0x15, 0x1e, 0x22, 0xa0, 0x12,
- /* (2^135)P */ 0x71, 0x2d, 0xc9, 0xef, 0x27, 0xb9, 0xd8, 0x12, 0x43, 0x6b, 0xa8, 0xce, 0x3b, 0x6d, 0x6e, 0x91, 0x43, 0x23, 0xbc, 0x32, 0xb3, 0xbf, 0xe1, 0xc7, 0x39, 0xcf, 0x7c, 0x42, 0x4c, 0xb1, 0x30, 0xe2, 0xdd, 0x69, 0x06, 0xe5, 0xea, 0xf0, 0x2a, 0x16, 0x50, 0x71, 0xca, 0x92, 0xdf, 0xc1, 0xcc, 0xec, 0xe6, 0x54, 0x07, 0xf3, 0x18, 0x8d, 0xd8, 0x29,
- /* (2^136)P */ 0x98, 0x51, 0x48, 0x8f, 0xfa, 0x2e, 0x5e, 0x67, 0xb0, 0xc6, 0x17, 0x12, 0xb6, 0x7d, 0xc9, 0xad, 0x81, 0x11, 0xad, 0x0c, 0x1c, 0x2d, 0x45, 0xdf, 0xac, 0x66, 0xbd, 0x08, 0x6f, 0x7c, 0xc7, 0x06, 0x6e, 0x19, 0x08, 0x39, 0x64, 0xd7, 0xe4, 0xd1, 0x11, 0x5f, 0x1c, 0xf4, 0x67, 0xc3, 0x88, 0x6a, 0xe6, 0x07, 0xa3, 0x83, 0xd7, 0xfd, 0x2a, 0xf9,
- /* (2^137)P */ 0x87, 0xed, 0xeb, 0xd9, 0xdf, 0xff, 0x43, 0x8b, 0xaa, 0x20, 0x58, 0xb0, 0xb4, 0x6b, 0x14, 0xb8, 0x02, 0xc5, 0x40, 0x20, 0x22, 0xbb, 0xf7, 0xb4, 0xf3, 0x05, 0x1e, 0x4d, 0x94, 0xff, 0xe3, 0xc5, 0x22, 0x82, 0xfe, 0xaf, 0x90, 0x42, 0x98, 0x6b, 0x76, 0x8b, 0x3e, 0x89, 0x3f, 0x42, 0x2a, 0xa7, 0x26, 0x00, 0xda, 0x5c, 0xa2, 0x2b, 0xec, 0xdd,
- /* (2^138)P */ 0x5c, 0x21, 0x16, 0x0d, 0x46, 0xb8, 0xd0, 0xa7, 0x88, 0xe7, 0x25, 0xcb, 0x3e, 0x50, 0x73, 0x61, 0xe7, 0xaf, 0x5a, 0x3f, 0x47, 0x8b, 0x3d, 0x97, 0x79, 0x2c, 0xe6, 0x6d, 0x95, 0x74, 0x65, 0x70, 0x36, 0xfd, 0xd1, 0x9e, 0x13, 0x18, 0x63, 0xb1, 0x2d, 0x0b, 0xb5, 0x36, 0x3e, 0xe7, 0x35, 0x42, 0x3b, 0xe6, 0x1f, 0x4d, 0x9d, 0x59, 0xa2, 0x43,
- /* (2^139)P */ 0x8c, 0x0c, 0x7c, 0x24, 0x9e, 0xe0, 0xf8, 0x05, 0x1c, 0x9e, 0x1f, 0x31, 0xc0, 0x70, 0xb3, 0xfb, 0x4e, 0xf8, 0x0a, 0x57, 0xb7, 0x49, 0xb5, 0x73, 0xa1, 0x5f, 0x9b, 0x6a, 0x07, 0x6c, 0x87, 0x71, 0x87, 0xd4, 0xbe, 0x98, 0x1e, 0x98, 0xee, 0x52, 0xc1, 0x7b, 0x95, 0x0f, 0x28, 0x32, 0x36, 0x28, 0xd0, 0x3a, 0x0f, 0x7d, 0x2a, 0xa9, 0x62, 0xb9,
- /* (2^140)P */ 0x97, 0xe6, 0x18, 0x77, 0xf9, 0x34, 0xac, 0xbc, 0xe0, 0x62, 0x9f, 0x42, 0xde, 0xbd, 0x2f, 0xf7, 0x1f, 0xb7, 0x14, 0x52, 0x8a, 0x79, 0xb2, 0x3f, 0xd2, 0x95, 0x71, 0x01, 0xe8, 0xaf, 0x8c, 0xa4, 0xa4, 0xa7, 0x27, 0xf3, 0x5c, 0xdf, 0x3e, 0x57, 0x7a, 0xf1, 0x76, 0x49, 0xe6, 0x42, 0x3f, 0x8f, 0x1e, 0x63, 0x4a, 0x65, 0xb5, 0x41, 0xf5, 0x02,
- /* (2^141)P */ 0x72, 0x85, 0xc5, 0x0b, 0xe1, 0x47, 0x64, 0x02, 0xc5, 0x4d, 0x81, 0x69, 0xb2, 0xcf, 0x0f, 0x6c, 0xd4, 0x6d, 0xd0, 0xc7, 0xb4, 0x1c, 0xd0, 0x32, 0x59, 0x89, 0xe2, 0xe0, 0x96, 0x8b, 0x12, 0x98, 0xbf, 0x63, 0x7a, 0x4c, 0x76, 0x7e, 0x58, 0x17, 0x8f, 0x5b, 0x0a, 0x59, 0x65, 0x75, 0xbc, 0x61, 0x1f, 0xbe, 0xc5, 0x6e, 0x0a, 0x57, 0x52, 0x70,
- /* (2^142)P */ 0x92, 0x1c, 0x77, 0xbb, 0x62, 0x02, 0x6c, 0x25, 0x9c, 0x66, 0x07, 0x83, 0xab, 0xcc, 0x80, 0x5d, 0xd2, 0x76, 0x0c, 0xa4, 0xc5, 0xb4, 0x8a, 0x68, 0x23, 0x31, 0x32, 0x29, 0x8a, 0x47, 0x92, 0x12, 0x80, 0xb3, 0xfa, 0x18, 0xe4, 0x8d, 0xc0, 0x4d, 0xfe, 0x97, 0x5f, 0x72, 0x41, 0xb5, 0x5c, 0x7a, 0xbd, 0xf0, 0xcf, 0x5e, 0x97, 0xaa, 0x64, 0x32,
- /* (2^143)P */ 0x35, 0x3f, 0x75, 0xc1, 0x7a, 0x75, 0x7e, 0xa9, 0xc6, 0x0b, 0x4e, 0x32, 0x62, 0xec, 0xe3, 0x5c, 0xfb, 0x01, 0x43, 0xb6, 0xd4, 0x5b, 0x75, 0xd2, 0xee, 0x7f, 0x5d, 0x23, 0x2b, 0xb3, 0x54, 0x34, 0x4c, 0xd3, 0xb4, 0x32, 0x84, 0x81, 0xb5, 0x09, 0x76, 0x19, 0xda, 0x58, 0xda, 0x7c, 0xdb, 0x2e, 0xdd, 0x4c, 0x8e, 0xdd, 0x5d, 0x89, 0x10, 0x10,
- /* (2^144)P */ 0x57, 0x25, 0x6a, 0x08, 0x37, 0x92, 0xa8, 0xdf, 0x24, 0xef, 0x8f, 0x33, 0x34, 0x52, 0xa4, 0x4c, 0xf0, 0x77, 0x9f, 0x69, 0x77, 0xd5, 0x8f, 0xd2, 0x9a, 0xb3, 0xb6, 0x1d, 0x2d, 0xa6, 0xf7, 0x1f, 0xda, 0xd7, 0xcb, 0x75, 0x11, 0xc3, 0x6b, 0xc0, 0x38, 0xb1, 0xd5, 0x2d, 0x96, 0x84, 0x16, 0xfa, 0x26, 0xb9, 0xcc, 0x3f, 0x16, 0x47, 0x23, 0x74,
- /* (2^145)P */ 0x9b, 0x61, 0x2a, 0x1c, 0xdd, 0x39, 0xa5, 0xfa, 0x1c, 0x7d, 0x63, 0x50, 0xca, 0xe6, 0x9d, 0xfa, 0xb7, 0xc4, 0x4c, 0x6a, 0x97, 0x5f, 0x36, 0x4e, 0x47, 0xdd, 0x17, 0xf7, 0xf9, 0x19, 0xce, 0x75, 0x17, 0xad, 0xce, 0x2a, 0xf3, 0xfe, 0x27, 0x8f, 0x3e, 0x48, 0xc0, 0x60, 0x87, 0x24, 0x19, 0xae, 0x59, 0xe4, 0x5a, 0x00, 0x2a, 0xba, 0xa2, 0x1f,
- /* (2^146)P */ 0x26, 0x88, 0x42, 0x60, 0x9f, 0x6e, 0x2c, 0x7c, 0x39, 0x0f, 0x47, 0x6a, 0x0e, 0x02, 0xbb, 0x4b, 0x34, 0x29, 0x55, 0x18, 0x36, 0xcf, 0x3b, 0x47, 0xf1, 0x2e, 0xfc, 0x6e, 0x94, 0xff, 0xe8, 0x6b, 0x06, 0xd2, 0xba, 0x77, 0x5e, 0x60, 0xd7, 0x19, 0xef, 0x02, 0x9d, 0x3a, 0xc2, 0xb7, 0xa9, 0xd8, 0x57, 0xee, 0x7e, 0x2b, 0xf2, 0x6d, 0x28, 0xda,
- /* (2^147)P */ 0xdf, 0xd9, 0x92, 0x11, 0x98, 0x23, 0xe2, 0x45, 0x2f, 0x74, 0x70, 0xee, 0x0e, 0x55, 0x65, 0x79, 0x86, 0x38, 0x17, 0x92, 0x85, 0x87, 0x99, 0x50, 0xd9, 0x7c, 0xdb, 0xa1, 0x10, 0xec, 0x30, 0xb7, 0x40, 0xa3, 0x23, 0x9b, 0x0e, 0x27, 0x49, 0x29, 0x03, 0x94, 0xff, 0x53, 0xdc, 0xd7, 0xed, 0x49, 0xa9, 0x5a, 0x3b, 0xee, 0xd7, 0xc7, 0x65, 0xaf,
- /* (2^148)P */ 0xa0, 0xbd, 0xbe, 0x03, 0xee, 0x0c, 0xbe, 0x32, 0x00, 0x7b, 0x52, 0xcb, 0x92, 0x29, 0xbf, 0xa0, 0xc6, 0xd9, 0xd2, 0xd6, 0x15, 0xe8, 0x3a, 0x75, 0x61, 0x65, 0x56, 0xae, 0xad, 0x3c, 0x2a, 0x64, 0x14, 0x3f, 0x8e, 0xc1, 0x2d, 0x0c, 0x8d, 0x20, 0xdb, 0x58, 0x4b, 0xe5, 0x40, 0x15, 0x4b, 0xdc, 0xa8, 0xbd, 0xef, 0x08, 0xa7, 0xd1, 0xf4, 0xb0,
- /* (2^149)P */ 0xa9, 0x0f, 0x05, 0x94, 0x66, 0xac, 0x1f, 0x65, 0x3f, 0xe1, 0xb8, 0xe1, 0x34, 0x5e, 0x1d, 0x8f, 0xe3, 0x93, 0x03, 0x15, 0xff, 0xb6, 0x65, 0xb6, 0x6e, 0xc0, 0x2f, 0xd4, 0x2e, 0xb9, 0x2c, 0x13, 0x3c, 0x99, 0x1c, 0xb5, 0x87, 0xba, 0x79, 0xcb, 0xf0, 0x18, 0x06, 0x86, 0x04, 0x14, 0x25, 0x09, 0xcd, 0x1c, 0x14, 0xda, 0x35, 0xd0, 0x38, 0x3b,
- /* (2^150)P */ 0x1b, 0x04, 0xa3, 0x27, 0xb4, 0xd3, 0x37, 0x48, 0x1e, 0x8f, 0x69, 0xd3, 0x5a, 0x2f, 0x20, 0x02, 0x36, 0xbe, 0x06, 0x7b, 0x6b, 0x6c, 0x12, 0x5b, 0x80, 0x74, 0x44, 0xe6, 0xf8, 0xf5, 0x95, 0x59, 0x29, 0xab, 0x51, 0x47, 0x83, 0x28, 0xe0, 0xad, 0xde, 0xaa, 0xd3, 0xb1, 0x1a, 0xcb, 0xa3, 0xcd, 0x8b, 0x6a, 0xb1, 0xa7, 0x0a, 0xd1, 0xf9, 0xbe,
- /* (2^151)P */ 0xce, 0x2f, 0x85, 0xca, 0x74, 0x6d, 0x49, 0xb8, 0xce, 0x80, 0x44, 0xe0, 0xda, 0x5b, 0xcf, 0x2f, 0x79, 0x74, 0xfe, 0xb4, 0x2c, 0x99, 0x20, 0x6e, 0x09, 0x04, 0xfb, 0x6d, 0x57, 0x5b, 0x95, 0x0c, 0x45, 0xda, 0x4f, 0x7f, 0x63, 0xcc, 0x85, 0x5a, 0x67, 0x50, 0x68, 0x71, 0xb4, 0x67, 0xb1, 0x2e, 0xc1, 0x1c, 0xdc, 0xff, 0x2a, 0x7c, 0x10, 0x5e,
- /* (2^152)P */ 0xa6, 0xde, 0xf3, 0xd4, 0x22, 0x30, 0x24, 0x9e, 0x0b, 0x30, 0x54, 0x59, 0x7e, 0xa2, 0xeb, 0x89, 0x54, 0x65, 0x3e, 0x40, 0xd1, 0xde, 0xe6, 0xee, 0x4d, 0xbf, 0x5e, 0x40, 0x1d, 0xee, 0x4f, 0x68, 0xd9, 0xa7, 0x2f, 0xb3, 0x64, 0xb3, 0xf5, 0xc8, 0xd3, 0xaa, 0x70, 0x70, 0x3d, 0xef, 0xd3, 0x95, 0x54, 0xdb, 0x3e, 0x94, 0x95, 0x92, 0x1f, 0x45,
- /* (2^153)P */ 0x22, 0x80, 0x1d, 0x9d, 0x96, 0xa5, 0x78, 0x6f, 0xe0, 0x1e, 0x1b, 0x66, 0x42, 0xc8, 0xae, 0x9e, 0x46, 0x45, 0x08, 0x41, 0xdf, 0x80, 0xae, 0x6f, 0xdb, 0x15, 0x5a, 0x21, 0x31, 0x7a, 0xd0, 0xf2, 0x54, 0x15, 0x88, 0xd3, 0x0f, 0x7f, 0x14, 0x5a, 0x14, 0x97, 0xab, 0xf4, 0x58, 0x6a, 0x9f, 0xea, 0x74, 0xe5, 0x6b, 0x90, 0x59, 0x2b, 0x48, 0xd9,
- /* (2^154)P */ 0x12, 0x24, 0x04, 0xf5, 0x50, 0xc2, 0x8c, 0xb0, 0x7c, 0x46, 0x98, 0xd5, 0x24, 0xad, 0xf6, 0x72, 0xdc, 0x82, 0x1a, 0x60, 0xc1, 0xeb, 0x48, 0xef, 0x7f, 0x6e, 0xe6, 0xcc, 0xdb, 0x7b, 0xae, 0xbe, 0x5e, 0x1e, 0x5c, 0xe6, 0x0a, 0x70, 0xdf, 0xa4, 0xa3, 0x85, 0x1b, 0x1b, 0x7f, 0x72, 0xb9, 0x96, 0x6f, 0xdc, 0x03, 0x76, 0x66, 0xfb, 0xa0, 0x33,
- /* (2^155)P */ 0x37, 0x40, 0xbb, 0xbc, 0x68, 0x58, 0x86, 0xca, 0xbb, 0xa5, 0x24, 0x76, 0x3d, 0x48, 0xd1, 0xad, 0xb4, 0xa8, 0xcf, 0xc3, 0xb6, 0xa8, 0xba, 0x1a, 0x3a, 0xbe, 0x33, 0x75, 0x04, 0x5c, 0x13, 0x8c, 0x0d, 0x70, 0x8d, 0xa6, 0x4e, 0x2a, 0xeb, 0x17, 0x3c, 0x22, 0xdd, 0x3e, 0x96, 0x40, 0x11, 0x9e, 0x4e, 0xae, 0x3d, 0xf8, 0x91, 0xd7, 0x50, 0xc8,
- /* (2^156)P */ 0xd8, 0xca, 0xde, 0x19, 0xcf, 0x00, 0xe4, 0x73, 0x18, 0x7f, 0x9b, 0x9f, 0xf4, 0x5b, 0x49, 0x49, 0x99, 0xdc, 0xa4, 0x46, 0x21, 0xb5, 0xd7, 0x3e, 0xb7, 0x47, 0x1b, 0xa9, 0x9f, 0x4c, 0x69, 0x7d, 0xec, 0x33, 0xd6, 0x1c, 0x51, 0x7f, 0x47, 0x74, 0x7a, 0x6c, 0xf3, 0xd2, 0x2e, 0xbf, 0xdf, 0x6c, 0x9e, 0x77, 0x3b, 0x34, 0xf6, 0x73, 0x80, 0xed,
- /* (2^157)P */ 0x16, 0xfb, 0x16, 0xc3, 0xc2, 0x83, 0xe4, 0xf4, 0x03, 0x7f, 0x52, 0xb0, 0x67, 0x51, 0x7b, 0x24, 0x5a, 0x51, 0xd3, 0xb6, 0x4e, 0x59, 0x76, 0xcd, 0x08, 0x7b, 0x1d, 0x7a, 0x9c, 0x65, 0xae, 0xce, 0xaa, 0xd2, 0x1c, 0x85, 0x66, 0x68, 0x06, 0x15, 0xa8, 0x06, 0xe6, 0x16, 0x37, 0xf4, 0x49, 0x9e, 0x0f, 0x50, 0x37, 0xb1, 0xb2, 0x93, 0x70, 0x43,
- /* (2^158)P */ 0x18, 0x3a, 0x16, 0xe5, 0x8d, 0xc8, 0x35, 0xd6, 0x7b, 0x09, 0xec, 0x61, 0x5f, 0x5c, 0x2a, 0x19, 0x96, 0x2e, 0xc3, 0xfd, 0xab, 0xe6, 0x23, 0xae, 0xab, 0xc5, 0xcb, 0xb9, 0x7b, 0x2d, 0x34, 0x51, 0xb9, 0x41, 0x9e, 0x7d, 0xca, 0xda, 0x25, 0x45, 0x14, 0xb0, 0xc7, 0x4d, 0x26, 0x2b, 0xfe, 0x43, 0xb0, 0x21, 0x5e, 0xfa, 0xdc, 0x7c, 0xf9, 0x5a,
- /* (2^159)P */ 0x94, 0xad, 0x42, 0x17, 0xf5, 0xcd, 0x1c, 0x0d, 0xf6, 0x41, 0xd2, 0x55, 0xbb, 0x50, 0xf1, 0xc6, 0xbc, 0xa6, 0xc5, 0x3a, 0xfd, 0x9b, 0x75, 0x3e, 0xf6, 0x1a, 0xa7, 0xb2, 0x6e, 0x64, 0x12, 0xdc, 0x3c, 0xe5, 0xf6, 0xfc, 0x3b, 0xfa, 0x43, 0x81, 0xd4, 0xa5, 0xee, 0xf5, 0x9c, 0x47, 0x2f, 0xd0, 0x9c, 0xde, 0xa1, 0x48, 0x91, 0x9a, 0x34, 0xc1,
- /* (2^160)P */ 0x37, 0x1b, 0xb3, 0x88, 0xc9, 0x98, 0x4e, 0xfb, 0x84, 0x4f, 0x2b, 0x0a, 0xb6, 0x8f, 0x35, 0x15, 0xcd, 0x61, 0x7a, 0x5f, 0x5c, 0xa0, 0xca, 0x23, 0xa0, 0x93, 0x1f, 0xcc, 0x3c, 0x39, 0x3a, 0x24, 0xa7, 0x49, 0xad, 0x8d, 0x59, 0xcc, 0x94, 0x5a, 0x16, 0xf5, 0x70, 0xe8, 0x52, 0x1e, 0xee, 0x20, 0x30, 0x17, 0x7e, 0xf0, 0x4c, 0x93, 0x06, 0x5a,
- /* (2^161)P */ 0x81, 0xba, 0x3b, 0xd7, 0x3e, 0xb4, 0x32, 0x3a, 0x22, 0x39, 0x2a, 0xfc, 0x19, 0xd9, 0xd2, 0xf6, 0xc5, 0x79, 0x6c, 0x0e, 0xde, 0xda, 0x01, 0xff, 0x52, 0xfb, 0xb6, 0x95, 0x4e, 0x7a, 0x10, 0xb8, 0x06, 0x86, 0x3c, 0xcd, 0x56, 0xd6, 0x15, 0xbf, 0x6e, 0x3e, 0x4f, 0x35, 0x5e, 0xca, 0xbc, 0xa5, 0x95, 0xa2, 0xdf, 0x2d, 0x1d, 0xaf, 0x59, 0xf9,
- /* (2^162)P */ 0x69, 0xe5, 0xe2, 0xfa, 0xc9, 0x7f, 0xdd, 0x09, 0xf5, 0x6b, 0x4e, 0x2e, 0xbe, 0xb4, 0xbf, 0x3e, 0xb2, 0xf2, 0x81, 0x30, 0xe1, 0x07, 0xa8, 0x0d, 0x2b, 0xd2, 0x5a, 0x55, 0xbe, 0x4b, 0x86, 0x5d, 0xb0, 0x5e, 0x7c, 0x8f, 0xc1, 0x3c, 0x81, 0x4c, 0xf7, 0x6d, 0x7d, 0xe6, 0x4f, 0x8a, 0x85, 0xc2, 0x2f, 0x28, 0xef, 0x8c, 0x69, 0xc2, 0xc2, 0x1a,
- /* (2^163)P */ 0xd9, 0xe4, 0x0e, 0x1e, 0xc2, 0xf7, 0x2f, 0x9f, 0xa1, 0x40, 0xfe, 0x46, 0x16, 0xaf, 0x2e, 0xd1, 0xec, 0x15, 0x9b, 0x61, 0x92, 0xce, 0xfc, 0x10, 0x43, 0x1d, 0x00, 0xf6, 0xbe, 0x20, 0x80, 0x80, 0x6f, 0x3c, 0x16, 0x94, 0x59, 0xba, 0x03, 0x53, 0x6e, 0xb6, 0xdd, 0x25, 0x7b, 0x86, 0xbf, 0x96, 0xf4, 0x2f, 0xa1, 0x96, 0x8d, 0xf9, 0xb3, 0x29,
- /* (2^164)P */ 0x3b, 0x04, 0x60, 0x6e, 0xce, 0xab, 0xd2, 0x63, 0x18, 0x53, 0x88, 0x16, 0x4a, 0x6a, 0xab, 0x72, 0x03, 0x68, 0xa5, 0xd4, 0x0d, 0xb2, 0x82, 0x81, 0x1f, 0x2b, 0x5c, 0x75, 0xe8, 0xd2, 0x1d, 0x7f, 0xe7, 0x1b, 0x35, 0x02, 0xde, 0xec, 0xbd, 0xcb, 0xc7, 0x01, 0xd3, 0x95, 0x61, 0xfe, 0xb2, 0x7a, 0x66, 0x09, 0x4c, 0x6d, 0xfd, 0x39, 0xf7, 0x52,
- /* (2^165)P */ 0x42, 0xc1, 0x5f, 0xf8, 0x35, 0x52, 0xc1, 0xfe, 0xc5, 0x11, 0x80, 0x1c, 0x11, 0x46, 0x31, 0x11, 0xbe, 0xd0, 0xc4, 0xb6, 0x07, 0x13, 0x38, 0xa0, 0x8d, 0x65, 0xf0, 0x56, 0x9e, 0x16, 0xbf, 0x9d, 0xcd, 0x51, 0x34, 0xf9, 0x08, 0x48, 0x7b, 0x76, 0x0c, 0x7b, 0x30, 0x07, 0xa8, 0x76, 0xaf, 0xa3, 0x29, 0x38, 0xb0, 0x58, 0xde, 0x72, 0x4b, 0x45,
- /* (2^166)P */ 0xd4, 0x16, 0xa7, 0xc0, 0xb4, 0x9f, 0xdf, 0x1a, 0x37, 0xc8, 0x35, 0xed, 0xc5, 0x85, 0x74, 0x64, 0x09, 0x22, 0xef, 0xe9, 0x0c, 0xaf, 0x12, 0x4c, 0x9e, 0xf8, 0x47, 0x56, 0xe0, 0x7f, 0x4e, 0x24, 0x6b, 0x0c, 0xe7, 0xad, 0xc6, 0x47, 0x1d, 0xa4, 0x0d, 0x86, 0x89, 0x65, 0xe8, 0x5f, 0x71, 0xc7, 0xe9, 0xcd, 0xec, 0x6c, 0x62, 0xc7, 0xe3, 0xb3,
- /* (2^167)P */ 0xb5, 0xea, 0x86, 0xe3, 0x15, 0x18, 0x3f, 0x6d, 0x7b, 0x05, 0x95, 0x15, 0x53, 0x26, 0x1c, 0xeb, 0xbe, 0x7e, 0x16, 0x42, 0x4b, 0xa2, 0x3d, 0xdd, 0x0e, 0xff, 0xba, 0x67, 0xb5, 0xae, 0x7a, 0x17, 0xde, 0x23, 0xad, 0x14, 0xcc, 0xd7, 0xaf, 0x57, 0x01, 0xe0, 0xdd, 0x48, 0xdd, 0xd7, 0xe3, 0xdf, 0xe9, 0x2d, 0xda, 0x67, 0xa4, 0x9f, 0x29, 0x04,
- /* (2^168)P */ 0x16, 0x53, 0xe6, 0x9c, 0x4e, 0xe5, 0x1e, 0x70, 0x81, 0x25, 0x02, 0x9b, 0x47, 0x6d, 0xd2, 0x08, 0x73, 0xbe, 0x0a, 0xf1, 0x7b, 0xeb, 0x24, 0xeb, 0x38, 0x23, 0x5c, 0xb6, 0x3e, 0xce, 0x1e, 0xe3, 0xbc, 0x82, 0x35, 0x1f, 0xaf, 0x3a, 0x3a, 0xe5, 0x4e, 0xc1, 0xca, 0xbf, 0x47, 0xb4, 0xbb, 0xbc, 0x5f, 0xea, 0xc6, 0xca, 0xf3, 0xa0, 0xa2, 0x73,
- /* (2^169)P */ 0xef, 0xa4, 0x7a, 0x4e, 0xe4, 0xc7, 0xb6, 0x43, 0x2e, 0xa5, 0xe4, 0xa5, 0xba, 0x1e, 0xa5, 0xfe, 0x9e, 0xce, 0xa9, 0x80, 0x04, 0xcb, 0x4f, 0xd8, 0x74, 0x05, 0x48, 0xfa, 0x99, 0x11, 0x5d, 0x97, 0x3b, 0x07, 0x0d, 0xdd, 0xe6, 0xb1, 0x74, 0x87, 0x1a, 0xd3, 0x26, 0xb7, 0x8f, 0xe1, 0x63, 0x3d, 0xec, 0x53, 0x93, 0xb0, 0x81, 0x78, 0x34, 0xa4,
- /* (2^170)P */ 0xe1, 0xe7, 0xd4, 0x58, 0x9d, 0x0e, 0x8b, 0x65, 0x66, 0x37, 0x16, 0x48, 0x6f, 0xaa, 0x42, 0x37, 0x77, 0xad, 0xb1, 0x56, 0x48, 0xdf, 0x65, 0x36, 0x30, 0xb8, 0x00, 0x12, 0xd8, 0x32, 0x28, 0x7f, 0xc1, 0x71, 0xeb, 0x93, 0x0f, 0x48, 0x04, 0xe1, 0x5a, 0x6a, 0x96, 0xc1, 0xca, 0x89, 0x6d, 0x1b, 0x82, 0x4c, 0x18, 0x6d, 0x55, 0x4b, 0xea, 0xfd,
- /* (2^171)P */ 0x62, 0x1a, 0x53, 0xb4, 0xb1, 0xbe, 0x6f, 0x15, 0x18, 0x88, 0xd4, 0x66, 0x61, 0xc7, 0x12, 0x69, 0x02, 0xbd, 0x03, 0x23, 0x2b, 0xef, 0xf9, 0x54, 0xa4, 0x85, 0xa8, 0xe3, 0xb7, 0xbd, 0xa9, 0xa3, 0xf3, 0x2a, 0xdd, 0xf1, 0xd4, 0x03, 0x0f, 0xa9, 0xa1, 0xd8, 0xa3, 0xcd, 0xb2, 0x71, 0x90, 0x4b, 0x35, 0x62, 0xf2, 0x2f, 0xce, 0x67, 0x1f, 0xaa,
- /* (2^172)P */ 0x9e, 0x1e, 0xcd, 0x43, 0x7e, 0x87, 0x37, 0x94, 0x3a, 0x97, 0x4c, 0x7e, 0xee, 0xc9, 0x37, 0x85, 0xf1, 0xd9, 0x4f, 0xbf, 0xf9, 0x6f, 0x39, 0x9a, 0x39, 0x87, 0x2e, 0x25, 0x84, 0x42, 0xc3, 0x80, 0xcb, 0x07, 0x22, 0xae, 0x30, 0xd5, 0x50, 0xa1, 0x23, 0xcc, 0x31, 0x81, 0x9d, 0xf1, 0x30, 0xd9, 0x2b, 0x73, 0x41, 0x16, 0x50, 0xab, 0x2d, 0xa2,
- /* (2^173)P */ 0xa4, 0x69, 0x4f, 0xa1, 0x4e, 0xb9, 0xbf, 0x14, 0xe8, 0x2b, 0x04, 0x93, 0xb7, 0x6e, 0x9f, 0x7d, 0x73, 0x0a, 0xc5, 0x14, 0xb8, 0xde, 0x8c, 0xc1, 0xfe, 0xc0, 0xa7, 0xa4, 0xcc, 0x42, 0x42, 0x81, 0x15, 0x65, 0x8a, 0x80, 0xb9, 0xde, 0x1f, 0x60, 0x33, 0x0e, 0xcb, 0xfc, 0xe0, 0xdb, 0x83, 0xa1, 0xe5, 0xd0, 0x16, 0x86, 0x2c, 0xe2, 0x87, 0xed,
- /* (2^174)P */ 0x7a, 0xc0, 0xeb, 0x6b, 0xf6, 0x0d, 0x4c, 0x6d, 0x1e, 0xdb, 0xab, 0xe7, 0x19, 0x45, 0xc6, 0xe3, 0xb2, 0x06, 0xbb, 0xbc, 0x70, 0x99, 0x83, 0x33, 0xeb, 0x28, 0xc8, 0x77, 0xf6, 0x4d, 0x01, 0xb7, 0x59, 0xa0, 0xd2, 0xb3, 0x2a, 0x72, 0x30, 0xe7, 0x11, 0x39, 0xb6, 0x41, 0x29, 0x65, 0x5a, 0x14, 0xb9, 0x86, 0x08, 0xe0, 0x7d, 0x32, 0x8c, 0xf0,
- /* (2^175)P */ 0x5c, 0x11, 0x30, 0x9e, 0x05, 0x27, 0xf5, 0x45, 0x0f, 0xb3, 0xc9, 0x75, 0xc3, 0xd7, 0xe1, 0x82, 0x3b, 0x8e, 0x87, 0x23, 0x00, 0x15, 0x19, 0x07, 0xd9, 0x21, 0x53, 0xc7, 0xf1, 0xa3, 0xbf, 0x70, 0x64, 0x15, 0x18, 0xca, 0x23, 0x9e, 0xd3, 0x08, 0xc3, 0x2a, 0x8b, 0xe5, 0x83, 0x04, 0x89, 0x14, 0xfd, 0x28, 0x25, 0x1c, 0xe3, 0x26, 0xa7, 0x22,
- /* (2^176)P */ 0xdc, 0xd4, 0x75, 0x60, 0x99, 0x94, 0xea, 0x09, 0x8e, 0x8a, 0x3c, 0x1b, 0xf9, 0xbd, 0x33, 0x0d, 0x51, 0x3d, 0x12, 0x6f, 0x4e, 0x72, 0xe0, 0x17, 0x20, 0xe9, 0x75, 0xe6, 0x3a, 0xb2, 0x13, 0x83, 0x4e, 0x7a, 0x08, 0x9e, 0xd1, 0x04, 0x5f, 0x6b, 0x42, 0x0b, 0x76, 0x2a, 0x2d, 0x77, 0x53, 0x6c, 0x65, 0x6d, 0x8e, 0x25, 0x3c, 0xb6, 0x8b, 0x69,
- /* (2^177)P */ 0xb9, 0x49, 0x28, 0xd0, 0xdc, 0x6c, 0x8f, 0x4c, 0xc9, 0x14, 0x8a, 0x38, 0xa3, 0xcb, 0xc4, 0x9d, 0x53, 0xcf, 0xe9, 0xe3, 0xcf, 0xe0, 0xb1, 0xf2, 0x1b, 0x4c, 0x7f, 0x83, 0x2a, 0x7a, 0xe9, 0x8b, 0x3b, 0x86, 0x61, 0x30, 0xe9, 0x99, 0xbd, 0xba, 0x19, 0x6e, 0x65, 0x2a, 0x12, 0x3e, 0x9c, 0xa8, 0xaf, 0xc3, 0xcf, 0xf8, 0x1f, 0x77, 0x86, 0xea,
- /* (2^178)P */ 0x30, 0xde, 0xe7, 0xff, 0x54, 0xf7, 0xa2, 0x59, 0xf6, 0x0b, 0xfb, 0x7a, 0xf2, 0x39, 0xf0, 0xdb, 0x39, 0xbc, 0xf0, 0xfa, 0x60, 0xeb, 0x6b, 0x4f, 0x47, 0x17, 0xc8, 0x00, 0x65, 0x6d, 0x25, 0x1c, 0xd0, 0x48, 0x56, 0x53, 0x45, 0x11, 0x30, 0x02, 0x49, 0x20, 0x27, 0xac, 0xf2, 0x4c, 0xac, 0x64, 0x3d, 0x52, 0xb8, 0x89, 0xe0, 0x93, 0x16, 0x0f,
- /* (2^179)P */ 0x84, 0x09, 0xba, 0x40, 0xb2, 0x2f, 0xa3, 0xa8, 0xc2, 0xba, 0x46, 0x33, 0x05, 0x9d, 0x62, 0xad, 0xa1, 0x3c, 0x33, 0xef, 0x0d, 0xeb, 0xf0, 0x77, 0x11, 0x5a, 0xb0, 0x21, 0x9c, 0xdf, 0x55, 0x24, 0x25, 0x35, 0x51, 0x61, 0x92, 0xf0, 0xb1, 0xce, 0xf5, 0xd4, 0x7b, 0x6c, 0x21, 0x9d, 0x56, 0x52, 0xf8, 0xa1, 0x4c, 0xe9, 0x27, 0x55, 0xac, 0x91,
- /* (2^180)P */ 0x03, 0x3e, 0x30, 0xd2, 0x0a, 0xfa, 0x7d, 0x82, 0x3d, 0x1f, 0x8b, 0xcb, 0xb6, 0x04, 0x5c, 0xcc, 0x8b, 0xda, 0xe2, 0x68, 0x74, 0x08, 0x8c, 0x44, 0x83, 0x57, 0x6d, 0x6f, 0x80, 0xb0, 0x7e, 0xa9, 0x82, 0x91, 0x7b, 0x4c, 0x37, 0x97, 0xd1, 0x63, 0xd1, 0xbd, 0x45, 0xe6, 0x8a, 0x86, 0xd6, 0x89, 0x54, 0xfd, 0xd2, 0xb1, 0xd7, 0x54, 0xad, 0xaf,
- /* (2^181)P */ 0x8b, 0x33, 0x62, 0x49, 0x9f, 0x63, 0xf9, 0x87, 0x42, 0x58, 0xbf, 0xb3, 0xe6, 0x68, 0x02, 0x60, 0x5c, 0x76, 0x62, 0xf7, 0x61, 0xd7, 0x36, 0x31, 0xf7, 0x9c, 0xb5, 0xe5, 0x13, 0x6c, 0xea, 0x78, 0xae, 0xcf, 0xde, 0xbf, 0xb6, 0xeb, 0x4f, 0xc8, 0x2a, 0xb4, 0x9a, 0x9f, 0xf3, 0xd1, 0x6a, 0xec, 0x0c, 0xbd, 0x85, 0x98, 0x40, 0x06, 0x1c, 0x2a,
- /* (2^182)P */ 0x74, 0x3b, 0xe7, 0x81, 0xd5, 0xae, 0x54, 0x56, 0x03, 0xe8, 0x97, 0x16, 0x76, 0xcf, 0x24, 0x96, 0x96, 0x5b, 0xcc, 0x09, 0xab, 0x23, 0x6f, 0x54, 0xae, 0x8f, 0xe4, 0x12, 0xcb, 0xfd, 0xbc, 0xac, 0x93, 0x45, 0x3d, 0x68, 0x08, 0x22, 0x59, 0xc6, 0xf0, 0x47, 0x19, 0x8c, 0x79, 0x93, 0x1e, 0x0e, 0x30, 0xb0, 0x94, 0xfb, 0x17, 0x1d, 0x5a, 0x12,
- /* (2^183)P */ 0x85, 0xff, 0x40, 0x18, 0x85, 0xff, 0x44, 0x37, 0x69, 0x23, 0x4d, 0x34, 0xe1, 0xeb, 0xa3, 0x1b, 0x55, 0x40, 0xc1, 0x64, 0xf4, 0xd4, 0x13, 0x0a, 0x9f, 0xb9, 0x19, 0xfc, 0x88, 0x7d, 0xc0, 0x72, 0xcf, 0x69, 0x2f, 0xd2, 0x0c, 0x82, 0x0f, 0xda, 0x08, 0xba, 0x0f, 0xaa, 0x3b, 0xe9, 0xe5, 0x83, 0x7a, 0x06, 0xe8, 0x1b, 0x38, 0x43, 0xc3, 0x54,
- /* (2^184)P */ 0x14, 0xaa, 0xb3, 0x6e, 0xe6, 0x28, 0xee, 0xc5, 0x22, 0x6c, 0x7c, 0xf9, 0xa8, 0x71, 0xcc, 0xfe, 0x68, 0x7e, 0xd3, 0xb8, 0x37, 0x96, 0xca, 0x0b, 0xd9, 0xb6, 0x06, 0xa9, 0xf6, 0x71, 0xe8, 0x31, 0xf7, 0xd8, 0xf1, 0x5d, 0xab, 0xb9, 0xf0, 0x5c, 0x98, 0xcf, 0x22, 0xa2, 0x2a, 0xf6, 0xd0, 0x59, 0xf0, 0x9d, 0xd9, 0x6a, 0x4f, 0x59, 0x57, 0xad,
- /* (2^185)P */ 0xd7, 0x2b, 0x3d, 0x38, 0x4c, 0x2e, 0x23, 0x4d, 0x49, 0xa2, 0x62, 0x62, 0xf9, 0x0f, 0xde, 0x08, 0xf3, 0x86, 0x71, 0xb6, 0xc7, 0xf9, 0x85, 0x9c, 0x33, 0xa1, 0xcf, 0x16, 0xaa, 0x60, 0xb9, 0xb7, 0xea, 0xed, 0x01, 0x1c, 0x59, 0xdb, 0x3f, 0x3f, 0x97, 0x2e, 0xf0, 0x09, 0x9f, 0x10, 0x85, 0x5f, 0x53, 0x39, 0xf3, 0x13, 0x40, 0x56, 0x95, 0xf9,
- /* (2^186)P */ 0xb4, 0xe3, 0xda, 0xc6, 0x1f, 0x78, 0x8e, 0xac, 0xd4, 0x20, 0x1d, 0xa0, 0xbf, 0x4c, 0x09, 0x16, 0xa7, 0x30, 0xb5, 0x8d, 0x9e, 0xa1, 0x5f, 0x6d, 0x52, 0xf4, 0x71, 0xb6, 0x32, 0x2d, 0x21, 0x51, 0xc6, 0xfc, 0x2f, 0x08, 0xf4, 0x13, 0x6c, 0x55, 0xba, 0x72, 0x81, 0x24, 0x49, 0x0e, 0x4f, 0x06, 0x36, 0x39, 0x6a, 0xc5, 0x81, 0xfc, 0xeb, 0xb2,
- /* (2^187)P */ 0x7d, 0x8d, 0xc8, 0x6c, 0xea, 0xb4, 0xb9, 0xe8, 0x40, 0xc9, 0x69, 0xc9, 0x30, 0x05, 0xfd, 0x34, 0x46, 0xfd, 0x94, 0x05, 0x16, 0xf5, 0x4b, 0x13, 0x3d, 0x24, 0x1a, 0xd6, 0x64, 0x2b, 0x9c, 0xe2, 0xa5, 0xd9, 0x98, 0xe0, 0xe8, 0xf4, 0xbc, 0x2c, 0xbd, 0xa2, 0x56, 0xe3, 0x9e, 0x14, 0xdb, 0xbf, 0x05, 0xbf, 0x9a, 0x13, 0x5d, 0xf7, 0x91, 0xa3,
- /* (2^188)P */ 0x8b, 0xcb, 0x27, 0xf3, 0x15, 0x26, 0x05, 0x40, 0x0f, 0xa6, 0x15, 0x13, 0x71, 0x95, 0xa2, 0xc6, 0x38, 0x04, 0x67, 0xf8, 0x9a, 0x83, 0x06, 0xaa, 0x25, 0x36, 0x72, 0x01, 0x6f, 0x74, 0x5f, 0xe5, 0x6e, 0x44, 0x99, 0xce, 0x13, 0xbc, 0x82, 0xc2, 0x0d, 0xa4, 0x98, 0x50, 0x38, 0xf3, 0xa2, 0xc5, 0xe5, 0x24, 0x1f, 0x6f, 0x56, 0x3e, 0x07, 0xb2,
- /* (2^189)P */ 0xbd, 0x0f, 0x32, 0x60, 0x07, 0xb1, 0xd7, 0x0b, 0x11, 0x07, 0x57, 0x02, 0x89, 0xe8, 0x8b, 0xe8, 0x5a, 0x1f, 0xee, 0x54, 0x6b, 0xff, 0xb3, 0x04, 0x07, 0x57, 0x13, 0x0b, 0x94, 0xa8, 0x4d, 0x81, 0xe2, 0x17, 0x16, 0x45, 0xd4, 0x4b, 0xf7, 0x7e, 0x64, 0x66, 0x20, 0xe8, 0x0b, 0x26, 0xfd, 0xa9, 0x8a, 0x47, 0x52, 0x89, 0x14, 0xd0, 0xd1, 0xa1,
- /* (2^190)P */ 0xdc, 0x03, 0xe6, 0x20, 0x44, 0x47, 0x8f, 0x04, 0x16, 0x24, 0x22, 0xc1, 0x55, 0x5c, 0xbe, 0x43, 0xc3, 0x92, 0xc5, 0x54, 0x3d, 0x5d, 0xd1, 0x05, 0x9c, 0xc6, 0x7c, 0xbf, 0x23, 0x84, 0x1a, 0xba, 0x4f, 0x1f, 0xfc, 0xa1, 0xae, 0x1a, 0x64, 0x02, 0x51, 0xf1, 0xcb, 0x7a, 0x20, 0xce, 0xb2, 0x34, 0x3c, 0xca, 0xe0, 0xe4, 0xba, 0x22, 0xd4, 0x7b,
- /* (2^191)P */ 0xca, 0xfd, 0xca, 0xd7, 0xde, 0x61, 0xae, 0xf0, 0x79, 0x0c, 0x20, 0xab, 0xbc, 0x6f, 0x4d, 0x61, 0xf0, 0xc7, 0x9c, 0x8d, 0x4b, 0x52, 0xf3, 0xb9, 0x48, 0x63, 0x0b, 0xb6, 0xd2, 0x25, 0x9a, 0x96, 0x72, 0xc1, 0x6b, 0x0c, 0xb5, 0xfb, 0x71, 0xaa, 0xad, 0x47, 0x5b, 0xe7, 0xc0, 0x0a, 0x55, 0xb2, 0xd4, 0x16, 0x2f, 0xb1, 0x01, 0xfd, 0xce, 0x27,
- /* (2^192)P */ 0x64, 0x11, 0x4b, 0xab, 0x57, 0x09, 0xc6, 0x49, 0x4a, 0x37, 0xc3, 0x36, 0xc4, 0x7b, 0x81, 0x1f, 0x42, 0xed, 0xbb, 0xe0, 0xa0, 0x8d, 0x51, 0xe6, 0xca, 0x8b, 0xb9, 0xcd, 0x99, 0x2d, 0x91, 0x53, 0xa9, 0x47, 0xcb, 0x32, 0xc7, 0xa4, 0x92, 0xec, 0x46, 0x74, 0x44, 0x6d, 0x71, 0x9f, 0x6d, 0x0c, 0x69, 0xa4, 0xf8, 0xbe, 0x9f, 0x7f, 0xa0, 0xd7,
- /* (2^193)P */ 0x5f, 0x33, 0xb6, 0x91, 0xc8, 0xa5, 0x3f, 0x5d, 0x7f, 0x38, 0x6e, 0x74, 0x20, 0x4a, 0xd6, 0x2b, 0x98, 0x2a, 0x41, 0x4b, 0x83, 0x64, 0x0b, 0x92, 0x7a, 0x06, 0x1e, 0xc6, 0x2c, 0xf6, 0xe4, 0x91, 0xe5, 0xb1, 0x2e, 0x6e, 0x4e, 0xa8, 0xc8, 0x14, 0x32, 0x57, 0x44, 0x1c, 0xe4, 0xb9, 0x7f, 0x54, 0x51, 0x08, 0x81, 0xaa, 0x4e, 0xce, 0xa1, 0x5d,
- /* (2^194)P */ 0x5c, 0xd5, 0x9b, 0x5e, 0x7c, 0xb5, 0xb1, 0x52, 0x73, 0x00, 0x41, 0x56, 0x79, 0x08, 0x7e, 0x07, 0x28, 0x06, 0xa6, 0xfb, 0x7f, 0x69, 0xbd, 0x7a, 0x3c, 0xae, 0x9f, 0x39, 0xbb, 0x54, 0xa2, 0x79, 0xb9, 0x0e, 0x7f, 0xbb, 0xe0, 0xe6, 0xb7, 0x27, 0x64, 0x38, 0x45, 0xdb, 0x84, 0xe4, 0x61, 0x72, 0x3f, 0xe2, 0x24, 0xfe, 0x7a, 0x31, 0x9a, 0xc9,
- /* (2^195)P */ 0xa1, 0xd2, 0xa4, 0xee, 0x24, 0x96, 0xe5, 0x5b, 0x79, 0x78, 0x3c, 0x7b, 0x82, 0x3b, 0x8b, 0x58, 0x0b, 0xa3, 0x63, 0x2d, 0xbc, 0x75, 0x46, 0xe8, 0x83, 0x1a, 0xc0, 0x2a, 0x92, 0x61, 0xa8, 0x75, 0x37, 0x3c, 0xbf, 0x0f, 0xef, 0x8f, 0x6c, 0x97, 0x75, 0x10, 0x05, 0x7a, 0xde, 0x23, 0xe8, 0x2a, 0x35, 0xeb, 0x41, 0x64, 0x7d, 0xcf, 0xe0, 0x52,
- /* (2^196)P */ 0x4a, 0xd0, 0x49, 0x93, 0xae, 0xf3, 0x24, 0x8c, 0xe1, 0x09, 0x98, 0x45, 0xd8, 0xb9, 0xfe, 0x8e, 0x8c, 0xa8, 0x2c, 0xc9, 0x9f, 0xce, 0x01, 0xdc, 0x38, 0x11, 0xab, 0x85, 0xb9, 0xe8, 0x00, 0x51, 0xfd, 0x82, 0xe1, 0x9b, 0x4e, 0xfc, 0xb5, 0x2a, 0x0f, 0x8b, 0xda, 0x4e, 0x02, 0xca, 0xcc, 0xe3, 0x91, 0xc4, 0xe0, 0xcf, 0x7b, 0xd6, 0xe6, 0x6a,
- /* (2^197)P */ 0xfe, 0x11, 0xd7, 0xaa, 0xe3, 0x0c, 0x52, 0x2e, 0x04, 0xe0, 0xe0, 0x61, 0xc8, 0x05, 0xd7, 0x31, 0x4c, 0xc3, 0x9b, 0x2d, 0xce, 0x59, 0xbe, 0x12, 0xb7, 0x30, 0x21, 0xfc, 0x81, 0xb8, 0x5e, 0x57, 0x73, 0xd0, 0xad, 0x8e, 0x9e, 0xe4, 0xeb, 0xcd, 0xcf, 0xd2, 0x0f, 0x01, 0x35, 0x16, 0xed, 0x7a, 0x43, 0x8e, 0x42, 0xdc, 0xea, 0x4c, 0xa8, 0x7c,
- /* (2^198)P */ 0x37, 0x26, 0xcc, 0x76, 0x0b, 0xe5, 0x76, 0xdd, 0x3e, 0x19, 0x3c, 0xc4, 0x6c, 0x7f, 0xd0, 0x03, 0xc1, 0xb8, 0x59, 0x82, 0xca, 0x36, 0xc1, 0xe4, 0xc8, 0xb2, 0x83, 0x69, 0x9c, 0xc5, 0x9d, 0x12, 0x82, 0x1c, 0xea, 0xb2, 0x84, 0x9f, 0xf3, 0x52, 0x6b, 0xbb, 0xd8, 0x81, 0x56, 0x83, 0x04, 0x66, 0x05, 0x22, 0x49, 0x37, 0x93, 0xb1, 0xfd, 0xd5,
- /* (2^199)P */ 0xaf, 0x96, 0xbf, 0x03, 0xbe, 0xe6, 0x5d, 0x78, 0x19, 0xba, 0x37, 0x46, 0x0a, 0x2b, 0x52, 0x7c, 0xd8, 0x51, 0x9e, 0x3d, 0x29, 0x42, 0xdb, 0x0e, 0x31, 0x20, 0x94, 0xf8, 0x43, 0x9a, 0x2d, 0x22, 0xd3, 0xe3, 0xa1, 0x79, 0x68, 0xfb, 0x2d, 0x7e, 0xd6, 0x79, 0xda, 0x0b, 0xc6, 0x5b, 0x76, 0x68, 0xf0, 0xfe, 0x72, 0x59, 0xbb, 0xa1, 0x9c, 0x74,
- /* (2^200)P */ 0x0a, 0xd9, 0xec, 0xc5, 0xbd, 0xf0, 0xda, 0xcf, 0x82, 0xab, 0x46, 0xc5, 0x32, 0x13, 0xdc, 0x5b, 0xac, 0xc3, 0x53, 0x9a, 0x7f, 0xef, 0xa5, 0x40, 0x5a, 0x1f, 0xc1, 0x12, 0x91, 0x54, 0x83, 0x6a, 0xb0, 0x9a, 0x85, 0x4d, 0xbf, 0x36, 0x8e, 0xd3, 0xa2, 0x2b, 0xe5, 0xd6, 0xc6, 0xe1, 0x58, 0x5b, 0x82, 0x9b, 0xc8, 0xf2, 0x03, 0xba, 0xf5, 0x92,
- /* (2^201)P */ 0xfb, 0x21, 0x7e, 0xde, 0xe7, 0xb4, 0xc0, 0x56, 0x86, 0x3a, 0x5b, 0x78, 0xf8, 0xf0, 0xf4, 0xe7, 0x5c, 0x00, 0xd2, 0xd7, 0xd6, 0xf8, 0x75, 0x5e, 0x0f, 0x3e, 0xd1, 0x4b, 0x77, 0xd8, 0xad, 0xb0, 0xc9, 0x8b, 0x59, 0x7d, 0x30, 0x76, 0x64, 0x7a, 0x76, 0xd9, 0x51, 0x69, 0xfc, 0xbd, 0x8e, 0xb5, 0x55, 0xe0, 0xd2, 0x07, 0x15, 0xa9, 0xf7, 0xa4,
- /* (2^202)P */ 0xaa, 0x2d, 0x2f, 0x2b, 0x3c, 0x15, 0xdd, 0xcd, 0xe9, 0x28, 0x82, 0x4f, 0xa2, 0xaa, 0x31, 0x48, 0xcc, 0xfa, 0x07, 0x73, 0x8a, 0x34, 0x74, 0x0d, 0xab, 0x1a, 0xca, 0xd2, 0xbf, 0x3a, 0xdb, 0x1a, 0x5f, 0x50, 0x62, 0xf4, 0x6b, 0x83, 0x38, 0x43, 0x96, 0xee, 0x6b, 0x39, 0x1e, 0xf0, 0x17, 0x80, 0x1e, 0x9b, 0xed, 0x2b, 0x2f, 0xcc, 0x65, 0xf7,
- /* (2^203)P */ 0x03, 0xb3, 0x23, 0x9c, 0x0d, 0xd1, 0xeb, 0x7e, 0x34, 0x17, 0x8a, 0x4c, 0xde, 0x54, 0x39, 0xc4, 0x11, 0x82, 0xd3, 0xa4, 0x00, 0x32, 0x95, 0x9c, 0xa6, 0x64, 0x76, 0x6e, 0xd6, 0x53, 0x27, 0xb4, 0x6a, 0x14, 0x8c, 0x54, 0xf6, 0x58, 0x9e, 0x22, 0x4a, 0x55, 0x18, 0x77, 0xd0, 0x08, 0x6b, 0x19, 0x8a, 0xb5, 0xe7, 0x19, 0xb8, 0x60, 0x92, 0xb1,
- /* (2^204)P */ 0x66, 0xec, 0xf3, 0x12, 0xde, 0x67, 0x7f, 0xd4, 0x5b, 0xf6, 0x70, 0x64, 0x0a, 0xb5, 0xc2, 0xf9, 0xb3, 0x64, 0xab, 0x56, 0x46, 0xc7, 0x93, 0xc2, 0x8b, 0x2d, 0xd0, 0xd6, 0x39, 0x3b, 0x1f, 0xcd, 0xb3, 0xac, 0xcc, 0x2c, 0x27, 0x6a, 0xbc, 0xb3, 0x4b, 0xa8, 0x3c, 0x69, 0x20, 0xe2, 0x18, 0x35, 0x17, 0xe1, 0x8a, 0xd3, 0x11, 0x74, 0xaa, 0x4d,
- /* (2^205)P */ 0x96, 0xc4, 0x16, 0x7e, 0xfd, 0xf5, 0xd0, 0x7d, 0x1f, 0x32, 0x1b, 0xdb, 0xa6, 0xfd, 0x51, 0x75, 0x4d, 0xd7, 0x00, 0xe5, 0x7f, 0x58, 0x5b, 0xeb, 0x4b, 0x6a, 0x78, 0xfe, 0xe5, 0xd6, 0x8f, 0x99, 0x17, 0xca, 0x96, 0x45, 0xf7, 0x52, 0xdf, 0x84, 0x06, 0x77, 0xb9, 0x05, 0x63, 0x5d, 0xe9, 0x91, 0xb1, 0x4b, 0x82, 0x5a, 0xdb, 0xd7, 0xca, 0x69,
- /* (2^206)P */ 0x02, 0xd3, 0x38, 0x38, 0x87, 0xea, 0xbd, 0x9f, 0x11, 0xca, 0xf3, 0x21, 0xf1, 0x9b, 0x35, 0x97, 0x98, 0xff, 0x8e, 0x6d, 0x3d, 0xd6, 0xb2, 0xfa, 0x68, 0xcb, 0x7e, 0x62, 0x85, 0xbb, 0xc7, 0x5d, 0xee, 0x32, 0x30, 0x2e, 0x71, 0x96, 0x63, 0x43, 0x98, 0xc4, 0xa7, 0xde, 0x60, 0xb2, 0xd9, 0x43, 0x4a, 0xfa, 0x97, 0x2d, 0x5f, 0x21, 0xd4, 0xfe,
- /* (2^207)P */ 0x3b, 0x20, 0x29, 0x07, 0x07, 0xb5, 0x78, 0xc3, 0xc7, 0xab, 0x56, 0xba, 0x40, 0xde, 0x1d, 0xcf, 0xc3, 0x00, 0x56, 0x21, 0x0c, 0xc8, 0x42, 0xd9, 0x0e, 0xcd, 0x02, 0x7c, 0x07, 0xb9, 0x11, 0xd7, 0x96, 0xaf, 0xff, 0xad, 0xc5, 0xba, 0x30, 0x6d, 0x82, 0x3a, 0xbf, 0xef, 0x7b, 0xf7, 0x0a, 0x74, 0xbd, 0x31, 0x0c, 0xe4, 0xec, 0x1a, 0xe5, 0xc5,
- /* (2^208)P */ 0xcc, 0xf2, 0x28, 0x16, 0x12, 0xbf, 0xef, 0x85, 0xbc, 0xf7, 0xcb, 0x9f, 0xdb, 0xa8, 0xb2, 0x49, 0x53, 0x48, 0xa8, 0x24, 0xa8, 0x68, 0x8d, 0xbb, 0x21, 0x0a, 0x5a, 0xbd, 0xb2, 0x91, 0x61, 0x47, 0xc4, 0x43, 0x08, 0xa6, 0x19, 0xef, 0x8e, 0x88, 0x39, 0xc6, 0x33, 0x30, 0xf3, 0x0e, 0xc5, 0x92, 0x66, 0xd6, 0xfe, 0xc5, 0x12, 0xd9, 0x4c, 0x2d,
- /* (2^209)P */ 0x30, 0x34, 0x07, 0xbf, 0x9c, 0x5a, 0x4e, 0x65, 0xf1, 0x39, 0x35, 0x38, 0xae, 0x7b, 0x55, 0xac, 0x6a, 0x92, 0x24, 0x7e, 0x50, 0xd3, 0xba, 0x78, 0x51, 0xfe, 0x4d, 0x32, 0x05, 0x11, 0xf5, 0x52, 0xf1, 0x31, 0x45, 0x39, 0x98, 0x7b, 0x28, 0x56, 0xc3, 0x5d, 0x4f, 0x07, 0x6f, 0x84, 0xb8, 0x1a, 0x58, 0x0b, 0xc4, 0x7c, 0xc4, 0x8d, 0x32, 0x8e,
- /* (2^210)P */ 0x7e, 0xaf, 0x98, 0xce, 0xc5, 0x2b, 0x9d, 0xf6, 0xfa, 0x2c, 0xb6, 0x2a, 0x5a, 0x1d, 0xc0, 0x24, 0x8d, 0xa4, 0xce, 0xb1, 0x12, 0x01, 0xf9, 0x79, 0xc6, 0x79, 0x38, 0x0c, 0xd4, 0x07, 0xc9, 0xf7, 0x37, 0xa1, 0x0b, 0xfe, 0x72, 0xec, 0x5d, 0xd6, 0xb0, 0x1c, 0x70, 0xbe, 0x70, 0x01, 0x13, 0xe0, 0x86, 0x95, 0xc7, 0x2e, 0x12, 0x3b, 0xe6, 0xa6,
- /* (2^211)P */ 0x24, 0x82, 0x67, 0xe0, 0x14, 0x7b, 0x56, 0x08, 0x38, 0x44, 0xdb, 0xa0, 0x3a, 0x05, 0x47, 0xb2, 0xc0, 0xac, 0xd1, 0xcc, 0x3f, 0x82, 0xb8, 0x8a, 0x88, 0xbc, 0xf5, 0x33, 0xa1, 0x35, 0x0f, 0xf6, 0xe2, 0xef, 0x6c, 0xf7, 0x37, 0x9e, 0xe8, 0x10, 0xca, 0xb0, 0x8e, 0x80, 0x86, 0x00, 0x23, 0xd0, 0x4a, 0x76, 0x9f, 0xf7, 0x2c, 0x52, 0x15, 0x0e,
- /* (2^212)P */ 0x5e, 0x49, 0xe1, 0x2c, 0x9a, 0x01, 0x76, 0xa6, 0xb3, 0x07, 0x5b, 0xa4, 0x07, 0xef, 0x1d, 0xc3, 0x6a, 0xbb, 0x64, 0xbe, 0x71, 0x15, 0x6e, 0x32, 0x31, 0x46, 0x9a, 0x9e, 0x8f, 0x45, 0x73, 0xce, 0x0b, 0x94, 0x1a, 0x52, 0x07, 0xf4, 0x50, 0x30, 0x49, 0x53, 0x50, 0xfb, 0x71, 0x1f, 0x5a, 0x03, 0xa9, 0x76, 0xf2, 0x8f, 0x42, 0xff, 0xed, 0xed,
- /* (2^213)P */ 0xed, 0x08, 0xdb, 0x91, 0x1c, 0xee, 0xa2, 0xb4, 0x47, 0xa2, 0xfa, 0xcb, 0x03, 0xd1, 0xff, 0x8c, 0xad, 0x64, 0x50, 0x61, 0xcd, 0xfc, 0x88, 0xa0, 0x31, 0x95, 0x30, 0xb9, 0x58, 0xdd, 0xd7, 0x43, 0xe4, 0x46, 0xc2, 0x16, 0xd9, 0x72, 0x4a, 0x56, 0x51, 0x70, 0x85, 0xf1, 0xa1, 0x80, 0x40, 0xd5, 0xba, 0x67, 0x81, 0xda, 0xcd, 0x03, 0xea, 0x51,
- /* (2^214)P */ 0x42, 0x50, 0xf0, 0xef, 0x37, 0x61, 0x72, 0x85, 0xe1, 0xf1, 0xff, 0x6f, 0x3d, 0xe8, 0x7b, 0x21, 0x5c, 0xe5, 0x50, 0x03, 0xde, 0x00, 0xc1, 0xf7, 0x3a, 0x55, 0x12, 0x1c, 0x9e, 0x1e, 0xce, 0xd1, 0x2f, 0xaf, 0x05, 0x70, 0x5b, 0x47, 0xf2, 0x04, 0x7a, 0x89, 0xbc, 0x78, 0xa6, 0x65, 0x6c, 0xaa, 0x3c, 0xa2, 0x3c, 0x8b, 0x5c, 0xa9, 0x22, 0x48,
- /* (2^215)P */ 0x7e, 0x8c, 0x8f, 0x2f, 0x60, 0xe3, 0x5a, 0x94, 0xd4, 0xce, 0xdd, 0x9d, 0x83, 0x3b, 0x77, 0x78, 0x43, 0x1d, 0xfd, 0x8f, 0xc8, 0xe8, 0x02, 0x90, 0xab, 0xf6, 0xc9, 0xfc, 0xf1, 0x63, 0xaa, 0x5f, 0x42, 0xf1, 0x78, 0x34, 0x64, 0x16, 0x75, 0x9c, 0x7d, 0xd0, 0xe4, 0x74, 0x5a, 0xa8, 0xfb, 0xcb, 0xac, 0x20, 0xa3, 0xc2, 0xa6, 0x20, 0xf8, 0x1b,
- /* (2^216)P */ 0x00, 0x4f, 0x1e, 0x56, 0xb5, 0x34, 0xb2, 0x87, 0x31, 0xe5, 0xee, 0x8d, 0xf1, 0x41, 0x67, 0xb7, 0x67, 0x3a, 0x54, 0x86, 0x5c, 0xf0, 0x0b, 0x37, 0x2f, 0x1b, 0x92, 0x5d, 0x58, 0x93, 0xdc, 0xd8, 0x58, 0xcc, 0x9e, 0x67, 0xd0, 0x97, 0x3a, 0xaf, 0x49, 0x39, 0x2d, 0x3b, 0xd8, 0x98, 0xfb, 0x76, 0x6b, 0xe7, 0xaf, 0xc3, 0x45, 0x44, 0x53, 0x94,
- /* (2^217)P */ 0x30, 0xbd, 0x90, 0x75, 0xd3, 0xbd, 0x3b, 0x58, 0x27, 0x14, 0x9f, 0x6b, 0xd4, 0x31, 0x99, 0xcd, 0xde, 0x3a, 0x21, 0x1e, 0xb4, 0x02, 0xe4, 0x33, 0x04, 0x02, 0xb0, 0x50, 0x66, 0x68, 0x90, 0xdd, 0x7b, 0x69, 0x31, 0xd9, 0xcf, 0x68, 0x73, 0xf1, 0x60, 0xdd, 0xc8, 0x1d, 0x5d, 0xe3, 0xd6, 0x5b, 0x2a, 0xa4, 0xea, 0xc4, 0x3f, 0x08, 0xcd, 0x9c,
- /* (2^218)P */ 0x6b, 0x1a, 0xbf, 0x55, 0xc1, 0x1b, 0x0c, 0x05, 0x09, 0xdf, 0xf5, 0x5e, 0xa3, 0x77, 0x95, 0xe9, 0xdf, 0x19, 0xdd, 0xc7, 0x94, 0xcb, 0x06, 0x73, 0xd0, 0x88, 0x02, 0x33, 0x94, 0xca, 0x7a, 0x2f, 0x8e, 0x3d, 0x72, 0x61, 0x2d, 0x4d, 0xa6, 0x61, 0x1f, 0x32, 0x5e, 0x87, 0x53, 0x36, 0x11, 0x15, 0x20, 0xb3, 0x5a, 0x57, 0x51, 0x93, 0x20, 0xd8,
- /* (2^219)P */ 0xb7, 0x56, 0xf4, 0xab, 0x7d, 0x0c, 0xfb, 0x99, 0x1a, 0x30, 0x29, 0xb0, 0x75, 0x2a, 0xf8, 0x53, 0x71, 0x23, 0xbd, 0xa7, 0xd8, 0x0a, 0xe2, 0x27, 0x65, 0xe9, 0x74, 0x26, 0x98, 0x4a, 0x69, 0x19, 0xb2, 0x4d, 0x0a, 0x17, 0x98, 0xb2, 0xa9, 0x57, 0x4e, 0xf6, 0x86, 0xc8, 0x01, 0xa4, 0xc6, 0x98, 0xad, 0x5a, 0x90, 0x2c, 0x05, 0x46, 0x64, 0xb7,
- /* (2^220)P */ 0x7b, 0x91, 0xdf, 0xfc, 0xf8, 0x1c, 0x8c, 0x15, 0x9e, 0xf7, 0xd5, 0xa8, 0xe8, 0xe7, 0xe3, 0xa3, 0xb0, 0x04, 0x74, 0xfa, 0x78, 0xfb, 0x26, 0xbf, 0x67, 0x42, 0xf9, 0x8c, 0x9b, 0xb4, 0x69, 0x5b, 0x02, 0x13, 0x6d, 0x09, 0x6c, 0xd6, 0x99, 0x61, 0x7b, 0x89, 0x4a, 0x67, 0x75, 0xa3, 0x98, 0x13, 0x23, 0x1d, 0x18, 0x24, 0x0e, 0xef, 0x41, 0x79,
- /* (2^221)P */ 0x86, 0x33, 0xab, 0x08, 0xcb, 0xbf, 0x1e, 0x76, 0x3c, 0x0b, 0xbd, 0x30, 0xdb, 0xe9, 0xa3, 0x35, 0x87, 0x1b, 0xe9, 0x07, 0x00, 0x66, 0x7f, 0x3b, 0x35, 0x0c, 0x8a, 0x3f, 0x61, 0xbc, 0xe0, 0xae, 0xf6, 0xcc, 0x54, 0xe1, 0x72, 0x36, 0x2d, 0xee, 0x93, 0x24, 0xf8, 0xd7, 0xc5, 0xf9, 0xcb, 0xb0, 0xe5, 0x88, 0x0d, 0x23, 0x4b, 0x76, 0x15, 0xa2,
- /* (2^222)P */ 0x37, 0xdb, 0x83, 0xd5, 0x6d, 0x06, 0x24, 0x37, 0x1b, 0x15, 0x85, 0x15, 0xe2, 0xc0, 0x4e, 0x02, 0xa9, 0x6d, 0x0a, 0x3a, 0x94, 0x4a, 0x6f, 0x49, 0x00, 0x01, 0x72, 0xbb, 0x60, 0x14, 0x35, 0xae, 0xb4, 0xc6, 0x01, 0x0a, 0x00, 0x9e, 0xc3, 0x58, 0xc5, 0xd1, 0x5e, 0x30, 0x73, 0x96, 0x24, 0x85, 0x9d, 0xf0, 0xf9, 0xec, 0x09, 0xd3, 0xe7, 0x70,
- /* (2^223)P */ 0xf3, 0xbd, 0x96, 0x87, 0xe9, 0x71, 0xbd, 0xd6, 0xa2, 0x45, 0xeb, 0x0a, 0xcd, 0x2c, 0xf1, 0x72, 0xa6, 0x31, 0xa9, 0x6f, 0x09, 0xa1, 0x5e, 0xdd, 0xc8, 0x8d, 0x0d, 0xbc, 0x5a, 0x8d, 0xb1, 0x2c, 0x9a, 0xcc, 0x37, 0x74, 0xc2, 0xa9, 0x4e, 0xd6, 0xc0, 0x3c, 0xa0, 0x23, 0xb0, 0xa0, 0x77, 0x14, 0x80, 0x45, 0x71, 0x6a, 0x2d, 0x41, 0xc3, 0x82,
- /* (2^224)P */ 0x37, 0x44, 0xec, 0x8a, 0x3e, 0xc1, 0x0c, 0xa9, 0x12, 0x9c, 0x08, 0x88, 0xcb, 0xd9, 0xf8, 0xba, 0x00, 0xd6, 0xc3, 0xdf, 0xef, 0x7a, 0x44, 0x7e, 0x25, 0x69, 0xc9, 0xc1, 0x46, 0xe5, 0x20, 0x9e, 0xcc, 0x0b, 0x05, 0x3e, 0xf4, 0x78, 0x43, 0x0c, 0xa6, 0x2f, 0xc1, 0xfa, 0x70, 0xb2, 0x3c, 0x31, 0x7a, 0x63, 0x58, 0xab, 0x17, 0xcf, 0x4c, 0x4f,
- /* (2^225)P */ 0x2b, 0x08, 0x31, 0x59, 0x75, 0x8b, 0xec, 0x0a, 0xa9, 0x79, 0x70, 0xdd, 0xf1, 0x11, 0xc3, 0x11, 0x1f, 0xab, 0x37, 0xaa, 0x26, 0xea, 0x53, 0xc4, 0x79, 0xa7, 0x91, 0x00, 0xaa, 0x08, 0x42, 0xeb, 0x8b, 0x8b, 0xe8, 0xc3, 0x2f, 0xb8, 0x78, 0x90, 0x38, 0x0e, 0x8a, 0x42, 0x0c, 0x0f, 0xbf, 0x3e, 0xf8, 0xd8, 0x07, 0xcf, 0x6a, 0x34, 0xc9, 0xfa,
- /* (2^226)P */ 0x11, 0xe0, 0x76, 0x4d, 0x23, 0xc5, 0xa6, 0xcc, 0x9f, 0x9a, 0x2a, 0xde, 0x3a, 0xb5, 0x92, 0x39, 0x19, 0x8a, 0xf1, 0x8d, 0xf9, 0x4d, 0xc9, 0xb4, 0x39, 0x9f, 0x57, 0xd8, 0x72, 0xab, 0x1d, 0x61, 0x6a, 0xb2, 0xff, 0x52, 0xba, 0x54, 0x0e, 0xfb, 0x83, 0x30, 0x8a, 0xf7, 0x3b, 0xf4, 0xd8, 0xae, 0x1a, 0x94, 0x3a, 0xec, 0x63, 0xfe, 0x6e, 0x7c,
- /* (2^227)P */ 0xdc, 0x70, 0x8e, 0x55, 0x44, 0xbf, 0xd2, 0x6a, 0xa0, 0x14, 0x61, 0x89, 0xd5, 0x55, 0x45, 0x3c, 0xf6, 0x40, 0x0d, 0x83, 0x85, 0x44, 0xb4, 0x62, 0x56, 0xfe, 0x60, 0xd7, 0x07, 0x1d, 0x47, 0x30, 0x3b, 0x73, 0xa4, 0xb5, 0xb7, 0xea, 0xac, 0xda, 0xf1, 0x17, 0xaa, 0x60, 0xdf, 0xe9, 0x84, 0xda, 0x31, 0x32, 0x61, 0xbf, 0xd0, 0x7e, 0x8a, 0x02,
- /* (2^228)P */ 0xb9, 0x51, 0xb3, 0x89, 0x21, 0x5d, 0xa2, 0xfe, 0x79, 0x2a, 0xb3, 0x2a, 0x3b, 0xe6, 0x6f, 0x2b, 0x22, 0x03, 0xea, 0x7b, 0x1f, 0xaf, 0x85, 0xc3, 0x38, 0x55, 0x5b, 0x8e, 0xb4, 0xaa, 0x77, 0xfe, 0x03, 0x6e, 0xda, 0x91, 0x24, 0x0c, 0x48, 0x39, 0x27, 0x43, 0x16, 0xd2, 0x0a, 0x0d, 0x43, 0xa3, 0x0e, 0xca, 0x45, 0xd1, 0x7f, 0xf5, 0xd3, 0x16,
- /* (2^229)P */ 0x3d, 0x32, 0x9b, 0x38, 0xf8, 0x06, 0x93, 0x78, 0x5b, 0x50, 0x2b, 0x06, 0xd8, 0x66, 0xfe, 0xab, 0x9b, 0x58, 0xc7, 0xd1, 0x4d, 0xd5, 0xf8, 0x3b, 0x10, 0x7e, 0x85, 0xde, 0x58, 0x4e, 0xdf, 0x53, 0xd9, 0x58, 0xe0, 0x15, 0x81, 0x9f, 0x1a, 0x78, 0xfc, 0x9f, 0x10, 0xc2, 0x23, 0xd6, 0x78, 0xd1, 0x9d, 0xd2, 0xd5, 0x1c, 0x53, 0xe2, 0xc9, 0x76,
- /* (2^230)P */ 0x98, 0x1e, 0x38, 0x7b, 0x71, 0x18, 0x4b, 0x15, 0xaf, 0xa1, 0xa6, 0x98, 0xcb, 0x26, 0xa3, 0xc8, 0x07, 0x46, 0xda, 0x3b, 0x70, 0x65, 0xec, 0x7a, 0x2b, 0x34, 0x94, 0xa8, 0xb6, 0x14, 0xf8, 0x1a, 0xce, 0xf7, 0xc8, 0x60, 0xf3, 0x88, 0xf4, 0x33, 0x60, 0x7b, 0xd1, 0x02, 0xe7, 0xda, 0x00, 0x4a, 0xea, 0xd2, 0xfd, 0x88, 0xd2, 0x99, 0x28, 0xf3,
- /* (2^231)P */ 0x28, 0x24, 0x1d, 0x26, 0xc2, 0xeb, 0x8b, 0x3b, 0xb4, 0x6b, 0xbe, 0x6b, 0x77, 0xff, 0xf3, 0x21, 0x3b, 0x26, 0x6a, 0x8c, 0x8e, 0x2a, 0x44, 0xa8, 0x01, 0x2b, 0x71, 0xea, 0x64, 0x30, 0xfd, 0xfd, 0x95, 0xcb, 0x39, 0x38, 0x48, 0xfa, 0x96, 0x97, 0x8c, 0x2f, 0x33, 0xca, 0x03, 0xe6, 0xd7, 0x94, 0x55, 0x6c, 0xc3, 0xb3, 0xa8, 0xf7, 0xae, 0x8c,
- /* (2^232)P */ 0xea, 0x62, 0x8a, 0xb4, 0xeb, 0x74, 0xf7, 0xb8, 0xae, 0xc5, 0x20, 0x71, 0x06, 0xd6, 0x7c, 0x62, 0x9b, 0x69, 0x74, 0xef, 0xa7, 0x6d, 0xd6, 0x8c, 0x37, 0xb9, 0xbf, 0xcf, 0xeb, 0xe4, 0x2f, 0x04, 0x02, 0x21, 0x7d, 0x75, 0x6b, 0x92, 0x48, 0xf8, 0x70, 0xad, 0x69, 0xe2, 0xea, 0x0e, 0x88, 0x67, 0x72, 0xcc, 0x2d, 0x10, 0xce, 0x2d, 0xcf, 0x65,
- /* (2^233)P */ 0x49, 0xf3, 0x57, 0x64, 0xe5, 0x5c, 0xc5, 0x65, 0x49, 0x97, 0xc4, 0x8a, 0xcc, 0xa9, 0xca, 0x94, 0x7b, 0x86, 0x88, 0xb6, 0x51, 0x27, 0x69, 0xa5, 0x0f, 0x8b, 0x06, 0x59, 0xa0, 0x94, 0xef, 0x63, 0x1a, 0x01, 0x9e, 0x4f, 0xd2, 0x5a, 0x93, 0xc0, 0x7c, 0xe6, 0x61, 0x77, 0xb6, 0xf5, 0x40, 0xd9, 0x98, 0x43, 0x5b, 0x56, 0x68, 0xe9, 0x37, 0x8f,
- /* (2^234)P */ 0xee, 0x87, 0xd2, 0x05, 0x1b, 0x39, 0x89, 0x10, 0x07, 0x6d, 0xe8, 0xfd, 0x8b, 0x4d, 0xb2, 0xa7, 0x7b, 0x1e, 0xa0, 0x6c, 0x0d, 0x3d, 0x3d, 0x49, 0xba, 0x61, 0x36, 0x1f, 0xc2, 0x84, 0x4a, 0xcc, 0x87, 0xa9, 0x1b, 0x23, 0x04, 0xe2, 0x3e, 0x97, 0xe1, 0xdb, 0xd5, 0x5a, 0xe8, 0x41, 0x6b, 0xe5, 0x5a, 0xa1, 0x99, 0xe5, 0x7b, 0xa7, 0xe0, 0x3b,
- /* (2^235)P */ 0xea, 0xa3, 0x6a, 0xdd, 0x77, 0x7f, 0x77, 0x41, 0xc5, 0x6a, 0xe4, 0xaf, 0x11, 0x5f, 0x88, 0xa5, 0x10, 0xee, 0xd0, 0x8c, 0x0c, 0xb4, 0xa5, 0x2a, 0xd0, 0xd8, 0x1d, 0x47, 0x06, 0xc0, 0xd5, 0xce, 0x51, 0x54, 0x9b, 0x2b, 0xe6, 0x2f, 0xe7, 0xe7, 0x31, 0x5f, 0x5c, 0x23, 0x81, 0x3e, 0x03, 0x93, 0xaa, 0x2d, 0x71, 0x84, 0xa0, 0x89, 0x32, 0xa6,
- /* (2^236)P */ 0x55, 0xa3, 0x13, 0x92, 0x4e, 0x93, 0x7d, 0xec, 0xca, 0x57, 0xfb, 0x37, 0xae, 0xd2, 0x18, 0x2e, 0x54, 0x05, 0x6c, 0xd1, 0x28, 0xca, 0x90, 0x40, 0x82, 0x2e, 0x79, 0xc6, 0x5a, 0xc7, 0xdd, 0x84, 0x93, 0xdf, 0x15, 0xb8, 0x1f, 0xb1, 0xf9, 0xaf, 0x2c, 0xe5, 0x32, 0xcd, 0xc2, 0x99, 0x6d, 0xac, 0x85, 0x5c, 0x63, 0xd3, 0xe2, 0xff, 0x24, 0xda,
- /* (2^237)P */ 0x2d, 0x8d, 0xfd, 0x65, 0xcc, 0xe5, 0x02, 0xa0, 0xe5, 0xb9, 0xec, 0x59, 0x09, 0x50, 0x27, 0xb7, 0x3d, 0x2a, 0x79, 0xb2, 0x76, 0x5d, 0x64, 0x95, 0xf8, 0xc5, 0xaf, 0x8a, 0x62, 0x11, 0x5c, 0x56, 0x1c, 0x05, 0x64, 0x9e, 0x5e, 0xbd, 0x54, 0x04, 0xe6, 0x9e, 0xab, 0xe6, 0x22, 0x7e, 0x42, 0x54, 0xb5, 0xa5, 0xd0, 0x8d, 0x28, 0x6b, 0x0f, 0x0b,
- /* (2^238)P */ 0x2d, 0xb2, 0x8c, 0x59, 0x10, 0x37, 0x84, 0x3b, 0x9b, 0x65, 0x1b, 0x0f, 0x10, 0xf9, 0xea, 0x60, 0x1b, 0x02, 0xf5, 0xee, 0x8b, 0xe6, 0x32, 0x7d, 0x10, 0x7f, 0x5f, 0x8c, 0x72, 0x09, 0x4e, 0x1f, 0x29, 0xff, 0x65, 0xcb, 0x3e, 0x3a, 0xd2, 0x96, 0x50, 0x1e, 0xea, 0x64, 0x99, 0xb5, 0x4c, 0x7a, 0x69, 0xb8, 0x95, 0xae, 0x48, 0xc0, 0x7c, 0xb1,
- /* (2^239)P */ 0xcd, 0x7c, 0x4f, 0x3e, 0xea, 0xf3, 0x90, 0xcb, 0x12, 0x76, 0xd1, 0x17, 0xdc, 0x0d, 0x13, 0x0f, 0xfd, 0x4d, 0xb5, 0x1f, 0xe4, 0xdd, 0xf2, 0x4d, 0x58, 0xea, 0xa5, 0x66, 0x92, 0xcf, 0xe5, 0x54, 0xea, 0x9b, 0x35, 0x83, 0x1a, 0x44, 0x8e, 0x62, 0x73, 0x45, 0x98, 0xa3, 0x89, 0x95, 0x52, 0x93, 0x1a, 0x8d, 0x63, 0x0f, 0xc2, 0x57, 0x3c, 0xb1,
- /* (2^240)P */ 0x72, 0xb4, 0xdf, 0x51, 0xb7, 0xf6, 0x52, 0xa2, 0x14, 0x56, 0xe5, 0x0a, 0x2e, 0x75, 0x81, 0x02, 0xee, 0x93, 0x48, 0x0a, 0x92, 0x4e, 0x0c, 0x0f, 0xdf, 0x09, 0x89, 0x99, 0xf6, 0xf9, 0x22, 0xa2, 0x32, 0xf8, 0xb0, 0x76, 0x0c, 0xb2, 0x4d, 0x6e, 0xbe, 0x83, 0x35, 0x61, 0x44, 0xd2, 0x58, 0xc7, 0xdd, 0x14, 0xcf, 0xc3, 0x4b, 0x7c, 0x07, 0xee,
- /* (2^241)P */ 0x8b, 0x03, 0xee, 0xcb, 0xa7, 0x2e, 0x28, 0xbd, 0x97, 0xd1, 0x4c, 0x2b, 0xd1, 0x92, 0x67, 0x5b, 0x5a, 0x12, 0xbf, 0x29, 0x17, 0xfc, 0x50, 0x09, 0x74, 0x76, 0xa2, 0xd4, 0x82, 0xfd, 0x2c, 0x0c, 0x90, 0xf7, 0xe7, 0xe5, 0x9a, 0x2c, 0x16, 0x40, 0xb9, 0x6c, 0xd9, 0xe0, 0x22, 0x9e, 0xf8, 0xdd, 0x73, 0xe4, 0x7b, 0x9e, 0xbe, 0x4f, 0x66, 0x22,
- /* (2^242)P */ 0xa4, 0x10, 0xbe, 0xb8, 0x83, 0x3a, 0x77, 0x8e, 0xea, 0x0a, 0xc4, 0x97, 0x3e, 0xb6, 0x6c, 0x81, 0xd7, 0x65, 0xd9, 0xf7, 0xae, 0xe6, 0xbe, 0xab, 0x59, 0x81, 0x29, 0x4b, 0xff, 0xe1, 0x0f, 0xc3, 0x2b, 0xad, 0x4b, 0xef, 0xc4, 0x50, 0x9f, 0x88, 0x31, 0xf2, 0xde, 0x80, 0xd6, 0xf4, 0x20, 0x9c, 0x77, 0x9b, 0xbe, 0xbe, 0x08, 0xf5, 0xf0, 0x95,
- /* (2^243)P */ 0x0e, 0x7c, 0x7b, 0x7c, 0xb3, 0xd8, 0x83, 0xfc, 0x8c, 0x75, 0x51, 0x74, 0x1b, 0xe1, 0x6d, 0x11, 0x05, 0x46, 0x24, 0x0d, 0xa4, 0x2b, 0x32, 0xfd, 0x2c, 0x4e, 0x21, 0xdf, 0x39, 0x6b, 0x96, 0xfc, 0xff, 0x92, 0xfc, 0x35, 0x0d, 0x9a, 0x4b, 0xc0, 0x70, 0x46, 0x32, 0x7d, 0xc0, 0xc4, 0x04, 0xe0, 0x2d, 0x83, 0xa7, 0x00, 0xc7, 0xcb, 0xb4, 0x8f,
- /* (2^244)P */ 0xa9, 0x5a, 0x7f, 0x0e, 0xdd, 0x2c, 0x85, 0xaa, 0x4d, 0xac, 0xde, 0xb3, 0xb6, 0xaf, 0xe6, 0xd1, 0x06, 0x7b, 0x2c, 0xa4, 0x01, 0x19, 0x22, 0x7d, 0x78, 0xf0, 0x3a, 0xea, 0x89, 0xfe, 0x21, 0x61, 0x6d, 0xb8, 0xfe, 0xa5, 0x2a, 0xab, 0x0d, 0x7b, 0x51, 0x39, 0xb6, 0xde, 0xbc, 0xf0, 0xc5, 0x48, 0xd7, 0x09, 0x82, 0x6e, 0x66, 0x75, 0xc5, 0xcd,
- /* (2^245)P */ 0xee, 0xdf, 0x2b, 0x6c, 0xa8, 0xde, 0x61, 0xe1, 0x27, 0xfa, 0x2a, 0x0f, 0x68, 0xe7, 0x7a, 0x9b, 0x13, 0xe9, 0x56, 0xd2, 0x1c, 0x3d, 0x2f, 0x3c, 0x7a, 0xf6, 0x6f, 0x45, 0xee, 0xe8, 0xf4, 0xa0, 0xa6, 0xe8, 0xa5, 0x27, 0xee, 0xf2, 0x85, 0xa9, 0xd5, 0x0e, 0xa9, 0x26, 0x60, 0xfe, 0xee, 0xc7, 0x59, 0x99, 0x5e, 0xa3, 0xdf, 0x23, 0x36, 0xd5,
- /* (2^246)P */ 0x15, 0x66, 0x6f, 0xd5, 0x78, 0xa4, 0x0a, 0xf7, 0xb1, 0xe8, 0x75, 0x6b, 0x48, 0x7d, 0xa6, 0x4d, 0x3d, 0x36, 0x9b, 0xc7, 0xcc, 0x68, 0x9a, 0xfe, 0x2f, 0x39, 0x2a, 0x51, 0x31, 0x39, 0x7d, 0x73, 0x6f, 0xc8, 0x74, 0x72, 0x6f, 0x6e, 0xda, 0x5f, 0xad, 0x48, 0xc8, 0x40, 0xe1, 0x06, 0x01, 0x36, 0xa1, 0x88, 0xc8, 0x99, 0x9c, 0xd1, 0x11, 0x8f,
- /* (2^247)P */ 0xab, 0xc5, 0xcb, 0xcf, 0xbd, 0x73, 0x21, 0xd0, 0x82, 0xb1, 0x2e, 0x2d, 0xd4, 0x36, 0x1b, 0xed, 0xa9, 0x8a, 0x26, 0x79, 0xc4, 0x17, 0xae, 0xe5, 0x09, 0x0a, 0x0c, 0xa4, 0x21, 0xa0, 0x6e, 0xdd, 0x62, 0x8e, 0x44, 0x62, 0xcc, 0x50, 0xff, 0x93, 0xb3, 0x9a, 0x72, 0x8c, 0x3f, 0xa1, 0xa6, 0x4d, 0x87, 0xd5, 0x1c, 0x5a, 0xc0, 0x0b, 0x1a, 0xd6,
- /* (2^248)P */ 0x67, 0x36, 0x6a, 0x1f, 0x96, 0xe5, 0x80, 0x20, 0xa9, 0xe8, 0x0b, 0x0e, 0x21, 0x29, 0x3f, 0xc8, 0x0a, 0x6d, 0x27, 0x47, 0xca, 0xd9, 0x05, 0x55, 0xbf, 0x11, 0xcf, 0x31, 0x7a, 0x37, 0xc7, 0x90, 0xa9, 0xf4, 0x07, 0x5e, 0xd5, 0xc3, 0x92, 0xaa, 0x95, 0xc8, 0x23, 0x2a, 0x53, 0x45, 0xe3, 0x3a, 0x24, 0xe9, 0x67, 0x97, 0x3a, 0x82, 0xf9, 0xa6,
- /* (2^249)P */ 0x92, 0x9e, 0x6d, 0x82, 0x67, 0xe9, 0xf9, 0x17, 0x96, 0x2c, 0xa7, 0xd3, 0x89, 0xf9, 0xdb, 0xd8, 0x20, 0xc6, 0x2e, 0xec, 0x4a, 0x76, 0x64, 0xbf, 0x27, 0x40, 0xe2, 0xb4, 0xdf, 0x1f, 0xa0, 0xef, 0x07, 0x80, 0xfb, 0x8e, 0x12, 0xf8, 0xb8, 0xe1, 0xc6, 0xdf, 0x7c, 0x69, 0x35, 0x5a, 0xe1, 0x8e, 0x5d, 0x69, 0x84, 0x56, 0xb6, 0x31, 0x1c, 0x0b,
- /* (2^250)P */ 0xd6, 0x94, 0x5c, 0xef, 0xbb, 0x46, 0x45, 0x44, 0x5b, 0xa1, 0xae, 0x03, 0x65, 0xdd, 0xb5, 0x66, 0x88, 0x35, 0x29, 0x95, 0x16, 0x54, 0xa6, 0xf5, 0xc9, 0x78, 0x34, 0xe6, 0x0f, 0xc4, 0x2b, 0x5b, 0x79, 0x51, 0x68, 0x48, 0x3a, 0x26, 0x87, 0x05, 0x70, 0xaf, 0x8b, 0xa6, 0xc7, 0x2e, 0xb3, 0xa9, 0x10, 0x01, 0xb0, 0xb9, 0x31, 0xfd, 0xdc, 0x80,
- /* (2^251)P */ 0x25, 0xf2, 0xad, 0xd6, 0x75, 0xa3, 0x04, 0x05, 0x64, 0x8a, 0x97, 0x60, 0x27, 0x2a, 0xe5, 0x6d, 0xb0, 0x73, 0xf4, 0x07, 0x2a, 0x9d, 0xe9, 0x46, 0xb4, 0x1c, 0x51, 0xf8, 0x63, 0x98, 0x7e, 0xe5, 0x13, 0x51, 0xed, 0x98, 0x65, 0x98, 0x4f, 0x8f, 0xe7, 0x7e, 0x72, 0xd7, 0x64, 0x11, 0x2f, 0xcd, 0x12, 0xf8, 0xc4, 0x63, 0x52, 0x0f, 0x7f, 0xc4,
- /* (2^252)P */ 0x5c, 0xd9, 0x85, 0x63, 0xc7, 0x8a, 0x65, 0x9a, 0x25, 0x83, 0x31, 0x73, 0x49, 0xf0, 0x93, 0x96, 0x70, 0x67, 0x6d, 0xb1, 0xff, 0x95, 0x54, 0xe4, 0xf8, 0x15, 0x6c, 0x5f, 0xbd, 0xf6, 0x0f, 0x38, 0x7b, 0x68, 0x7d, 0xd9, 0x3d, 0xf0, 0xa9, 0xa0, 0xe4, 0xd1, 0xb6, 0x34, 0x6d, 0x14, 0x16, 0xc2, 0x4c, 0x30, 0x0e, 0x67, 0xd3, 0xbe, 0x2e, 0xc0,
- /* (2^253)P */ 0x06, 0x6b, 0x52, 0xc8, 0x14, 0xcd, 0xae, 0x03, 0x93, 0xea, 0xc1, 0xf2, 0xf6, 0x8b, 0xc5, 0xb6, 0xdc, 0x82, 0x42, 0x29, 0x94, 0xe0, 0x25, 0x6c, 0x3f, 0x9f, 0x5d, 0xe4, 0x96, 0xf6, 0x8e, 0x3f, 0xf9, 0x72, 0xc4, 0x77, 0x60, 0x8b, 0xa4, 0xf9, 0xa8, 0xc3, 0x0a, 0x81, 0xb1, 0x97, 0x70, 0x18, 0xab, 0xea, 0x37, 0x8a, 0x08, 0xc7, 0xe2, 0x95,
- /* (2^254)P */ 0x94, 0x49, 0xd9, 0x5f, 0x76, 0x72, 0x82, 0xad, 0x2d, 0x50, 0x1a, 0x7a, 0x5b, 0xe6, 0x95, 0x1e, 0x95, 0x65, 0x87, 0x1c, 0x52, 0xd7, 0x44, 0xe6, 0x9b, 0x56, 0xcd, 0x6f, 0x05, 0xff, 0x67, 0xc5, 0xdb, 0xa2, 0xac, 0xe4, 0xa2, 0x28, 0x63, 0x5f, 0xfb, 0x0c, 0x3b, 0xf1, 0x87, 0xc3, 0x36, 0x78, 0x3f, 0x77, 0xfa, 0x50, 0x85, 0xf9, 0xd7, 0x82,
- /* (2^255)P */ 0x64, 0xc0, 0xe0, 0xd8, 0x2d, 0xed, 0xcb, 0x6a, 0xfd, 0xcd, 0xbc, 0x7e, 0x9f, 0xc8, 0x85, 0xe9, 0xc1, 0x7c, 0x0f, 0xe5, 0x18, 0xea, 0xd4, 0x51, 0xad, 0x59, 0x13, 0x75, 0xd9, 0x3d, 0xd4, 0x8a, 0xb2, 0xbe, 0x78, 0x52, 0x2b, 0x52, 0x94, 0x37, 0x41, 0xd6, 0xb4, 0xb6, 0x45, 0x20, 0x76, 0xe0, 0x1f, 0x31, 0xdb, 0xb1, 0xa1, 0x43, 0xf0, 0x18,
- /* (2^256)P */ 0x74, 0xa9, 0xa4, 0xa9, 0xdd, 0x6e, 0x3e, 0x68, 0xe5, 0xc3, 0x2e, 0x92, 0x17, 0xa4, 0xcb, 0x80, 0xb1, 0xf0, 0x06, 0x93, 0xef, 0xe6, 0x00, 0xe6, 0x3b, 0xb1, 0x32, 0x65, 0x7b, 0x83, 0xb6, 0x8a, 0x49, 0x1b, 0x14, 0x89, 0xee, 0xba, 0xf5, 0x6a, 0x8d, 0x36, 0xef, 0xb0, 0xd8, 0xb2, 0x16, 0x99, 0x17, 0x35, 0x02, 0x16, 0x55, 0x58, 0xdd, 0x82,
- /* (2^257)P */ 0x36, 0x95, 0xe8, 0xf4, 0x36, 0x42, 0xbb, 0xc5, 0x3e, 0xfa, 0x30, 0x84, 0x9e, 0x59, 0xfd, 0xd2, 0x95, 0x42, 0xf8, 0x64, 0xd9, 0xb9, 0x0e, 0x9f, 0xfa, 0xd0, 0x7b, 0x20, 0x31, 0x77, 0x48, 0x29, 0x4d, 0xd0, 0x32, 0x57, 0x56, 0x30, 0xa6, 0x17, 0x53, 0x04, 0xbf, 0x08, 0x28, 0xec, 0xb8, 0x46, 0xc1, 0x03, 0x89, 0xdc, 0xed, 0xa0, 0x35, 0x53,
- /* (2^258)P */ 0xc5, 0x7f, 0x9e, 0xd8, 0xc5, 0xba, 0x5f, 0x68, 0xc8, 0x23, 0x75, 0xea, 0x0d, 0xd9, 0x5a, 0xfd, 0x61, 0x1a, 0xa3, 0x2e, 0x45, 0x63, 0x14, 0x55, 0x86, 0x21, 0x29, 0xbe, 0xef, 0x5e, 0x50, 0xe5, 0x18, 0x59, 0xe7, 0xe3, 0xce, 0x4d, 0x8c, 0x15, 0x8f, 0x89, 0x66, 0x44, 0x52, 0x3d, 0xfa, 0xc7, 0x9a, 0x59, 0x90, 0x8e, 0xc0, 0x06, 0x3f, 0xc9,
- /* (2^259)P */ 0x8e, 0x04, 0xd9, 0x16, 0x50, 0x1d, 0x8c, 0x9f, 0xd5, 0xe3, 0xce, 0xfd, 0x47, 0x04, 0x27, 0x4d, 0xc2, 0xfa, 0x71, 0xd9, 0x0b, 0xb8, 0x65, 0xf4, 0x11, 0xf3, 0x08, 0xee, 0x81, 0xc8, 0x67, 0x99, 0x0b, 0x8d, 0x77, 0xa3, 0x4f, 0xb5, 0x9b, 0xdb, 0x26, 0xf1, 0x97, 0xeb, 0x04, 0x54, 0xeb, 0x80, 0x08, 0x1d, 0x1d, 0xf6, 0x3d, 0x1f, 0x5a, 0xb8,
- /* (2^260)P */ 0xb7, 0x9c, 0x9d, 0xee, 0xb9, 0x5c, 0xad, 0x0d, 0x9e, 0xfd, 0x60, 0x3c, 0x27, 0x4e, 0xa2, 0x95, 0xfb, 0x64, 0x7e, 0x79, 0x64, 0x87, 0x10, 0xb4, 0x73, 0xe0, 0x9d, 0x46, 0x4d, 0x3d, 0xee, 0x83, 0xe4, 0x16, 0x88, 0x97, 0xe6, 0x4d, 0xba, 0x70, 0xb6, 0x96, 0x7b, 0xff, 0x4b, 0xc8, 0xcf, 0x72, 0x83, 0x3e, 0x5b, 0x24, 0x2e, 0x57, 0xf1, 0x82,
- /* (2^261)P */ 0x30, 0x71, 0x40, 0x51, 0x4f, 0x44, 0xbb, 0xc7, 0xf0, 0x54, 0x6e, 0x9d, 0xeb, 0x15, 0xad, 0xf8, 0x61, 0x43, 0x5a, 0xef, 0xc0, 0xb1, 0x57, 0xae, 0x03, 0x40, 0xe8, 0x68, 0x6f, 0x03, 0x20, 0x4f, 0x8a, 0x51, 0x2a, 0x9e, 0xd2, 0x45, 0xaf, 0xb4, 0xf5, 0xd4, 0x95, 0x7f, 0x3d, 0x3d, 0xb7, 0xb6, 0x28, 0xc5, 0x08, 0x8b, 0x44, 0xd6, 0x3f, 0xe7,
- /* (2^262)P */ 0xa9, 0x52, 0x04, 0x67, 0xcb, 0x20, 0x63, 0xf8, 0x18, 0x01, 0x44, 0x21, 0x6a, 0x8a, 0x83, 0x48, 0xd4, 0xaf, 0x23, 0x0f, 0x35, 0x8d, 0xe5, 0x5a, 0xc4, 0x7c, 0x55, 0x46, 0x19, 0x5f, 0x35, 0xe0, 0x5d, 0x97, 0x4c, 0x2d, 0x04, 0xed, 0x59, 0xd4, 0xb0, 0xb2, 0xc6, 0xe3, 0x51, 0xe1, 0x38, 0xc6, 0x30, 0x49, 0x8f, 0xae, 0x61, 0x64, 0xce, 0xa8,
- /* (2^263)P */ 0x9b, 0x64, 0x83, 0x3c, 0xd3, 0xdf, 0xb9, 0x27, 0xe7, 0x5b, 0x7f, 0xeb, 0xf3, 0x26, 0xcf, 0xb1, 0x8f, 0xaf, 0x26, 0xc8, 0x48, 0xce, 0xa1, 0xac, 0x7d, 0x10, 0x34, 0x28, 0xe1, 0x1f, 0x69, 0x03, 0x64, 0x77, 0x61, 0xdd, 0x4a, 0x9b, 0x18, 0x47, 0xf8, 0xca, 0x63, 0xc9, 0x03, 0x2d, 0x20, 0x2a, 0x69, 0x6e, 0x42, 0xd0, 0xe7, 0xaa, 0xb5, 0xf3,
- /* (2^264)P */ 0xea, 0x31, 0x0c, 0x57, 0x0f, 0x3e, 0xe3, 0x35, 0xd8, 0x30, 0xa5, 0x6f, 0xdd, 0x95, 0x43, 0xc6, 0x66, 0x07, 0x4f, 0x34, 0xc3, 0x7e, 0x04, 0x10, 0x2d, 0xc4, 0x1c, 0x94, 0x52, 0x2e, 0x5b, 0x9a, 0x65, 0x2f, 0x91, 0xaa, 0x4f, 0x3c, 0xdc, 0x23, 0x18, 0xe1, 0x4f, 0x85, 0xcd, 0xf4, 0x8c, 0x51, 0xf7, 0xab, 0x4f, 0xdc, 0x15, 0x5c, 0x9e, 0xc5,
- /* (2^265)P */ 0x54, 0x57, 0x23, 0x17, 0xe7, 0x82, 0x2f, 0x04, 0x7d, 0xfe, 0xe7, 0x1f, 0xa2, 0x57, 0x79, 0xe9, 0x58, 0x9b, 0xbe, 0xc6, 0x16, 0x4a, 0x17, 0x50, 0x90, 0x4a, 0x34, 0x70, 0x87, 0x37, 0x01, 0x26, 0xd8, 0xa3, 0x5f, 0x07, 0x7c, 0xd0, 0x7d, 0x05, 0x8a, 0x93, 0x51, 0x2f, 0x99, 0xea, 0xcf, 0x00, 0xd8, 0xc7, 0xe6, 0x9b, 0x8c, 0x62, 0x45, 0x87,
- /* (2^266)P */ 0xc3, 0xfd, 0x29, 0x66, 0xe7, 0x30, 0x29, 0x77, 0xe0, 0x0d, 0x63, 0x5b, 0xe6, 0x90, 0x1a, 0x1e, 0x99, 0xc2, 0xa7, 0xab, 0xff, 0xa7, 0xbd, 0x79, 0x01, 0x97, 0xfd, 0x27, 0x1b, 0x43, 0x2b, 0xe6, 0xfe, 0x5e, 0xf1, 0xb9, 0x35, 0x38, 0x08, 0x25, 0x55, 0x90, 0x68, 0x2e, 0xc3, 0x67, 0x39, 0x9f, 0x2b, 0x2c, 0x70, 0x48, 0x8c, 0x47, 0xee, 0x56,
- /* (2^267)P */ 0xf7, 0x32, 0x70, 0xb5, 0xe6, 0x42, 0xfd, 0x0a, 0x39, 0x9b, 0x07, 0xfe, 0x0e, 0xf4, 0x47, 0xba, 0x6a, 0x3f, 0xf5, 0x2c, 0x15, 0xf3, 0x60, 0x3f, 0xb1, 0x83, 0x7b, 0x2e, 0x34, 0x58, 0x1a, 0x6e, 0x4a, 0x49, 0x05, 0x45, 0xca, 0xdb, 0x00, 0x01, 0x0c, 0x42, 0x5e, 0x60, 0x40, 0x5f, 0xd9, 0xc7, 0x3a, 0x9e, 0x1c, 0x8d, 0xab, 0x11, 0x55, 0x65,
- /* (2^268)P */ 0x87, 0x40, 0xb7, 0x0d, 0xaa, 0x34, 0x89, 0x90, 0x75, 0x6d, 0xa2, 0xfe, 0x3b, 0x6d, 0x5c, 0x39, 0x98, 0x10, 0x9e, 0x15, 0xc5, 0x35, 0xa2, 0x27, 0x23, 0x0a, 0x2d, 0x60, 0xe2, 0xa8, 0x7f, 0x3e, 0x77, 0x8f, 0xcc, 0x44, 0xcc, 0x30, 0x28, 0xe2, 0xf0, 0x04, 0x8c, 0xee, 0xe4, 0x5f, 0x68, 0x8c, 0xdf, 0x70, 0xbf, 0x31, 0xee, 0x2a, 0xfc, 0xce,
- /* (2^269)P */ 0x92, 0xf2, 0xa0, 0xd9, 0x58, 0x3b, 0x7c, 0x1a, 0x99, 0x46, 0x59, 0x54, 0x60, 0x06, 0x8d, 0x5e, 0xf0, 0x22, 0xa1, 0xed, 0x92, 0x8a, 0x4d, 0x76, 0x95, 0x05, 0x0b, 0xff, 0xfc, 0x9a, 0xd1, 0xcc, 0x05, 0xb9, 0x5e, 0x99, 0xe8, 0x2a, 0x76, 0x7b, 0xfd, 0xa6, 0xe2, 0xd1, 0x1a, 0xd6, 0x76, 0x9f, 0x2f, 0x0e, 0xd1, 0xa8, 0x77, 0x5a, 0x40, 0x5a,
- /* (2^270)P */ 0xff, 0xf9, 0x3f, 0xa9, 0xa6, 0x6c, 0x6d, 0x03, 0x8b, 0xa7, 0x10, 0x5d, 0x3f, 0xec, 0x3e, 0x1c, 0x0b, 0x6b, 0xa2, 0x6a, 0x22, 0xa9, 0x28, 0xd0, 0x66, 0xc9, 0xc2, 0x3d, 0x47, 0x20, 0x7d, 0xa6, 0x1d, 0xd8, 0x25, 0xb5, 0xf2, 0xf9, 0x70, 0x19, 0x6b, 0xf8, 0x43, 0x36, 0xc5, 0x1f, 0xe4, 0x5a, 0x4c, 0x13, 0xe4, 0x6d, 0x08, 0x0b, 0x1d, 0xb1,
- /* (2^271)P */ 0x3f, 0x20, 0x9b, 0xfb, 0xec, 0x7d, 0x31, 0xc5, 0xfc, 0x88, 0x0b, 0x30, 0xed, 0x36, 0xc0, 0x63, 0xb1, 0x7d, 0x10, 0xda, 0xb6, 0x2e, 0xad, 0xf3, 0xec, 0x94, 0xe7, 0xec, 0xb5, 0x9c, 0xfe, 0xf5, 0x35, 0xf0, 0xa2, 0x2d, 0x7f, 0xca, 0x6b, 0x67, 0x1a, 0xf6, 0xb3, 0xda, 0x09, 0x2a, 0xaa, 0xdf, 0xb1, 0xca, 0x9b, 0xfb, 0xeb, 0xb3, 0xcd, 0xc0,
- /* (2^272)P */ 0xcd, 0x4d, 0x89, 0x00, 0xa4, 0x3b, 0x48, 0xf0, 0x76, 0x91, 0x35, 0xa5, 0xf8, 0xc9, 0xb6, 0x46, 0xbc, 0xf6, 0x9a, 0x45, 0x47, 0x17, 0x96, 0x80, 0x5b, 0x3a, 0x28, 0x33, 0xf9, 0x5a, 0xef, 0x43, 0x07, 0xfe, 0x3b, 0xf4, 0x8e, 0x19, 0xce, 0xd2, 0x94, 0x4b, 0x6d, 0x8e, 0x67, 0x20, 0xc7, 0x4f, 0x2f, 0x59, 0x8e, 0xe1, 0xa1, 0xa9, 0xf9, 0x0e,
- /* (2^273)P */ 0xdc, 0x7b, 0xb5, 0x50, 0x2e, 0xe9, 0x7e, 0x8b, 0x78, 0xa1, 0x38, 0x96, 0x22, 0xc3, 0x61, 0x67, 0x6d, 0xc8, 0x58, 0xed, 0x41, 0x1d, 0x5d, 0x86, 0x98, 0x7f, 0x2f, 0x1b, 0x8d, 0x3e, 0xaa, 0xc1, 0xd2, 0x0a, 0xf3, 0xbf, 0x95, 0x04, 0xf3, 0x10, 0x3c, 0x2b, 0x7f, 0x90, 0x46, 0x04, 0xaa, 0x6a, 0xa9, 0x35, 0x76, 0xac, 0x49, 0xb5, 0x00, 0x45,
- /* (2^274)P */ 0xb1, 0x93, 0x79, 0x84, 0x4a, 0x2a, 0x30, 0x78, 0x16, 0xaa, 0xc5, 0x74, 0x06, 0xce, 0xa5, 0xa7, 0x32, 0x86, 0xe0, 0xf9, 0x10, 0xd2, 0x58, 0x76, 0xfb, 0x66, 0x49, 0x76, 0x3a, 0x90, 0xba, 0xb5, 0xcc, 0x99, 0xcd, 0x09, 0xc1, 0x9a, 0x74, 0x23, 0xdf, 0x0c, 0xfe, 0x99, 0x52, 0x80, 0xa3, 0x7c, 0x1c, 0x71, 0x5f, 0x2c, 0x49, 0x57, 0xf4, 0xf9,
- /* (2^275)P */ 0x6d, 0xbf, 0x52, 0xe6, 0x25, 0x98, 0xed, 0xcf, 0xe3, 0xbc, 0x08, 0xa2, 0x1a, 0x90, 0xae, 0xa0, 0xbf, 0x07, 0x15, 0xad, 0x0a, 0x9f, 0x3e, 0x47, 0x44, 0xc2, 0x10, 0x46, 0xa6, 0x7a, 0x9e, 0x2f, 0x57, 0xbc, 0xe2, 0xf0, 0x1d, 0xd6, 0x9a, 0x06, 0xed, 0xfc, 0x54, 0x95, 0x92, 0x15, 0xa2, 0xf7, 0x8d, 0x6b, 0xef, 0xb2, 0x05, 0xed, 0x5c, 0x63,
- /* (2^276)P */ 0xbc, 0x0b, 0x27, 0x3a, 0x3a, 0xf8, 0xe1, 0x48, 0x02, 0x7e, 0x27, 0xe6, 0x81, 0x62, 0x07, 0x73, 0x74, 0xe5, 0x52, 0xd7, 0xf8, 0x26, 0xca, 0x93, 0x4d, 0x3e, 0x9b, 0x55, 0x09, 0x8e, 0xe3, 0xd7, 0xa6, 0xe3, 0xb6, 0x2a, 0xa9, 0xb3, 0xb0, 0xa0, 0x8c, 0x01, 0xbb, 0x07, 0x90, 0x78, 0x6d, 0x6d, 0xe9, 0xf0, 0x7a, 0x90, 0xbd, 0xdc, 0x0c, 0x36,
- /* (2^277)P */ 0x7f, 0x20, 0x12, 0x0f, 0x40, 0x00, 0x53, 0xd8, 0x0c, 0x27, 0x47, 0x47, 0x22, 0x80, 0xfb, 0x62, 0xe4, 0xa7, 0xf7, 0xbd, 0x42, 0xa5, 0xc3, 0x2b, 0xb2, 0x7f, 0x50, 0xcc, 0xe2, 0xfb, 0xd5, 0xc0, 0x63, 0xdd, 0x24, 0x5f, 0x7c, 0x08, 0x91, 0xbf, 0x6e, 0x47, 0x44, 0xd4, 0x6a, 0xc0, 0xc3, 0x09, 0x39, 0x27, 0xdd, 0xc7, 0xca, 0x06, 0x29, 0x55,
- /* (2^278)P */ 0x76, 0x28, 0x58, 0xb0, 0xd2, 0xf3, 0x0f, 0x04, 0xe9, 0xc9, 0xab, 0x66, 0x5b, 0x75, 0x51, 0xdc, 0xe5, 0x8f, 0xe8, 0x1f, 0xdb, 0x03, 0x0f, 0xb0, 0x7d, 0xf9, 0x20, 0x64, 0x89, 0xe9, 0xdc, 0xe6, 0x24, 0xc3, 0xd5, 0xd2, 0x41, 0xa6, 0xe4, 0xe3, 0xc4, 0x79, 0x7c, 0x0f, 0xa1, 0x61, 0x2f, 0xda, 0xa4, 0xc9, 0xfd, 0xad, 0x5c, 0x65, 0x6a, 0xf3,
- /* (2^279)P */ 0xd5, 0xab, 0x72, 0x7a, 0x3b, 0x59, 0xea, 0xcf, 0xd5, 0x17, 0xd2, 0xb2, 0x5f, 0x2d, 0xab, 0xad, 0x9e, 0x88, 0x64, 0x55, 0x96, 0x6e, 0xf3, 0x44, 0xa9, 0x11, 0xf5, 0xf8, 0x3a, 0xf1, 0xcd, 0x79, 0x4c, 0x99, 0x6d, 0x23, 0x6a, 0xa0, 0xc2, 0x1a, 0x19, 0x45, 0xb5, 0xd8, 0x95, 0x2f, 0x49, 0xe9, 0x46, 0x39, 0x26, 0x60, 0x04, 0x15, 0x8b, 0xcc,
- /* (2^280)P */ 0x66, 0x0c, 0xf0, 0x54, 0x41, 0x02, 0x91, 0xab, 0xe5, 0x85, 0x8a, 0x44, 0xa6, 0x34, 0x96, 0x32, 0xc0, 0xdf, 0x6c, 0x41, 0x39, 0xd4, 0xc6, 0xe1, 0xe3, 0x81, 0xb0, 0x4c, 0x34, 0x4f, 0xe5, 0xf4, 0x35, 0x46, 0x1f, 0xeb, 0x75, 0xfd, 0x43, 0x37, 0x50, 0x99, 0xab, 0xad, 0xb7, 0x8c, 0xa1, 0x57, 0xcb, 0xe6, 0xce, 0x16, 0x2e, 0x85, 0xcc, 0xf9,
- /* (2^281)P */ 0x63, 0xd1, 0x3f, 0x9e, 0xa2, 0x17, 0x2e, 0x1d, 0x3e, 0xce, 0x48, 0x2d, 0xbb, 0x8f, 0x69, 0xc9, 0xa6, 0x3d, 0x4e, 0xfe, 0x09, 0x56, 0xb3, 0x02, 0x5f, 0x99, 0x97, 0x0c, 0x54, 0xda, 0x32, 0x97, 0x9b, 0xf4, 0x95, 0xf1, 0xad, 0xe3, 0x2b, 0x04, 0xa7, 0x9b, 0x3f, 0xbb, 0xe7, 0x87, 0x2e, 0x1f, 0x8b, 0x4b, 0x7a, 0xa4, 0x43, 0x0c, 0x0f, 0x35,
- /* (2^282)P */ 0x05, 0xdc, 0xe0, 0x2c, 0xa1, 0xc1, 0xd0, 0xf1, 0x1f, 0x4e, 0xc0, 0x6c, 0x35, 0x7b, 0xca, 0x8f, 0x8b, 0x02, 0xb1, 0xf7, 0xd6, 0x2e, 0xe7, 0x93, 0x80, 0x85, 0x18, 0x88, 0x19, 0xb9, 0xb4, 0x4a, 0xbc, 0xeb, 0x5a, 0x78, 0x38, 0xed, 0xc6, 0x27, 0x2a, 0x74, 0x76, 0xf0, 0x1b, 0x79, 0x92, 0x2f, 0xd2, 0x81, 0x98, 0xdf, 0xa9, 0x50, 0x19, 0xeb,
- /* (2^283)P */ 0xb5, 0xe7, 0xb4, 0x11, 0x3a, 0x81, 0xb6, 0xb4, 0xf8, 0xa2, 0xb3, 0x6c, 0xfc, 0x9d, 0xe0, 0xc0, 0xe0, 0x59, 0x7f, 0x05, 0x37, 0xef, 0x2c, 0xa9, 0x3a, 0x24, 0xac, 0x7b, 0x25, 0xa0, 0x55, 0xd2, 0x44, 0x82, 0x82, 0x6e, 0x64, 0xa3, 0x58, 0xc8, 0x67, 0xae, 0x26, 0xa7, 0x0f, 0x42, 0x63, 0xe1, 0x93, 0x01, 0x52, 0x19, 0xaf, 0x49, 0x3e, 0x33,
- /* (2^284)P */ 0x05, 0x85, 0xe6, 0x66, 0xaf, 0x5f, 0xdf, 0xbf, 0x9d, 0x24, 0x62, 0x60, 0x90, 0xe2, 0x4c, 0x7d, 0x4e, 0xc3, 0x74, 0x5d, 0x4f, 0x53, 0xf3, 0x63, 0x13, 0xf4, 0x74, 0x28, 0x6b, 0x7d, 0x57, 0x0c, 0x9d, 0x84, 0xa7, 0x1a, 0xff, 0xa0, 0x79, 0xdf, 0xfc, 0x65, 0x98, 0x8e, 0x22, 0x0d, 0x62, 0x7e, 0xf2, 0x34, 0x60, 0x83, 0x05, 0x14, 0xb1, 0xc1,
- /* (2^285)P */ 0x64, 0x22, 0xcc, 0xdf, 0x5c, 0xbc, 0x88, 0x68, 0x4c, 0xd9, 0xbc, 0x0e, 0xc9, 0x8b, 0xb4, 0x23, 0x52, 0xad, 0xb0, 0xb3, 0xf1, 0x17, 0xd8, 0x15, 0x04, 0x6b, 0x99, 0xf0, 0xc4, 0x7d, 0x48, 0x22, 0x4a, 0xf8, 0x6f, 0xaa, 0x88, 0x0d, 0xc5, 0x5e, 0xa9, 0x1c, 0x61, 0x3d, 0x95, 0xa9, 0x7b, 0x6a, 0x79, 0x33, 0x0a, 0x2b, 0x99, 0xe3, 0x4e, 0x48,
- /* (2^286)P */ 0x6b, 0x9b, 0x6a, 0x2a, 0xf1, 0x60, 0x31, 0xb4, 0x73, 0xd1, 0x87, 0x45, 0x9c, 0x15, 0x58, 0x4b, 0x91, 0x6d, 0x94, 0x1c, 0x41, 0x11, 0x4a, 0x83, 0xec, 0xaf, 0x65, 0xbc, 0x34, 0xaa, 0x26, 0xe2, 0xaf, 0xed, 0x46, 0x05, 0x4e, 0xdb, 0xc6, 0x4e, 0x10, 0x28, 0x4e, 0x72, 0xe5, 0x31, 0xa3, 0x20, 0xd7, 0xb1, 0x96, 0x64, 0xf6, 0xce, 0x08, 0x08,
- /* (2^287)P */ 0x16, 0xa9, 0x5c, 0x9f, 0x9a, 0xb4, 0xb8, 0xc8, 0x32, 0x78, 0xc0, 0x3a, 0xd9, 0x5f, 0x94, 0xac, 0x3a, 0x42, 0x1f, 0x43, 0xd6, 0x80, 0x47, 0x2c, 0xdc, 0x76, 0x27, 0xfa, 0x50, 0xe5, 0xa1, 0xe4, 0xc3, 0xcb, 0x61, 0x31, 0xe1, 0x2e, 0xde, 0x81, 0x3b, 0x77, 0x1c, 0x39, 0x3c, 0xdb, 0xda, 0x87, 0x4b, 0x84, 0x12, 0xeb, 0xdd, 0x54, 0xbf, 0xe7,
- /* (2^288)P */ 0xbf, 0xcb, 0x73, 0x21, 0x3d, 0x7e, 0x13, 0x8c, 0xa6, 0x34, 0x21, 0x2b, 0xa5, 0xe4, 0x9f, 0x8e, 0x9c, 0x01, 0x9c, 0x43, 0xd9, 0xc7, 0xb9, 0xf1, 0xbe, 0x7f, 0x45, 0x51, 0x97, 0xa1, 0x8e, 0x01, 0xf8, 0xbd, 0xd2, 0xbf, 0x81, 0x3a, 0x8b, 0xab, 0xe4, 0x89, 0xb7, 0xbd, 0xf2, 0xcd, 0xa9, 0x8a, 0x8a, 0xde, 0xfb, 0x8a, 0x55, 0x12, 0x7b, 0x17,
- /* (2^289)P */ 0x1b, 0x95, 0x58, 0x4d, 0xe6, 0x51, 0x31, 0x52, 0x1c, 0xd8, 0x15, 0x84, 0xb1, 0x0d, 0x36, 0x25, 0x88, 0x91, 0x46, 0x71, 0x42, 0x56, 0xe2, 0x90, 0x08, 0x9e, 0x77, 0x1b, 0xee, 0x22, 0x3f, 0xec, 0xee, 0x8c, 0x7b, 0x2e, 0x79, 0xc4, 0x6c, 0x07, 0xa1, 0x7e, 0x52, 0xf5, 0x26, 0x5c, 0x84, 0x2a, 0x50, 0x6e, 0x82, 0xb3, 0x76, 0xda, 0x35, 0x16,
- /* (2^290)P */ 0x0a, 0x6f, 0x99, 0x87, 0xc0, 0x7d, 0x8a, 0xb2, 0xca, 0xae, 0xe8, 0x65, 0x98, 0x0f, 0xb3, 0x44, 0xe1, 0xdc, 0x52, 0x79, 0x75, 0xec, 0x8f, 0x95, 0x87, 0x45, 0xd1, 0x32, 0x18, 0x55, 0x15, 0xce, 0x64, 0x9b, 0x08, 0x4f, 0x2c, 0xea, 0xba, 0x1c, 0x57, 0x06, 0x63, 0xc8, 0xb1, 0xfd, 0xc5, 0x67, 0xe7, 0x1f, 0x87, 0x9e, 0xde, 0x72, 0x7d, 0xec,
- /* (2^291)P */ 0x36, 0x8b, 0x4d, 0x2c, 0xc2, 0x46, 0xe8, 0x96, 0xac, 0x0b, 0x8c, 0xc5, 0x09, 0x10, 0xfc, 0xf2, 0xda, 0xea, 0x22, 0xb2, 0xd3, 0x89, 0xeb, 0xb2, 0x85, 0x0f, 0xff, 0x59, 0x50, 0x2c, 0x99, 0x5a, 0x1f, 0xec, 0x2a, 0x6f, 0xec, 0xcf, 0xe9, 0xce, 0x12, 0x6b, 0x19, 0xd8, 0xde, 0x9b, 0xce, 0x0e, 0x6a, 0xaa, 0xe1, 0x32, 0xea, 0x4c, 0xfe, 0x92,
- /* (2^292)P */ 0x5f, 0x17, 0x70, 0x53, 0x26, 0x03, 0x0b, 0xab, 0xd1, 0xc1, 0x42, 0x0b, 0xab, 0x2b, 0x3d, 0x31, 0xa4, 0xd5, 0x2b, 0x5e, 0x00, 0xd5, 0x9a, 0x22, 0x34, 0xe0, 0x53, 0x3f, 0x59, 0x7f, 0x2c, 0x6d, 0x72, 0x9a, 0xa4, 0xbe, 0x3d, 0x42, 0x05, 0x1b, 0xf2, 0x7f, 0x88, 0x56, 0xd1, 0x7c, 0x7d, 0x6b, 0x9f, 0x43, 0xfe, 0x65, 0x19, 0xae, 0x9c, 0x4c,
- /* (2^293)P */ 0xf3, 0x7c, 0x20, 0xa9, 0xfc, 0xf2, 0xf2, 0x3b, 0x3c, 0x57, 0x41, 0x94, 0xe5, 0xcc, 0x6a, 0x37, 0x5d, 0x09, 0xf2, 0xab, 0xc2, 0xca, 0x60, 0x38, 0x6b, 0x7a, 0xe1, 0x78, 0x2b, 0xc1, 0x1d, 0xe8, 0xfd, 0xbc, 0x3d, 0x5c, 0xa2, 0xdb, 0x49, 0x20, 0x79, 0xe6, 0x1b, 0x9b, 0x65, 0xd9, 0x6d, 0xec, 0x57, 0x1d, 0xd2, 0xe9, 0x90, 0xeb, 0x43, 0x7b,
- /* (2^294)P */ 0x2a, 0x8b, 0x2e, 0x19, 0x18, 0x10, 0xb8, 0x83, 0xe7, 0x7d, 0x2d, 0x9a, 0x3a, 0xe5, 0xd1, 0xe4, 0x7c, 0x38, 0xe5, 0x59, 0x2a, 0x6e, 0xd9, 0x01, 0x29, 0x3d, 0x23, 0xf7, 0x52, 0xba, 0x61, 0x04, 0x9a, 0xde, 0xc4, 0x31, 0x50, 0xeb, 0x1b, 0xaa, 0xde, 0x39, 0x58, 0xd8, 0x1b, 0x1e, 0xfc, 0x57, 0x9a, 0x28, 0x43, 0x9e, 0x97, 0x5e, 0xaa, 0xa3,
- /* (2^295)P */ 0x97, 0x0a, 0x74, 0xc4, 0x39, 0x99, 0x6b, 0x40, 0xc7, 0x3e, 0x8c, 0xa7, 0xb1, 0x4e, 0x9a, 0x59, 0x6e, 0x1c, 0xfe, 0xfc, 0x2a, 0x5e, 0x73, 0x2b, 0x8c, 0xa9, 0x71, 0xf5, 0xda, 0x6b, 0x15, 0xab, 0xf7, 0xbe, 0x2a, 0x44, 0x5f, 0xba, 0xae, 0x67, 0x93, 0xc5, 0x86, 0xc1, 0xb8, 0xdf, 0xdc, 0xcb, 0xd7, 0xff, 0xb1, 0x71, 0x7c, 0x6f, 0x88, 0xf8,
- /* (2^296)P */ 0x3f, 0x89, 0xb1, 0xbf, 0x24, 0x16, 0xac, 0x56, 0xfe, 0xdf, 0x94, 0x71, 0xbf, 0xd6, 0x57, 0x0c, 0xb4, 0x77, 0x37, 0xaa, 0x2a, 0x70, 0x76, 0x49, 0xaf, 0x0c, 0x97, 0x8e, 0x78, 0x2a, 0x67, 0xc9, 0x3b, 0x3d, 0x5b, 0x01, 0x2f, 0xda, 0xd5, 0xa8, 0xde, 0x02, 0xa9, 0xac, 0x76, 0x00, 0x0b, 0x46, 0xc6, 0x2d, 0xdc, 0x08, 0xf4, 0x10, 0x2c, 0xbe,
- /* (2^297)P */ 0xcb, 0x07, 0xf9, 0x91, 0xc6, 0xd5, 0x3e, 0x54, 0x63, 0xae, 0xfc, 0x10, 0xbe, 0x3a, 0x20, 0x73, 0x4e, 0x65, 0x0e, 0x2d, 0x86, 0x77, 0x83, 0x9d, 0xe2, 0x0a, 0xe9, 0xac, 0x22, 0x52, 0x76, 0xd4, 0x6e, 0xfa, 0xe0, 0x09, 0xef, 0x78, 0x82, 0x9f, 0x26, 0xf9, 0x06, 0xb5, 0xe7, 0x05, 0x0e, 0xf2, 0x46, 0x72, 0x93, 0xd3, 0x24, 0xbd, 0x87, 0x60,
- /* (2^298)P */ 0x14, 0x55, 0x84, 0x7b, 0x6c, 0x60, 0x80, 0x73, 0x8c, 0xbe, 0x2d, 0xd6, 0x69, 0xd6, 0x17, 0x26, 0x44, 0x9f, 0x88, 0xa2, 0x39, 0x7c, 0x89, 0xbc, 0x6d, 0x9e, 0x46, 0xb6, 0x68, 0x66, 0xea, 0xdc, 0x31, 0xd6, 0x21, 0x51, 0x9f, 0x28, 0x28, 0xaf, 0x9e, 0x47, 0x2c, 0x4c, 0x8f, 0xf3, 0xaf, 0x1f, 0xe4, 0xab, 0xac, 0xe9, 0x0c, 0x91, 0x3a, 0x61,
- /* (2^299)P */ 0xb0, 0x37, 0x55, 0x4b, 0xe9, 0xc3, 0xb1, 0xce, 0x42, 0xe6, 0xc5, 0x11, 0x7f, 0x2c, 0x11, 0xfc, 0x4e, 0x71, 0x17, 0x00, 0x74, 0x7f, 0xbf, 0x07, 0x4d, 0xfd, 0x40, 0xb2, 0x87, 0xb0, 0xef, 0x1f, 0x35, 0x2c, 0x2d, 0xd7, 0xe1, 0xe4, 0xad, 0x0e, 0x7f, 0x63, 0x66, 0x62, 0x23, 0x41, 0xf6, 0xc1, 0x14, 0xa6, 0xd7, 0xa9, 0x11, 0x56, 0x9d, 0x1b,
- /* (2^300)P */ 0x02, 0x82, 0x42, 0x18, 0x4f, 0x1b, 0xc9, 0x5d, 0x78, 0x5f, 0xee, 0xed, 0x01, 0x49, 0x8f, 0xf2, 0xa0, 0xe2, 0x6e, 0xbb, 0x6b, 0x04, 0x8d, 0xb2, 0x41, 0xae, 0xc8, 0x1b, 0x59, 0x34, 0xb8, 0x2a, 0xdb, 0x1f, 0xd2, 0x52, 0xdf, 0x3f, 0x35, 0x00, 0x8b, 0x61, 0xbc, 0x97, 0xa0, 0xc4, 0x77, 0xd1, 0xe4, 0x2c, 0x59, 0x68, 0xff, 0x30, 0xf2, 0xe2,
- /* (2^301)P */ 0x79, 0x08, 0xb1, 0xdb, 0x55, 0xae, 0xd0, 0xed, 0xda, 0xa0, 0xec, 0x6c, 0xae, 0x68, 0xf2, 0x0b, 0x61, 0xb3, 0xf5, 0x21, 0x69, 0x87, 0x0b, 0x03, 0xea, 0x8a, 0x15, 0xd9, 0x7e, 0xca, 0xf7, 0xcd, 0xf3, 0x33, 0xb3, 0x4c, 0x5b, 0x23, 0x4e, 0x6f, 0x90, 0xad, 0x91, 0x4b, 0x4f, 0x46, 0x37, 0xe5, 0xe8, 0xb7, 0xeb, 0xd5, 0xca, 0x34, 0x4e, 0x23,
- /* (2^302)P */ 0x09, 0x02, 0xdd, 0xfd, 0x70, 0xac, 0x56, 0x80, 0x36, 0x5e, 0x49, 0xd0, 0x3f, 0xc2, 0xe0, 0xba, 0x46, 0x7f, 0x5c, 0xf7, 0xc5, 0xbd, 0xd5, 0x55, 0x7d, 0x3f, 0xd5, 0x7d, 0x06, 0xdf, 0x27, 0x20, 0x4f, 0xe9, 0x30, 0xec, 0x1b, 0xa0, 0x0c, 0xd4, 0x2c, 0xe1, 0x2b, 0x65, 0x73, 0xea, 0x75, 0x35, 0xe8, 0xe6, 0x56, 0xd6, 0x07, 0x15, 0x99, 0xdf,
- /* (2^303)P */ 0x4e, 0x10, 0xb7, 0xd0, 0x63, 0x8c, 0xcf, 0x16, 0x00, 0x7c, 0x58, 0xdf, 0x86, 0xdc, 0x4e, 0xca, 0x9c, 0x40, 0x5a, 0x42, 0xfd, 0xec, 0x98, 0xa4, 0x42, 0x53, 0xae, 0x16, 0x9d, 0xfd, 0x75, 0x5a, 0x12, 0x56, 0x1e, 0xc6, 0x57, 0xcc, 0x79, 0x27, 0x96, 0x00, 0xcf, 0x80, 0x4f, 0x8a, 0x36, 0x5c, 0xbb, 0xe9, 0x12, 0xdb, 0xb6, 0x2b, 0xad, 0x96,
- /* (2^304)P */ 0x92, 0x32, 0x1f, 0xfd, 0xc6, 0x02, 0x94, 0x08, 0x1b, 0x60, 0x6a, 0x9f, 0x8b, 0xd6, 0xc8, 0xad, 0xd5, 0x1b, 0x27, 0x4e, 0xa4, 0x4d, 0x4a, 0x00, 0x10, 0x5f, 0x86, 0x11, 0xf5, 0xe3, 0x14, 0x32, 0x43, 0xee, 0xb9, 0xc7, 0xab, 0xf4, 0x6f, 0xe5, 0x66, 0x0c, 0x06, 0x0d, 0x96, 0x79, 0x28, 0xaf, 0x45, 0x2b, 0x56, 0xbe, 0xe4, 0x4a, 0x52, 0xd6,
- /* (2^305)P */ 0x15, 0x16, 0x69, 0xef, 0x60, 0xca, 0x82, 0x25, 0x0f, 0xc6, 0x30, 0xa0, 0x0a, 0xd1, 0x83, 0x29, 0xcd, 0xb6, 0x89, 0x6c, 0xf5, 0xb2, 0x08, 0x38, 0xe6, 0xca, 0x6b, 0x19, 0x93, 0xc6, 0x5f, 0x75, 0x8e, 0x60, 0x34, 0x23, 0xc4, 0x13, 0x17, 0x69, 0x55, 0xcc, 0x72, 0x9c, 0x2b, 0x6c, 0x80, 0xf4, 0x4b, 0x8b, 0xb6, 0x97, 0x65, 0x07, 0xb6, 0xfb,
- /* (2^306)P */ 0x01, 0x99, 0x74, 0x28, 0xa6, 0x67, 0xa3, 0xe5, 0x25, 0xfb, 0xdf, 0x82, 0x93, 0xe7, 0x35, 0x74, 0xce, 0xe3, 0x15, 0x1c, 0x1d, 0x79, 0x52, 0x84, 0x08, 0x04, 0x2f, 0x5c, 0xb8, 0xcd, 0x7f, 0x89, 0xb0, 0x39, 0x93, 0x63, 0xc9, 0x5d, 0x06, 0x01, 0x59, 0xf7, 0x7e, 0xf1, 0x4c, 0x3d, 0x12, 0x8d, 0x69, 0x1d, 0xb7, 0x21, 0x5e, 0x88, 0x82, 0xa2,
- /* (2^307)P */ 0x8e, 0x69, 0xaf, 0x9a, 0x41, 0x0d, 0x9d, 0xcf, 0x8e, 0x8d, 0x5c, 0x51, 0x6e, 0xde, 0x0e, 0x48, 0x23, 0x89, 0xe5, 0x37, 0x80, 0xd6, 0x9d, 0x72, 0x32, 0x26, 0x38, 0x2d, 0x63, 0xa0, 0xfa, 0xd3, 0x40, 0xc0, 0x8c, 0x68, 0x6f, 0x2b, 0x1e, 0x9a, 0x39, 0x51, 0x78, 0x74, 0x9a, 0x7b, 0x4a, 0x8f, 0x0c, 0xa0, 0x88, 0x60, 0xa5, 0x21, 0xcd, 0xc7,
- /* (2^308)P */ 0x3a, 0x7f, 0x73, 0x14, 0xbf, 0x89, 0x6a, 0x4c, 0x09, 0x5d, 0xf2, 0x93, 0x20, 0x2d, 0xc4, 0x29, 0x86, 0x06, 0x95, 0xab, 0x22, 0x76, 0x4c, 0x54, 0xe1, 0x7e, 0x80, 0x6d, 0xab, 0x29, 0x61, 0x87, 0x77, 0xf6, 0xc0, 0x3e, 0xda, 0xab, 0x65, 0x7e, 0x39, 0x12, 0xa1, 0x6b, 0x42, 0xf7, 0xc5, 0x97, 0x77, 0xec, 0x6f, 0x22, 0xbe, 0x44, 0xc7, 0x03,
- /* (2^309)P */ 0xa5, 0x23, 0x90, 0x41, 0xa3, 0xc5, 0x3e, 0xe0, 0xa5, 0x32, 0x49, 0x1f, 0x39, 0x78, 0xb1, 0xd8, 0x24, 0xea, 0xd4, 0x87, 0x53, 0x42, 0x51, 0xf4, 0xd9, 0x46, 0x25, 0x2f, 0x62, 0xa9, 0x90, 0x9a, 0x4a, 0x25, 0x8a, 0xd2, 0x10, 0xe7, 0x3c, 0xbc, 0x58, 0x8d, 0x16, 0x14, 0x96, 0xa4, 0x6f, 0xf8, 0x12, 0x69, 0x91, 0x73, 0xe2, 0xfa, 0xf4, 0x57,
- /* (2^310)P */ 0x51, 0x45, 0x3f, 0x96, 0xdc, 0x97, 0x38, 0xa6, 0x01, 0x63, 0x09, 0xea, 0xc2, 0x13, 0x30, 0xb0, 0x00, 0xb8, 0x0a, 0xce, 0xd1, 0x8f, 0x3e, 0x69, 0x62, 0x46, 0x33, 0x9c, 0xbf, 0x4b, 0xcb, 0x0c, 0x90, 0x1c, 0x45, 0xcf, 0x37, 0x5b, 0xf7, 0x4b, 0x5e, 0x95, 0xc3, 0x28, 0x9f, 0x08, 0x83, 0x53, 0x74, 0xab, 0x0c, 0xb4, 0xc0, 0xa1, 0xbc, 0x89,
- /* (2^311)P */ 0x06, 0xb1, 0x51, 0x15, 0x65, 0x60, 0x21, 0x17, 0x7a, 0x20, 0x65, 0xee, 0x12, 0x35, 0x4d, 0x46, 0xf4, 0xf8, 0xd0, 0xb1, 0xca, 0x09, 0x30, 0x08, 0x89, 0x23, 0x3b, 0xe7, 0xab, 0x8b, 0x77, 0xa6, 0xad, 0x25, 0xdd, 0xea, 0x3c, 0x7d, 0xa5, 0x24, 0xb3, 0xe8, 0xfa, 0xfb, 0xc9, 0xf2, 0x71, 0xe9, 0xfa, 0xf2, 0xdc, 0x54, 0xdd, 0x55, 0x2e, 0x2f,
- /* (2^312)P */ 0x7f, 0x96, 0x96, 0xfb, 0x52, 0x86, 0xcf, 0xea, 0x62, 0x18, 0xf1, 0x53, 0x1f, 0x61, 0x2a, 0x9f, 0x8c, 0x51, 0xca, 0x2c, 0xde, 0x6d, 0xce, 0xab, 0x58, 0x32, 0x0b, 0x33, 0x9b, 0x99, 0xb4, 0x5c, 0x88, 0x2a, 0x76, 0xcc, 0x3e, 0x54, 0x1e, 0x9d, 0xa2, 0x89, 0xe4, 0x19, 0xba, 0x80, 0xc8, 0x39, 0x32, 0x7f, 0x0f, 0xc7, 0x84, 0xbb, 0x43, 0x56,
- /* (2^313)P */ 0x9b, 0x07, 0xb4, 0x42, 0xa9, 0xa0, 0x78, 0x4f, 0x28, 0x70, 0x2b, 0x7e, 0x61, 0xe0, 0xdd, 0x02, 0x98, 0xfc, 0xed, 0x31, 0x80, 0xf1, 0x15, 0x52, 0x89, 0x23, 0xcd, 0x5d, 0x2b, 0xc5, 0x19, 0x32, 0xfb, 0x70, 0x50, 0x7a, 0x97, 0x6b, 0x42, 0xdb, 0xca, 0xdb, 0xc4, 0x59, 0x99, 0xe0, 0x12, 0x1f, 0x17, 0xba, 0x8b, 0xf0, 0xc4, 0x38, 0x5d, 0x27,
- /* (2^314)P */ 0x29, 0x1d, 0xdc, 0x2b, 0xf6, 0x5b, 0x04, 0x61, 0x36, 0x76, 0xa0, 0x56, 0x36, 0x6e, 0xd7, 0x24, 0x4d, 0xe7, 0xef, 0x44, 0xd2, 0xd5, 0x07, 0xcd, 0xc4, 0x9d, 0x80, 0x48, 0xc3, 0x38, 0xcf, 0xd8, 0xa3, 0xdd, 0xb2, 0x5e, 0xb5, 0x70, 0x15, 0xbb, 0x36, 0x85, 0x8a, 0xd7, 0xfb, 0x56, 0x94, 0x73, 0x9c, 0x81, 0xbe, 0xb1, 0x44, 0x28, 0xf1, 0x37,
- /* (2^315)P */ 0xbf, 0xcf, 0x5c, 0xd2, 0xe2, 0xea, 0xc2, 0xcd, 0x70, 0x7a, 0x9d, 0xcb, 0x81, 0xc1, 0xe9, 0xf1, 0x56, 0x71, 0x52, 0xf7, 0x1b, 0x87, 0xc6, 0xd8, 0xcc, 0xb2, 0x69, 0xf3, 0xb0, 0xbd, 0xba, 0x83, 0x12, 0x26, 0xc4, 0xce, 0x72, 0xde, 0x3b, 0x21, 0x28, 0x9e, 0x5a, 0x94, 0xf5, 0x04, 0xa3, 0xc8, 0x0f, 0x5e, 0xbc, 0x71, 0xf9, 0x0d, 0xce, 0xf5,
- /* (2^316)P */ 0x93, 0x97, 0x00, 0x85, 0xf4, 0xb4, 0x40, 0xec, 0xd9, 0x2b, 0x6c, 0xd6, 0x63, 0x9e, 0x93, 0x0a, 0x5a, 0xf4, 0xa7, 0x9a, 0xe3, 0x3c, 0xf0, 0x55, 0xd1, 0x96, 0x6c, 0xf5, 0x2a, 0xce, 0xd7, 0x95, 0x72, 0xbf, 0xc5, 0x0c, 0xce, 0x79, 0xa2, 0x0a, 0x78, 0xe0, 0x72, 0xd0, 0x66, 0x28, 0x05, 0x75, 0xd3, 0x23, 0x09, 0x91, 0xed, 0x7e, 0xc4, 0xbc,
- /* (2^317)P */ 0x77, 0xc2, 0x9a, 0xf7, 0xa6, 0xe6, 0x18, 0xb4, 0xe7, 0xf6, 0xda, 0xec, 0x44, 0x6d, 0xfb, 0x08, 0xee, 0x65, 0xa8, 0x92, 0x85, 0x1f, 0xba, 0x38, 0x93, 0x20, 0x5c, 0x4d, 0xd2, 0x18, 0x0f, 0x24, 0xbe, 0x1a, 0x96, 0x44, 0x7d, 0xeb, 0xb3, 0xda, 0x95, 0xf4, 0xaf, 0x6c, 0x06, 0x0f, 0x47, 0x37, 0xc8, 0x77, 0x63, 0xe1, 0x29, 0xef, 0xff, 0xa5,
- /* (2^318)P */ 0x16, 0x12, 0xd9, 0x47, 0x90, 0x22, 0x9b, 0x05, 0xf2, 0xa5, 0x9a, 0xae, 0x83, 0x98, 0xb5, 0xac, 0xab, 0x29, 0xaa, 0xdc, 0x5f, 0xde, 0xcd, 0xf7, 0x42, 0xad, 0x3b, 0x96, 0xd6, 0x3e, 0x6e, 0x52, 0x47, 0xb1, 0xab, 0x51, 0xde, 0x49, 0x7c, 0x87, 0x8d, 0x86, 0xe2, 0x70, 0x13, 0x21, 0x51, 0x1c, 0x0c, 0x25, 0xc1, 0xb0, 0xe6, 0x19, 0xcf, 0x12,
- /* (2^319)P */ 0xf0, 0xbc, 0x97, 0x8f, 0x4b, 0x2f, 0xd1, 0x1f, 0x8c, 0x57, 0xed, 0x3c, 0xf4, 0x26, 0x19, 0xbb, 0x60, 0xca, 0x24, 0xc5, 0xd9, 0x97, 0xe2, 0x5f, 0x76, 0x49, 0x39, 0x7e, 0x2d, 0x12, 0x21, 0x98, 0xda, 0xe6, 0xdb, 0xd2, 0xd8, 0x9f, 0x18, 0xd8, 0x83, 0x6c, 0xba, 0x89, 0x8d, 0x29, 0xfa, 0x46, 0x33, 0x8c, 0x28, 0xdf, 0x6a, 0xb3, 0x69, 0x28,
- /* (2^320)P */ 0x86, 0x17, 0xbc, 0xd6, 0x7c, 0xba, 0x1e, 0x83, 0xbb, 0x84, 0xb5, 0x8c, 0xad, 0xdf, 0xa1, 0x24, 0x81, 0x70, 0x40, 0x0f, 0xad, 0xad, 0x3b, 0x23, 0xd0, 0x93, 0xa0, 0x49, 0x5c, 0x4b, 0x51, 0xbe, 0x20, 0x49, 0x4e, 0xda, 0x2d, 0xd3, 0xad, 0x1b, 0x74, 0x08, 0x41, 0xf0, 0xef, 0x19, 0xe9, 0x45, 0x5d, 0x02, 0xae, 0x26, 0x25, 0xd9, 0xd1, 0xc2,
- /* (2^321)P */ 0x48, 0x81, 0x3e, 0xb2, 0x83, 0xf8, 0x4d, 0xb3, 0xd0, 0x4c, 0x75, 0xb3, 0xa0, 0x52, 0x26, 0xf2, 0xaf, 0x5d, 0x36, 0x70, 0x72, 0xd6, 0xb7, 0x88, 0x08, 0x69, 0xbd, 0x15, 0x25, 0xb1, 0x45, 0x1b, 0xb7, 0x0b, 0x5f, 0x71, 0x5d, 0x83, 0x49, 0xb9, 0x84, 0x3b, 0x7c, 0xc1, 0x50, 0x93, 0x05, 0x53, 0xe0, 0x61, 0xea, 0xc1, 0xef, 0xdb, 0x82, 0x97,
- /* (2^322)P */ 0x00, 0xd5, 0xc3, 0x3a, 0x4d, 0x8a, 0x23, 0x7a, 0xef, 0xff, 0x37, 0xef, 0xf3, 0xbc, 0xa9, 0xb6, 0xae, 0xd7, 0x3a, 0x7b, 0xfd, 0x3e, 0x8e, 0x9b, 0xab, 0x44, 0x54, 0x60, 0x28, 0x6c, 0xbf, 0x15, 0x24, 0x4a, 0x56, 0x60, 0x7f, 0xa9, 0x7a, 0x28, 0x59, 0x2c, 0x8a, 0xd1, 0x7d, 0x6b, 0x00, 0xfd, 0xa5, 0xad, 0xbc, 0x19, 0x3f, 0xcb, 0x73, 0xe0,
- /* (2^323)P */ 0xcf, 0x9e, 0x66, 0x06, 0x4d, 0x2b, 0xf5, 0x9c, 0xc2, 0x9d, 0x9e, 0xed, 0x5a, 0x5c, 0x2d, 0x00, 0xbf, 0x29, 0x90, 0x88, 0xe4, 0x5d, 0xfd, 0xe2, 0xf0, 0x38, 0xec, 0x4d, 0x26, 0xea, 0x54, 0xf0, 0x3c, 0x84, 0x10, 0x6a, 0xf9, 0x66, 0x9c, 0xe7, 0x21, 0xfd, 0x0f, 0xc7, 0x13, 0x50, 0x81, 0xb6, 0x50, 0xf9, 0x04, 0x7f, 0xa4, 0x37, 0x85, 0x14,
- /* (2^324)P */ 0xdb, 0x87, 0x49, 0xc7, 0xa8, 0x39, 0x0c, 0x32, 0x98, 0x0c, 0xb9, 0x1a, 0x1b, 0x4d, 0xe0, 0x8a, 0x9a, 0x8e, 0x8f, 0xab, 0x5a, 0x17, 0x3d, 0x04, 0x21, 0xce, 0x3e, 0x2c, 0xf9, 0xa3, 0x97, 0xe4, 0x77, 0x95, 0x0e, 0xb6, 0xa5, 0x15, 0xad, 0x3a, 0x1e, 0x46, 0x53, 0x17, 0x09, 0x83, 0x71, 0x4e, 0x86, 0x38, 0xd5, 0x23, 0x44, 0x16, 0x8d, 0xc8,
- /* (2^325)P */ 0x05, 0x5e, 0x99, 0x08, 0xbb, 0xc3, 0xc0, 0xb7, 0x6c, 0x12, 0xf2, 0xf3, 0xf4, 0x7c, 0x6a, 0x4d, 0x9e, 0xeb, 0x3d, 0xb9, 0x63, 0x94, 0xce, 0x81, 0xd8, 0x11, 0xcb, 0x55, 0x69, 0x4a, 0x20, 0x0b, 0x4c, 0x2e, 0x14, 0xb8, 0xd4, 0x6a, 0x7c, 0xf0, 0xed, 0xfc, 0x8f, 0xef, 0xa0, 0xeb, 0x6c, 0x01, 0xe2, 0xdc, 0x10, 0x22, 0xa2, 0x01, 0x85, 0x64,
- /* (2^326)P */ 0x58, 0xe1, 0x9c, 0x27, 0x55, 0xc6, 0x25, 0xa6, 0x7d, 0x67, 0x88, 0x65, 0x99, 0x6c, 0xcb, 0xdb, 0x27, 0x4f, 0x44, 0x29, 0xf5, 0x4a, 0x23, 0x10, 0xbc, 0x03, 0x3f, 0x36, 0x1e, 0xef, 0xb0, 0xba, 0x75, 0xe8, 0x74, 0x5f, 0x69, 0x3e, 0x26, 0x40, 0xb4, 0x2f, 0xdc, 0x43, 0xbf, 0xa1, 0x8b, 0xbd, 0xca, 0x6e, 0xc1, 0x6e, 0x21, 0x79, 0xa0, 0xd0,
- /* (2^327)P */ 0x78, 0x93, 0x4a, 0x2d, 0x22, 0x6e, 0x6e, 0x7d, 0x74, 0xd2, 0x66, 0x58, 0xce, 0x7b, 0x1d, 0x97, 0xb1, 0xf2, 0xda, 0x1c, 0x79, 0xfb, 0xba, 0xd1, 0xc0, 0xc5, 0x6e, 0xc9, 0x11, 0x89, 0xd2, 0x41, 0x8d, 0x70, 0xb9, 0xcc, 0xea, 0x6a, 0xb3, 0x45, 0xb6, 0x05, 0x2e, 0xf2, 0x17, 0xf1, 0x27, 0xb8, 0xed, 0x06, 0x1f, 0xdb, 0x9d, 0x1f, 0x69, 0x28,
- /* (2^328)P */ 0x93, 0x12, 0xa8, 0x11, 0xe1, 0x92, 0x30, 0x8d, 0xac, 0xe1, 0x1c, 0x60, 0x7c, 0xed, 0x2d, 0x2e, 0xd3, 0x03, 0x5c, 0x9c, 0xc5, 0xbd, 0x64, 0x4a, 0x8c, 0xba, 0x76, 0xfe, 0xc6, 0xc1, 0xea, 0xc2, 0x4f, 0xbe, 0x70, 0x3d, 0x64, 0xcf, 0x8e, 0x18, 0xcb, 0xcd, 0x57, 0xa7, 0xf7, 0x36, 0xa9, 0x6b, 0x3e, 0xb8, 0x69, 0xee, 0x47, 0xa2, 0x7e, 0xb2,
- /* (2^329)P */ 0x96, 0xaf, 0x3a, 0xf5, 0xed, 0xcd, 0xaf, 0xf7, 0x82, 0xaf, 0x59, 0x62, 0x0b, 0x36, 0x85, 0xf9, 0xaf, 0xd6, 0x38, 0xff, 0x87, 0x2e, 0x1d, 0x6c, 0x8b, 0xaf, 0x3b, 0xdf, 0x28, 0xa2, 0xd6, 0x4d, 0x80, 0x92, 0xc3, 0x0f, 0x34, 0xa8, 0xae, 0x69, 0x5d, 0x7b, 0x9d, 0xbc, 0xf5, 0xfd, 0x1d, 0xb1, 0x96, 0x55, 0x86, 0xe1, 0x5c, 0xb6, 0xac, 0xb9,
- /* (2^330)P */ 0x50, 0x9e, 0x37, 0x28, 0x7d, 0xa8, 0x33, 0x63, 0xda, 0x3f, 0x20, 0x98, 0x0e, 0x09, 0xa8, 0x77, 0x3b, 0x7a, 0xfc, 0x16, 0x85, 0x44, 0x64, 0x77, 0x65, 0x68, 0x92, 0x41, 0xc6, 0x1f, 0xdf, 0x27, 0xf9, 0xec, 0xa0, 0x61, 0x22, 0xea, 0x19, 0xe7, 0x75, 0x8b, 0x4e, 0xe5, 0x0f, 0xb7, 0xf7, 0xd2, 0x53, 0xf4, 0xdd, 0x4a, 0xaa, 0x78, 0x40, 0xb7,
- /* (2^331)P */ 0xd4, 0x89, 0xe3, 0x79, 0xba, 0xb6, 0xc3, 0xda, 0xe6, 0x78, 0x65, 0x7d, 0x6e, 0x22, 0x62, 0xb1, 0x3d, 0xea, 0x90, 0x84, 0x30, 0x5e, 0xd4, 0x39, 0x84, 0x78, 0xd9, 0x75, 0xd6, 0xce, 0x2a, 0x11, 0x29, 0x69, 0xa4, 0x5e, 0xaa, 0x2a, 0x98, 0x5a, 0xe5, 0x91, 0x8f, 0xb2, 0xfb, 0xda, 0x97, 0xe8, 0x83, 0x6f, 0x04, 0xb9, 0x5d, 0xaf, 0xe1, 0x9b,
- /* (2^332)P */ 0x8b, 0xe4, 0xe1, 0x48, 0x9c, 0xc4, 0x83, 0x89, 0xdf, 0x65, 0xd3, 0x35, 0x55, 0x13, 0xf4, 0x1f, 0x36, 0x92, 0x33, 0x38, 0xcb, 0xed, 0x15, 0xe6, 0x60, 0x2d, 0x25, 0xf5, 0x36, 0x60, 0x3a, 0x37, 0x9b, 0x71, 0x9d, 0x42, 0xb0, 0x14, 0xc8, 0xba, 0x62, 0xa3, 0x49, 0xb0, 0x88, 0xc1, 0x72, 0x73, 0xdd, 0x62, 0x40, 0xa9, 0x62, 0x88, 0x99, 0xca,
- /* (2^333)P */ 0x47, 0x7b, 0xea, 0xda, 0x46, 0x2f, 0x45, 0xc6, 0xe3, 0xb4, 0x4d, 0x8d, 0xac, 0x0b, 0x54, 0x22, 0x06, 0x31, 0x16, 0x66, 0x3e, 0xe4, 0x38, 0x12, 0xcd, 0xf3, 0xe7, 0x99, 0x37, 0xd9, 0x62, 0x24, 0x4b, 0x05, 0xf2, 0x58, 0xe6, 0x29, 0x4b, 0x0d, 0xf6, 0xc1, 0xba, 0xa0, 0x1e, 0x0f, 0xcb, 0x1f, 0xc6, 0x2b, 0x19, 0xfc, 0x82, 0x01, 0xd0, 0x86,
- /* (2^334)P */ 0xa2, 0xae, 0x77, 0x20, 0xfb, 0xa8, 0x18, 0xb4, 0x61, 0xef, 0xe8, 0x52, 0x79, 0xbb, 0x86, 0x90, 0x5d, 0x2e, 0x76, 0xed, 0x66, 0x60, 0x5d, 0x00, 0xb5, 0xa4, 0x00, 0x40, 0x89, 0xec, 0xd1, 0xd2, 0x0d, 0x26, 0xb9, 0x30, 0xb2, 0xd2, 0xb8, 0xe8, 0x0e, 0x56, 0xf9, 0x67, 0x94, 0x2e, 0x62, 0xe1, 0x79, 0x48, 0x2b, 0xa9, 0xfa, 0xea, 0xdb, 0x28,
- /* (2^335)P */ 0x35, 0xf1, 0xb0, 0x43, 0xbd, 0x27, 0xef, 0x18, 0x44, 0xa2, 0x04, 0xb4, 0x69, 0xa1, 0x97, 0x1f, 0x8c, 0x04, 0x82, 0x9b, 0x00, 0x6d, 0xf8, 0xbf, 0x7d, 0xc1, 0x5b, 0xab, 0xe8, 0xb2, 0x34, 0xbd, 0xaf, 0x7f, 0xb2, 0x0d, 0xf3, 0xed, 0xfc, 0x5b, 0x50, 0xee, 0xe7, 0x4a, 0x20, 0xd9, 0xf5, 0xc6, 0x9a, 0x97, 0x6d, 0x07, 0x2f, 0xb9, 0x31, 0x02,
- /* (2^336)P */ 0xf9, 0x54, 0x4a, 0xc5, 0x61, 0x7e, 0x1d, 0xa6, 0x0e, 0x1a, 0xa8, 0xd3, 0x8c, 0x36, 0x7d, 0xf1, 0x06, 0xb1, 0xac, 0x93, 0xcd, 0xe9, 0x8f, 0x61, 0x6c, 0x5d, 0x03, 0x23, 0xdf, 0x85, 0x53, 0x39, 0x63, 0x5e, 0xeb, 0xf3, 0xd3, 0xd3, 0x75, 0x97, 0x9b, 0x62, 0x9b, 0x01, 0xb3, 0x19, 0xd8, 0x2b, 0x36, 0xf2, 0x2c, 0x2c, 0x6f, 0x36, 0xc6, 0x3c,
- /* (2^337)P */ 0x05, 0x74, 0x43, 0x10, 0xb6, 0xb0, 0xf8, 0xbf, 0x02, 0x46, 0x9a, 0xee, 0xc1, 0xaf, 0xc1, 0xe5, 0x5a, 0x2e, 0xbb, 0xe1, 0xdc, 0xc6, 0xce, 0x51, 0x29, 0x50, 0xbf, 0x1b, 0xde, 0xff, 0xba, 0x4d, 0x8d, 0x8b, 0x7e, 0xe7, 0xbd, 0x5b, 0x8f, 0xbe, 0xe3, 0x75, 0x71, 0xff, 0x37, 0x05, 0x5a, 0x10, 0xeb, 0x54, 0x7e, 0x44, 0x72, 0x2c, 0xd4, 0xfc,
- /* (2^338)P */ 0x03, 0x12, 0x1c, 0xb2, 0x08, 0x90, 0xa1, 0x2d, 0x50, 0xa0, 0xad, 0x7f, 0x8d, 0xa6, 0x97, 0xc1, 0xbd, 0xdc, 0xc3, 0xa7, 0xad, 0x31, 0xdf, 0xb8, 0x03, 0x84, 0xc3, 0xb9, 0x29, 0x3d, 0x92, 0x2e, 0xc3, 0x90, 0x07, 0xe8, 0xa7, 0xc7, 0xbc, 0x61, 0xe9, 0x3e, 0xa0, 0x35, 0xda, 0x1d, 0xab, 0x48, 0xfe, 0x50, 0xc9, 0x25, 0x59, 0x23, 0x69, 0x3f,
- /* (2^339)P */ 0x8e, 0x91, 0xab, 0x6b, 0x91, 0x4f, 0x89, 0x76, 0x67, 0xad, 0xb2, 0x65, 0x9d, 0xad, 0x02, 0x36, 0xdc, 0xac, 0x96, 0x93, 0x97, 0x21, 0x14, 0xd0, 0xe8, 0x11, 0x60, 0x1e, 0xeb, 0x96, 0x06, 0xf2, 0x53, 0xf2, 0x6d, 0xb7, 0x93, 0x6f, 0x26, 0x91, 0x23, 0xe3, 0x34, 0x04, 0x92, 0x91, 0x37, 0x08, 0x50, 0xd6, 0x28, 0x09, 0x27, 0xa1, 0x0c, 0x00,
- /* (2^340)P */ 0x1f, 0xbb, 0x21, 0x26, 0x33, 0xcb, 0xa4, 0xd1, 0xee, 0x85, 0xf9, 0xd9, 0x3c, 0x90, 0xc3, 0xd1, 0x26, 0xa2, 0x25, 0x93, 0x43, 0x61, 0xed, 0x91, 0x6e, 0x54, 0x03, 0x2e, 0x42, 0x9d, 0xf7, 0xa6, 0x02, 0x0f, 0x2f, 0x9c, 0x7a, 0x8d, 0x12, 0xc2, 0x18, 0xfc, 0x41, 0xff, 0x85, 0x26, 0x1a, 0x44, 0x55, 0x0b, 0x89, 0xab, 0x6f, 0x62, 0x33, 0x8c,
- /* (2^341)P */ 0xe0, 0x3c, 0x5d, 0x70, 0x64, 0x87, 0x81, 0x35, 0xf2, 0x37, 0xa6, 0x24, 0x3e, 0xe0, 0x62, 0xd5, 0x71, 0xe7, 0x93, 0xfb, 0xac, 0xc3, 0xe7, 0xc7, 0x04, 0xe2, 0x70, 0xd3, 0x29, 0x5b, 0x21, 0xbf, 0xf4, 0x26, 0x5d, 0xf3, 0x95, 0xb4, 0x2a, 0x6a, 0x07, 0x55, 0xa6, 0x4b, 0x3b, 0x15, 0xf2, 0x25, 0x8a, 0x95, 0x3f, 0x63, 0x2f, 0x7a, 0x23, 0x96,
- /* (2^342)P */ 0x0d, 0x3d, 0xd9, 0x13, 0xa7, 0xb3, 0x5e, 0x67, 0xf7, 0x02, 0x23, 0xee, 0x84, 0xff, 0x99, 0xda, 0xb9, 0x53, 0xf8, 0xf0, 0x0e, 0x39, 0x2f, 0x3c, 0x64, 0x34, 0xe3, 0x09, 0xfd, 0x2b, 0x33, 0xc7, 0xfe, 0x62, 0x2b, 0x84, 0xdf, 0x2b, 0xd2, 0x7c, 0x26, 0x01, 0x70, 0x66, 0x5b, 0x85, 0xc2, 0xbe, 0x88, 0x37, 0xf1, 0x30, 0xac, 0xb8, 0x76, 0xa3,
- /* (2^343)P */ 0x6e, 0x01, 0xf0, 0x55, 0x35, 0xe4, 0xbd, 0x43, 0x62, 0x9d, 0xd6, 0x11, 0xef, 0x6f, 0xb8, 0x8c, 0xaa, 0x98, 0x87, 0xc6, 0x6d, 0xc4, 0xcc, 0x74, 0x92, 0x53, 0x4a, 0xdf, 0xe4, 0x08, 0x89, 0x17, 0xd0, 0x0f, 0xf4, 0x00, 0x60, 0x78, 0x08, 0x44, 0xb5, 0xda, 0x18, 0xed, 0x98, 0xc8, 0x61, 0x3d, 0x39, 0xdb, 0xcf, 0x1d, 0x49, 0x40, 0x65, 0x75,
- /* (2^344)P */ 0x8e, 0x10, 0xae, 0x5f, 0x06, 0xd2, 0x95, 0xfd, 0x20, 0x16, 0x49, 0x5b, 0x57, 0xbe, 0x22, 0x8b, 0x43, 0xfb, 0xe6, 0xcc, 0x26, 0xa5, 0x5d, 0xd3, 0x68, 0xc5, 0xf9, 0x5a, 0x86, 0x24, 0x87, 0x27, 0x05, 0xfd, 0xe2, 0xff, 0xb3, 0xa3, 0x7b, 0x37, 0x59, 0xc5, 0x4e, 0x14, 0x94, 0xf9, 0x3b, 0xcb, 0x7c, 0xed, 0xca, 0x1d, 0xb2, 0xac, 0x05, 0x4a,
- /* (2^345)P */ 0xf4, 0xd1, 0x81, 0xeb, 0x89, 0xbf, 0xfe, 0x1e, 0x41, 0x92, 0x29, 0xee, 0xe1, 0x43, 0xf5, 0x86, 0x1d, 0x2f, 0xbb, 0x1e, 0x84, 0x5d, 0x7b, 0x8d, 0xd5, 0xda, 0xee, 0x1e, 0x8a, 0xd0, 0x27, 0xf2, 0x60, 0x51, 0x59, 0x82, 0xf4, 0x84, 0x2b, 0x5b, 0x14, 0x2d, 0x81, 0x82, 0x3e, 0x2b, 0xb4, 0x6d, 0x51, 0x4f, 0xc5, 0xcb, 0xbf, 0x74, 0xe3, 0xb4,
- /* (2^346)P */ 0x19, 0x2f, 0x22, 0xb3, 0x04, 0x5f, 0x81, 0xca, 0x05, 0x60, 0xb9, 0xaa, 0xee, 0x0e, 0x2f, 0x48, 0x38, 0xf9, 0x91, 0xb4, 0x66, 0xe4, 0x57, 0x28, 0x54, 0x10, 0xe9, 0x61, 0x9d, 0xd4, 0x90, 0x75, 0xb1, 0x39, 0x23, 0xb6, 0xfc, 0x82, 0xe0, 0xfa, 0xbb, 0x5c, 0x6e, 0xc3, 0x44, 0x13, 0x00, 0x83, 0x55, 0x9e, 0x8e, 0x10, 0x61, 0x81, 0x91, 0x04,
- /* (2^347)P */ 0x5f, 0x2a, 0xd7, 0x81, 0xd9, 0x9c, 0xbb, 0x79, 0xbc, 0x62, 0x56, 0x98, 0x03, 0x5a, 0x18, 0x85, 0x2a, 0x9c, 0xd0, 0xfb, 0xd2, 0xb1, 0xaf, 0xef, 0x0d, 0x24, 0xc5, 0xfa, 0x39, 0xbb, 0x6b, 0xed, 0xa4, 0xdf, 0xe4, 0x87, 0xcd, 0x41, 0xd3, 0x72, 0x32, 0xc6, 0x28, 0x21, 0xb1, 0xba, 0x8b, 0xa3, 0x91, 0x79, 0x76, 0x22, 0x25, 0x10, 0x61, 0xd1,
- /* (2^348)P */ 0x73, 0xb5, 0x32, 0x97, 0xdd, 0xeb, 0xdd, 0x22, 0x22, 0xf1, 0x33, 0x3c, 0x77, 0x56, 0x7d, 0x6b, 0x48, 0x2b, 0x05, 0x81, 0x03, 0x03, 0x91, 0x9a, 0xe3, 0x5e, 0xd4, 0xee, 0x3f, 0xf8, 0xbb, 0x50, 0x21, 0x32, 0x4c, 0x4a, 0x58, 0x49, 0xde, 0x0c, 0xde, 0x30, 0x82, 0x3d, 0x92, 0xf0, 0x6c, 0xcc, 0x32, 0x3e, 0xd2, 0x78, 0x8a, 0x6e, 0x2c, 0xd0,
- /* (2^349)P */ 0xf0, 0xf7, 0xa1, 0x0b, 0xc1, 0x74, 0x85, 0xa8, 0xe9, 0xdd, 0x48, 0xa1, 0xc0, 0x16, 0xd8, 0x2b, 0x61, 0x08, 0xc2, 0x2b, 0x30, 0x26, 0x79, 0xce, 0x9e, 0xfd, 0x39, 0xd7, 0x81, 0xa4, 0x63, 0x8c, 0xd5, 0x74, 0xa0, 0x88, 0xfa, 0x03, 0x30, 0xe9, 0x7f, 0x2b, 0xc6, 0x02, 0xc9, 0x5e, 0xe4, 0xd5, 0x4d, 0x92, 0xd0, 0xf6, 0xf2, 0x5b, 0x79, 0x08,
- /* (2^350)P */ 0x34, 0x89, 0x81, 0x43, 0xd1, 0x94, 0x2c, 0x10, 0x54, 0x9b, 0xa0, 0xe5, 0x44, 0xe8, 0xc2, 0x2f, 0x3e, 0x0e, 0x74, 0xae, 0xba, 0xe2, 0xac, 0x85, 0x6b, 0xd3, 0x5c, 0x97, 0xf7, 0x90, 0xf1, 0x12, 0xc0, 0x03, 0xc8, 0x1f, 0x37, 0x72, 0x8c, 0x9b, 0x9c, 0x17, 0x96, 0x9d, 0xc7, 0xbf, 0xa3, 0x3f, 0x44, 0x3d, 0x87, 0x81, 0xbd, 0x81, 0xa6, 0x5f,
- /* (2^351)P */ 0xe4, 0xff, 0x78, 0x62, 0x82, 0x5b, 0x76, 0x58, 0xf5, 0x5b, 0xa6, 0xc4, 0x53, 0x11, 0x3b, 0x7b, 0xaa, 0x67, 0xf8, 0xea, 0x3b, 0x5d, 0x9a, 0x2e, 0x04, 0xeb, 0x4a, 0x24, 0xfb, 0x56, 0xf0, 0xa8, 0xd4, 0x14, 0xed, 0x0f, 0xfd, 0xc5, 0x26, 0x17, 0x2a, 0xf0, 0xb9, 0x13, 0x8c, 0xbd, 0x65, 0x14, 0x24, 0x95, 0x27, 0x12, 0x63, 0x2a, 0x09, 0x18,
- /* (2^352)P */ 0xe1, 0x5c, 0xe7, 0xe0, 0x00, 0x6a, 0x96, 0xf2, 0x49, 0x6a, 0x39, 0xa5, 0xe0, 0x17, 0x79, 0x4a, 0x63, 0x07, 0x62, 0x09, 0x61, 0x1b, 0x6e, 0xa9, 0xb5, 0x62, 0xb7, 0xde, 0xdf, 0x80, 0x4c, 0x5a, 0x99, 0x73, 0x59, 0x9d, 0xfb, 0xb1, 0x5e, 0xbe, 0xb8, 0xb7, 0x63, 0x93, 0xe8, 0xad, 0x5e, 0x1f, 0xae, 0x59, 0x1c, 0xcd, 0xb4, 0xc2, 0xb3, 0x8a,
- /* (2^353)P */ 0x78, 0x53, 0xa1, 0x4c, 0x70, 0x9c, 0x63, 0x7e, 0xb3, 0x12, 0x40, 0x5f, 0xbb, 0x23, 0xa7, 0xf7, 0x77, 0x96, 0x5b, 0x4d, 0x91, 0x10, 0x52, 0x85, 0x9e, 0xa5, 0x38, 0x0b, 0xfd, 0x25, 0x01, 0x4b, 0xfa, 0x4d, 0xd3, 0x3f, 0x78, 0x74, 0x42, 0xff, 0x62, 0x2d, 0x27, 0xdc, 0x9d, 0xd1, 0x29, 0x76, 0x2e, 0x78, 0xb3, 0x35, 0xfa, 0x15, 0xd5, 0x38,
- /* (2^354)P */ 0x8b, 0xc7, 0x43, 0xce, 0xf0, 0x5e, 0xf1, 0x0d, 0x02, 0x38, 0xe8, 0x82, 0xc9, 0x25, 0xad, 0x2d, 0x27, 0xa4, 0x54, 0x18, 0xb2, 0x30, 0x73, 0xa4, 0x41, 0x08, 0xe4, 0x86, 0xe6, 0x8c, 0xe9, 0x2a, 0x34, 0xb3, 0xd6, 0x61, 0x8f, 0x66, 0x26, 0x08, 0xb6, 0x06, 0x33, 0xaa, 0x12, 0xac, 0x72, 0xec, 0x2e, 0x52, 0xa3, 0x25, 0x3e, 0xd7, 0x62, 0xe8,
- /* (2^355)P */ 0xc4, 0xbb, 0x89, 0xc8, 0x40, 0xcc, 0x84, 0xec, 0x4a, 0xd9, 0xc4, 0x55, 0x78, 0x00, 0xcf, 0xd8, 0xe9, 0x24, 0x59, 0xdc, 0x5e, 0xf0, 0x66, 0xa1, 0x83, 0xae, 0x97, 0x18, 0xc5, 0x54, 0x27, 0xa2, 0x21, 0x52, 0x03, 0x31, 0x5b, 0x11, 0x67, 0xf6, 0x12, 0x00, 0x87, 0x2f, 0xff, 0x59, 0x70, 0x8f, 0x6d, 0x71, 0xab, 0xab, 0x24, 0xb8, 0xba, 0x35,
- /* (2^356)P */ 0x69, 0x43, 0xa7, 0x14, 0x06, 0x96, 0xe9, 0xc2, 0xe3, 0x2b, 0x45, 0x22, 0xc0, 0xd0, 0x2f, 0x34, 0xd1, 0x01, 0x99, 0xfc, 0x99, 0x38, 0xa1, 0x25, 0x2e, 0x59, 0x6c, 0x27, 0xc9, 0xeb, 0x7b, 0xdc, 0x4e, 0x26, 0x68, 0xba, 0xfa, 0xec, 0x02, 0x05, 0x64, 0x80, 0x30, 0x20, 0x5c, 0x26, 0x7f, 0xaf, 0x95, 0x17, 0x3d, 0x5c, 0x9e, 0x96, 0x96, 0xaf,
- /* (2^357)P */ 0xa6, 0xba, 0x21, 0x29, 0x32, 0xe2, 0x98, 0xde, 0x9b, 0x6d, 0x0b, 0x44, 0x91, 0xa8, 0x3e, 0xd4, 0xb8, 0x04, 0x6c, 0xf6, 0x04, 0x39, 0xbd, 0x52, 0x05, 0x15, 0x27, 0x78, 0x8e, 0x55, 0xac, 0x79, 0xc5, 0xe6, 0x00, 0x7f, 0x90, 0xa2, 0xdd, 0x07, 0x13, 0xe0, 0x24, 0x70, 0x5c, 0x0f, 0x4d, 0xa9, 0xf9, 0xae, 0xcb, 0x34, 0x10, 0x9d, 0x89, 0x9d,
- /* (2^358)P */ 0x12, 0xe0, 0xb3, 0x9f, 0xc4, 0x96, 0x1d, 0xcf, 0xed, 0x99, 0x64, 0x28, 0x8d, 0xc7, 0x31, 0x82, 0xee, 0x5e, 0x75, 0x48, 0xff, 0x3a, 0xf2, 0x09, 0x34, 0x03, 0x93, 0x52, 0x19, 0xb2, 0xc5, 0x81, 0x93, 0x45, 0x5e, 0x59, 0x21, 0x2b, 0xec, 0x89, 0xba, 0x36, 0x6e, 0xf9, 0x82, 0x75, 0x7e, 0x82, 0x3f, 0xaa, 0xe2, 0xe3, 0x3b, 0x94, 0xfd, 0x98,
- /* (2^359)P */ 0x7c, 0xdb, 0x75, 0x31, 0x61, 0xfb, 0x15, 0x28, 0x94, 0xd7, 0xc3, 0x5a, 0xa9, 0xa1, 0x0a, 0x66, 0x0f, 0x2b, 0x13, 0x3e, 0x42, 0xb5, 0x28, 0x3a, 0xca, 0x83, 0xf3, 0x61, 0x22, 0xf4, 0x40, 0xc5, 0xdf, 0xe7, 0x31, 0x9f, 0x7e, 0x51, 0x75, 0x06, 0x9d, 0x51, 0xc8, 0xe7, 0x9f, 0xc3, 0x71, 0x4f, 0x3d, 0x5b, 0xfb, 0xe9, 0x8e, 0x08, 0x40, 0x8e,
- /* (2^360)P */ 0xf7, 0x31, 0xad, 0x50, 0x5d, 0x25, 0x93, 0x73, 0x68, 0xf6, 0x7c, 0x89, 0x5a, 0x3d, 0x9f, 0x9b, 0x05, 0x82, 0xe7, 0x70, 0x4b, 0x19, 0xaa, 0xcf, 0xff, 0xde, 0x50, 0x8f, 0x2f, 0x69, 0xd3, 0xf0, 0x99, 0x51, 0x6b, 0x9d, 0xb6, 0x56, 0x6f, 0xf8, 0x4c, 0x74, 0x8b, 0x4c, 0x91, 0xf9, 0xa9, 0xb1, 0x3e, 0x07, 0xdf, 0x0b, 0x27, 0x8a, 0xb1, 0xed,
- /* (2^361)P */ 0xfb, 0x67, 0xd9, 0x48, 0xd2, 0xe4, 0x44, 0x9b, 0x43, 0x15, 0x8a, 0xeb, 0x00, 0x53, 0xad, 0x25, 0xc7, 0x7e, 0x19, 0x30, 0x87, 0xb7, 0xd5, 0x5f, 0x04, 0xf8, 0xaa, 0xdd, 0x57, 0xae, 0x34, 0x75, 0xe2, 0x84, 0x4b, 0x54, 0x60, 0x37, 0x95, 0xe4, 0xd3, 0xec, 0xac, 0xef, 0x47, 0x31, 0xa3, 0xc8, 0x31, 0x22, 0xdb, 0x26, 0xe7, 0x6a, 0xb5, 0xad,
- /* (2^362)P */ 0x44, 0x09, 0x5c, 0x95, 0xe4, 0x72, 0x3c, 0x1a, 0xd1, 0xac, 0x42, 0x51, 0x99, 0x6f, 0xfa, 0x1f, 0xf2, 0x22, 0xbe, 0xff, 0x7b, 0x66, 0xf5, 0x6c, 0xb3, 0x66, 0xc7, 0x4d, 0x78, 0x31, 0x83, 0x80, 0xf5, 0x41, 0xe9, 0x7f, 0xbe, 0xf7, 0x23, 0x49, 0x6b, 0x84, 0x4e, 0x7e, 0x47, 0x07, 0x6e, 0x74, 0xdf, 0xe5, 0x9d, 0x9e, 0x56, 0x2a, 0xc0, 0xbc,
- /* (2^363)P */ 0xac, 0x10, 0x80, 0x8c, 0x7c, 0xfa, 0x83, 0xdf, 0xb3, 0xd0, 0xc4, 0xbe, 0xfb, 0x9f, 0xac, 0xc9, 0xc3, 0x40, 0x95, 0x0b, 0x09, 0x23, 0xda, 0x63, 0x67, 0xcf, 0xe7, 0x9f, 0x7d, 0x7b, 0x6b, 0xe2, 0xe6, 0x6d, 0xdb, 0x87, 0x9e, 0xa6, 0xff, 0x6d, 0xab, 0xbd, 0xfb, 0x54, 0x84, 0x68, 0xcf, 0x89, 0xf1, 0xd0, 0xe2, 0x85, 0x61, 0xdc, 0x22, 0xd1,
- /* (2^364)P */ 0xa8, 0x48, 0xfb, 0x8c, 0x6a, 0x63, 0x01, 0x72, 0x43, 0x43, 0xeb, 0x21, 0xa3, 0x00, 0x8a, 0xc0, 0x87, 0x51, 0x9e, 0x86, 0x75, 0x16, 0x79, 0xf9, 0x6b, 0x11, 0x80, 0x62, 0xc2, 0x9d, 0xb8, 0x8c, 0x30, 0x8e, 0x8d, 0x03, 0x52, 0x7e, 0x31, 0x59, 0x38, 0xf9, 0x25, 0xc7, 0x0f, 0xc7, 0xa8, 0x2b, 0x5c, 0x80, 0xfa, 0x90, 0xa2, 0x63, 0xca, 0xe7,
- /* (2^365)P */ 0xf1, 0x5d, 0xb5, 0xd9, 0x20, 0x10, 0x7d, 0x0f, 0xc5, 0x50, 0x46, 0x07, 0xff, 0x02, 0x75, 0x2b, 0x4a, 0xf3, 0x39, 0x91, 0x72, 0xb7, 0xd5, 0xcc, 0x38, 0xb8, 0xe7, 0x36, 0x26, 0x5e, 0x11, 0x97, 0x25, 0xfb, 0x49, 0x68, 0xdc, 0xb4, 0x46, 0x87, 0x5c, 0xc2, 0x7f, 0xaa, 0x7d, 0x36, 0x23, 0xa6, 0xc6, 0x53, 0xec, 0xbc, 0x57, 0x47, 0xc1, 0x2b,
- /* (2^366)P */ 0x25, 0x5d, 0x7d, 0x95, 0xda, 0x0b, 0x8f, 0x78, 0x1e, 0x19, 0x09, 0xfa, 0x67, 0xe0, 0xa0, 0x17, 0x24, 0x76, 0x6c, 0x30, 0x1f, 0x62, 0x3d, 0xbe, 0x45, 0x70, 0xcc, 0xb6, 0x1e, 0x68, 0x06, 0x25, 0x68, 0x16, 0x1a, 0x33, 0x3f, 0x90, 0xc7, 0x78, 0x2d, 0x98, 0x3c, 0x2f, 0xb9, 0x2d, 0x94, 0x0b, 0xfb, 0x49, 0x56, 0x30, 0xd7, 0xc1, 0xe6, 0x48,
- /* (2^367)P */ 0x7a, 0xd1, 0xe0, 0x8e, 0x67, 0xfc, 0x0b, 0x50, 0x1f, 0x84, 0x98, 0xfa, 0xaf, 0xae, 0x2e, 0x31, 0x27, 0xcf, 0x3f, 0xf2, 0x6e, 0x8d, 0x81, 0x8f, 0xd2, 0x5f, 0xde, 0xd3, 0x5e, 0xe9, 0xe7, 0x13, 0x48, 0x83, 0x5a, 0x4e, 0x84, 0xd1, 0x58, 0xcf, 0x6b, 0x84, 0xdf, 0x13, 0x1d, 0x91, 0x85, 0xe8, 0xcb, 0x29, 0x79, 0xd2, 0xca, 0xac, 0x6a, 0x93,
- /* (2^368)P */ 0x53, 0x82, 0xce, 0x61, 0x96, 0x88, 0x6f, 0xe1, 0x4a, 0x4c, 0x1e, 0x30, 0x73, 0xe8, 0x74, 0xde, 0x40, 0x2b, 0xe0, 0xc4, 0xb5, 0xd8, 0x7c, 0x15, 0xe7, 0xe1, 0xb1, 0xe0, 0xd6, 0x88, 0xb1, 0x6a, 0x57, 0x19, 0x6a, 0x22, 0x66, 0x57, 0xf6, 0x8d, 0xfd, 0xc0, 0xf2, 0xa3, 0x03, 0x56, 0xfb, 0x2e, 0x75, 0x5e, 0xc7, 0x8e, 0x22, 0x96, 0x5c, 0x06,
- /* (2^369)P */ 0x98, 0x7e, 0xbf, 0x3e, 0xbf, 0x24, 0x9d, 0x15, 0xd3, 0xf6, 0xd3, 0xd2, 0xf0, 0x11, 0xf2, 0xdb, 0x36, 0x23, 0x38, 0xf7, 0x1d, 0x71, 0x20, 0xd2, 0x54, 0x7f, 0x1e, 0x24, 0x8f, 0xe2, 0xaa, 0xf7, 0x3f, 0x6b, 0x41, 0x4e, 0xdc, 0x0e, 0xec, 0xe8, 0x35, 0x0a, 0x08, 0x6d, 0x89, 0x5b, 0x32, 0x91, 0x01, 0xb6, 0xe0, 0x2c, 0xc6, 0xa1, 0xbe, 0xb4,
- /* (2^370)P */ 0x29, 0xf2, 0x1e, 0x1c, 0xdc, 0x68, 0x8a, 0x43, 0x87, 0x2c, 0x48, 0xb3, 0x9e, 0xed, 0xd2, 0x82, 0x46, 0xac, 0x2f, 0xef, 0x93, 0x34, 0x37, 0xca, 0x64, 0x8d, 0xc9, 0x06, 0x90, 0xbb, 0x78, 0x0a, 0x3c, 0x4c, 0xcf, 0x35, 0x7a, 0x0f, 0xf7, 0xa7, 0xf4, 0x2f, 0x45, 0x69, 0x3f, 0xa9, 0x5d, 0xce, 0x7b, 0x8a, 0x84, 0xc3, 0xae, 0xf4, 0xda, 0xd5,
- /* (2^371)P */ 0xca, 0xba, 0x95, 0x43, 0x05, 0x7b, 0x06, 0xd9, 0x5c, 0x0a, 0x18, 0x5f, 0x6a, 0x6a, 0xce, 0xc0, 0x3d, 0x95, 0x51, 0x0e, 0x1a, 0xbe, 0x85, 0x7a, 0xf2, 0x69, 0xec, 0xc0, 0x8c, 0xca, 0xa3, 0x32, 0x0a, 0x76, 0x50, 0xc6, 0x76, 0x61, 0x00, 0x89, 0xbf, 0x6e, 0x0f, 0x48, 0x90, 0x31, 0x93, 0xec, 0x34, 0x70, 0xf0, 0xc3, 0x8d, 0xf0, 0x0f, 0xb5,
- /* (2^372)P */ 0xbe, 0x23, 0xe2, 0x18, 0x99, 0xf1, 0xed, 0x8a, 0xf6, 0xc9, 0xac, 0xb8, 0x1e, 0x9a, 0x3c, 0x15, 0xae, 0xd7, 0x6d, 0xb3, 0x04, 0xee, 0x5b, 0x0d, 0x1e, 0x79, 0xb7, 0xf9, 0xf9, 0x8d, 0xad, 0xf9, 0x8f, 0x5a, 0x6a, 0x7b, 0xd7, 0x9b, 0xca, 0x62, 0xfe, 0x9c, 0xc0, 0x6f, 0x6d, 0x9d, 0x76, 0xa3, 0x69, 0xb9, 0x4c, 0xa1, 0xc4, 0x0c, 0x76, 0xaa,
- /* (2^373)P */ 0x1c, 0x06, 0xfe, 0x3f, 0x45, 0x70, 0xcd, 0x97, 0xa9, 0xa2, 0xb1, 0xd3, 0xf2, 0xa5, 0x0c, 0x49, 0x2c, 0x75, 0x73, 0x1f, 0xcf, 0x00, 0xaf, 0xd5, 0x2e, 0xde, 0x0d, 0x8f, 0x8f, 0x7c, 0xc4, 0x58, 0xce, 0xd4, 0xf6, 0x24, 0x19, 0x2e, 0xd8, 0xc5, 0x1d, 0x1a, 0x3f, 0xb8, 0x4f, 0xbc, 0x7d, 0xbd, 0x68, 0xe3, 0x81, 0x98, 0x1b, 0xa8, 0xc9, 0xd9,
- /* (2^374)P */ 0x39, 0x95, 0x78, 0x24, 0x6c, 0x38, 0xe4, 0xe7, 0xd0, 0x8d, 0xb9, 0x38, 0x71, 0x5e, 0xc1, 0x62, 0x80, 0xcc, 0xcb, 0x8c, 0x97, 0xca, 0xf8, 0xb9, 0xd9, 0x9c, 0xce, 0x72, 0x7b, 0x70, 0xee, 0x5f, 0xea, 0xa2, 0xdf, 0xa9, 0x14, 0x10, 0xf9, 0x6e, 0x59, 0x9f, 0x9c, 0xe0, 0x0c, 0xb2, 0x07, 0x97, 0xcd, 0xd2, 0x89, 0x16, 0xfd, 0x9c, 0xa8, 0xa5,
- /* (2^375)P */ 0x5a, 0x61, 0xf1, 0x59, 0x7c, 0x38, 0xda, 0xe2, 0x85, 0x99, 0x68, 0xe9, 0xc9, 0xf7, 0x32, 0x7e, 0xc4, 0xca, 0xb7, 0x11, 0x08, 0x69, 0x2b, 0x66, 0x02, 0xf7, 0x2e, 0x18, 0xc3, 0x8e, 0xe1, 0xf9, 0xc5, 0x19, 0x9a, 0x0a, 0x9c, 0x07, 0xba, 0xc7, 0x9c, 0x03, 0x34, 0x89, 0x99, 0x67, 0x0b, 0x16, 0x4b, 0x07, 0x36, 0x16, 0x36, 0x2c, 0xe2, 0xa1,
- /* (2^376)P */ 0x70, 0x10, 0x91, 0x27, 0xa8, 0x24, 0x8e, 0x29, 0x04, 0x6f, 0x79, 0x1f, 0xd3, 0xa5, 0x68, 0xd3, 0x0b, 0x7d, 0x56, 0x4d, 0x14, 0x57, 0x7b, 0x2e, 0x00, 0x9f, 0x9a, 0xfd, 0x6c, 0x63, 0x18, 0x81, 0xdb, 0x9d, 0xb7, 0xd7, 0xa4, 0x1e, 0xe8, 0x40, 0xf1, 0x4c, 0xa3, 0x01, 0xd5, 0x4b, 0x75, 0xea, 0xdd, 0x97, 0xfd, 0x5b, 0xb2, 0x66, 0x6a, 0x24,
- /* (2^377)P */ 0x72, 0x11, 0xfe, 0x73, 0x1b, 0xd3, 0xea, 0x7f, 0x93, 0x15, 0x15, 0x05, 0xfe, 0x40, 0xe8, 0x28, 0xd8, 0x50, 0x47, 0x66, 0xfa, 0xb7, 0xb5, 0x04, 0xba, 0x35, 0x1e, 0x32, 0x9f, 0x5f, 0x32, 0xba, 0x3d, 0xd1, 0xed, 0x9a, 0x76, 0xca, 0xa3, 0x3e, 0x77, 0xd8, 0xd8, 0x7c, 0x5f, 0x68, 0x42, 0xb5, 0x86, 0x7f, 0x3b, 0xc9, 0xc1, 0x89, 0x64, 0xda,
- /* (2^378)P */ 0xd5, 0xd4, 0x17, 0x31, 0xfc, 0x6a, 0xfd, 0xb8, 0xe8, 0xe5, 0x3e, 0x39, 0x06, 0xe4, 0xd1, 0x90, 0x2a, 0xca, 0xf6, 0x54, 0x6c, 0x1b, 0x2f, 0x49, 0x97, 0xb1, 0x2a, 0x82, 0x43, 0x3d, 0x1f, 0x8b, 0xe2, 0x47, 0xc5, 0x24, 0xa8, 0xd5, 0x53, 0x29, 0x7d, 0xc6, 0x87, 0xa6, 0x25, 0x3a, 0x64, 0xdd, 0x71, 0x08, 0x9e, 0xcd, 0xe9, 0x45, 0xc7, 0xba,
- /* (2^379)P */ 0x37, 0x72, 0x6d, 0x13, 0x7a, 0x8d, 0x04, 0x31, 0xe6, 0xe3, 0x9e, 0x36, 0x71, 0x3e, 0xc0, 0x1e, 0xe3, 0x71, 0xd3, 0x49, 0x4e, 0x4a, 0x36, 0x42, 0x68, 0x68, 0x61, 0xc7, 0x3c, 0xdb, 0x81, 0x49, 0xf7, 0x91, 0x4d, 0xea, 0x4c, 0x4f, 0x98, 0xc6, 0x7e, 0x60, 0x84, 0x4b, 0x6a, 0x37, 0xbb, 0x52, 0xf7, 0xce, 0x02, 0xe4, 0xad, 0xd1, 0x3c, 0xa7,
- /* (2^380)P */ 0x51, 0x06, 0x2d, 0xf8, 0x08, 0xe8, 0xf1, 0x0c, 0xe5, 0xa9, 0xac, 0x29, 0x73, 0x3b, 0xed, 0x98, 0x5f, 0x55, 0x08, 0x38, 0x51, 0x44, 0x36, 0x5d, 0xea, 0xc3, 0xb8, 0x0e, 0xa0, 0x4f, 0xd2, 0x79, 0xe9, 0x98, 0xc3, 0xf5, 0x00, 0xb9, 0x26, 0x27, 0x42, 0xa8, 0x07, 0xc1, 0x12, 0x31, 0xc1, 0xc3, 0x3c, 0x3b, 0x7a, 0x72, 0x97, 0xc2, 0x70, 0x3a,
- /* (2^381)P */ 0xf4, 0xb2, 0xba, 0x32, 0xbc, 0xa9, 0x2f, 0x87, 0xc7, 0x3c, 0x45, 0xcd, 0xae, 0xe2, 0x13, 0x6d, 0x3a, 0xf2, 0xf5, 0x66, 0x97, 0x29, 0xaf, 0x53, 0x9f, 0xda, 0xea, 0x14, 0xdf, 0x04, 0x98, 0x19, 0x95, 0x9e, 0x2a, 0x00, 0x5c, 0x9d, 0x1d, 0xf0, 0x39, 0x23, 0xff, 0xfc, 0xca, 0x36, 0xb7, 0xde, 0xdf, 0x37, 0x78, 0x52, 0x21, 0xfa, 0x19, 0x10,
- /* (2^382)P */ 0x50, 0x20, 0x73, 0x74, 0x62, 0x21, 0xf2, 0xf7, 0x9b, 0x66, 0x85, 0x34, 0x74, 0xd4, 0x9d, 0x60, 0xd7, 0xbc, 0xc8, 0x46, 0x3b, 0xb8, 0x80, 0x42, 0x15, 0x0a, 0x6c, 0x35, 0x1a, 0x69, 0xf0, 0x1d, 0x4b, 0x29, 0x54, 0x5a, 0x9a, 0x48, 0xec, 0x9f, 0x37, 0x74, 0x91, 0xd0, 0xd1, 0x9e, 0x00, 0xc2, 0x76, 0x56, 0xd6, 0xa0, 0x15, 0x14, 0x83, 0x59,
- /* (2^383)P */ 0xc2, 0xf8, 0x22, 0x20, 0x23, 0x07, 0xbd, 0x1d, 0x6f, 0x1e, 0x8c, 0x56, 0x06, 0x6a, 0x4b, 0x9f, 0xe2, 0xa9, 0x92, 0x46, 0x4b, 0x46, 0x59, 0xd7, 0xe1, 0xda, 0x14, 0x98, 0x07, 0x65, 0x7e, 0x28, 0x20, 0xf2, 0x9d, 0x4f, 0x36, 0x5c, 0x92, 0xe0, 0x9d, 0xfe, 0x3e, 0xda, 0xe4, 0x47, 0x19, 0x3c, 0x00, 0x7f, 0x22, 0xf2, 0x9e, 0x51, 0xae, 0x4d,
- /* (2^384)P */ 0xbe, 0x8c, 0x1b, 0x10, 0xb6, 0xad, 0xcc, 0xcc, 0xd8, 0x5e, 0x21, 0xa6, 0xfb, 0xf1, 0xf6, 0xbd, 0x0a, 0x24, 0x67, 0xb4, 0x57, 0x7a, 0xbc, 0xe8, 0xe9, 0xff, 0xee, 0x0a, 0x1f, 0xee, 0xbd, 0xc8, 0x44, 0xed, 0x2b, 0xbb, 0x55, 0x1f, 0xdd, 0x7c, 0xb3, 0xeb, 0x3f, 0x63, 0xa1, 0x28, 0x91, 0x21, 0xab, 0x71, 0xc6, 0x4c, 0xd0, 0xe9, 0xb0, 0x21,
- /* (2^385)P */ 0xad, 0xc9, 0x77, 0x2b, 0xee, 0x89, 0xa4, 0x7b, 0xfd, 0xf9, 0xf6, 0x14, 0xe4, 0xed, 0x1a, 0x16, 0x9b, 0x78, 0x41, 0x43, 0xa8, 0x83, 0x72, 0x06, 0x2e, 0x7c, 0xdf, 0xeb, 0x7e, 0xdd, 0xd7, 0x8b, 0xea, 0x9a, 0x2b, 0x03, 0xba, 0x57, 0xf3, 0xf1, 0xd9, 0xe5, 0x09, 0xc5, 0x98, 0x61, 0x1c, 0x51, 0x6d, 0x5d, 0x6e, 0xfb, 0x5e, 0x95, 0x9f, 0xb5,
- /* (2^386)P */ 0x23, 0xe2, 0x1e, 0x95, 0xa3, 0x5e, 0x42, 0x10, 0xc7, 0xc3, 0x70, 0xbf, 0x4b, 0x6b, 0x83, 0x36, 0x93, 0xb7, 0x68, 0x47, 0x88, 0x3a, 0x10, 0x88, 0x48, 0x7f, 0x8c, 0xae, 0x54, 0x10, 0x02, 0xa4, 0x52, 0x8f, 0x8d, 0xf7, 0x26, 0x4f, 0x50, 0xc3, 0x6a, 0xe2, 0x4e, 0x3b, 0x4c, 0xb9, 0x8a, 0x14, 0x15, 0x6d, 0x21, 0x29, 0xb3, 0x6e, 0x4e, 0xd0,
- /* (2^387)P */ 0x4c, 0x8a, 0x18, 0x3f, 0xb7, 0x20, 0xfd, 0x3e, 0x54, 0xca, 0x68, 0x3c, 0xea, 0x6f, 0xf4, 0x6b, 0xa2, 0xbd, 0x01, 0xbd, 0xfe, 0x08, 0xa8, 0xd8, 0xc2, 0x20, 0x36, 0x05, 0xcd, 0xe9, 0xf3, 0x9e, 0xfa, 0x85, 0x66, 0x8f, 0x4b, 0x1d, 0x8c, 0x64, 0x4f, 0xb8, 0xc6, 0x0f, 0x5b, 0x57, 0xd8, 0x24, 0x19, 0x5a, 0x14, 0x4b, 0x92, 0xd3, 0x96, 0xbc,
- /* (2^388)P */ 0xa9, 0x3f, 0xc9, 0x6c, 0xca, 0x64, 0x1e, 0x6f, 0xdf, 0x65, 0x7f, 0x9a, 0x47, 0x6b, 0x8a, 0x60, 0x31, 0xa6, 0x06, 0xac, 0x69, 0x30, 0xe6, 0xea, 0x63, 0x42, 0x26, 0x5f, 0xdb, 0xd0, 0xf2, 0x8e, 0x34, 0x0a, 0x3a, 0xeb, 0xf3, 0x79, 0xc8, 0xb7, 0x60, 0x56, 0x5c, 0x37, 0x95, 0x71, 0xf8, 0x7f, 0x49, 0x3e, 0x9e, 0x01, 0x26, 0x1e, 0x80, 0x9f,
- /* (2^389)P */ 0xf8, 0x16, 0x9a, 0xaa, 0xb0, 0x28, 0xb5, 0x8e, 0xd0, 0x60, 0xe5, 0x26, 0xa9, 0x47, 0xc4, 0x5c, 0xa9, 0x39, 0xfe, 0x0a, 0xd8, 0x07, 0x2b, 0xb3, 0xce, 0xf1, 0xea, 0x1a, 0xf4, 0x7b, 0x98, 0x31, 0x3d, 0x13, 0x29, 0x80, 0xe8, 0x0d, 0xcf, 0x56, 0x39, 0x86, 0x50, 0x0c, 0xb3, 0x18, 0xf4, 0xc5, 0xca, 0xf2, 0x6f, 0xcd, 0x8d, 0xd5, 0x02, 0xb0,
- /* (2^390)P */ 0xbf, 0x39, 0x3f, 0xac, 0x6d, 0x1a, 0x6a, 0xe4, 0x42, 0x24, 0xd6, 0x41, 0x9d, 0xb9, 0x5b, 0x46, 0x73, 0x93, 0x76, 0xaa, 0xb7, 0x37, 0x36, 0xa6, 0x09, 0xe5, 0x04, 0x3b, 0x66, 0xc4, 0x29, 0x3e, 0x41, 0xc2, 0xcb, 0xe5, 0x17, 0xd7, 0x34, 0x67, 0x1d, 0x2c, 0x12, 0xec, 0x24, 0x7a, 0x40, 0xa2, 0x45, 0x41, 0xf0, 0x75, 0xed, 0x43, 0x30, 0xc9,
- /* (2^391)P */ 0x80, 0xf6, 0x47, 0x5b, 0xad, 0x54, 0x02, 0xbc, 0xdd, 0xa4, 0xb2, 0xd7, 0x42, 0x95, 0xf2, 0x0d, 0x1b, 0xef, 0x37, 0xa7, 0xb4, 0x34, 0x04, 0x08, 0x71, 0x1b, 0xd3, 0xdf, 0xa1, 0xf0, 0x2b, 0xfa, 0xc0, 0x1f, 0xf3, 0x44, 0xb5, 0xc6, 0x47, 0x3d, 0x65, 0x67, 0x45, 0x4d, 0x2f, 0xde, 0x52, 0x73, 0xfc, 0x30, 0x01, 0x6b, 0xc1, 0x03, 0xd8, 0xd7,
- /* (2^392)P */ 0x1c, 0x67, 0x55, 0x3e, 0x01, 0x17, 0x0f, 0x3e, 0xe5, 0x34, 0x58, 0xfc, 0xcb, 0x71, 0x24, 0x74, 0x5d, 0x36, 0x1e, 0x89, 0x2a, 0x63, 0xf8, 0xf8, 0x9f, 0x50, 0x9f, 0x32, 0x92, 0x29, 0xd8, 0x1a, 0xec, 0x76, 0x57, 0x6c, 0x67, 0x12, 0x6a, 0x6e, 0xef, 0x97, 0x1f, 0xc3, 0x77, 0x60, 0x3c, 0x22, 0xcb, 0xc7, 0x04, 0x1a, 0x89, 0x2d, 0x10, 0xa6,
- /* (2^393)P */ 0x12, 0xf5, 0xa9, 0x26, 0x16, 0xd9, 0x3c, 0x65, 0x5d, 0x83, 0xab, 0xd1, 0x70, 0x6b, 0x1c, 0xdb, 0xe7, 0x86, 0x0d, 0xfb, 0xe7, 0xf8, 0x2a, 0x58, 0x6e, 0x7a, 0x66, 0x13, 0x53, 0x3a, 0x6f, 0x8d, 0x43, 0x5f, 0x14, 0x23, 0x14, 0xff, 0x3d, 0x52, 0x7f, 0xee, 0xbd, 0x7a, 0x34, 0x8b, 0x35, 0x24, 0xc3, 0x7a, 0xdb, 0xcf, 0x22, 0x74, 0x9a, 0x8f,
- /* (2^394)P */ 0xdb, 0x20, 0xfc, 0xe5, 0x39, 0x4e, 0x7d, 0x78, 0xee, 0x0b, 0xbf, 0x1d, 0x80, 0xd4, 0x05, 0x4f, 0xb9, 0xd7, 0x4e, 0x94, 0x88, 0x9a, 0x50, 0x78, 0x1a, 0x70, 0x8c, 0xcc, 0x25, 0xb6, 0x61, 0x09, 0xdc, 0x7b, 0xea, 0x3f, 0x7f, 0xea, 0x2a, 0x0d, 0x47, 0x1c, 0x8e, 0xa6, 0x5b, 0xd2, 0xa3, 0x61, 0x93, 0x3c, 0x68, 0x9f, 0x8b, 0xea, 0xb0, 0xcb,
- /* (2^395)P */ 0xff, 0x54, 0x02, 0x19, 0xae, 0x8b, 0x4c, 0x2c, 0x3a, 0xe0, 0xe4, 0xac, 0x87, 0xf7, 0x51, 0x45, 0x41, 0x43, 0xdc, 0xaa, 0xcd, 0xcb, 0xdc, 0x40, 0xe3, 0x44, 0x3b, 0x1d, 0x9e, 0x3d, 0xb9, 0x82, 0xcc, 0x7a, 0xc5, 0x12, 0xf8, 0x1e, 0xdd, 0xdb, 0x8d, 0xb0, 0x2a, 0xe8, 0xe6, 0x6c, 0x94, 0x3b, 0xb7, 0x2d, 0xba, 0x79, 0x3b, 0xb5, 0x86, 0xfb,
- /* (2^396)P */ 0x82, 0x88, 0x13, 0xdd, 0x6c, 0xcd, 0x85, 0x2b, 0x90, 0x86, 0xb7, 0xac, 0x16, 0xa6, 0x6e, 0x6a, 0x94, 0xd8, 0x1e, 0x4e, 0x41, 0x0f, 0xce, 0x81, 0x6a, 0xa8, 0x26, 0x56, 0x43, 0x52, 0x52, 0xe6, 0xff, 0x88, 0xcf, 0x47, 0x05, 0x1d, 0xff, 0xf3, 0xa0, 0x10, 0xb2, 0x97, 0x87, 0xeb, 0x47, 0xbb, 0xfa, 0x1f, 0xe8, 0x4c, 0xce, 0xc4, 0xcd, 0x93,
- /* (2^397)P */ 0xf4, 0x11, 0xf5, 0x8d, 0x89, 0x29, 0x79, 0xb3, 0x59, 0x0b, 0x29, 0x7d, 0x9c, 0x12, 0x4a, 0x65, 0x72, 0x3a, 0xf9, 0xec, 0x37, 0x18, 0x86, 0xef, 0x44, 0x07, 0x25, 0x74, 0x76, 0x53, 0xed, 0x51, 0x01, 0xc6, 0x28, 0xc5, 0xc3, 0x4a, 0x0f, 0x99, 0xec, 0xc8, 0x40, 0x5a, 0x83, 0x30, 0x79, 0xa2, 0x3e, 0x63, 0x09, 0x2d, 0x6f, 0x23, 0x54, 0x1c,
- /* (2^398)P */ 0x5c, 0x6f, 0x3b, 0x1c, 0x30, 0x77, 0x7e, 0x87, 0x66, 0x83, 0x2e, 0x7e, 0x85, 0x50, 0xfd, 0xa0, 0x7a, 0xc2, 0xf5, 0x0f, 0xc1, 0x64, 0xe7, 0x0b, 0xbd, 0x59, 0xa7, 0xe7, 0x65, 0x53, 0xc3, 0xf5, 0x55, 0x5b, 0xe1, 0x82, 0x30, 0x5a, 0x61, 0xcd, 0xa0, 0x89, 0x32, 0xdb, 0x87, 0xfc, 0x21, 0x8a, 0xab, 0x6d, 0x82, 0xa8, 0x42, 0x81, 0x4f, 0xf2,
- /* (2^399)P */ 0xb3, 0xeb, 0x88, 0x18, 0xf6, 0x56, 0x96, 0xbf, 0xba, 0x5d, 0x71, 0xa1, 0x5a, 0xd1, 0x04, 0x7b, 0xd5, 0x46, 0x01, 0x74, 0xfe, 0x15, 0x25, 0xb7, 0xff, 0x0c, 0x24, 0x47, 0xac, 0xfd, 0xab, 0x47, 0x32, 0xe1, 0x6a, 0x4e, 0xca, 0xcf, 0x7f, 0xdd, 0xf8, 0xd2, 0x4b, 0x3b, 0xf5, 0x17, 0xba, 0xba, 0x8b, 0xa1, 0xec, 0x28, 0x3f, 0x97, 0xab, 0x2a,
- /* (2^400)P */ 0x51, 0x38, 0xc9, 0x5e, 0xc6, 0xb3, 0x64, 0xf2, 0x24, 0x4d, 0x04, 0x7d, 0xc8, 0x39, 0x0c, 0x4a, 0xc9, 0x73, 0x74, 0x1b, 0x5c, 0xb2, 0xc5, 0x41, 0x62, 0xa0, 0x4c, 0x6d, 0x8d, 0x91, 0x9a, 0x7b, 0x88, 0xab, 0x9c, 0x7e, 0x23, 0xdb, 0x6f, 0xb5, 0x72, 0xd6, 0x47, 0x40, 0xef, 0x22, 0x58, 0x62, 0x19, 0x6c, 0x38, 0xba, 0x5b, 0x00, 0x30, 0x9f,
- /* (2^401)P */ 0x65, 0xbb, 0x3b, 0x9b, 0xe9, 0xae, 0xbf, 0xbe, 0xe4, 0x13, 0x95, 0xf3, 0xe3, 0x77, 0xcb, 0xe4, 0x9a, 0x22, 0xb5, 0x4a, 0x08, 0x9d, 0xb3, 0x9e, 0x27, 0xe0, 0x15, 0x6c, 0x9f, 0x7e, 0x9a, 0x5e, 0x15, 0x45, 0x25, 0x8d, 0x01, 0x0a, 0xd2, 0x2b, 0xbd, 0x48, 0x06, 0x0d, 0x18, 0x97, 0x4b, 0xdc, 0xbc, 0xf0, 0xcd, 0xb2, 0x52, 0x3c, 0xac, 0xf5,
- /* (2^402)P */ 0x3e, 0xed, 0x47, 0x6b, 0x5c, 0xf6, 0x76, 0xd0, 0xe9, 0x15, 0xa3, 0xcb, 0x36, 0x00, 0x21, 0xa3, 0x79, 0x20, 0xa5, 0x3e, 0x88, 0x03, 0xcb, 0x7e, 0x63, 0xbb, 0xed, 0xa9, 0x13, 0x35, 0x16, 0xaf, 0x2e, 0xb4, 0x70, 0x14, 0x93, 0xfb, 0xc4, 0x9b, 0xd8, 0xb1, 0xbe, 0x43, 0xd1, 0x85, 0xb8, 0x97, 0xef, 0xea, 0x88, 0xa1, 0x25, 0x52, 0x62, 0x75,
- /* (2^403)P */ 0x8e, 0x4f, 0xaa, 0x23, 0x62, 0x7e, 0x2b, 0x37, 0x89, 0x00, 0x11, 0x30, 0xc5, 0x33, 0x4a, 0x89, 0x8a, 0xe2, 0xfc, 0x5c, 0x6a, 0x75, 0xe5, 0xf7, 0x02, 0x4a, 0x9b, 0xf7, 0xb5, 0x6a, 0x85, 0x31, 0xd3, 0x5a, 0xcf, 0xc3, 0xf8, 0xde, 0x2f, 0xcf, 0xb5, 0x24, 0xf4, 0xe3, 0xa1, 0xad, 0x42, 0xae, 0x09, 0xb9, 0x2e, 0x04, 0x2d, 0x01, 0x22, 0x3f,
- /* (2^404)P */ 0x41, 0x16, 0xfb, 0x7d, 0x50, 0xfd, 0xb5, 0xba, 0x88, 0x24, 0xba, 0xfd, 0x3d, 0xb2, 0x90, 0x15, 0xb7, 0xfa, 0xa2, 0xe1, 0x4c, 0x7d, 0xb9, 0xc6, 0xff, 0x81, 0x57, 0xb6, 0xc2, 0x9e, 0xcb, 0xc4, 0x35, 0xbd, 0x01, 0xb7, 0xaa, 0xce, 0xd0, 0xe9, 0xb5, 0xd6, 0x72, 0xbf, 0xd2, 0xee, 0xc7, 0xac, 0x94, 0xff, 0x29, 0x57, 0x02, 0x49, 0x09, 0xad,
- /* (2^405)P */ 0x27, 0xa5, 0x78, 0x1b, 0xbf, 0x6b, 0xaf, 0x0b, 0x8c, 0xd9, 0xa8, 0x37, 0xb0, 0x67, 0x18, 0xb6, 0xc7, 0x05, 0x8a, 0x67, 0x03, 0x30, 0x62, 0x6e, 0x56, 0x82, 0xa9, 0x54, 0x3e, 0x0c, 0x4e, 0x07, 0xe1, 0x5a, 0x38, 0xed, 0xfa, 0xc8, 0x55, 0x6b, 0x08, 0xa3, 0x6b, 0x64, 0x2a, 0x15, 0xd6, 0x39, 0x6f, 0x47, 0x99, 0x42, 0x3f, 0x33, 0x84, 0x8f,
- /* (2^406)P */ 0xbc, 0x45, 0x29, 0x81, 0x0e, 0xa4, 0xc5, 0x72, 0x3a, 0x10, 0xe1, 0xc4, 0x1e, 0xda, 0xc3, 0xfe, 0xb0, 0xce, 0xd2, 0x13, 0x34, 0x67, 0x21, 0xc6, 0x7e, 0xf9, 0x8c, 0xff, 0x39, 0x50, 0xae, 0x92, 0x60, 0x35, 0x2f, 0x8b, 0x6e, 0xc9, 0xc1, 0x27, 0x3a, 0x94, 0x66, 0x3e, 0x26, 0x84, 0x93, 0xc8, 0x6c, 0xcf, 0xd2, 0x03, 0xa1, 0x10, 0xcf, 0xb7,
- /* (2^407)P */ 0x64, 0xda, 0x19, 0xf6, 0xc5, 0x73, 0x17, 0x44, 0x88, 0x81, 0x07, 0x0d, 0x34, 0xb2, 0x75, 0xf9, 0xd9, 0xe2, 0xe0, 0x8b, 0x71, 0xcf, 0x72, 0x34, 0x83, 0xb4, 0xce, 0xfc, 0xd7, 0x29, 0x09, 0x5a, 0x98, 0xbf, 0x14, 0xac, 0x77, 0x55, 0x38, 0x47, 0x5b, 0x0f, 0x40, 0x24, 0xe5, 0xa5, 0xa6, 0xac, 0x2d, 0xa6, 0xff, 0x9c, 0x73, 0xfe, 0x5c, 0x7e,
- /* (2^408)P */ 0x1e, 0x33, 0xcc, 0x68, 0xb2, 0xbc, 0x8c, 0x93, 0xaf, 0xcc, 0x38, 0xf8, 0xd9, 0x16, 0x72, 0x50, 0xac, 0xd9, 0xb5, 0x0b, 0x9a, 0xbe, 0x46, 0x7a, 0xf1, 0xee, 0xf1, 0xad, 0xec, 0x5b, 0x59, 0x27, 0x9c, 0x05, 0xa3, 0x87, 0xe0, 0x37, 0x2c, 0x83, 0xce, 0xb3, 0x65, 0x09, 0x8e, 0xc3, 0x9c, 0xbf, 0x6a, 0xa2, 0x00, 0xcc, 0x12, 0x36, 0xc5, 0x95,
- /* (2^409)P */ 0x36, 0x11, 0x02, 0x14, 0x9c, 0x3c, 0xeb, 0x2f, 0x23, 0x5b, 0x6b, 0x2b, 0x08, 0x54, 0x53, 0xac, 0xb2, 0xa3, 0xe0, 0x26, 0x62, 0x3c, 0xe4, 0xe1, 0x81, 0xee, 0x13, 0x3e, 0xa4, 0x97, 0xef, 0xf9, 0x92, 0x27, 0x01, 0xce, 0x54, 0x8b, 0x3e, 0x31, 0xbe, 0xa7, 0x88, 0xcf, 0x47, 0x99, 0x3c, 0x10, 0x6f, 0x60, 0xb3, 0x06, 0x4e, 0xee, 0x1b, 0xf0,
- /* (2^410)P */ 0x59, 0x49, 0x66, 0xcf, 0x22, 0xe6, 0xf6, 0x73, 0xfe, 0xa3, 0x1c, 0x09, 0xfa, 0x5f, 0x65, 0xa8, 0xf0, 0x82, 0xc2, 0xef, 0x16, 0x63, 0x6e, 0x79, 0x69, 0x51, 0x39, 0x07, 0x65, 0xc4, 0x81, 0xec, 0x73, 0x0f, 0x15, 0x93, 0xe1, 0x30, 0x33, 0xe9, 0x37, 0x86, 0x42, 0x4c, 0x1f, 0x9b, 0xad, 0xee, 0x3f, 0xf1, 0x2a, 0x8e, 0x6a, 0xa3, 0xc8, 0x35,
- /* (2^411)P */ 0x1e, 0x49, 0xf1, 0xdd, 0xd2, 0x9c, 0x8e, 0x78, 0xb2, 0x06, 0xe4, 0x6a, 0xab, 0x3a, 0xdc, 0xcd, 0xf4, 0xeb, 0xe1, 0xe7, 0x2f, 0xaa, 0xeb, 0x40, 0x31, 0x9f, 0xb9, 0xab, 0x13, 0xa9, 0x78, 0xbf, 0x38, 0x89, 0x0e, 0x85, 0x14, 0x8b, 0x46, 0x76, 0x14, 0xda, 0xcf, 0x33, 0xc8, 0x79, 0xd3, 0xd5, 0xa3, 0x6a, 0x69, 0x45, 0x70, 0x34, 0xc3, 0xe9,
- /* (2^412)P */ 0x5e, 0xe7, 0x78, 0xe9, 0x24, 0xcc, 0xe9, 0xf4, 0xc8, 0x6b, 0xe0, 0xfb, 0x3a, 0xbe, 0xcc, 0x42, 0x4a, 0x00, 0x22, 0xf8, 0xe6, 0x32, 0xbe, 0x6d, 0x18, 0x55, 0x60, 0xe9, 0x72, 0x69, 0x50, 0x56, 0xca, 0x04, 0x18, 0x38, 0xa1, 0xee, 0xd8, 0x38, 0x3c, 0xa7, 0x70, 0xe2, 0xb9, 0x4c, 0xa0, 0xc8, 0x89, 0x72, 0xcf, 0x49, 0x7f, 0xdf, 0xbc, 0x67,
- /* (2^413)P */ 0x1d, 0x17, 0xcb, 0x0b, 0xbd, 0xb2, 0x36, 0xe3, 0xa8, 0x99, 0x31, 0xb6, 0x26, 0x9c, 0x0c, 0x74, 0xaf, 0x4d, 0x24, 0x61, 0xcf, 0x31, 0x7b, 0xed, 0xdd, 0xc3, 0xf6, 0x32, 0x70, 0xfe, 0x17, 0xf6, 0x51, 0x37, 0x65, 0xce, 0x5d, 0xaf, 0xa5, 0x2f, 0x2a, 0xfe, 0x00, 0x71, 0x7c, 0x50, 0xbe, 0x21, 0xc7, 0xed, 0xc6, 0xfc, 0x67, 0xcf, 0x9c, 0xdd,
- /* (2^414)P */ 0x26, 0x3e, 0xf8, 0xbb, 0xd0, 0xb1, 0x01, 0xd8, 0xeb, 0x0b, 0x62, 0x87, 0x35, 0x4c, 0xde, 0xca, 0x99, 0x9c, 0x6d, 0xf7, 0xb6, 0xf0, 0x57, 0x0a, 0x52, 0x29, 0x6a, 0x3f, 0x26, 0x31, 0x04, 0x07, 0x2a, 0xc9, 0xfa, 0x9b, 0x0e, 0x62, 0x8e, 0x72, 0xf2, 0xad, 0xce, 0xb6, 0x35, 0x7a, 0xc1, 0xae, 0x35, 0xc7, 0xa3, 0x14, 0xcf, 0x0c, 0x28, 0xb7,
- /* (2^415)P */ 0xa6, 0xf1, 0x32, 0x3a, 0x20, 0xd2, 0x24, 0x97, 0xcf, 0x5d, 0x37, 0x99, 0xaf, 0x33, 0x7a, 0x5b, 0x7a, 0xcc, 0x4e, 0x41, 0x38, 0xb1, 0x4e, 0xad, 0xc9, 0xd9, 0x71, 0x7e, 0xb2, 0xf5, 0xd5, 0x01, 0x6c, 0x4d, 0xfd, 0xa1, 0xda, 0x03, 0x38, 0x9b, 0x3d, 0x92, 0x92, 0xf2, 0xca, 0xbf, 0x1f, 0x24, 0xa4, 0xbb, 0x30, 0x6a, 0x74, 0x56, 0xc8, 0xce,
- /* (2^416)P */ 0x27, 0xf4, 0xed, 0xc9, 0xc3, 0xb1, 0x79, 0x85, 0xbe, 0xf6, 0xeb, 0xf3, 0x55, 0xc7, 0xaa, 0xa6, 0xe9, 0x07, 0x5d, 0xf4, 0xeb, 0xa6, 0x81, 0xe3, 0x0e, 0xcf, 0xa3, 0xc1, 0xef, 0xe7, 0x34, 0xb2, 0x03, 0x73, 0x8a, 0x91, 0xf1, 0xad, 0x05, 0xc7, 0x0b, 0x43, 0x99, 0x12, 0x31, 0xc8, 0xc7, 0xc5, 0xa4, 0x3d, 0xcd, 0xe5, 0x4e, 0x6d, 0x24, 0xdd,
- /* (2^417)P */ 0x61, 0x54, 0xd0, 0x95, 0x2c, 0x45, 0x75, 0xac, 0xb5, 0x1a, 0x9d, 0x11, 0xeb, 0xed, 0x6b, 0x57, 0xa3, 0xe6, 0xcd, 0x77, 0xd4, 0x83, 0x8e, 0x39, 0xf1, 0x0f, 0x98, 0xcb, 0x40, 0x02, 0x6e, 0x10, 0x82, 0x9e, 0xb4, 0x93, 0x76, 0xd7, 0x97, 0xa3, 0x53, 0x12, 0x86, 0xc6, 0x15, 0x78, 0x73, 0x93, 0xe7, 0x7f, 0xcf, 0x1f, 0xbf, 0xcd, 0xd2, 0x7a,
- /* (2^418)P */ 0xc2, 0x21, 0xdc, 0xd5, 0x69, 0xff, 0xca, 0x49, 0x3a, 0xe1, 0xc3, 0x69, 0x41, 0x56, 0xc1, 0x76, 0x63, 0x24, 0xbd, 0x64, 0x1b, 0x3d, 0x92, 0xf9, 0x13, 0x04, 0x25, 0xeb, 0x27, 0xa6, 0xef, 0x39, 0x3a, 0x80, 0xe0, 0xf8, 0x27, 0xee, 0xc9, 0x49, 0x77, 0xef, 0x3f, 0x29, 0x3d, 0x5e, 0xe6, 0x66, 0x83, 0xd1, 0xf6, 0xfe, 0x9d, 0xbc, 0xf1, 0x96,
- /* (2^419)P */ 0x6b, 0xc6, 0x99, 0x26, 0x3c, 0xf3, 0x63, 0xf9, 0xc7, 0x29, 0x8c, 0x52, 0x62, 0x2d, 0xdc, 0x8a, 0x66, 0xce, 0x2c, 0xa7, 0xe4, 0xf0, 0xd7, 0x37, 0x17, 0x1e, 0xe4, 0xa3, 0x53, 0x7b, 0x29, 0x8e, 0x60, 0x99, 0xf9, 0x0c, 0x7c, 0x6f, 0xa2, 0xcc, 0x9f, 0x80, 0xdd, 0x5e, 0x46, 0xaa, 0x0d, 0x6c, 0xc9, 0x6c, 0xf7, 0x78, 0x5b, 0x38, 0xe3, 0x24,
- /* (2^420)P */ 0x4b, 0x75, 0x6a, 0x2f, 0x08, 0xe1, 0x72, 0x76, 0xab, 0x82, 0x96, 0xdf, 0x3b, 0x1f, 0x9b, 0xd8, 0xed, 0xdb, 0xcd, 0x15, 0x09, 0x5a, 0x1e, 0xb7, 0xc5, 0x26, 0x72, 0x07, 0x0c, 0x50, 0xcd, 0x3b, 0x4d, 0x3f, 0xa2, 0x67, 0xc2, 0x02, 0x61, 0x2e, 0x68, 0xe9, 0x6f, 0xf0, 0x21, 0x2a, 0xa7, 0x3b, 0x88, 0x04, 0x11, 0x64, 0x49, 0x0d, 0xb4, 0x46,
- /* (2^421)P */ 0x63, 0x85, 0xf3, 0xc5, 0x2b, 0x5a, 0x9f, 0xf0, 0x17, 0xcb, 0x45, 0x0a, 0xf3, 0x6e, 0x7e, 0xb0, 0x7c, 0xbc, 0xf0, 0x4f, 0x3a, 0xb0, 0xbc, 0x36, 0x36, 0x52, 0x51, 0xcb, 0xfe, 0x9a, 0xcb, 0xe8, 0x7e, 0x4b, 0x06, 0x7f, 0xaa, 0x35, 0xc8, 0x0e, 0x7a, 0x30, 0xa3, 0xb1, 0x09, 0xbb, 0x86, 0x4c, 0xbe, 0xb8, 0xbd, 0xe0, 0x32, 0xa5, 0xd4, 0xf7,
- /* (2^422)P */ 0x7d, 0x50, 0x37, 0x68, 0x4e, 0x22, 0xb2, 0x2c, 0xd5, 0x0f, 0x2b, 0x6d, 0xb1, 0x51, 0xf2, 0x82, 0xe9, 0x98, 0x7c, 0x50, 0xc7, 0x96, 0x7e, 0x0e, 0xdc, 0xb1, 0x0e, 0xb2, 0x63, 0x8c, 0x30, 0x37, 0x72, 0x21, 0x9c, 0x61, 0xc2, 0xa7, 0x33, 0xd9, 0xb2, 0x63, 0x93, 0xd1, 0x6b, 0x6a, 0x73, 0xa5, 0x58, 0x80, 0xff, 0x04, 0xc7, 0x83, 0x21, 0x29,
- /* (2^423)P */ 0x29, 0x04, 0xbc, 0x99, 0x39, 0xc9, 0x58, 0xc9, 0x6b, 0x17, 0xe8, 0x90, 0xb3, 0xe6, 0xa9, 0xb6, 0x28, 0x9b, 0xcb, 0x3b, 0x28, 0x90, 0x68, 0x71, 0xff, 0xcf, 0x08, 0x78, 0xc9, 0x8d, 0xa8, 0x4e, 0x43, 0xd1, 0x1c, 0x9e, 0xa4, 0xe3, 0xdf, 0xbf, 0x92, 0xf4, 0xf9, 0x41, 0xba, 0x4d, 0x1c, 0xf9, 0xdd, 0x74, 0x76, 0x1c, 0x6e, 0x3e, 0x94, 0x87,
- /* (2^424)P */ 0xe4, 0xda, 0xc5, 0xd7, 0xfb, 0x87, 0xc5, 0x4d, 0x6b, 0x19, 0xaa, 0xb9, 0xbc, 0x8c, 0xf2, 0x8a, 0xd8, 0x5d, 0xdb, 0x4d, 0xef, 0xa6, 0xf2, 0x65, 0xf1, 0x22, 0x9c, 0xf1, 0x46, 0x30, 0x71, 0x7c, 0xe4, 0x53, 0x8e, 0x55, 0x2e, 0x9c, 0x9a, 0x31, 0x2a, 0xc3, 0xab, 0x0f, 0xde, 0xe4, 0xbe, 0xd8, 0x96, 0x50, 0x6e, 0x0c, 0x54, 0x49, 0xe6, 0xec,
- /* (2^425)P */ 0x3c, 0x1d, 0x5a, 0xa5, 0xda, 0xad, 0xdd, 0xc2, 0xae, 0xac, 0x6f, 0x86, 0x75, 0x31, 0x91, 0x64, 0x45, 0x9d, 0xa4, 0xf0, 0x81, 0xf1, 0x0e, 0xba, 0x74, 0xaf, 0x7b, 0xcd, 0x6f, 0xfe, 0xac, 0x4e, 0xdb, 0x4e, 0x45, 0x35, 0x36, 0xc5, 0xc0, 0x6c, 0x3d, 0x64, 0xf4, 0xd8, 0x07, 0x62, 0xd1, 0xec, 0xf3, 0xfc, 0x93, 0xc9, 0x28, 0x0c, 0x2c, 0xf3,
- /* (2^426)P */ 0x0c, 0x69, 0x2b, 0x5c, 0xb6, 0x41, 0x69, 0xf1, 0xa4, 0xf1, 0x5b, 0x75, 0x4c, 0x42, 0x8b, 0x47, 0xeb, 0x69, 0xfb, 0xa8, 0xe6, 0xf9, 0x7b, 0x48, 0x50, 0xaf, 0xd3, 0xda, 0xb2, 0x35, 0x10, 0xb5, 0x5b, 0x40, 0x90, 0x39, 0xc9, 0x07, 0x06, 0x73, 0x26, 0x20, 0x95, 0x01, 0xa4, 0x2d, 0xf0, 0xe7, 0x2e, 0x00, 0x7d, 0x41, 0x09, 0x68, 0x13, 0xc4,
- /* (2^427)P */ 0xbe, 0x38, 0x78, 0xcf, 0xc9, 0x4f, 0x36, 0xca, 0x09, 0x61, 0x31, 0x3c, 0x57, 0x2e, 0xec, 0x17, 0xa4, 0x7d, 0x19, 0x2b, 0x9b, 0x5b, 0xbe, 0x8f, 0xd6, 0xc5, 0x2f, 0x86, 0xf2, 0x64, 0x76, 0x17, 0x00, 0x6e, 0x1a, 0x8c, 0x67, 0x1b, 0x68, 0xeb, 0x15, 0xa2, 0xd6, 0x09, 0x91, 0xdd, 0x23, 0x0d, 0x98, 0xb2, 0x10, 0x19, 0x55, 0x9b, 0x63, 0xf2,
- /* (2^428)P */ 0x51, 0x1f, 0x93, 0xea, 0x2a, 0x3a, 0xfa, 0x41, 0xc0, 0x57, 0xfb, 0x74, 0xa6, 0x65, 0x09, 0x56, 0x14, 0xb6, 0x12, 0xaa, 0xb3, 0x1a, 0x8d, 0x3b, 0x76, 0x91, 0x7a, 0x23, 0x56, 0x9c, 0x6a, 0xc0, 0xe0, 0x3c, 0x3f, 0xb5, 0x1a, 0xf4, 0x57, 0x71, 0x93, 0x2b, 0xb1, 0xa7, 0x70, 0x57, 0x22, 0x80, 0xf5, 0xb8, 0x07, 0x77, 0x87, 0x0c, 0xbe, 0x83,
- /* (2^429)P */ 0x07, 0x9b, 0x0e, 0x52, 0x38, 0x63, 0x13, 0x86, 0x6a, 0xa6, 0xb4, 0xd2, 0x60, 0x68, 0x9a, 0x99, 0x82, 0x0a, 0x04, 0x5f, 0x89, 0x7a, 0x1a, 0x2a, 0xae, 0x2d, 0x35, 0x0c, 0x1e, 0xad, 0xef, 0x4f, 0x9a, 0xfc, 0xc8, 0xd9, 0xcf, 0x9d, 0x48, 0x71, 0xa5, 0x55, 0x79, 0x73, 0x39, 0x1b, 0xd8, 0x73, 0xec, 0x9b, 0x03, 0x16, 0xd8, 0x82, 0xf7, 0x67,
- /* (2^430)P */ 0x52, 0x67, 0x42, 0x21, 0xc9, 0x40, 0x78, 0x82, 0x2b, 0x95, 0x2d, 0x20, 0x92, 0xd1, 0xe2, 0x61, 0x25, 0xb0, 0xc6, 0x9c, 0x20, 0x59, 0x8e, 0x28, 0x6f, 0xf3, 0xfd, 0xd3, 0xc1, 0x32, 0x43, 0xc9, 0xa6, 0x08, 0x7a, 0x77, 0x9c, 0x4c, 0x8c, 0x33, 0x71, 0x13, 0x69, 0xe3, 0x52, 0x30, 0xa7, 0xf5, 0x07, 0x67, 0xac, 0xad, 0x46, 0x8a, 0x26, 0x25,
- /* (2^431)P */ 0xda, 0x86, 0xc4, 0xa2, 0x71, 0x56, 0xdd, 0xd2, 0x48, 0xd3, 0xde, 0x42, 0x63, 0x01, 0xa7, 0x2c, 0x92, 0x83, 0x6f, 0x2e, 0xd8, 0x1e, 0x3f, 0xc1, 0xc5, 0x42, 0x4e, 0x34, 0x19, 0x54, 0x6e, 0x35, 0x2c, 0x51, 0x2e, 0xfd, 0x0f, 0x9a, 0x45, 0x66, 0x5e, 0x4a, 0x83, 0xda, 0x0a, 0x53, 0x68, 0x63, 0xfa, 0xce, 0x47, 0x20, 0xd3, 0x34, 0xba, 0x0d,
- /* (2^432)P */ 0xd0, 0xe9, 0x64, 0xa4, 0x61, 0x4b, 0x86, 0xe5, 0x93, 0x6f, 0xda, 0x0e, 0x31, 0x7e, 0x6e, 0xe3, 0xc6, 0x73, 0xd8, 0xa3, 0x08, 0x57, 0x52, 0xcd, 0x51, 0x63, 0x1d, 0x9f, 0x93, 0x00, 0x62, 0x91, 0x26, 0x21, 0xa7, 0xdd, 0x25, 0x0f, 0x09, 0x0d, 0x35, 0xad, 0xcf, 0x11, 0x8e, 0x6e, 0xe8, 0xae, 0x1d, 0x95, 0xcb, 0x88, 0xf8, 0x70, 0x7b, 0x91,
- /* (2^433)P */ 0x0c, 0x19, 0x5c, 0xd9, 0x8d, 0xda, 0x9d, 0x2c, 0x90, 0x54, 0x65, 0xe8, 0xb6, 0x35, 0x50, 0xae, 0xea, 0xae, 0x43, 0xb7, 0x1e, 0x99, 0x8b, 0x4c, 0x36, 0x4e, 0xe4, 0x1e, 0xc4, 0x64, 0x43, 0xb6, 0xeb, 0xd4, 0xe9, 0x60, 0x22, 0xee, 0xcf, 0xb8, 0x52, 0x1b, 0xf0, 0x04, 0xce, 0xbc, 0x2b, 0xf0, 0xbe, 0xcd, 0x44, 0x74, 0x1e, 0x1f, 0x63, 0xf9,
- /* (2^434)P */ 0xe1, 0x3f, 0x95, 0x94, 0xb2, 0xb6, 0x31, 0xa9, 0x1b, 0xdb, 0xfd, 0x0e, 0xdb, 0xdd, 0x1a, 0x22, 0x78, 0x60, 0x9f, 0x75, 0x5f, 0x93, 0x06, 0x0c, 0xd8, 0xbb, 0xa2, 0x85, 0x2b, 0x5e, 0xc0, 0x9b, 0xa8, 0x5d, 0xaf, 0x93, 0x91, 0x91, 0x47, 0x41, 0x1a, 0xfc, 0xb4, 0x51, 0x85, 0xad, 0x69, 0x4d, 0x73, 0x69, 0xd5, 0x4e, 0x82, 0xfb, 0x66, 0xcb,
- /* (2^435)P */ 0x7c, 0xbe, 0xc7, 0x51, 0xc4, 0x74, 0x6e, 0xab, 0xfd, 0x41, 0x4f, 0x76, 0x4f, 0x24, 0x03, 0xd6, 0x2a, 0xb7, 0x42, 0xb4, 0xda, 0x41, 0x2c, 0x82, 0x48, 0x4c, 0x7f, 0x6f, 0x25, 0x5d, 0x36, 0xd4, 0x69, 0xf5, 0xef, 0x02, 0x81, 0xea, 0x6f, 0x19, 0x69, 0xe8, 0x6f, 0x5b, 0x2f, 0x14, 0x0e, 0x6f, 0x89, 0xb4, 0xb5, 0xd8, 0xae, 0xef, 0x7b, 0x87,
- /* (2^436)P */ 0xe9, 0x91, 0xa0, 0x8b, 0xc9, 0xe0, 0x01, 0x90, 0x37, 0xc1, 0x6f, 0xdc, 0x5e, 0xf7, 0xbf, 0x43, 0x00, 0xaa, 0x10, 0x76, 0x76, 0x18, 0x6e, 0x19, 0x1e, 0x94, 0x50, 0x11, 0x0a, 0xd1, 0xe2, 0xdb, 0x08, 0x21, 0xa0, 0x1f, 0xdb, 0x54, 0xfe, 0xea, 0x6e, 0xa3, 0x68, 0x56, 0x87, 0x0b, 0x22, 0x4e, 0x66, 0xf3, 0x82, 0x82, 0x00, 0xcd, 0xd4, 0x12,
- /* (2^437)P */ 0x25, 0x8e, 0x24, 0x77, 0x64, 0x4c, 0xe0, 0xf8, 0x18, 0xc0, 0xdc, 0xc7, 0x1b, 0x35, 0x65, 0xde, 0x67, 0x41, 0x5e, 0x6f, 0x90, 0x82, 0xa7, 0x2e, 0x6d, 0xf1, 0x47, 0xb4, 0x92, 0x9c, 0xfd, 0x6a, 0x9a, 0x41, 0x36, 0x20, 0x24, 0x58, 0xc3, 0x59, 0x07, 0x9a, 0xfa, 0x9f, 0x03, 0xcb, 0xc7, 0x69, 0x37, 0x60, 0xe1, 0xab, 0x13, 0x72, 0xee, 0xa2,
- /* (2^438)P */ 0x74, 0x78, 0xfb, 0x13, 0xcb, 0x8e, 0x37, 0x1a, 0xf6, 0x1d, 0x17, 0x83, 0x06, 0xd4, 0x27, 0x06, 0x21, 0xe8, 0xda, 0xdf, 0x6b, 0xf3, 0x83, 0x6b, 0x34, 0x8a, 0x8c, 0xee, 0x01, 0x05, 0x5b, 0xed, 0xd3, 0x1b, 0xc9, 0x64, 0x83, 0xc9, 0x49, 0xc2, 0x57, 0x1b, 0xdd, 0xcf, 0xf1, 0x9d, 0x63, 0xee, 0x1c, 0x0d, 0xa0, 0x0a, 0x73, 0x1f, 0x5b, 0x32,
- /* (2^439)P */ 0x29, 0xce, 0x1e, 0xc0, 0x6a, 0xf5, 0xeb, 0x99, 0x5a, 0x39, 0x23, 0xe9, 0xdd, 0xac, 0x44, 0x88, 0xbc, 0x80, 0x22, 0xde, 0x2c, 0xcb, 0xa8, 0x3b, 0xff, 0xf7, 0x6f, 0xc7, 0x71, 0x72, 0xa8, 0xa3, 0xf6, 0x4d, 0xc6, 0x75, 0xda, 0x80, 0xdc, 0xd9, 0x30, 0xd9, 0x07, 0x50, 0x5a, 0x54, 0x7d, 0xda, 0x39, 0x6f, 0x78, 0x94, 0xbf, 0x25, 0x98, 0xdc,
- /* (2^440)P */ 0x01, 0x26, 0x62, 0x44, 0xfb, 0x0f, 0x11, 0x72, 0x73, 0x0a, 0x16, 0xc7, 0x16, 0x9c, 0x9b, 0x37, 0xd8, 0xff, 0x4f, 0xfe, 0x57, 0xdb, 0xae, 0xef, 0x7d, 0x94, 0x30, 0x04, 0x70, 0x83, 0xde, 0x3c, 0xd4, 0xb5, 0x70, 0xda, 0xa7, 0x55, 0xc8, 0x19, 0xe1, 0x36, 0x15, 0x61, 0xe7, 0x3b, 0x7d, 0x85, 0xbb, 0xf3, 0x42, 0x5a, 0x94, 0xf4, 0x53, 0x2a,
- /* (2^441)P */ 0x14, 0x60, 0xa6, 0x0b, 0x83, 0xe1, 0x23, 0x77, 0xc0, 0xce, 0x50, 0xed, 0x35, 0x8d, 0x98, 0x99, 0x7d, 0xf5, 0x8d, 0xce, 0x94, 0x25, 0xc8, 0x0f, 0x6d, 0xfa, 0x4a, 0xa4, 0x3a, 0x1f, 0x66, 0xfb, 0x5a, 0x64, 0xaf, 0x8b, 0x54, 0x54, 0x44, 0x3f, 0x5b, 0x88, 0x61, 0xe4, 0x48, 0x45, 0x26, 0x20, 0xbe, 0x0d, 0x06, 0xbb, 0x65, 0x59, 0xe1, 0x36,
- /* (2^442)P */ 0xb7, 0x98, 0xce, 0xa3, 0xe3, 0xee, 0x11, 0x1b, 0x9e, 0x24, 0x59, 0x75, 0x31, 0x37, 0x44, 0x6f, 0x6b, 0x9e, 0xec, 0xb7, 0x44, 0x01, 0x7e, 0xab, 0xbb, 0x69, 0x5d, 0x11, 0xb0, 0x30, 0x64, 0xea, 0x91, 0xb4, 0x7a, 0x8c, 0x02, 0x4c, 0xb9, 0x10, 0xa7, 0xc7, 0x79, 0xe6, 0xdc, 0x77, 0xe3, 0xc8, 0xef, 0x3e, 0xf9, 0x38, 0x81, 0xce, 0x9a, 0xb2,
- /* (2^443)P */ 0x91, 0x12, 0x76, 0xd0, 0x10, 0xb4, 0xaf, 0xe1, 0x89, 0x3a, 0x93, 0x6b, 0x5c, 0x19, 0x5f, 0x24, 0xed, 0x04, 0x92, 0xc7, 0xf0, 0x00, 0x08, 0xc1, 0x92, 0xff, 0x90, 0xdb, 0xb2, 0xbf, 0xdf, 0x49, 0xcd, 0xbd, 0x5c, 0x6e, 0xbf, 0x16, 0xbb, 0x61, 0xf9, 0x20, 0x33, 0x35, 0x93, 0x11, 0xbc, 0x59, 0x69, 0xce, 0x18, 0x9f, 0xf8, 0x7b, 0xa1, 0x6e,
- /* (2^444)P */ 0xa1, 0xf4, 0xaf, 0xad, 0xf8, 0xe6, 0x99, 0xd2, 0xa1, 0x4d, 0xde, 0x56, 0xc9, 0x7b, 0x0b, 0x11, 0x3e, 0xbf, 0x89, 0x1a, 0x9a, 0x90, 0xe5, 0xe2, 0xa6, 0x37, 0x88, 0xa1, 0x68, 0x59, 0xae, 0x8c, 0xec, 0x02, 0x14, 0x8d, 0xb7, 0x2e, 0x25, 0x75, 0x7f, 0x76, 0x1a, 0xd3, 0x4d, 0xad, 0x8a, 0x00, 0x6c, 0x96, 0x49, 0xa4, 0xc3, 0x2e, 0x5c, 0x7b,
- /* (2^445)P */ 0x26, 0x53, 0xf7, 0xda, 0xa8, 0x01, 0x14, 0xb1, 0x63, 0xe3, 0xc3, 0x89, 0x88, 0xb0, 0x85, 0x40, 0x2b, 0x26, 0x9a, 0x10, 0x1a, 0x70, 0x33, 0xf4, 0x50, 0x9d, 0x4d, 0xd8, 0x64, 0xc6, 0x0f, 0xe1, 0x17, 0xc8, 0x10, 0x4b, 0xfc, 0xa0, 0xc9, 0xba, 0x2c, 0x98, 0x09, 0xf5, 0x84, 0xb6, 0x7c, 0x4e, 0xa3, 0xe3, 0x81, 0x1b, 0x32, 0x60, 0x02, 0xdd,
- /* (2^446)P */ 0xa3, 0xe5, 0x86, 0xd4, 0x43, 0xa8, 0xd1, 0x98, 0x9d, 0x9d, 0xdb, 0x04, 0xcf, 0x6e, 0x35, 0x05, 0x30, 0x53, 0x3b, 0xbc, 0x90, 0x00, 0x4a, 0xc5, 0x40, 0x2a, 0x0f, 0xde, 0x1a, 0xd7, 0x36, 0x27, 0x44, 0x62, 0xa6, 0xac, 0x9d, 0xd2, 0x70, 0x69, 0x14, 0x39, 0x9b, 0xd1, 0xc3, 0x0a, 0x3a, 0x82, 0x0e, 0xf1, 0x94, 0xd7, 0x42, 0x94, 0xd5, 0x7d,
- /* (2^447)P */ 0x04, 0xc0, 0x6e, 0x12, 0x90, 0x70, 0xf9, 0xdf, 0xf7, 0xc9, 0x86, 0xc0, 0xe6, 0x92, 0x8b, 0x0a, 0xa1, 0xc1, 0x3b, 0xcc, 0x33, 0xb7, 0xf0, 0xeb, 0x51, 0x50, 0x80, 0x20, 0x69, 0x1c, 0x4f, 0x89, 0x05, 0x1e, 0xe4, 0x7a, 0x0a, 0xc2, 0xf0, 0xf5, 0x78, 0x91, 0x76, 0x34, 0x45, 0xdc, 0x24, 0x53, 0x24, 0x98, 0xe2, 0x73, 0x6f, 0xe6, 0x46, 0x67,
-}
diff --git a/vendor/github.com/cloudflare/circl/ecc/goldilocks/constants.go b/vendor/github.com/cloudflare/circl/ecc/goldilocks/constants.go
deleted file mode 100644
index b6b236e5..00000000
--- a/vendor/github.com/cloudflare/circl/ecc/goldilocks/constants.go
+++ /dev/null
@@ -1,71 +0,0 @@
-package goldilocks
-
-import fp "github.com/cloudflare/circl/math/fp448"
-
-var (
- // genX is the x-coordinate of the generator of Goldilocks curve.
- genX = fp.Elt{
- 0x5e, 0xc0, 0x0c, 0xc7, 0x2b, 0xa8, 0x26, 0x26,
- 0x8e, 0x93, 0x00, 0x8b, 0xe1, 0x80, 0x3b, 0x43,
- 0x11, 0x65, 0xb6, 0x2a, 0xf7, 0x1a, 0xae, 0x12,
- 0x64, 0xa4, 0xd3, 0xa3, 0x24, 0xe3, 0x6d, 0xea,
- 0x67, 0x17, 0x0f, 0x47, 0x70, 0x65, 0x14, 0x9e,
- 0xda, 0x36, 0xbf, 0x22, 0xa6, 0x15, 0x1d, 0x22,
- 0xed, 0x0d, 0xed, 0x6b, 0xc6, 0x70, 0x19, 0x4f,
- }
- // genY is the y-coordinate of the generator of Goldilocks curve.
- genY = fp.Elt{
- 0x14, 0xfa, 0x30, 0xf2, 0x5b, 0x79, 0x08, 0x98,
- 0xad, 0xc8, 0xd7, 0x4e, 0x2c, 0x13, 0xbd, 0xfd,
- 0xc4, 0x39, 0x7c, 0xe6, 0x1c, 0xff, 0xd3, 0x3a,
- 0xd7, 0xc2, 0xa0, 0x05, 0x1e, 0x9c, 0x78, 0x87,
- 0x40, 0x98, 0xa3, 0x6c, 0x73, 0x73, 0xea, 0x4b,
- 0x62, 0xc7, 0xc9, 0x56, 0x37, 0x20, 0x76, 0x88,
- 0x24, 0xbc, 0xb6, 0x6e, 0x71, 0x46, 0x3f, 0x69,
- }
- // paramD is -39081 in Fp.
- paramD = fp.Elt{
- 0x56, 0x67, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- }
- // order is 2^446-0x8335dc163bb124b65129c96fde933d8d723a70aadc873d6d54a7bb0d,
- // which is the number of points in the prime subgroup.
- order = Scalar{
- 0xf3, 0x44, 0x58, 0xab, 0x92, 0xc2, 0x78, 0x23,
- 0x55, 0x8f, 0xc5, 0x8d, 0x72, 0xc2, 0x6c, 0x21,
- 0x90, 0x36, 0xd6, 0xae, 0x49, 0xdb, 0x4e, 0xc4,
- 0xe9, 0x23, 0xca, 0x7c, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f,
- }
- // residue448 is 2^448 mod order.
- residue448 = [4]uint64{
- 0x721cf5b5529eec34, 0x7a4cf635c8e9c2ab, 0xeec492d944a725bf, 0x20cd77058,
- }
- // invFour is 1/4 mod order.
- invFour = Scalar{
- 0x3d, 0x11, 0xd6, 0xaa, 0xa4, 0x30, 0xde, 0x48,
- 0xd5, 0x63, 0x71, 0xa3, 0x9c, 0x30, 0x5b, 0x08,
- 0xa4, 0x8d, 0xb5, 0x6b, 0xd2, 0xb6, 0x13, 0x71,
- 0xfa, 0x88, 0x32, 0xdf, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x0f,
- }
- // paramDTwist is -39082 in Fp. The D parameter of the twist curve.
- paramDTwist = fp.Elt{
- 0x55, 0x67, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- }
-)
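
The constants removed above are interdependent: invFour must be the inverse of 4 modulo order, and residue448 must equal 2^448 mod order. A minimal sketch that re-derives both from the documented value of the order, using only math/big (the program below is illustrative and not part of the vendored package):

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// order = 2^446 - 0x8335dc163bb124b65129c96fde933d8d723a70aadc873d6d54a7bb0d
	order := new(big.Int).Lsh(big.NewInt(1), 446)
	c, _ := new(big.Int).SetString("8335dc163bb124b65129c96fde933d8d723a70aadc873d6d54a7bb0d", 16)
	order.Sub(order, c)

	// invFour is 1/4 mod order; multiplying back by 4 must give 1.
	invFour := new(big.Int).ModInverse(big.NewInt(4), order)
	check := new(big.Int).Mul(invFour, big.NewInt(4))
	fmt.Println(check.Mod(check, order)) // 1

	// residue448 is 2^448 mod order, printed as hex for comparison
	// against the four little-endian uint64 words above.
	res := new(big.Int).Lsh(big.NewInt(1), 448)
	fmt.Printf("%x\n", res.Mod(res, order))
}
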
diff --git a/vendor/github.com/cloudflare/circl/ecc/goldilocks/curve.go b/vendor/github.com/cloudflare/circl/ecc/goldilocks/curve.go
deleted file mode 100644
index 5a939100..00000000
--- a/vendor/github.com/cloudflare/circl/ecc/goldilocks/curve.go
+++ /dev/null
@@ -1,80 +0,0 @@
-// Package goldilocks provides elliptic curve operations over the Goldilocks curve.
-package goldilocks
-
-import fp "github.com/cloudflare/circl/math/fp448"
-
-// Curve is the Goldilocks curve x^2+y^2 = 1 - 39081x^2y^2.
-type Curve struct{}
-
-// Identity returns the identity point.
-func (Curve) Identity() *Point {
- return &Point{
- y: fp.One(),
- z: fp.One(),
- }
-}
-
-// IsOnCurve returns true if the point lies on the curve.
-func (Curve) IsOnCurve(P *Point) bool {
- x2, y2, t, t2, z2 := &fp.Elt{}, &fp.Elt{}, &fp.Elt{}, &fp.Elt{}, &fp.Elt{}
- rhs, lhs := &fp.Elt{}, &fp.Elt{}
- fp.Mul(t, &P.ta, &P.tb) // t = ta*tb
- fp.Sqr(x2, &P.x) // x^2
- fp.Sqr(y2, &P.y) // y^2
- fp.Sqr(z2, &P.z) // z^2
- fp.Sqr(t2, t) // t^2
- fp.Add(lhs, x2, y2) // x^2 + y^2
- fp.Mul(rhs, t2, &paramD) // dt^2
- fp.Add(rhs, rhs, z2) // z^2 + dt^2
- fp.Sub(lhs, lhs, rhs) // x^2 + y^2 - (z^2 + dt^2)
- eq0 := fp.IsZero(lhs)
-
- fp.Mul(lhs, &P.x, &P.y) // xy
- fp.Mul(rhs, t, &P.z) // tz
- fp.Sub(lhs, lhs, rhs) // xy - tz
- eq1 := fp.IsZero(lhs)
- return eq0 && eq1
-}
-
-// Generator returns the generator point.
-func (Curve) Generator() *Point {
- return &Point{
- x: genX,
- y: genY,
- z: fp.One(),
- ta: genX,
- tb: genY,
- }
-}
-
-// Order returns the number of points in the prime subgroup.
-func (Curve) Order() Scalar { return order }
-
-// Double returns 2P.
-func (Curve) Double(P *Point) *Point { R := *P; R.Double(); return &R }
-
-// Add returns P+Q.
-func (Curve) Add(P, Q *Point) *Point { R := *P; R.Add(Q); return &R }
-
-// ScalarMult returns kP. This function runs in constant time.
-func (e Curve) ScalarMult(k *Scalar, P *Point) *Point {
- k4 := &Scalar{}
- k4.divBy4(k)
- return e.pull(twistCurve{}.ScalarMult(k4, e.push(P)))
-}
-
-// ScalarBaseMult returns kG where G is the generator point. This function runs in constant time.
-func (e Curve) ScalarBaseMult(k *Scalar) *Point {
- k4 := &Scalar{}
- k4.divBy4(k)
- return e.pull(twistCurve{}.ScalarBaseMult(k4))
-}
-
-// CombinedMult returns mG+nP, where G is the generator point. This function does not run in constant time.
-func (e Curve) CombinedMult(m, n *Scalar, P *Point) *Point {
- m4 := &Scalar{}
- n4 := &Scalar{}
- m4.divBy4(m)
- n4.divBy4(n)
- return e.pull(twistCurve{}.CombinedMult(m4, n4, twistCurve{}.pull(P)))
-}
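
ScalarMult and ScalarBaseMult divide k by 4 before working on the twist because the push/pull isogeny pair composes to multiplication by 4; since the subgroup order is odd, 1/4 exists mod order and [4]([k/4 mod order]P) = [k]P for P in the prime-order subgroup. A quick big.Int sanity check of that scalar identity (illustrative only):

package main

import (
	"fmt"
	"math/big"
)

func main() {
	order := new(big.Int).Lsh(big.NewInt(1), 446)
	c, _ := new(big.Int).SetString("8335dc163bb124b65129c96fde933d8d723a70aadc873d6d54a7bb0d", 16)
	order.Sub(order, c)

	k := big.NewInt(123456789)
	// k4 = k/4 mod order, as divBy4 computes via multiplication by invFour.
	k4 := new(big.Int).ModInverse(big.NewInt(4), order)
	k4.Mul(k4, k).Mod(k4, order)

	// Multiplying back by 4 (the degree of the composed isogeny) recovers k.
	back := new(big.Int).Mul(k4, big.NewInt(4))
	fmt.Println(back.Mod(back, order).Cmp(k) == 0) // true
}
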
diff --git a/vendor/github.com/cloudflare/circl/ecc/goldilocks/isogeny.go b/vendor/github.com/cloudflare/circl/ecc/goldilocks/isogeny.go
deleted file mode 100644
index b1daab85..00000000
--- a/vendor/github.com/cloudflare/circl/ecc/goldilocks/isogeny.go
+++ /dev/null
@@ -1,52 +0,0 @@
-package goldilocks
-
-import fp "github.com/cloudflare/circl/math/fp448"
-
-func (Curve) pull(P *twistPoint) *Point { return twistCurve{}.push(P) }
-func (twistCurve) pull(P *Point) *twistPoint { return Curve{}.push(P) }
-
-// push sends a point on the Goldilocks curve to a point on the twist curve.
-func (Curve) push(P *Point) *twistPoint {
- Q := &twistPoint{}
- Px, Py, Pz := &P.x, &P.y, &P.z
- a, b, c, d, e, f, g, h := &Q.x, &Q.y, &Q.z, &fp.Elt{}, &Q.ta, &Q.x, &Q.y, &Q.tb
- fp.Add(e, Px, Py) // x+y
- fp.Sqr(a, Px) // A = x^2
- fp.Sqr(b, Py) // B = y^2
- fp.Sqr(c, Pz) // z^2
- fp.Add(c, c, c) // C = 2*z^2
- *d = *a // D = A
- fp.Sqr(e, e) // (x+y)^2
- fp.Sub(e, e, a) // (x+y)^2-A
- fp.Sub(e, e, b) // E = (x+y)^2-A-B
- fp.Add(h, b, d) // H = B+D
- fp.Sub(g, b, d) // G = B-D
- fp.Sub(f, c, h) // F = C-H
- fp.Mul(&Q.z, f, g) // Z = F * G
- fp.Mul(&Q.x, e, f) // X = E * F
- fp.Mul(&Q.y, g, h) // Y = G * H, T = E * H
- return Q
-}
-
-// push sends a point on the twist curve to a point on the Goldilocks curve.
-func (twistCurve) push(P *twistPoint) *Point {
- Q := &Point{}
- Px, Py, Pz := &P.x, &P.y, &P.z
- a, b, c, d, e, f, g, h := &Q.x, &Q.y, &Q.z, &fp.Elt{}, &Q.ta, &Q.x, &Q.y, &Q.tb
- fp.Add(e, Px, Py) // x+y
- fp.Sqr(a, Px) // A = x^2
- fp.Sqr(b, Py) // B = y^2
- fp.Sqr(c, Pz) // z^2
- fp.Add(c, c, c) // C = 2*z^2
- fp.Neg(d, a) // D = -A
- fp.Sqr(e, e) // (x+y)^2
- fp.Sub(e, e, a) // (x+y)^2-A
- fp.Sub(e, e, b) // E = (x+y)^2-A-B
- fp.Add(h, b, d) // H = B+D
- fp.Sub(g, b, d) // G = B-D
- fp.Sub(f, c, h) // F = C-H
- fp.Mul(&Q.z, f, g) // Z = F * G
- fp.Mul(&Q.x, e, f) // X = E * F
- fp.Mul(&Q.y, g, h) // Y = G * H, T = E * H
- return Q
-}
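
Both push maps reuse the doubling trick from extended twisted Edwards coordinates: E = (x+y)^2 - A - B equals 2xy, so the cross term is obtained with a squaring instead of a multiplication. The identity itself is plain algebra; a two-line check over the integers:

package main

import "fmt"

func main() {
	x, y := int64(7), int64(11)
	e := (x+y)*(x+y) - x*x - y*y
	fmt.Println(e == 2*x*y) // true: one Sqr replaces one Mul in push
}
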
diff --git a/vendor/github.com/cloudflare/circl/ecc/goldilocks/point.go b/vendor/github.com/cloudflare/circl/ecc/goldilocks/point.go
deleted file mode 100644
index 11f73de0..00000000
--- a/vendor/github.com/cloudflare/circl/ecc/goldilocks/point.go
+++ /dev/null
@@ -1,171 +0,0 @@
-package goldilocks
-
-import (
- "errors"
- "fmt"
-
- fp "github.com/cloudflare/circl/math/fp448"
-)
-
-// Point is a point on the Goldilocks Curve.
-type Point struct{ x, y, z, ta, tb fp.Elt }
-
-func (P Point) String() string {
- return fmt.Sprintf("x: %v\ny: %v\nz: %v\nta: %v\ntb: %v", P.x, P.y, P.z, P.ta, P.tb)
-}
-
-// FromAffine creates a point from affine coordinates.
-func FromAffine(x, y *fp.Elt) (*Point, error) {
- P := &Point{
- x: *x,
- y: *y,
- z: fp.One(),
- ta: *x,
- tb: *y,
- }
- if !(Curve{}).IsOnCurve(P) {
- return P, errors.New("point not on curve")
- }
- return P, nil
-}
-
-// isLessThan returns true if 0 <= x < y, and assumes that slices are of the
-// same length and are interpreted in little-endian order.
-func isLessThan(x, y []byte) bool {
- i := len(x) - 1
- for i > 0 && x[i] == y[i] {
- i--
- }
- return x[i] < y[i]
-}
-
-// FromBytes returns a point from the input buffer.
-func FromBytes(in []byte) (*Point, error) {
- if len(in) < fp.Size+1 {
- return nil, errors.New("wrong input length")
- }
- err := errors.New("invalid decoding")
- P := &Point{}
- signX := in[fp.Size] >> 7
- copy(P.y[:], in[:fp.Size])
- p := fp.P()
- if !isLessThan(P.y[:], p[:]) {
- return nil, err
- }
-
- u, v := &fp.Elt{}, &fp.Elt{}
- one := fp.One()
- fp.Sqr(u, &P.y) // u = y^2
- fp.Mul(v, u, &paramD) // v = dy^2
- fp.Sub(u, u, &one) // u = y^2-1
- fp.Sub(v, v, &one) // v = dy^2-1
- isQR := fp.InvSqrt(&P.x, u, v) // x = sqrt(u/v)
- if !isQR {
- return nil, err
- }
- fp.Modp(&P.x) // x = x mod p
- if fp.IsZero(&P.x) && signX == 1 {
- return nil, err
- }
- if signX != (P.x[0] & 1) {
- fp.Neg(&P.x, &P.x)
- }
- P.ta = P.x
- P.tb = P.y
- P.z = fp.One()
- return P, nil
-}
-
-// IsIdentity returns true if P is the identity Point.
-func (P *Point) IsIdentity() bool {
- return fp.IsZero(&P.x) && !fp.IsZero(&P.y) && !fp.IsZero(&P.z) && P.y == P.z
-}
-
-// IsEqual returns true if P is equivalent to Q.
-func (P *Point) IsEqual(Q *Point) bool {
- l, r := &fp.Elt{}, &fp.Elt{}
- fp.Mul(l, &P.x, &Q.z)
- fp.Mul(r, &Q.x, &P.z)
- fp.Sub(l, l, r)
- b := fp.IsZero(l)
- fp.Mul(l, &P.y, &Q.z)
- fp.Mul(r, &Q.y, &P.z)
- fp.Sub(l, l, r)
- b = b && fp.IsZero(l)
- fp.Mul(l, &P.ta, &P.tb)
- fp.Mul(l, l, &Q.z)
- fp.Mul(r, &Q.ta, &Q.tb)
- fp.Mul(r, r, &P.z)
- fp.Sub(l, l, r)
- b = b && fp.IsZero(l)
- return b
-}
-
-// Neg obtains the additive inverse of the Point.
-func (P *Point) Neg() { fp.Neg(&P.x, &P.x); fp.Neg(&P.ta, &P.ta) }
-
-// ToAffine returns the x,y affine coordinates of P.
-func (P *Point) ToAffine() (x, y fp.Elt) {
- fp.Inv(&P.z, &P.z) // 1/z
- fp.Mul(&P.x, &P.x, &P.z) // x/z
- fp.Mul(&P.y, &P.y, &P.z) // y/z
- fp.Modp(&P.x)
- fp.Modp(&P.y)
- fp.SetOne(&P.z)
- P.ta = P.x
- P.tb = P.y
- return P.x, P.y
-}
-
-// ToBytes stores P into a slice of bytes.
-func (P *Point) ToBytes(out []byte) error {
- if len(out) < fp.Size+1 {
- return errors.New("wrong output length")
- }
- x, y := P.ToAffine()
- out[fp.Size] = (x[0] & 1) << 7
- return fp.ToBytes(out[:fp.Size], &y)
-}
-
-// MarshalBinary encodes the receiver into a binary form and returns the result.
-func (P *Point) MarshalBinary() (data []byte, err error) {
- data = make([]byte, fp.Size+1)
- err = P.ToBytes(data[:fp.Size+1])
- return data, err
-}
-
-// UnmarshalBinary must be able to decode the form generated by MarshalBinary.
-func (P *Point) UnmarshalBinary(data []byte) error { Q, err := FromBytes(data); if err != nil { return err }; *P = *Q; return nil }
-
-// Double sets P = 2P.
-func (P *Point) Double() { P.Add(P) }
-
-// Add sets P = P+Q.
-func (P *Point) Add(Q *Point) {
- // This is formula (5) from "Twisted Edwards Curves Revisited" by
- // Hisil H., Wong K.KH., Carter G., Dawson E. (2008)
- // https://doi.org/10.1007/978-3-540-89255-7_20
- x1, y1, z1, ta1, tb1 := &P.x, &P.y, &P.z, &P.ta, &P.tb
- x2, y2, z2, ta2, tb2 := &Q.x, &Q.y, &Q.z, &Q.ta, &Q.tb
- x3, y3, z3, E, H := &P.x, &P.y, &P.z, &P.ta, &P.tb
- A, B, C, D := &fp.Elt{}, &fp.Elt{}, &fp.Elt{}, &fp.Elt{}
- t1, t2, F, G := C, D, &fp.Elt{}, &fp.Elt{}
- fp.Mul(t1, ta1, tb1) // t1 = ta1*tb1
- fp.Mul(t2, ta2, tb2) // t2 = ta2*tb2
- fp.Mul(A, x1, x2) // A = x1*x2
- fp.Mul(B, y1, y2) // B = y1*y2
- fp.Mul(C, t1, t2) // t1*t2
- fp.Mul(C, C, &paramD) // C = d*t1*t2
- fp.Mul(D, z1, z2) // D = z1*z2
- fp.Add(F, x1, y1) // x1+y1
- fp.Add(E, x2, y2) // x2+y2
- fp.Mul(E, E, F) // (x1+y1)*(x2+y2)
- fp.Sub(E, E, A) // (x1+y1)*(x2+y2)-A
- fp.Sub(E, E, B) // E = (x1+y1)*(x2+y2)-A-B
- fp.Sub(F, D, C) // F = D-C
- fp.Add(G, D, C) // G = D+C
- fp.Sub(H, B, A) // H = B-A
- fp.Mul(z3, F, G) // Z = F * G
- fp.Mul(x3, E, F) // X = E * F
- fp.Mul(y3, G, H) // Y = G * H, T = E * H
-}
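
ToBytes and FromBytes use the EdDSA-style wire format: fp.Size (56) little-endian bytes of y, followed by one byte whose top bit carries the sign (parity) of x. A self-contained sketch of just the packing logic (pack/unpack are illustrative helpers, not package API):

package main

import "fmt"

const fieldSize = 56 // bytes per fp448 element

// pack stores y and the parity of x's lowest byte in the final bit.
func pack(y [fieldSize]byte, xLSB byte) [fieldSize + 1]byte {
	var out [fieldSize + 1]byte
	copy(out[:fieldSize], y[:])
	out[fieldSize] = (xLSB & 1) << 7 // sign of x in bit 7 of the last byte
	return out
}

// unpack recovers y and the sign bit; x itself is then recomputed from
// the curve equation, as FromBytes does with InvSqrt.
func unpack(in [fieldSize + 1]byte) (y [fieldSize]byte, signX byte) {
	copy(y[:], in[:fieldSize])
	signX = in[fieldSize] >> 7
	return y, signX
}

func main() {
	var y [fieldSize]byte
	y[0] = 0x42
	enc := pack(y, 1)
	dy, s := unpack(enc)
	fmt.Println(dy[0] == 0x42, s == 1) // true true
}
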
diff --git a/vendor/github.com/cloudflare/circl/ecc/goldilocks/scalar.go b/vendor/github.com/cloudflare/circl/ecc/goldilocks/scalar.go
deleted file mode 100644
index f98117b2..00000000
--- a/vendor/github.com/cloudflare/circl/ecc/goldilocks/scalar.go
+++ /dev/null
@@ -1,203 +0,0 @@
-package goldilocks
-
-import (
- "encoding/binary"
- "math/bits"
-)
-
-// ScalarSize is the size (in bytes) of scalars.
-const ScalarSize = 56 // 448 / 8
-
-// _N is the number of 64-bit words to store scalars.
-const _N = 7 // 448 / 64
-
-// Scalar represents a positive integer stored in little-endian order.
-type Scalar [ScalarSize]byte
-
-type scalar64 [_N]uint64
-
-func (z *scalar64) fromScalar(x *Scalar) {
- z[0] = binary.LittleEndian.Uint64(x[0*8 : 1*8])
- z[1] = binary.LittleEndian.Uint64(x[1*8 : 2*8])
- z[2] = binary.LittleEndian.Uint64(x[2*8 : 3*8])
- z[3] = binary.LittleEndian.Uint64(x[3*8 : 4*8])
- z[4] = binary.LittleEndian.Uint64(x[4*8 : 5*8])
- z[5] = binary.LittleEndian.Uint64(x[5*8 : 6*8])
- z[6] = binary.LittleEndian.Uint64(x[6*8 : 7*8])
-}
-
-func (z *scalar64) toScalar(x *Scalar) {
- binary.LittleEndian.PutUint64(x[0*8:1*8], z[0])
- binary.LittleEndian.PutUint64(x[1*8:2*8], z[1])
- binary.LittleEndian.PutUint64(x[2*8:3*8], z[2])
- binary.LittleEndian.PutUint64(x[3*8:4*8], z[3])
- binary.LittleEndian.PutUint64(x[4*8:5*8], z[4])
- binary.LittleEndian.PutUint64(x[5*8:6*8], z[5])
- binary.LittleEndian.PutUint64(x[6*8:7*8], z[6])
-}
-
-// add calculates z = x + y and returns the output carry. Assumes len(z) >= max(len(x),len(y)).
-func add(z, x, y []uint64) uint64 {
- l, L, zz := len(x), len(y), y
- if l > L {
- l, L, zz = L, l, x
- }
- c := uint64(0)
- for i := 0; i < l; i++ {
- z[i], c = bits.Add64(x[i], y[i], c)
- }
- for i := l; i < L; i++ {
- z[i], c = bits.Add64(zz[i], 0, c)
- }
- return c
-}
-
-// sub calculates z = x - y and returns the output borrow. Assumes len(z) >= max(len(x),len(y)).
-func sub(z, x, y []uint64) uint64 {
- l, L, zz := len(x), len(y), y
- if l > L {
- l, L, zz = L, l, x
- }
- c := uint64(0)
- for i := 0; i < l; i++ {
- z[i], c = bits.Sub64(x[i], y[i], c)
- }
- for i := l; i < L; i++ {
- z[i], c = bits.Sub64(zz[i], 0, c)
- }
- return c
-}
-
-// mulWord calculates z = x * y. Assumes len(z) >= len(x)+1.
-func mulWord(z, x []uint64, y uint64) {
- for i := range z {
- z[i] = 0
- }
- carry := uint64(0)
- for i := range x {
- hi, lo := bits.Mul64(x[i], y)
- lo, cc := bits.Add64(lo, z[i], 0)
- hi, _ = bits.Add64(hi, 0, cc)
- z[i], cc = bits.Add64(lo, carry, 0)
- carry, _ = bits.Add64(hi, 0, cc)
- }
- z[len(x)] = carry
-}
-
-// Cmov moves x into z if b=1.
-func (z *scalar64) Cmov(b uint64, x *scalar64) {
- m := uint64(0) - b
- for i := range z {
- z[i] = (z[i] &^ m) | (x[i] & m)
- }
-}
-
-// leftShift shifts the words of z to the left by one, inserting low at the bottom and returning the evicted most significant word.
-func (z *scalar64) leftShift(low uint64) uint64 {
- high := z[_N-1]
- for i := _N - 1; i > 0; i-- {
- z[i] = z[i-1]
- }
- z[0] = low
- return high
-}
-
-// reduceOneWord calculates z = z + 2^448*x such that the result fits in a Scalar.
-func (z *scalar64) reduceOneWord(x uint64) {
- prod := (&scalar64{})[:]
- mulWord(prod, residue448[:], x)
- cc := add(z[:], z[:], prod)
- mulWord(prod, residue448[:], cc)
- add(z[:], z[:], prod)
-}
-
-// modOrder reduces z mod order.
-func (z *scalar64) modOrder() {
- var o64, x scalar64
- o64.fromScalar(&order)
- // Performs: while (z >= order) { z = z-order }
- // At most 8 iterations are needed, since z exceeds the order by at most 3 bits (2^3 = 8).
- for i := 0; i < 8; i++ {
- c := sub(x[:], z[:], o64[:]) // (c || x) = z-order
- z.Cmov(1-c, &x) // if c != 0 { z = x }
- }
-}
-
-// FromBytes stores z = x mod order, where x is a number stored in little-endian order.
-func (z *Scalar) FromBytes(x []byte) {
- n := len(x)
- nCeil := (n + 7) >> 3
- for i := range z {
- z[i] = 0
- }
- if nCeil < _N {
- copy(z[:], x)
- return
- }
- copy(z[:], x[8*(nCeil-_N):])
- var z64 scalar64
- z64.fromScalar(z)
- for i := nCeil - _N - 1; i >= 0; i-- {
- low := binary.LittleEndian.Uint64(x[8*i:])
- high := z64.leftShift(low)
- z64.reduceOneWord(high)
- }
- z64.modOrder()
- z64.toScalar(z)
-}
-
-// divBy4 calculates z = x/4 mod order.
-func (z *Scalar) divBy4(x *Scalar) { z.Mul(x, &invFour) }
-
-// Red reduces z mod order.
-func (z *Scalar) Red() { var t scalar64; t.fromScalar(z); t.modOrder(); t.toScalar(z) }
-
-// Neg calculates z = -z mod order.
-func (z *Scalar) Neg() { z.Sub(&order, z) }
-
-// Add calculates z = x+y mod order.
-func (z *Scalar) Add(x, y *Scalar) {
- var z64, x64, y64, t scalar64
- x64.fromScalar(x)
- y64.fromScalar(y)
- c := add(z64[:], x64[:], y64[:])
- add(t[:], z64[:], residue448[:])
- z64.Cmov(c, &t)
- z64.modOrder()
- z64.toScalar(z)
-}
-
-// Sub calculates z = x-y mod order.
-func (z *Scalar) Sub(x, y *Scalar) {
- var z64, x64, y64, t scalar64
- x64.fromScalar(x)
- y64.fromScalar(y)
- c := sub(z64[:], x64[:], y64[:])
- sub(t[:], z64[:], residue448[:])
- z64.Cmov(c, &t)
- z64.modOrder()
- z64.toScalar(z)
-}
-
-// Mul calculates z = x*y mod order.
-func (z *Scalar) Mul(x, y *Scalar) {
- var z64, x64, y64 scalar64
- prod := (&[_N + 1]uint64{})[:]
- x64.fromScalar(x)
- y64.fromScalar(y)
- mulWord(prod, x64[:], y64[_N-1])
- copy(z64[:], prod[:_N])
- z64.reduceOneWord(prod[_N])
- for i := _N - 2; i >= 0; i-- {
- h := z64.leftShift(0)
- z64.reduceOneWord(h)
- mulWord(prod, x64[:], y64[i])
- c := add(z64[:], z64[:], prod[:_N])
- z64.reduceOneWord(prod[_N] + c)
- }
- z64.modOrder()
- z64.toScalar(z)
-}
-
-// IsZero returns true if z=0.
-func (z *Scalar) IsZero() bool { z.Red(); return *z == Scalar{} }
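
The word-by-word routines above implement arithmetic modulo the group order on 56-byte little-endian scalars, and their results can be cross-checked against math/big. A sketch (fromLE is an illustrative helper mirroring Scalar's byte layout):

package main

import (
	"fmt"
	"math/big"
)

const scalarSize = 56

// fromLE interprets a little-endian scalar as a big.Int.
func fromLE(s [scalarSize]byte) *big.Int {
	be := make([]byte, scalarSize)
	for i := range s {
		be[scalarSize-1-i] = s[i]
	}
	return new(big.Int).SetBytes(be)
}

func main() {
	order := new(big.Int).Lsh(big.NewInt(1), 446)
	c, _ := new(big.Int).SetString("8335dc163bb124b65129c96fde933d8d723a70aadc873d6d54a7bb0d", 16)
	order.Sub(order, c)

	var x, y [scalarSize]byte
	x[0], y[0] = 3, 5

	// Scalar.Mul(x, y) must agree with x*y mod order.
	z := new(big.Int).Mul(fromLE(x), fromLE(y))
	fmt.Println(z.Mod(z, order)) // 15
}
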
diff --git a/vendor/github.com/cloudflare/circl/ecc/goldilocks/twist.go b/vendor/github.com/cloudflare/circl/ecc/goldilocks/twist.go
deleted file mode 100644
index 8cd4e333..00000000
--- a/vendor/github.com/cloudflare/circl/ecc/goldilocks/twist.go
+++ /dev/null
@@ -1,138 +0,0 @@
-package goldilocks
-
-import (
- "crypto/subtle"
- "math/bits"
-
- "github.com/cloudflare/circl/internal/conv"
- "github.com/cloudflare/circl/math"
- fp "github.com/cloudflare/circl/math/fp448"
-)
-
-// twistCurve is -x^2+y^2=1-39082x^2y^2 and is 4-isogenous to Goldilocks.
-type twistCurve struct{}
-
-// Identity returns the identity point.
-func (twistCurve) Identity() *twistPoint {
- return &twistPoint{
- y: fp.One(),
- z: fp.One(),
- }
-}
-
-// subYDiv16 updates x = (x - y) / 16.
-func subYDiv16(x *scalar64, y int64) {
- s := uint64(y >> 63)
- x0, b0 := bits.Sub64((*x)[0], uint64(y), 0)
- x1, b1 := bits.Sub64((*x)[1], s, b0)
- x2, b2 := bits.Sub64((*x)[2], s, b1)
- x3, b3 := bits.Sub64((*x)[3], s, b2)
- x4, b4 := bits.Sub64((*x)[4], s, b3)
- x5, b5 := bits.Sub64((*x)[5], s, b4)
- x6, _ := bits.Sub64((*x)[6], s, b5)
- x[0] = (x0 >> 4) | (x1 << 60)
- x[1] = (x1 >> 4) | (x2 << 60)
- x[2] = (x2 >> 4) | (x3 << 60)
- x[3] = (x3 >> 4) | (x4 << 60)
- x[4] = (x4 >> 4) | (x5 << 60)
- x[5] = (x5 >> 4) | (x6 << 60)
- x[6] = (x6 >> 4)
-}
-
-func recodeScalar(d *[113]int8, k *Scalar) {
- var k64 scalar64
- k64.fromScalar(k)
- for i := 0; i < 112; i++ {
- d[i] = int8((k64[0] & 0x1f) - 16)
- subYDiv16(&k64, int64(d[i]))
- }
- d[112] = int8(k64[0])
-}
-
-// ScalarMult returns kP.
-func (e twistCurve) ScalarMult(k *Scalar, P *twistPoint) *twistPoint {
- var TabP [8]preTwistPointProy
- var S preTwistPointProy
- var d [113]int8
-
- var isZero int
- if k.IsZero() {
- isZero = 1
- }
- subtle.ConstantTimeCopy(isZero, k[:], order[:])
-
- minusK := *k
- isEven := 1 - int(k[0]&0x1)
- minusK.Neg()
- subtle.ConstantTimeCopy(isEven, k[:], minusK[:])
- recodeScalar(&d, k)
-
- P.oddMultiples(TabP[:])
- Q := e.Identity()
- for i := 112; i >= 0; i-- {
- Q.Double()
- Q.Double()
- Q.Double()
- Q.Double()
- mask := d[i] >> 7
- absDi := (d[i] + mask) ^ mask
- inx := int32((absDi - 1) >> 1)
- sig := int((d[i] >> 7) & 0x1)
- for j := range TabP {
- S.cmov(&TabP[j], uint(subtle.ConstantTimeEq(inx, int32(j))))
- }
- S.cneg(sig)
- Q.mixAdd(&S)
- }
- Q.cneg(uint(isEven))
- return Q
-}
-
-const (
- omegaFix = 7
- omegaVar = 5
-)
-
-// CombinedMult returns mG+nP.
-func (e twistCurve) CombinedMult(m, n *Scalar, P *twistPoint) *twistPoint {
- nafFix := math.OmegaNAF(conv.BytesLe2BigInt(m[:]), omegaFix)
- nafVar := math.OmegaNAF(conv.BytesLe2BigInt(n[:]), omegaVar)
-
- if len(nafFix) > len(nafVar) {
- nafVar = append(nafVar, make([]int32, len(nafFix)-len(nafVar))...)
- } else if len(nafFix) < len(nafVar) {
- nafFix = append(nafFix, make([]int32, len(nafVar)-len(nafFix))...)
- }
-
- var TabQ [1 << (omegaVar - 2)]preTwistPointProy
- P.oddMultiples(TabQ[:])
- Q := e.Identity()
- for i := len(nafFix) - 1; i >= 0; i-- {
- Q.Double()
- // Generator point
- if nafFix[i] != 0 {
- idxM := absolute(nafFix[i]) >> 1
- R := tabVerif[idxM]
- if nafFix[i] < 0 {
- R.neg()
- }
- Q.mixAddZ1(&R)
- }
- // Variable input point
- if nafVar[i] != 0 {
- idxN := absolute(nafVar[i]) >> 1
- S := TabQ[idxN]
- if nafVar[i] < 0 {
- S.neg()
- }
- Q.mixAdd(&S)
- }
- }
- return Q
-}
-
-// absolute returns the absolute value of x.
-func absolute(x int32) int32 {
- mask := x >> 31
- return (x + mask) ^ mask
-}
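
recodeScalar rewrites an odd scalar as 113 signed radix-16 digits d[i] in [-16,15] with k = sum d[i]*16^i, so the main loop of ScalarMult can do four doublings per digit and select odd multiples from a small table with constant-time negation. A big.Int sketch of the same recoding and its reconstruction (recode below is illustrative, with a caller-chosen digit count):

package main

import (
	"fmt"
	"math/big"
)

// recode writes k as digits d[i] in [-16,15] with k = sum d[i]*16^i,
// mirroring the (k mod 32) - 16 step of recodeScalar. For odd k every
// digit comes out odd, which is what the table lookup relies on.
func recode(k *big.Int, n int) []int64 {
	d := make([]int64, n)
	t := new(big.Int).Set(k)
	m32 := big.NewInt(32)
	for i := 0; i < n-1; i++ {
		d[i] = new(big.Int).Mod(t, m32).Int64() - 16
		t.Sub(t, big.NewInt(d[i]))
		t.Rsh(t, 4) // exact division by 16
	}
	d[n-1] = t.Int64()
	return d
}

func main() {
	k := big.NewInt(0xDEADBEEF)
	d := recode(k, 12)

	// Horner reconstruction: sum d[i]*16^i must equal k.
	sum := new(big.Int)
	for i := len(d) - 1; i >= 0; i-- {
		sum.Lsh(sum, 4)
		sum.Add(sum, big.NewInt(d[i]))
	}
	fmt.Println(sum.Cmp(k) == 0) // true
}
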
diff --git a/vendor/github.com/cloudflare/circl/ecc/goldilocks/twistPoint.go b/vendor/github.com/cloudflare/circl/ecc/goldilocks/twistPoint.go
deleted file mode 100644
index c55db77b..00000000
--- a/vendor/github.com/cloudflare/circl/ecc/goldilocks/twistPoint.go
+++ /dev/null
@@ -1,135 +0,0 @@
-package goldilocks
-
-import (
- "fmt"
-
- fp "github.com/cloudflare/circl/math/fp448"
-)
-
-type twistPoint struct{ x, y, z, ta, tb fp.Elt }
-
-type preTwistPointAffine struct{ addYX, subYX, dt2 fp.Elt }
-
-type preTwistPointProy struct {
- preTwistPointAffine
- z2 fp.Elt
-}
-
-func (P *twistPoint) String() string {
- return fmt.Sprintf("x: %v\ny: %v\nz: %v\nta: %v\ntb: %v", P.x, P.y, P.z, P.ta, P.tb)
-}
-
-// cneg conditionally negates the point if b=1.
-func (P *twistPoint) cneg(b uint) {
- t := &fp.Elt{}
- fp.Neg(t, &P.x)
- fp.Cmov(&P.x, t, b)
- fp.Neg(t, &P.ta)
- fp.Cmov(&P.ta, t, b)
-}
-
-// Double updates P with 2P.
-func (P *twistPoint) Double() {
- // This is formula (7) from "Twisted Edwards Curves Revisited" by
- // Hisil H., Wong K.KH., Carter G., Dawson E. (2008)
- // https://doi.org/10.1007/978-3-540-89255-7_20
- Px, Py, Pz, Pta, Ptb := &P.x, &P.y, &P.z, &P.ta, &P.tb
- a, b, c, e, f, g, h := Px, Py, Pz, Pta, Px, Py, Ptb
- fp.Add(e, Px, Py) // x+y
- fp.Sqr(a, Px) // A = x^2
- fp.Sqr(b, Py) // B = y^2
- fp.Sqr(c, Pz) // z^2
- fp.Add(c, c, c) // C = 2*z^2
- fp.Add(h, a, b) // H = A+B
- fp.Sqr(e, e) // (x+y)^2
- fp.Sub(e, e, h) // E = (x+y)^2-A-B
- fp.Sub(g, b, a) // G = B-A
- fp.Sub(f, c, g) // F = C-G
- fp.Mul(Pz, f, g) // Z = F * G
- fp.Mul(Px, e, f) // X = E * F
- fp.Mul(Py, g, h) // Y = G * H, T = E * H
-}
-
-// mixAddZ1 calculates P = P+Q, where Q is a precomputed point with Z_Q = 1.
-func (P *twistPoint) mixAddZ1(Q *preTwistPointAffine) {
- fp.Add(&P.z, &P.z, &P.z) // D = 2*z1 (z2=1)
- P.coreAddition(Q)
-}
-
-// coreAddition calculates P=P+Q for curves with A=-1.
-func (P *twistPoint) coreAddition(Q *preTwistPointAffine) {
- // This is the formula following (5) from "Twisted Edwards Curves Revisited" by
- // Hisil H., Wong K.KH., Carter G., Dawson E. (2008)
- // https://doi.org/10.1007/978-3-540-89255-7_20
- Px, Py, Pz, Pta, Ptb := &P.x, &P.y, &P.z, &P.ta, &P.tb
- addYX2, subYX2, dt2 := &Q.addYX, &Q.subYX, &Q.dt2
- a, b, c, d, e, f, g, h := Px, Py, &fp.Elt{}, Pz, Pta, Px, Py, Ptb
- fp.Mul(c, Pta, Ptb) // t1 = ta*tb
- fp.Sub(h, Py, Px) // y1-x1
- fp.Add(b, Py, Px) // y1+x1
- fp.Mul(a, h, subYX2) // A = (y1-x1)*(y2-x2)
- fp.Mul(b, b, addYX2) // B = (y1+x1)*(y2+x2)
- fp.Mul(c, c, dt2) // C = 2*D*t1*t2
- fp.Sub(e, b, a) // E = B-A
- fp.Add(h, b, a) // H = B+A
- fp.Sub(f, d, c) // F = D-C
- fp.Add(g, d, c) // G = D+C
- fp.Mul(Pz, f, g) // Z = F * G
- fp.Mul(Px, e, f) // X = E * F
- fp.Mul(Py, g, h) // Y = G * H, T = E * H
-}
-
-func (P *preTwistPointAffine) neg() {
- P.addYX, P.subYX = P.subYX, P.addYX
- fp.Neg(&P.dt2, &P.dt2)
-}
-
-func (P *preTwistPointAffine) cneg(b int) {
- t := &fp.Elt{}
- fp.Cswap(&P.addYX, &P.subYX, uint(b))
- fp.Neg(t, &P.dt2)
- fp.Cmov(&P.dt2, t, uint(b))
-}
-
-func (P *preTwistPointAffine) cmov(Q *preTwistPointAffine, b uint) {
- fp.Cmov(&P.addYX, &Q.addYX, b)
- fp.Cmov(&P.subYX, &Q.subYX, b)
- fp.Cmov(&P.dt2, &Q.dt2, b)
-}
-
-// mixAdd calculates P = P+Q, where Q is a precomputed point with Z_Q != 1.
-func (P *twistPoint) mixAdd(Q *preTwistPointProy) {
- fp.Mul(&P.z, &P.z, &Q.z2) // D = 2*z1*z2
- P.coreAddition(&Q.preTwistPointAffine)
-}
-
-// oddMultiples calculates T[i] = (2*i+1)P for 0 <= i < len(T).
-func (P *twistPoint) oddMultiples(T []preTwistPointProy) {
- if n := len(T); n > 0 {
- T[0].FromTwistPoint(P)
- _2P := *P
- _2P.Double()
- R := &preTwistPointProy{}
- R.FromTwistPoint(&_2P)
- for i := 1; i < n; i++ {
- P.mixAdd(R)
- T[i].FromTwistPoint(P)
- }
- }
-}
-
-// cmov conditionally moves Q into P if b=1.
-func (P *preTwistPointProy) cmov(Q *preTwistPointProy, b uint) {
- P.preTwistPointAffine.cmov(&Q.preTwistPointAffine, b)
- fp.Cmov(&P.z2, &Q.z2, b)
-}
-
-// FromTwistPoint precomputes some coordinates of Q for mixed addition.
-func (P *preTwistPointProy) FromTwistPoint(Q *twistPoint) {
- fp.Add(&P.addYX, &Q.y, &Q.x) // addYX = X + Y
- fp.Sub(&P.subYX, &Q.y, &Q.x) // subYX = Y - X
- fp.Mul(&P.dt2, &Q.ta, &Q.tb) // T = ta*tb
- fp.Mul(&P.dt2, &P.dt2, &paramDTwist) // D*T
- fp.Add(&P.dt2, &P.dt2, &P.dt2) // dt2 = 2*D*T
- fp.Add(&P.z2, &Q.z, &Q.z) // z2 = 2*Z
-}
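
The cmov/cneg helpers above all bottom out in fp.Cmov and fp.Cswap, branchless selects driven by a mask. The same idiom in plain Go, for illustration only:

package main

import "fmt"

// cmov copies x into z when b == 1 and leaves z untouched when b == 0,
// with no data-dependent branch.
func cmov(z, x []byte, b byte) {
	m := -b // 0x00 when b == 0, 0xff when b == 1
	for i := range z {
		z[i] = (z[i] &^ m) | (x[i] & m)
	}
}

func main() {
	z := []byte{1, 2, 3}
	x := []byte{9, 9, 9}
	cmov(z, x, 0)
	fmt.Println(z) // [1 2 3]
	cmov(z, x, 1)
	fmt.Println(z) // [9 9 9]
}
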
diff --git a/vendor/github.com/cloudflare/circl/ecc/goldilocks/twistTables.go b/vendor/github.com/cloudflare/circl/ecc/goldilocks/twistTables.go
deleted file mode 100644
index ed432e02..00000000
--- a/vendor/github.com/cloudflare/circl/ecc/goldilocks/twistTables.go
+++ /dev/null
@@ -1,216 +0,0 @@
-package goldilocks
-
-import fp "github.com/cloudflare/circl/math/fp448"
-
-var tabFixMult = [fxV][fx2w1]preTwistPointAffine{
- {
- {
- addYX: fp.Elt{0x65, 0x4a, 0xdd, 0xdf, 0xb4, 0x79, 0x60, 0xc8, 0xa1, 0x70, 0xb4, 0x3a, 0x1e, 0x0c, 0x9b, 0x19, 0xe5, 0x48, 0x3f, 0xd7, 0x44, 0x18, 0x18, 0x14, 0x14, 0x27, 0x45, 0xd0, 0x2b, 0x24, 0xd5, 0x93, 0xc3, 0x74, 0x4c, 0x50, 0x70, 0x43, 0x26, 0x05, 0x08, 0x24, 0xca, 0x78, 0x30, 0xc1, 0x06, 0x8d, 0xd4, 0x86, 0x42, 0xf0, 0x14, 0xde, 0x08, 0x05},
- subYX: fp.Elt{0x64, 0x4a, 0xdd, 0xdf, 0xb4, 0x79, 0x60, 0xc8, 0xa1, 0x70, 0xb4, 0x3a, 0x1e, 0x0c, 0x9b, 0x19, 0xe5, 0x48, 0x3f, 0xd7, 0x44, 0x18, 0x18, 0x14, 0x14, 0x27, 0x45, 0xd0, 0x2d, 0x24, 0xd5, 0x93, 0xc3, 0x74, 0x4c, 0x50, 0x70, 0x43, 0x26, 0x05, 0x08, 0x24, 0xca, 0x78, 0x30, 0xc1, 0x06, 0x8d, 0xd4, 0x86, 0x42, 0xf0, 0x14, 0xde, 0x08, 0x05},
- dt2: fp.Elt{0x1a, 0x33, 0xea, 0x64, 0x45, 0x1c, 0xdf, 0x17, 0x1d, 0x16, 0x34, 0x28, 0xd6, 0x61, 0x19, 0x67, 0x79, 0xb4, 0x13, 0xcf, 0x3e, 0x7c, 0x0e, 0x72, 0xda, 0xf1, 0x5f, 0xda, 0xe6, 0xcf, 0x42, 0xd3, 0xb6, 0x17, 0xc2, 0x68, 0x13, 0x2d, 0xd9, 0x60, 0x3e, 0xae, 0xf0, 0x5b, 0x96, 0xf0, 0xcd, 0xaf, 0xea, 0xb7, 0x0d, 0x59, 0x16, 0xa7, 0xff, 0x55},
- },
- {
- addYX: fp.Elt{0xca, 0xd8, 0x7d, 0x86, 0x1a, 0xef, 0xad, 0x11, 0xe3, 0x27, 0x41, 0x7e, 0x7f, 0x3e, 0xa9, 0xd2, 0xb5, 0x4e, 0x50, 0xe0, 0x77, 0x91, 0xc2, 0x13, 0x52, 0x73, 0x41, 0x09, 0xa6, 0x57, 0x9a, 0xc8, 0xa8, 0x90, 0x9d, 0x26, 0x14, 0xbb, 0xa1, 0x2a, 0xf7, 0x45, 0x43, 0x4e, 0xea, 0x35, 0x62, 0xe1, 0x08, 0x85, 0x46, 0xb8, 0x24, 0x05, 0x2d, 0xab},
- subYX: fp.Elt{0x9b, 0xe6, 0xd3, 0xe5, 0xfe, 0x50, 0x36, 0x3c, 0x3c, 0x6d, 0x74, 0x1d, 0x74, 0xc0, 0xde, 0x5b, 0x45, 0x27, 0xe5, 0x12, 0xee, 0x63, 0x35, 0x6b, 0x13, 0xe2, 0x41, 0x6b, 0x3a, 0x05, 0x2b, 0xb1, 0x89, 0x26, 0xb6, 0xc6, 0xd1, 0x84, 0xff, 0x0e, 0x9b, 0xa3, 0xfb, 0x21, 0x36, 0x6b, 0x01, 0xf7, 0x9f, 0x7c, 0xeb, 0xf5, 0x18, 0x7a, 0x2a, 0x70},
- dt2: fp.Elt{0x09, 0xad, 0x99, 0x1a, 0x38, 0xd3, 0xdf, 0x22, 0x37, 0x32, 0x61, 0x8b, 0xf3, 0x19, 0x48, 0x08, 0xe8, 0x49, 0xb6, 0x4a, 0xa7, 0xed, 0xa4, 0xa2, 0xee, 0x86, 0xd7, 0x31, 0x5e, 0xce, 0x95, 0x76, 0x86, 0x42, 0x1c, 0x9d, 0x07, 0x14, 0x8c, 0x34, 0x18, 0x9c, 0x6d, 0x3a, 0xdf, 0xa9, 0xe8, 0x36, 0x7e, 0xe4, 0x95, 0xbe, 0xb5, 0x09, 0xf8, 0x9c},
- },
- {
- addYX: fp.Elt{0x51, 0xdb, 0x49, 0xa8, 0x9f, 0xe3, 0xd7, 0xec, 0x0d, 0x0f, 0x49, 0xe8, 0xb6, 0xc5, 0x0f, 0x5a, 0x1c, 0xce, 0x54, 0x0d, 0xb1, 0x8d, 0x5b, 0xbf, 0xf4, 0xaa, 0x34, 0x77, 0xc4, 0x5d, 0x59, 0xb6, 0xc5, 0x0e, 0x5a, 0xd8, 0x5b, 0x30, 0xc2, 0x1d, 0xec, 0x85, 0x1c, 0x42, 0xbe, 0x24, 0x2e, 0x50, 0x55, 0x44, 0xb2, 0x3a, 0x01, 0xaa, 0x98, 0xfb},
- subYX: fp.Elt{0xe7, 0x29, 0xb7, 0xd0, 0xaa, 0x4f, 0x32, 0x53, 0x56, 0xde, 0xbc, 0xd1, 0x92, 0x5d, 0x19, 0xbe, 0xa3, 0xe3, 0x75, 0x48, 0xe0, 0x7a, 0x1b, 0x54, 0x7a, 0xb7, 0x41, 0x77, 0x84, 0x38, 0xdd, 0x14, 0x9f, 0xca, 0x3f, 0xa3, 0xc8, 0xa7, 0x04, 0x70, 0xf1, 0x4d, 0x3d, 0xb3, 0x84, 0x79, 0xcb, 0xdb, 0xe4, 0xc5, 0x42, 0x9b, 0x57, 0x19, 0xf1, 0x2d},
- dt2: fp.Elt{0x20, 0xb4, 0x94, 0x9e, 0xdf, 0x31, 0x44, 0x0b, 0xc9, 0x7b, 0x75, 0x40, 0x9d, 0xd1, 0x96, 0x39, 0x70, 0x71, 0x15, 0xc8, 0x93, 0xd5, 0xc5, 0xe5, 0xba, 0xfe, 0xee, 0x08, 0x6a, 0x98, 0x0a, 0x1b, 0xb2, 0xaa, 0x3a, 0xf4, 0xa4, 0x79, 0xf9, 0x8e, 0x4d, 0x65, 0x10, 0x9b, 0x3a, 0x6e, 0x7c, 0x87, 0x94, 0x92, 0x11, 0x65, 0xbf, 0x1a, 0x09, 0xde},
- },
- {
- addYX: fp.Elt{0xf3, 0x84, 0x76, 0x77, 0xa5, 0x6b, 0x27, 0x3b, 0x83, 0x3d, 0xdf, 0xa0, 0xeb, 0x32, 0x6d, 0x58, 0x81, 0x57, 0x64, 0xc2, 0x21, 0x7c, 0x9b, 0xea, 0xe6, 0xb0, 0x93, 0xf9, 0xe7, 0xc3, 0xed, 0x5a, 0x8e, 0xe2, 0xb4, 0x72, 0x76, 0x66, 0x0f, 0x22, 0x29, 0x94, 0x3e, 0x63, 0x48, 0x5e, 0x80, 0xcb, 0xac, 0xfa, 0x95, 0xb6, 0x4b, 0xc4, 0x95, 0x33},
- subYX: fp.Elt{0x0c, 0x55, 0xd1, 0x5e, 0x5f, 0xbf, 0xbf, 0xe2, 0x4c, 0xfc, 0x37, 0x4a, 0xc4, 0xb1, 0xf4, 0x83, 0x61, 0x93, 0x60, 0x8e, 0x9f, 0x31, 0xf0, 0xa0, 0x41, 0xff, 0x1d, 0xe2, 0x7f, 0xca, 0x40, 0xd6, 0x88, 0xe8, 0x91, 0x61, 0xe2, 0x11, 0x18, 0x83, 0xf3, 0x25, 0x2f, 0x3f, 0x49, 0x40, 0xd4, 0x83, 0xe2, 0xd7, 0x74, 0x6a, 0x16, 0x86, 0x4e, 0xab},
- dt2: fp.Elt{0xdd, 0x58, 0x65, 0xd8, 0x9f, 0xdd, 0x70, 0x7f, 0x0f, 0xec, 0xbd, 0x5c, 0x5c, 0x9b, 0x7e, 0x1b, 0x9f, 0x79, 0x36, 0x1f, 0xfd, 0x79, 0x10, 0x1c, 0x52, 0xf3, 0x22, 0xa4, 0x1f, 0x71, 0x6e, 0x63, 0x14, 0xf4, 0xa7, 0x3e, 0xbe, 0xad, 0x43, 0x30, 0x38, 0x8c, 0x29, 0xc6, 0xcf, 0x50, 0x75, 0x21, 0xe5, 0x78, 0xfd, 0xb0, 0x9a, 0xc4, 0x6d, 0xd4},
- },
- },
- {
- {
- addYX: fp.Elt{0x7a, 0xa1, 0x38, 0xa6, 0xfd, 0x0e, 0x96, 0xd5, 0x26, 0x76, 0x86, 0x70, 0x80, 0x30, 0xa6, 0x67, 0xeb, 0xf4, 0x39, 0xdb, 0x22, 0xf5, 0x9f, 0x98, 0xe4, 0xb5, 0x3a, 0x0c, 0x59, 0xbf, 0x85, 0xc6, 0xf0, 0x0b, 0x1c, 0x41, 0x38, 0x09, 0x01, 0xdb, 0xd6, 0x3c, 0xb7, 0xf1, 0x08, 0x6b, 0x4b, 0x9e, 0x63, 0x53, 0x83, 0xd3, 0xab, 0xa3, 0x72, 0x0d},
- subYX: fp.Elt{0x84, 0x68, 0x25, 0xe8, 0xe9, 0x8f, 0x91, 0xbf, 0xf7, 0xa4, 0x30, 0xae, 0xea, 0x9f, 0xdd, 0x56, 0x64, 0x09, 0xc9, 0x54, 0x68, 0x4e, 0x33, 0xc5, 0x6f, 0x7b, 0x2d, 0x52, 0x2e, 0x42, 0xbe, 0xbe, 0xf5, 0x64, 0xbf, 0x77, 0x54, 0xdf, 0xb0, 0x10, 0xd2, 0x16, 0x5d, 0xce, 0xaf, 0x9f, 0xfb, 0xa3, 0x63, 0x50, 0xcb, 0xc0, 0xd0, 0x88, 0x44, 0xa3},
- dt2: fp.Elt{0xc3, 0x8b, 0xa5, 0xf1, 0x44, 0xe4, 0x41, 0xcd, 0x75, 0xe3, 0x17, 0x69, 0x5b, 0xb9, 0xbb, 0xee, 0x82, 0xbb, 0xce, 0x57, 0xdf, 0x2a, 0x9c, 0x12, 0xab, 0x66, 0x08, 0x68, 0x05, 0x1b, 0x87, 0xee, 0x5d, 0x1e, 0x18, 0x14, 0x22, 0x4b, 0x99, 0x61, 0x75, 0x28, 0xe7, 0x65, 0x1c, 0x36, 0xb6, 0x18, 0x09, 0xa8, 0xdf, 0xef, 0x30, 0x35, 0xbc, 0x58},
- },
- {
- addYX: fp.Elt{0xc5, 0xd3, 0x0e, 0x6f, 0xaf, 0x06, 0x69, 0xc4, 0x07, 0x9e, 0x58, 0x6e, 0x3f, 0x49, 0xd9, 0x0a, 0x3c, 0x2c, 0x37, 0xcd, 0x27, 0x4d, 0x87, 0x91, 0x7a, 0xb0, 0x28, 0xad, 0x2f, 0x68, 0x92, 0x05, 0x97, 0xf1, 0x30, 0x5f, 0x4c, 0x10, 0x20, 0x30, 0xd3, 0x08, 0x3f, 0xc1, 0xc6, 0xb7, 0xb5, 0xd1, 0x71, 0x7b, 0xa8, 0x0a, 0xd8, 0xf5, 0x17, 0xcf},
- subYX: fp.Elt{0x64, 0xd4, 0x8f, 0x91, 0x40, 0xab, 0x6e, 0x1a, 0x62, 0x83, 0xdc, 0xd7, 0x30, 0x1a, 0x4a, 0x2a, 0x4c, 0x54, 0x86, 0x19, 0x81, 0x5d, 0x04, 0x52, 0xa3, 0xca, 0x82, 0x38, 0xdc, 0x1e, 0xf0, 0x7a, 0x78, 0x76, 0x49, 0x4f, 0x71, 0xc4, 0x74, 0x2f, 0xf0, 0x5b, 0x2e, 0x5e, 0xac, 0xef, 0x17, 0xe4, 0x8e, 0x6e, 0xed, 0x43, 0x23, 0x61, 0x99, 0x49},
- dt2: fp.Elt{0x64, 0x90, 0x72, 0x76, 0xf8, 0x2c, 0x7d, 0x57, 0xf9, 0x30, 0x5e, 0x7a, 0x10, 0x74, 0x19, 0x39, 0xd9, 0xaf, 0x0a, 0xf1, 0x43, 0xed, 0x88, 0x9c, 0x8b, 0xdc, 0x9b, 0x1c, 0x90, 0xe7, 0xf7, 0xa3, 0xa5, 0x0d, 0xc6, 0xbc, 0x30, 0xfb, 0x91, 0x1a, 0x51, 0xba, 0x2d, 0xbe, 0x89, 0xdf, 0x1d, 0xdc, 0x53, 0xa8, 0x82, 0x8a, 0xd3, 0x8d, 0x16, 0x68},
- },
- {
- addYX: fp.Elt{0xef, 0x5c, 0xe3, 0x74, 0xbf, 0x13, 0x4a, 0xbf, 0x66, 0x73, 0x64, 0xb7, 0xd4, 0xce, 0x98, 0x82, 0x05, 0xfa, 0x98, 0x0c, 0x0a, 0xae, 0xe5, 0x6b, 0x9f, 0xac, 0xbb, 0x6e, 0x1f, 0xcf, 0xff, 0xa6, 0x71, 0x9a, 0xa8, 0x7a, 0x9e, 0x64, 0x1f, 0x20, 0x4a, 0x61, 0xa2, 0xd6, 0x50, 0xe3, 0xba, 0x81, 0x0c, 0x50, 0x59, 0x69, 0x59, 0x15, 0x55, 0xdb},
- subYX: fp.Elt{0xe8, 0x77, 0x4d, 0xe8, 0x66, 0x3d, 0xc1, 0x00, 0x3c, 0xf2, 0x25, 0x00, 0xdc, 0xb2, 0xe5, 0x9b, 0x12, 0x89, 0xf3, 0xd6, 0xea, 0x85, 0x60, 0xfe, 0x67, 0x91, 0xfd, 0x04, 0x7c, 0xe0, 0xf1, 0x86, 0x06, 0x11, 0x66, 0xee, 0xd4, 0xd5, 0xbe, 0x3b, 0x0f, 0xe3, 0x59, 0xb3, 0x4f, 0x00, 0xb6, 0xce, 0x80, 0xc1, 0x61, 0xf7, 0xaf, 0x04, 0x6a, 0x3c},
- dt2: fp.Elt{0x00, 0xd7, 0x32, 0x93, 0x67, 0x70, 0x6f, 0xd7, 0x69, 0xab, 0xb1, 0xd3, 0xdc, 0xd6, 0xa8, 0xdd, 0x35, 0x25, 0xca, 0xd3, 0x8a, 0x6d, 0xce, 0xfb, 0xfd, 0x2b, 0x83, 0xf0, 0xd4, 0xac, 0x66, 0xfb, 0x72, 0x87, 0x7e, 0x55, 0xb7, 0x91, 0x58, 0x10, 0xc3, 0x11, 0x7e, 0x15, 0xfe, 0x7c, 0x55, 0x90, 0xa3, 0x9e, 0xed, 0x9a, 0x7f, 0xa7, 0xb7, 0xeb},
- },
- {
- addYX: fp.Elt{0x25, 0x0f, 0xc2, 0x09, 0x9c, 0x10, 0xc8, 0x7c, 0x93, 0xa7, 0xbe, 0xe9, 0x26, 0x25, 0x7c, 0x21, 0xfe, 0xe7, 0x5f, 0x3c, 0x02, 0x83, 0xa7, 0x9e, 0xdf, 0xc0, 0x94, 0x2b, 0x7d, 0x1a, 0xd0, 0x1d, 0xcc, 0x2e, 0x7d, 0xd4, 0x85, 0xe7, 0xc1, 0x15, 0x66, 0xd6, 0xd6, 0x32, 0xb8, 0xf7, 0x63, 0xaa, 0x3b, 0xa5, 0xea, 0x49, 0xad, 0x88, 0x9b, 0x66},
- subYX: fp.Elt{0x09, 0x97, 0x79, 0x36, 0x41, 0x56, 0x9b, 0xdf, 0x15, 0xd8, 0x43, 0x28, 0x17, 0x5b, 0x96, 0xc9, 0xcf, 0x39, 0x1f, 0x13, 0xf7, 0x4d, 0x1d, 0x1f, 0xda, 0x51, 0x56, 0xe7, 0x0a, 0x5a, 0x65, 0xb6, 0x2a, 0x87, 0x49, 0x86, 0xc2, 0x2b, 0xcd, 0xfe, 0x07, 0xf6, 0x4c, 0xe2, 0x1d, 0x9b, 0xd8, 0x82, 0x09, 0x5b, 0x11, 0x10, 0x62, 0x56, 0x89, 0xbd},
- dt2: fp.Elt{0xd9, 0x15, 0x73, 0xf2, 0x96, 0x35, 0x53, 0xb0, 0xe7, 0xa8, 0x0b, 0x93, 0x35, 0x0b, 0x3a, 0x00, 0xf5, 0x18, 0xb1, 0xc3, 0x12, 0x3f, 0x91, 0x17, 0xc1, 0x4c, 0x15, 0x5a, 0x86, 0x92, 0x11, 0xbd, 0x44, 0x40, 0x5a, 0x7b, 0x15, 0x89, 0xba, 0xc1, 0xc1, 0xbc, 0x43, 0x45, 0xe6, 0x52, 0x02, 0x73, 0x0a, 0xd0, 0x2a, 0x19, 0xda, 0x47, 0xa8, 0xff},
- },
- },
-}
-
-// tabVerif contains the odd multiples of P. The entry T[i] = (2i+1)P, where
-// P = phi(G) and G is the generator of the Goldilocks curve, and phi is a
-// degree-4 isogeny.
-var tabVerif = [1 << (omegaFix - 2)]preTwistPointAffine{
- { /* 1P*/
- addYX: fp.Elt{0x65, 0x4a, 0xdd, 0xdf, 0xb4, 0x79, 0x60, 0xc8, 0xa1, 0x70, 0xb4, 0x3a, 0x1e, 0x0c, 0x9b, 0x19, 0xe5, 0x48, 0x3f, 0xd7, 0x44, 0x18, 0x18, 0x14, 0x14, 0x27, 0x45, 0xd0, 0x2b, 0x24, 0xd5, 0x93, 0xc3, 0x74, 0x4c, 0x50, 0x70, 0x43, 0x26, 0x05, 0x08, 0x24, 0xca, 0x78, 0x30, 0xc1, 0x06, 0x8d, 0xd4, 0x86, 0x42, 0xf0, 0x14, 0xde, 0x08, 0x05},
- subYX: fp.Elt{0x64, 0x4a, 0xdd, 0xdf, 0xb4, 0x79, 0x60, 0xc8, 0xa1, 0x70, 0xb4, 0x3a, 0x1e, 0x0c, 0x9b, 0x19, 0xe5, 0x48, 0x3f, 0xd7, 0x44, 0x18, 0x18, 0x14, 0x14, 0x27, 0x45, 0xd0, 0x2d, 0x24, 0xd5, 0x93, 0xc3, 0x74, 0x4c, 0x50, 0x70, 0x43, 0x26, 0x05, 0x08, 0x24, 0xca, 0x78, 0x30, 0xc1, 0x06, 0x8d, 0xd4, 0x86, 0x42, 0xf0, 0x14, 0xde, 0x08, 0x05},
- dt2: fp.Elt{0x1a, 0x33, 0xea, 0x64, 0x45, 0x1c, 0xdf, 0x17, 0x1d, 0x16, 0x34, 0x28, 0xd6, 0x61, 0x19, 0x67, 0x79, 0xb4, 0x13, 0xcf, 0x3e, 0x7c, 0x0e, 0x72, 0xda, 0xf1, 0x5f, 0xda, 0xe6, 0xcf, 0x42, 0xd3, 0xb6, 0x17, 0xc2, 0x68, 0x13, 0x2d, 0xd9, 0x60, 0x3e, 0xae, 0xf0, 0x5b, 0x96, 0xf0, 0xcd, 0xaf, 0xea, 0xb7, 0x0d, 0x59, 0x16, 0xa7, 0xff, 0x55},
- },
- { /* 3P*/
- addYX: fp.Elt{0xd1, 0xe9, 0xa8, 0x33, 0x20, 0x76, 0x18, 0x08, 0x45, 0x2a, 0xc9, 0x67, 0x2a, 0xc3, 0x15, 0x24, 0xf9, 0x74, 0x21, 0x30, 0x99, 0x59, 0x8b, 0xb2, 0xf0, 0xa4, 0x07, 0xe2, 0x6a, 0x36, 0x8d, 0xd9, 0xd2, 0x4a, 0x7f, 0x73, 0x50, 0x39, 0x3d, 0xaa, 0xa7, 0x51, 0x73, 0x0d, 0x2b, 0x8b, 0x96, 0x47, 0xac, 0x3c, 0x5d, 0xaa, 0x39, 0x9c, 0xcf, 0xd5},
- subYX: fp.Elt{0x6b, 0x11, 0x5d, 0x1a, 0xf9, 0x41, 0x9d, 0xc5, 0x30, 0x3e, 0xad, 0x25, 0x2c, 0x04, 0x45, 0xea, 0xcc, 0x67, 0x07, 0x85, 0xe9, 0xda, 0x0e, 0xb5, 0x40, 0xb7, 0x32, 0xb4, 0x49, 0xdd, 0xff, 0xaa, 0xfc, 0xbb, 0x19, 0xca, 0x8b, 0x79, 0x2b, 0x8f, 0x8d, 0x00, 0x33, 0xc2, 0xad, 0xe9, 0xd3, 0x12, 0xa8, 0xaa, 0x87, 0x62, 0xad, 0x2d, 0xff, 0xa4},
- dt2: fp.Elt{0xb0, 0xaf, 0x3b, 0xea, 0xf0, 0x42, 0x0b, 0x5e, 0x88, 0xd3, 0x98, 0x08, 0x87, 0x59, 0x72, 0x0a, 0xc2, 0xdf, 0xcb, 0x7f, 0x59, 0xb5, 0x4c, 0x63, 0x68, 0xe8, 0x41, 0x38, 0x67, 0x4f, 0xe9, 0xc6, 0xb2, 0x6b, 0x08, 0xa7, 0xf7, 0x0e, 0xcd, 0xea, 0xca, 0x3d, 0xaf, 0x8e, 0xda, 0x4b, 0x2e, 0xd2, 0x88, 0x64, 0x8d, 0xc5, 0x5f, 0x76, 0x0f, 0x3d},
- },
- { /* 5P*/
- addYX: fp.Elt{0xe5, 0x65, 0xc9, 0xe2, 0x75, 0xf0, 0x7d, 0x1a, 0xba, 0xa4, 0x40, 0x4b, 0x93, 0x12, 0xa2, 0x80, 0x95, 0x0d, 0x03, 0x93, 0xe8, 0xa5, 0x4d, 0xe2, 0x3d, 0x81, 0xf5, 0xce, 0xd4, 0x2d, 0x25, 0x59, 0x16, 0x5c, 0xe7, 0xda, 0xc7, 0x45, 0xd2, 0x7e, 0x2c, 0x38, 0xd4, 0x37, 0x64, 0xb2, 0xc2, 0x28, 0xc5, 0x72, 0x16, 0x32, 0x45, 0x36, 0x6f, 0x9f},
- subYX: fp.Elt{0x09, 0xf4, 0x7e, 0xbd, 0x89, 0xdb, 0x19, 0x58, 0xe1, 0x08, 0x00, 0x8a, 0xf4, 0x5f, 0x2a, 0x32, 0x40, 0xf0, 0x2c, 0x3f, 0x5d, 0xe4, 0xfc, 0x89, 0x11, 0x24, 0xb4, 0x2f, 0x97, 0xad, 0xac, 0x8f, 0x19, 0xab, 0xfa, 0x12, 0xe5, 0xf9, 0x50, 0x4e, 0x50, 0x6f, 0x32, 0x30, 0x88, 0xa6, 0xe5, 0x48, 0x28, 0xa2, 0x1b, 0x9f, 0xcd, 0xe2, 0x43, 0x38},
- dt2: fp.Elt{0xa9, 0xcc, 0x53, 0x39, 0x86, 0x02, 0x60, 0x75, 0x34, 0x99, 0x57, 0xbd, 0xfc, 0x5a, 0x8e, 0xce, 0x5e, 0x98, 0x22, 0xd0, 0xa5, 0x24, 0xff, 0x90, 0x28, 0x9f, 0x58, 0xf3, 0x39, 0xe9, 0xba, 0x36, 0x23, 0xfb, 0x7f, 0x41, 0xcc, 0x2b, 0x5a, 0x25, 0x3f, 0x4c, 0x2a, 0xf1, 0x52, 0x6f, 0x2f, 0x07, 0xe3, 0x88, 0x81, 0x77, 0xdd, 0x7c, 0x88, 0x82},
- },
- { /* 7P*/
- addYX: fp.Elt{0xf7, 0xee, 0x88, 0xfd, 0x3a, 0xbf, 0x7e, 0x28, 0x39, 0x23, 0x79, 0xe6, 0x5c, 0x56, 0xcb, 0xb5, 0x48, 0x6a, 0x80, 0x6d, 0x37, 0x60, 0x6c, 0x10, 0x35, 0x49, 0x4b, 0x46, 0x60, 0xd4, 0x79, 0xd4, 0x53, 0xd3, 0x67, 0x88, 0xd0, 0x41, 0xd5, 0x43, 0x85, 0xc8, 0x71, 0xe3, 0x1c, 0xb6, 0xda, 0x22, 0x64, 0x8f, 0x80, 0xac, 0xad, 0x7d, 0xd5, 0x82},
- subYX: fp.Elt{0x92, 0x40, 0xc1, 0x83, 0x21, 0x9b, 0xd5, 0x7d, 0x3f, 0x29, 0xb6, 0x26, 0xef, 0x12, 0xb9, 0x27, 0x39, 0x42, 0x37, 0x97, 0x09, 0x9a, 0x08, 0xe1, 0x68, 0xb6, 0x7a, 0x3f, 0x9f, 0x45, 0xf8, 0x37, 0x19, 0x83, 0x97, 0xe6, 0x73, 0x30, 0x32, 0x35, 0xcf, 0xae, 0x5c, 0x12, 0x68, 0xdf, 0x6e, 0x2b, 0xde, 0x83, 0xa0, 0x44, 0x74, 0x2e, 0x4a, 0xe9},
- dt2: fp.Elt{0xcb, 0x22, 0x0a, 0xda, 0x6b, 0xc1, 0x8a, 0x29, 0xa1, 0xac, 0x8b, 0x5b, 0x8b, 0x32, 0x20, 0xf2, 0x21, 0xae, 0x0c, 0x43, 0xc4, 0xd7, 0x19, 0x37, 0x3d, 0x79, 0x25, 0x98, 0x6c, 0x9c, 0x22, 0x31, 0x2a, 0x55, 0x9f, 0xda, 0x5e, 0xa8, 0x13, 0xdb, 0x8e, 0x2e, 0x16, 0x39, 0xf4, 0x91, 0x6f, 0xec, 0x71, 0x71, 0xc9, 0x10, 0xf2, 0xa4, 0x8f, 0x11},
- },
- { /* 9P*/
- addYX: fp.Elt{0x85, 0xdd, 0x37, 0x62, 0x74, 0x8e, 0x33, 0x5b, 0x25, 0x12, 0x1b, 0xe7, 0xdf, 0x47, 0xe5, 0x12, 0xfd, 0x3a, 0x3a, 0xf5, 0x5d, 0x4c, 0xa2, 0x29, 0x3c, 0x5c, 0x2f, 0xee, 0x18, 0x19, 0x0a, 0x2b, 0xef, 0x67, 0x50, 0x7a, 0x0d, 0x29, 0xae, 0x55, 0x82, 0xcd, 0xd6, 0x41, 0x90, 0xb4, 0x13, 0x31, 0x5d, 0x11, 0xb8, 0xaa, 0x12, 0x86, 0x08, 0xac},
- subYX: fp.Elt{0xcc, 0x37, 0x8d, 0x83, 0x5f, 0xfd, 0xde, 0xd5, 0xf7, 0xf1, 0xae, 0x0a, 0xa7, 0x0b, 0xeb, 0x6d, 0x19, 0x8a, 0xb6, 0x1a, 0x59, 0xd8, 0xff, 0x3c, 0xbc, 0xbc, 0xef, 0x9c, 0xda, 0x7b, 0x75, 0x12, 0xaf, 0x80, 0x8f, 0x2c, 0x3c, 0xaa, 0x0b, 0x17, 0x86, 0x36, 0x78, 0x18, 0xc8, 0x8a, 0xf6, 0xb8, 0x2c, 0x2f, 0x57, 0x2c, 0x62, 0x57, 0xf6, 0x90},
- dt2: fp.Elt{0x83, 0xbc, 0xa2, 0x07, 0xa5, 0x38, 0x96, 0xea, 0xfe, 0x11, 0x46, 0x1d, 0x3b, 0xcd, 0x42, 0xc5, 0xee, 0x67, 0x04, 0x72, 0x08, 0xd8, 0xd9, 0x96, 0x07, 0xf7, 0xac, 0xc3, 0x64, 0xf1, 0x98, 0x2c, 0x55, 0xd7, 0x7d, 0xc8, 0x6c, 0xbd, 0x2c, 0xff, 0x15, 0xd6, 0x6e, 0xb8, 0x17, 0x8e, 0xa8, 0x27, 0x66, 0xb1, 0x73, 0x79, 0x96, 0xff, 0x29, 0x10},
- },
- { /* 11P*/
- addYX: fp.Elt{0x76, 0xcb, 0x9b, 0x0c, 0x5b, 0xfe, 0xe1, 0x2a, 0xdd, 0x6f, 0x6c, 0xdd, 0x6f, 0xb4, 0xc0, 0xc2, 0x1b, 0x4b, 0x38, 0xe8, 0x66, 0x8c, 0x1e, 0x31, 0x63, 0xb9, 0x94, 0xcd, 0xc3, 0x8c, 0x44, 0x25, 0x7b, 0xd5, 0x39, 0x80, 0xfc, 0x01, 0xaa, 0xf7, 0x2a, 0x61, 0x8a, 0x25, 0xd2, 0x5f, 0xc5, 0x66, 0x38, 0xa4, 0x17, 0xcf, 0x3e, 0x11, 0x0f, 0xa3},
- subYX: fp.Elt{0xe0, 0xb6, 0xd1, 0x9c, 0x71, 0x49, 0x2e, 0x7b, 0xde, 0x00, 0xda, 0x6b, 0xf1, 0xec, 0xe6, 0x7a, 0x15, 0x38, 0x71, 0xe9, 0x7b, 0xdb, 0xf8, 0x98, 0xc0, 0x91, 0x2e, 0x53, 0xee, 0x92, 0x87, 0x25, 0xc9, 0xb0, 0xbb, 0x33, 0x15, 0x46, 0x7f, 0xfd, 0x4f, 0x8b, 0x77, 0x05, 0x96, 0xb6, 0xe2, 0x08, 0xdb, 0x0d, 0x09, 0xee, 0x5b, 0xd1, 0x2a, 0x63},
- dt2: fp.Elt{0x8f, 0x7b, 0x57, 0x8c, 0xbf, 0x06, 0x0d, 0x43, 0x21, 0x92, 0x94, 0x2d, 0x6a, 0x38, 0x07, 0x0f, 0xa0, 0xf1, 0xe3, 0xd8, 0x2a, 0xbf, 0x46, 0xc6, 0x9e, 0x1f, 0x8f, 0x2b, 0x46, 0x84, 0x0b, 0x74, 0xed, 0xff, 0xf8, 0xa5, 0x94, 0xae, 0xf1, 0x67, 0xb1, 0x9b, 0xdd, 0x4a, 0xd0, 0xdb, 0xc2, 0xb5, 0x58, 0x49, 0x0c, 0xa9, 0x1d, 0x7d, 0xa9, 0xd3},
- },
- { /* 13P*/
- addYX: fp.Elt{0x73, 0x84, 0x2e, 0x31, 0x1f, 0xdc, 0xed, 0x9f, 0x74, 0xfa, 0xe0, 0x35, 0xb1, 0x85, 0x6a, 0x8d, 0x86, 0xd0, 0xff, 0xd6, 0x08, 0x43, 0x73, 0x1a, 0xd5, 0xf8, 0x43, 0xd4, 0xb3, 0xe5, 0x3f, 0xa8, 0x84, 0x17, 0x59, 0x65, 0x4e, 0xe6, 0xee, 0x54, 0x9c, 0xda, 0x5e, 0x7e, 0x98, 0x29, 0x6d, 0x73, 0x34, 0x1f, 0x99, 0x80, 0x54, 0x54, 0x81, 0x0b},
- subYX: fp.Elt{0xb1, 0xe5, 0xbb, 0x80, 0x22, 0x9c, 0x81, 0x6d, 0xaf, 0x27, 0x65, 0x6f, 0x7e, 0x9c, 0xb6, 0x8d, 0x35, 0x5c, 0x2e, 0x20, 0x48, 0x7a, 0x28, 0xf0, 0x97, 0xfe, 0xb7, 0x71, 0xce, 0xd6, 0xad, 0x3a, 0x81, 0xf6, 0x74, 0x5e, 0xf3, 0xfd, 0x1b, 0xd4, 0x1e, 0x7c, 0xc2, 0xb7, 0xc8, 0xa6, 0xc9, 0x89, 0x03, 0x47, 0xec, 0x24, 0xd6, 0x0e, 0xec, 0x9c},
- dt2: fp.Elt{0x91, 0x0a, 0x43, 0x34, 0x20, 0xc2, 0x64, 0xf7, 0x4e, 0x48, 0xc8, 0xd2, 0x95, 0x83, 0xd1, 0xa4, 0xfb, 0x4e, 0x41, 0x3b, 0x0d, 0xd5, 0x07, 0xd9, 0xf1, 0x13, 0x16, 0x78, 0x54, 0x57, 0xd0, 0xf1, 0x4f, 0x20, 0xac, 0xcf, 0x9c, 0x3b, 0x33, 0x0b, 0x99, 0x54, 0xc3, 0x7f, 0x3e, 0x57, 0x26, 0x86, 0xd5, 0xa5, 0x2b, 0x8d, 0xe3, 0x19, 0x36, 0xf7},
- },
- { /* 15P*/
- addYX: fp.Elt{0x23, 0x69, 0x47, 0x14, 0xf9, 0x9a, 0x50, 0xff, 0x64, 0xd1, 0x50, 0x35, 0xc3, 0x11, 0xd3, 0x19, 0xcf, 0x87, 0xda, 0x30, 0x0b, 0x50, 0xda, 0xc0, 0xe0, 0x25, 0x00, 0xe5, 0x68, 0x93, 0x04, 0xc2, 0xaf, 0xbd, 0x2f, 0x36, 0x5f, 0x47, 0x96, 0x10, 0xa8, 0xbd, 0xe4, 0x88, 0xac, 0x80, 0x52, 0x61, 0x73, 0xe9, 0x63, 0xdd, 0x99, 0xad, 0x20, 0x5b},
- subYX: fp.Elt{0x1b, 0x5e, 0xa2, 0x2a, 0x25, 0x0f, 0x86, 0xc0, 0xb1, 0x2e, 0x0c, 0x13, 0x40, 0x8d, 0xf0, 0xe6, 0x00, 0x55, 0x08, 0xc5, 0x7d, 0xf4, 0xc9, 0x31, 0x25, 0x3a, 0x99, 0x69, 0xdd, 0x67, 0x63, 0x9a, 0xd6, 0x89, 0x2e, 0xa1, 0x19, 0xca, 0x2c, 0xd9, 0x59, 0x5f, 0x5d, 0xc3, 0x6e, 0x62, 0x36, 0x12, 0x59, 0x15, 0xe1, 0xdc, 0xa4, 0xad, 0xc9, 0xd0},
- dt2: fp.Elt{0xbc, 0xea, 0xfc, 0xaf, 0x66, 0x23, 0xb7, 0x39, 0x6b, 0x2a, 0x96, 0xa8, 0x54, 0x43, 0xe9, 0xaa, 0x32, 0x40, 0x63, 0x92, 0x5e, 0xdf, 0x35, 0xc2, 0x9f, 0x24, 0x0c, 0xed, 0xfc, 0xde, 0x73, 0x8f, 0xa7, 0xd5, 0xa3, 0x2b, 0x18, 0x1f, 0xb0, 0xf8, 0xeb, 0x55, 0xd9, 0xc3, 0xfd, 0x28, 0x7c, 0x4f, 0xce, 0x0d, 0xf7, 0xae, 0xc2, 0x83, 0xc3, 0x78},
- },
- { /* 17P*/
- addYX: fp.Elt{0x71, 0xe6, 0x60, 0x93, 0x37, 0xdb, 0x01, 0xa5, 0x4c, 0xba, 0xe8, 0x8e, 0xd5, 0xf9, 0xd3, 0x98, 0xe5, 0xeb, 0xab, 0x3a, 0x15, 0x8b, 0x35, 0x60, 0xbe, 0xe5, 0x9c, 0x2d, 0x10, 0x9b, 0x2e, 0xcf, 0x65, 0x64, 0xea, 0x8f, 0x72, 0xce, 0xf5, 0x18, 0xe5, 0xe2, 0xf0, 0x0e, 0xae, 0x04, 0xec, 0xa0, 0x20, 0x65, 0x63, 0x07, 0xb1, 0x9f, 0x03, 0x97},
- subYX: fp.Elt{0x9e, 0x41, 0x64, 0x30, 0x95, 0x7f, 0x3a, 0x89, 0x7b, 0x0a, 0x79, 0x59, 0x23, 0x9a, 0x3b, 0xfe, 0xa4, 0x13, 0x08, 0xb2, 0x2e, 0x04, 0x50, 0x10, 0x30, 0xcd, 0x2e, 0xa4, 0x91, 0x71, 0x50, 0x36, 0x4a, 0x02, 0xf4, 0x8d, 0xa3, 0x36, 0x1b, 0xf4, 0x52, 0xba, 0x15, 0x04, 0x8b, 0x80, 0x25, 0xd9, 0xae, 0x67, 0x20, 0xd9, 0x88, 0x8f, 0x97, 0xa6},
- dt2: fp.Elt{0xb5, 0xe7, 0x46, 0xbd, 0x55, 0x23, 0xa0, 0x68, 0xc0, 0x12, 0xd9, 0xf1, 0x0a, 0x75, 0xe2, 0xda, 0xf4, 0x6b, 0xca, 0x14, 0xe4, 0x9f, 0x0f, 0xb5, 0x3c, 0xa6, 0xa5, 0xa2, 0x63, 0x94, 0xd1, 0x1c, 0x39, 0x58, 0x57, 0x02, 0x27, 0x98, 0xb6, 0x47, 0xc6, 0x61, 0x4b, 0x5c, 0xab, 0x6f, 0x2d, 0xab, 0xe3, 0xc1, 0x69, 0xf9, 0x12, 0xb0, 0xc8, 0xd5},
- },
- { /* 19P*/
- addYX: fp.Elt{0x19, 0x7d, 0xd5, 0xac, 0x79, 0xa2, 0x82, 0x9b, 0x28, 0x31, 0x22, 0xc0, 0x73, 0x02, 0x76, 0x17, 0x10, 0x70, 0x79, 0x57, 0xc9, 0x84, 0x62, 0x8e, 0x04, 0x04, 0x61, 0x67, 0x08, 0x48, 0xb4, 0x4b, 0xde, 0x53, 0x8c, 0xff, 0x36, 0x1b, 0x62, 0x86, 0x5d, 0xe1, 0x9b, 0xb1, 0xe5, 0xe8, 0x44, 0x64, 0xa1, 0x68, 0x3f, 0xa8, 0x45, 0x52, 0x91, 0xed},
- subYX: fp.Elt{0x42, 0x1a, 0x36, 0x1f, 0x90, 0x15, 0x24, 0x8d, 0x24, 0x80, 0xe6, 0xfe, 0x1e, 0xf0, 0xad, 0xaf, 0x6a, 0x93, 0xf0, 0xa6, 0x0d, 0x5d, 0xea, 0xf6, 0x62, 0x96, 0x7a, 0x05, 0x76, 0x85, 0x74, 0x32, 0xc7, 0xc8, 0x64, 0x53, 0x62, 0xe7, 0x54, 0x84, 0xe0, 0x40, 0x66, 0x19, 0x70, 0x40, 0x95, 0x35, 0x68, 0x64, 0x43, 0xcd, 0xba, 0x29, 0x32, 0xa8},
- dt2: fp.Elt{0x3e, 0xf6, 0xd6, 0xe4, 0x99, 0xeb, 0x20, 0x66, 0x08, 0x2e, 0x26, 0x64, 0xd7, 0x76, 0xf3, 0xb4, 0xc5, 0xa4, 0x35, 0x92, 0xd2, 0x99, 0x70, 0x5a, 0x1a, 0xe9, 0xe9, 0x3d, 0x3b, 0xe1, 0xcd, 0x0e, 0xee, 0x24, 0x13, 0x03, 0x22, 0xd6, 0xd6, 0x72, 0x08, 0x2b, 0xde, 0xfd, 0x93, 0xed, 0x0c, 0x7f, 0x5e, 0x31, 0x22, 0x4d, 0x80, 0x78, 0xc0, 0x48},
- },
- { /* 21P*/
- addYX: fp.Elt{0x8f, 0x72, 0xd2, 0x9e, 0xc4, 0xcd, 0x2c, 0xbf, 0xa8, 0xd3, 0x24, 0x62, 0x28, 0xee, 0x39, 0x0a, 0x19, 0x3a, 0x58, 0xff, 0x21, 0x2e, 0x69, 0x6c, 0x6e, 0x18, 0xd0, 0xcd, 0x61, 0xc1, 0x18, 0x02, 0x5a, 0xe9, 0xe3, 0xef, 0x1f, 0x8e, 0x10, 0xe8, 0x90, 0x2b, 0x48, 0xcd, 0xee, 0x38, 0xbd, 0x3a, 0xca, 0xbc, 0x2d, 0xe2, 0x3a, 0x03, 0x71, 0x02},
- subYX: fp.Elt{0xf8, 0xa4, 0x32, 0x26, 0x66, 0xaf, 0x3b, 0x53, 0xe7, 0xb0, 0x91, 0x92, 0xf5, 0x3c, 0x74, 0xce, 0xf2, 0xdd, 0x68, 0xa9, 0xf4, 0xcd, 0x5f, 0x60, 0xab, 0x71, 0xdf, 0xcd, 0x5c, 0x5d, 0x51, 0x72, 0x3a, 0x96, 0xea, 0xd6, 0xde, 0x54, 0x8e, 0x55, 0x4c, 0x08, 0x4c, 0x60, 0xdd, 0x34, 0xa9, 0x6f, 0xf3, 0x04, 0x02, 0xa8, 0xa6, 0x4e, 0x4d, 0x62},
- dt2: fp.Elt{0x76, 0x4a, 0xae, 0x38, 0x62, 0x69, 0x72, 0xdc, 0xe8, 0x43, 0xbe, 0x1d, 0x61, 0xde, 0x31, 0xc3, 0x42, 0x8f, 0x33, 0x9d, 0xca, 0xc7, 0x9c, 0xec, 0x6a, 0xe2, 0xaa, 0x01, 0x49, 0x78, 0x8d, 0x72, 0x4f, 0x38, 0xea, 0x52, 0xc2, 0xd3, 0xc9, 0x39, 0x71, 0xba, 0xb9, 0x09, 0x9b, 0xa3, 0x7f, 0x45, 0x43, 0x65, 0x36, 0x29, 0xca, 0xe7, 0x5c, 0x5f},
- },
- { /* 23P*/
- addYX: fp.Elt{0x89, 0x42, 0x35, 0x48, 0x6d, 0x74, 0xe5, 0x1f, 0xc3, 0xdd, 0x28, 0x5b, 0x84, 0x41, 0x33, 0x9f, 0x42, 0xf3, 0x1d, 0x5d, 0x15, 0x6d, 0x76, 0x33, 0x36, 0xaf, 0xe9, 0xdd, 0xfa, 0x63, 0x4f, 0x7a, 0x9c, 0xeb, 0x1c, 0x4f, 0x34, 0x65, 0x07, 0x54, 0xbb, 0x4c, 0x8b, 0x62, 0x9d, 0xd0, 0x06, 0x99, 0xb3, 0xe9, 0xda, 0x85, 0x19, 0xb0, 0x3d, 0x3c},
- subYX: fp.Elt{0xbb, 0x99, 0xf6, 0xbf, 0xaf, 0x2c, 0x22, 0x0d, 0x7a, 0xaa, 0x98, 0x6f, 0x01, 0x82, 0x99, 0xcf, 0x88, 0xbd, 0x0e, 0x3a, 0x89, 0xe0, 0x9c, 0x8c, 0x17, 0x20, 0xc4, 0xe0, 0xcf, 0x43, 0x7a, 0xef, 0x0d, 0x9f, 0x87, 0xd4, 0xfb, 0xf2, 0x96, 0xb8, 0x03, 0xe8, 0xcb, 0x5c, 0xec, 0x65, 0x5f, 0x49, 0xa4, 0x7c, 0x85, 0xb4, 0xf6, 0xc7, 0xdb, 0xa3},
- dt2: fp.Elt{0x11, 0xf3, 0x32, 0xa3, 0xa7, 0xb2, 0x7d, 0x51, 0x82, 0x44, 0xeb, 0xa2, 0x7d, 0x72, 0xcb, 0xc6, 0xf6, 0xc7, 0xb2, 0x38, 0x0e, 0x0f, 0x4f, 0x29, 0x00, 0xe4, 0x5b, 0x94, 0x46, 0x86, 0x66, 0xa1, 0x83, 0xb3, 0xeb, 0x15, 0xb6, 0x31, 0x50, 0x28, 0xeb, 0xed, 0x0d, 0x32, 0x39, 0xe9, 0x23, 0x81, 0x99, 0x3e, 0xff, 0x17, 0x4c, 0x11, 0x43, 0xd1},
- },
- { /* 25P*/
- addYX: fp.Elt{0xce, 0xe7, 0xf8, 0x94, 0x8f, 0x96, 0xf8, 0x96, 0xe6, 0x72, 0x20, 0x44, 0x2c, 0xa7, 0xfc, 0xba, 0xc8, 0xe1, 0xbb, 0xc9, 0x16, 0x85, 0xcd, 0x0b, 0xe5, 0xb5, 0x5a, 0x7f, 0x51, 0x43, 0x63, 0x8b, 0x23, 0x8e, 0x1d, 0x31, 0xff, 0x46, 0x02, 0x66, 0xcc, 0x9e, 0x4d, 0xa2, 0xca, 0xe2, 0xc7, 0xfd, 0x22, 0xb1, 0xdb, 0xdf, 0x6f, 0xe6, 0xa5, 0x82},
- subYX: fp.Elt{0xd0, 0xf5, 0x65, 0x40, 0xec, 0x8e, 0x65, 0x42, 0x78, 0xc1, 0x65, 0xe4, 0x10, 0xc8, 0x0b, 0x1b, 0xdd, 0x96, 0x68, 0xce, 0xee, 0x45, 0x55, 0xd8, 0x6e, 0xd3, 0xe6, 0x77, 0x19, 0xae, 0xc2, 0x8d, 0x8d, 0x3e, 0x14, 0x3f, 0x6d, 0x00, 0x2f, 0x9b, 0xd1, 0x26, 0x60, 0x28, 0x0f, 0x3a, 0x47, 0xb3, 0xe6, 0x68, 0x28, 0x24, 0x25, 0xca, 0xc8, 0x06},
- dt2: fp.Elt{0x54, 0xbb, 0x60, 0x92, 0xdb, 0x8f, 0x0f, 0x38, 0xe0, 0xe6, 0xe4, 0xc9, 0xcc, 0x14, 0x62, 0x01, 0xc4, 0x2b, 0x0f, 0xcf, 0xed, 0x7d, 0x8e, 0xa4, 0xd9, 0x73, 0x0b, 0xba, 0x0c, 0xaf, 0x0c, 0xf9, 0xe2, 0xeb, 0x29, 0x2a, 0x53, 0xdf, 0x2c, 0x5a, 0xfa, 0x8f, 0xc1, 0x01, 0xd7, 0xb1, 0x45, 0x73, 0x92, 0x32, 0x83, 0x85, 0x12, 0x74, 0x89, 0x44},
- },
- { /* 27P*/
- addYX: fp.Elt{0x0b, 0x73, 0x3c, 0xc2, 0xb1, 0x2e, 0xe1, 0xa7, 0xf5, 0xc9, 0x7a, 0xfb, 0x3d, 0x2d, 0xac, 0x59, 0xdb, 0xfa, 0x36, 0x11, 0xd1, 0x13, 0x04, 0x51, 0x1d, 0xab, 0x9b, 0x6b, 0x93, 0xfe, 0xda, 0xb0, 0x8e, 0xb4, 0x79, 0x11, 0x21, 0x0f, 0x65, 0xb9, 0xbb, 0x79, 0x96, 0x2a, 0xfd, 0x30, 0xe0, 0xb4, 0x2d, 0x9a, 0x55, 0x25, 0x5d, 0xd4, 0xad, 0x2a},
- subYX: fp.Elt{0x9e, 0xc5, 0x04, 0xfe, 0xec, 0x3c, 0x64, 0x1c, 0xed, 0x95, 0xed, 0xae, 0xaf, 0x5c, 0x6e, 0x08, 0x9e, 0x02, 0x29, 0x59, 0x7e, 0x5f, 0xc4, 0x9a, 0xd5, 0x32, 0x72, 0x86, 0xe1, 0x4e, 0x3c, 0xce, 0x99, 0x69, 0x3b, 0xc4, 0xdd, 0x4d, 0xb7, 0xbb, 0xda, 0x3b, 0x1a, 0x99, 0xaa, 0x62, 0x15, 0xc1, 0xf0, 0xb6, 0x6c, 0xec, 0x56, 0xc1, 0xff, 0x0c},
- dt2: fp.Elt{0x2f, 0xf1, 0x3f, 0x7a, 0x2d, 0x56, 0x19, 0x7f, 0xea, 0xbe, 0x59, 0x2e, 0x13, 0x67, 0x81, 0xfb, 0xdb, 0xc8, 0xa3, 0x1d, 0xd5, 0xe9, 0x13, 0x8b, 0x29, 0xdf, 0xcf, 0x9f, 0xe7, 0xd9, 0x0b, 0x70, 0xd3, 0x15, 0x57, 0x4a, 0xe9, 0x50, 0x12, 0x1b, 0x81, 0x4b, 0x98, 0x98, 0xa8, 0x31, 0x1d, 0x27, 0x47, 0x38, 0xed, 0x57, 0x99, 0x26, 0xb2, 0xee},
- },
- { /* 29P*/
- addYX: fp.Elt{0x1c, 0xb2, 0xb2, 0x67, 0x3b, 0x8b, 0x3d, 0x5a, 0x30, 0x7e, 0x38, 0x7e, 0x3c, 0x3d, 0x28, 0x56, 0x59, 0xd8, 0x87, 0x53, 0x8b, 0xe6, 0x6c, 0x5d, 0xe5, 0x0a, 0x33, 0x10, 0xce, 0xa2, 0x17, 0x0d, 0xe8, 0x76, 0xee, 0x68, 0xa8, 0x72, 0x54, 0xbd, 0xa6, 0x24, 0x94, 0x6e, 0x77, 0xc7, 0x53, 0xb7, 0x89, 0x1c, 0x7a, 0xe9, 0x78, 0x9a, 0x74, 0x5f},
- subYX: fp.Elt{0x76, 0x96, 0x1c, 0xcf, 0x08, 0x55, 0xd8, 0x1e, 0x0d, 0xa3, 0x59, 0x95, 0x32, 0xf4, 0xc2, 0x8e, 0x84, 0x5e, 0x4b, 0x04, 0xda, 0x71, 0xc9, 0x78, 0x52, 0xde, 0x14, 0xb4, 0x31, 0xf4, 0xd4, 0xb8, 0x58, 0xc5, 0x20, 0xe8, 0xdd, 0x15, 0xb5, 0xee, 0xea, 0x61, 0xe0, 0xf5, 0xd6, 0xae, 0x55, 0x59, 0x05, 0x3e, 0xaf, 0x74, 0xac, 0x1f, 0x17, 0x82},
- dt2: fp.Elt{0x59, 0x24, 0xcd, 0xfc, 0x11, 0x7e, 0x85, 0x18, 0x3d, 0x69, 0xf7, 0x71, 0x31, 0x66, 0x98, 0x42, 0x95, 0x00, 0x8c, 0xb2, 0xae, 0x39, 0x7e, 0x85, 0xd6, 0xb0, 0x02, 0xec, 0xce, 0xfc, 0x25, 0xb2, 0xe3, 0x99, 0x8e, 0x5b, 0x61, 0x96, 0x2e, 0x6d, 0x96, 0x57, 0x71, 0xa5, 0x93, 0x41, 0x0e, 0x6f, 0xfd, 0x0a, 0xbf, 0xa9, 0xf7, 0x56, 0xa9, 0x3e},
- },
- { /* 31P*/
- addYX: fp.Elt{0xa2, 0x2e, 0x0c, 0x17, 0x4d, 0xcc, 0x85, 0x2c, 0x18, 0xa0, 0xd2, 0x08, 0xba, 0x11, 0xfa, 0x47, 0x71, 0x86, 0xaf, 0x36, 0x6a, 0xd7, 0xfe, 0xb9, 0xb0, 0x2f, 0x89, 0x98, 0x49, 0x69, 0xf8, 0x6a, 0xad, 0x27, 0x5e, 0x0a, 0x22, 0x60, 0x5e, 0x5d, 0xca, 0x06, 0x51, 0x27, 0x99, 0x29, 0x85, 0x68, 0x98, 0xe1, 0xc4, 0x21, 0x50, 0xa0, 0xe9, 0xc1},
- subYX: fp.Elt{0x4d, 0x70, 0xee, 0x91, 0x92, 0x3f, 0xb7, 0xd3, 0x1d, 0xdb, 0x8d, 0x6e, 0x16, 0xf5, 0x65, 0x7d, 0x5f, 0xb5, 0x6c, 0x59, 0x26, 0x70, 0x4b, 0xf2, 0xfc, 0xe7, 0xdf, 0x86, 0xfe, 0xa5, 0xa7, 0xa6, 0x5d, 0xfb, 0x06, 0xe9, 0xf9, 0xcc, 0xc0, 0x37, 0xcc, 0xd8, 0x09, 0x04, 0xd2, 0xa5, 0x1d, 0xd7, 0xb7, 0xce, 0x92, 0xac, 0x3c, 0xad, 0xfb, 0xae},
- dt2: fp.Elt{0x17, 0xa3, 0x9a, 0xc7, 0x86, 0x2a, 0x51, 0xf7, 0x96, 0x79, 0x49, 0x22, 0x2e, 0x5a, 0x01, 0x5c, 0xb5, 0x95, 0xd4, 0xe8, 0xcb, 0x00, 0xca, 0x2d, 0x55, 0xb6, 0x34, 0x36, 0x0b, 0x65, 0x46, 0xf0, 0x49, 0xfc, 0x87, 0x86, 0xe5, 0xc3, 0x15, 0xdb, 0x32, 0xcd, 0xf2, 0xd3, 0x82, 0x4c, 0xe6, 0x61, 0x8a, 0xaf, 0xd4, 0x9e, 0x0f, 0x5a, 0xf2, 0x81},
- },
- { /* 33P*/
- addYX: fp.Elt{0x88, 0x10, 0xc0, 0xcb, 0xf5, 0x77, 0xae, 0xa5, 0xbe, 0xf6, 0xcd, 0x2e, 0x8b, 0x7e, 0xbd, 0x79, 0x62, 0x4a, 0xeb, 0x69, 0xc3, 0x28, 0xaa, 0x72, 0x87, 0xa9, 0x25, 0x87, 0x46, 0xea, 0x0e, 0x62, 0xa3, 0x6a, 0x1a, 0xe2, 0xba, 0xdc, 0x81, 0x10, 0x33, 0x01, 0xf6, 0x16, 0x89, 0x80, 0xc6, 0xcd, 0xdb, 0xdc, 0xba, 0x0e, 0x09, 0x4a, 0x35, 0x4a},
- subYX: fp.Elt{0x86, 0xb2, 0x2b, 0xd0, 0xb8, 0x4a, 0x6d, 0x66, 0x7b, 0x32, 0xdf, 0x3b, 0x1a, 0x19, 0x1f, 0x63, 0xee, 0x1f, 0x3d, 0x1c, 0x5c, 0x14, 0x60, 0x5b, 0x72, 0x49, 0x07, 0xb1, 0x0d, 0x72, 0xc6, 0x35, 0xf0, 0xbc, 0x5e, 0xda, 0x80, 0x6b, 0x64, 0x5b, 0xe5, 0x34, 0x54, 0x39, 0xdd, 0xe6, 0x3c, 0xcb, 0xe5, 0x29, 0x32, 0x06, 0xc6, 0xb1, 0x96, 0x34},
- dt2: fp.Elt{0x85, 0x86, 0xf5, 0x84, 0x86, 0xe6, 0x77, 0x8a, 0x71, 0x85, 0x0c, 0x4f, 0x81, 0x5b, 0x29, 0x06, 0xb5, 0x2e, 0x26, 0x71, 0x07, 0x78, 0x07, 0xae, 0xbc, 0x95, 0x46, 0xc3, 0x65, 0xac, 0xe3, 0x76, 0x51, 0x7d, 0xd4, 0x85, 0x31, 0xe3, 0x43, 0xf3, 0x1b, 0x7c, 0xf7, 0x6b, 0x2c, 0xf8, 0x1c, 0xbb, 0x8d, 0xca, 0xab, 0x4b, 0xba, 0x7f, 0xa4, 0xe2},
- },
- { /* 35P*/
- addYX: fp.Elt{0x1a, 0xee, 0xe7, 0xa4, 0x8a, 0x9d, 0x53, 0x80, 0xc6, 0xb8, 0x4e, 0xdc, 0x89, 0xe0, 0xc4, 0x2b, 0x60, 0x52, 0x6f, 0xec, 0x81, 0xd2, 0x55, 0x6b, 0x1b, 0x6f, 0x17, 0x67, 0x8e, 0x42, 0x26, 0x4c, 0x65, 0x23, 0x29, 0xc6, 0x7b, 0xcd, 0x9f, 0xad, 0x4b, 0x42, 0xd3, 0x0c, 0x75, 0xc3, 0x8a, 0xf5, 0xbe, 0x9e, 0x55, 0xf7, 0x47, 0x5d, 0xbd, 0x3a},
- subYX: fp.Elt{0x0d, 0xa8, 0x3b, 0xf9, 0xc7, 0x7e, 0xc6, 0x86, 0x94, 0xc0, 0x01, 0xff, 0x27, 0xce, 0x43, 0xac, 0xe5, 0xe1, 0xd2, 0x8d, 0xc1, 0x22, 0x31, 0xbe, 0xe1, 0xaf, 0xf9, 0x4a, 0x78, 0xa1, 0x0c, 0xaa, 0xd4, 0x80, 0xe4, 0x09, 0x8d, 0xfb, 0x1d, 0x52, 0xc8, 0x60, 0x2d, 0xf2, 0xa2, 0x89, 0x02, 0x56, 0x3d, 0x56, 0x27, 0x85, 0xc7, 0xf0, 0x2b, 0x9a},
- dt2: fp.Elt{0x62, 0x7c, 0xc7, 0x6b, 0x2c, 0x9d, 0x0a, 0x7c, 0xe5, 0x50, 0x3c, 0xe6, 0x87, 0x1c, 0x82, 0x30, 0x67, 0x3c, 0x39, 0xb6, 0xa0, 0x31, 0xfb, 0x03, 0x7b, 0xa1, 0x58, 0xdf, 0x12, 0x76, 0x5d, 0x5d, 0x0a, 0x8f, 0x9b, 0x37, 0x32, 0xc3, 0x60, 0x33, 0xea, 0x9f, 0x0a, 0x99, 0xfa, 0x20, 0xd0, 0x33, 0x21, 0xc3, 0x94, 0xd4, 0x86, 0x49, 0x7c, 0x4e},
- },
- { /* 37P*/
- addYX: fp.Elt{0xc7, 0x0c, 0x71, 0xfe, 0x55, 0xd1, 0x95, 0x8f, 0x43, 0xbb, 0x6b, 0x74, 0x30, 0xbd, 0xe8, 0x6f, 0x1c, 0x1b, 0x06, 0x62, 0xf5, 0xfc, 0x65, 0xa0, 0xeb, 0x81, 0x12, 0xc9, 0x64, 0x66, 0x61, 0xde, 0xf3, 0x6d, 0xd4, 0xae, 0x8e, 0xb1, 0x72, 0xe0, 0xcd, 0x37, 0x01, 0x28, 0x52, 0xd7, 0x39, 0x46, 0x0c, 0x55, 0xcf, 0x47, 0x70, 0xef, 0xa1, 0x17},
- subYX: fp.Elt{0x8d, 0x58, 0xde, 0x83, 0x88, 0x16, 0x0e, 0x12, 0x42, 0x03, 0x50, 0x60, 0x4b, 0xdf, 0xbf, 0x95, 0xcc, 0x7d, 0x18, 0x17, 0x7e, 0x31, 0x5d, 0x8a, 0x66, 0xc1, 0xcf, 0x14, 0xea, 0xf4, 0xf4, 0xe5, 0x63, 0x2d, 0x32, 0x86, 0x9b, 0xed, 0x1f, 0x4f, 0x03, 0xaf, 0x33, 0x92, 0xcb, 0xaf, 0x9c, 0x05, 0x0d, 0x47, 0x1b, 0x42, 0xba, 0x13, 0x22, 0x98},
- dt2: fp.Elt{0xb5, 0x48, 0xeb, 0x7d, 0x3d, 0x10, 0x9f, 0x59, 0xde, 0xf8, 0x1c, 0x4f, 0x7d, 0x9d, 0x40, 0x4d, 0x9e, 0x13, 0x24, 0xb5, 0x21, 0x09, 0xb7, 0xee, 0x98, 0x5c, 0x56, 0xbc, 0x5e, 0x2b, 0x78, 0x38, 0x06, 0xac, 0xe3, 0xe0, 0xfa, 0x2e, 0xde, 0x4f, 0xd2, 0xb3, 0xfb, 0x2d, 0x71, 0x84, 0xd1, 0x9d, 0x12, 0x5b, 0x35, 0xc8, 0x03, 0x68, 0x67, 0xc7},
- },
- { /* 39P*/
- addYX: fp.Elt{0xb6, 0x65, 0xfb, 0xa7, 0x06, 0x35, 0xbb, 0xe0, 0x31, 0x8d, 0x91, 0x40, 0x98, 0xab, 0x30, 0xe4, 0xca, 0x12, 0x59, 0x89, 0xed, 0x65, 0x5d, 0x7f, 0xae, 0x69, 0xa0, 0xa4, 0xfa, 0x78, 0xb4, 0xf7, 0xed, 0xae, 0x86, 0x78, 0x79, 0x64, 0x24, 0xa6, 0xd4, 0xe1, 0xf6, 0xd3, 0xa0, 0x89, 0xba, 0x20, 0xf4, 0x54, 0x0d, 0x8f, 0xdb, 0x1a, 0x79, 0xdb},
- subYX: fp.Elt{0xe1, 0x82, 0x0c, 0x4d, 0xde, 0x9f, 0x40, 0xf0, 0xc1, 0xbd, 0x8b, 0xd3, 0x24, 0x03, 0xcd, 0xf2, 0x92, 0x7d, 0xe2, 0x68, 0x7f, 0xf1, 0xbe, 0x69, 0xde, 0x34, 0x67, 0x4c, 0x85, 0x3b, 0xec, 0x98, 0xcc, 0x4d, 0x3e, 0xc0, 0x96, 0x27, 0xe6, 0x75, 0xfc, 0xdf, 0x37, 0xc0, 0x1e, 0x27, 0xe0, 0xf6, 0xc2, 0xbd, 0xbc, 0x3d, 0x9b, 0x39, 0xdc, 0xe2},
- dt2: fp.Elt{0xd8, 0x29, 0xa7, 0x39, 0xe3, 0x9f, 0x2f, 0x0e, 0x4b, 0x24, 0x21, 0x70, 0xef, 0xfd, 0x91, 0xea, 0xbf, 0xe1, 0x72, 0x90, 0xcc, 0xc9, 0x84, 0x0e, 0xad, 0xd5, 0xe6, 0xbb, 0xc5, 0x99, 0x7f, 0xa4, 0xf0, 0x2e, 0xcc, 0x95, 0x64, 0x27, 0x19, 0xd8, 0x4c, 0x27, 0x0d, 0xff, 0xb6, 0x29, 0xe2, 0x6c, 0xfa, 0xbb, 0x4d, 0x9c, 0xbb, 0xaf, 0xa5, 0xec},
- },
- { /* 41P*/
- addYX: fp.Elt{0xd6, 0x33, 0x3f, 0x9f, 0xcf, 0xfd, 0x4c, 0xd1, 0xfe, 0xe5, 0xeb, 0x64, 0x27, 0xae, 0x7a, 0xa2, 0x82, 0x50, 0x6d, 0xaa, 0xe3, 0x5d, 0xe2, 0x48, 0x60, 0xb3, 0x76, 0x04, 0xd9, 0x19, 0xa7, 0xa1, 0x73, 0x8d, 0x38, 0xa9, 0xaf, 0x45, 0xb5, 0xb2, 0x62, 0x9b, 0xf1, 0x35, 0x7b, 0x84, 0x66, 0xeb, 0x06, 0xef, 0xf1, 0xb2, 0x2d, 0x6a, 0x61, 0x15},
- subYX: fp.Elt{0x86, 0x50, 0x42, 0xf7, 0xda, 0x59, 0xb2, 0xcf, 0x0d, 0x3d, 0xee, 0x8e, 0x53, 0x5d, 0xf7, 0x9e, 0x6a, 0x26, 0x2d, 0xc7, 0x8c, 0x8e, 0x18, 0x50, 0x6d, 0xb7, 0x51, 0x4c, 0xa7, 0x52, 0x6e, 0x0e, 0x0a, 0x16, 0x74, 0xb2, 0x81, 0x8b, 0x56, 0x27, 0x22, 0x84, 0xf4, 0x56, 0xc5, 0x06, 0xe1, 0x8b, 0xca, 0x2d, 0xdb, 0x9a, 0xf6, 0x10, 0x9c, 0x51},
- dt2: fp.Elt{0x1f, 0x16, 0xa2, 0x78, 0x96, 0x1b, 0x85, 0x9c, 0x76, 0x49, 0xd4, 0x0f, 0xac, 0xb0, 0xf4, 0xd0, 0x06, 0x2c, 0x7e, 0x6d, 0x6e, 0x8e, 0xc7, 0x9f, 0x18, 0xad, 0xfc, 0x88, 0x0c, 0x0c, 0x09, 0x05, 0x05, 0xa0, 0x79, 0x72, 0x32, 0x72, 0x87, 0x0f, 0x49, 0x87, 0x0c, 0xb4, 0x12, 0xc2, 0x09, 0xf8, 0x9f, 0x30, 0x72, 0xa9, 0x47, 0x13, 0x93, 0x49},
- },
- { /* 43P*/
- addYX: fp.Elt{0xcc, 0xb1, 0x4c, 0xd3, 0xc0, 0x9e, 0x9e, 0x4d, 0x6d, 0x28, 0x0b, 0xa5, 0x94, 0xa7, 0x2e, 0xc2, 0xc7, 0xaf, 0x29, 0x73, 0xc9, 0x68, 0xea, 0x0f, 0x34, 0x37, 0x8d, 0x96, 0x8f, 0x3a, 0x3d, 0x73, 0x1e, 0x6d, 0x9f, 0xcf, 0x8d, 0x83, 0xb5, 0x71, 0xb9, 0xe1, 0x4b, 0x67, 0x71, 0xea, 0xcf, 0x56, 0xe5, 0xeb, 0x72, 0x15, 0x2f, 0x9e, 0xa8, 0xaa},
- subYX: fp.Elt{0xf4, 0x3e, 0x85, 0x1c, 0x1a, 0xef, 0x50, 0xd1, 0xb4, 0x20, 0xb2, 0x60, 0x05, 0x98, 0xfe, 0x47, 0x3b, 0xc1, 0x76, 0xca, 0x2c, 0x4e, 0x5a, 0x42, 0xa3, 0xf7, 0x20, 0xaa, 0x57, 0x39, 0xee, 0x34, 0x1f, 0xe1, 0x68, 0xd3, 0x7e, 0x06, 0xc4, 0x6c, 0xc7, 0x76, 0x2b, 0xe4, 0x1c, 0x48, 0x44, 0xe6, 0xe5, 0x44, 0x24, 0x8d, 0xb3, 0xb6, 0x88, 0x32},
- dt2: fp.Elt{0x18, 0xa7, 0xba, 0xd0, 0x44, 0x6f, 0x33, 0x31, 0x00, 0xf8, 0xf6, 0x12, 0xe3, 0xc5, 0xc7, 0xb5, 0x91, 0x9c, 0x91, 0xb5, 0x75, 0x18, 0x18, 0x8a, 0xab, 0xed, 0x24, 0x11, 0x2e, 0xce, 0x5a, 0x0f, 0x94, 0x5f, 0x2e, 0xca, 0xd3, 0x80, 0xea, 0xe5, 0x34, 0x96, 0x67, 0x8b, 0x6a, 0x26, 0x5e, 0xc8, 0x9d, 0x2c, 0x5e, 0x6c, 0xa2, 0x0c, 0xbf, 0xf0},
- },
- { /* 45P*/
- addYX: fp.Elt{0xb3, 0xbf, 0xa3, 0x85, 0xee, 0xf6, 0x58, 0x02, 0x78, 0xc4, 0x30, 0xd6, 0x57, 0x59, 0x8c, 0x88, 0x08, 0x7c, 0xbc, 0xbe, 0x0a, 0x74, 0xa9, 0xde, 0x69, 0xe7, 0x41, 0xd8, 0xbf, 0x66, 0x8d, 0x3d, 0x28, 0x00, 0x8c, 0x47, 0x65, 0x34, 0xfe, 0x86, 0x9e, 0x6a, 0xf2, 0x41, 0x6a, 0x94, 0xc4, 0x88, 0x75, 0x23, 0x0d, 0x52, 0x69, 0xee, 0x07, 0x89},
- subYX: fp.Elt{0x22, 0x3c, 0xa1, 0x70, 0x58, 0x97, 0x93, 0xbe, 0x59, 0xa8, 0x0b, 0x8a, 0x46, 0x2a, 0x38, 0x1e, 0x08, 0x6b, 0x61, 0x9f, 0xf2, 0x4a, 0x8b, 0x80, 0x68, 0x6e, 0xc8, 0x92, 0x60, 0xf3, 0xc9, 0x89, 0xb2, 0x6d, 0x63, 0xb0, 0xeb, 0x83, 0x15, 0x63, 0x0e, 0x64, 0xbb, 0xb8, 0xfe, 0xb4, 0x81, 0x90, 0x01, 0x28, 0x10, 0xb9, 0x74, 0x6e, 0xde, 0xa4},
- dt2: fp.Elt{0x1a, 0x23, 0x45, 0xa8, 0x6f, 0x4e, 0xa7, 0x4a, 0x0c, 0xeb, 0xb0, 0x43, 0xf9, 0xef, 0x99, 0x60, 0x5b, 0xdb, 0x66, 0xc0, 0x86, 0x71, 0x43, 0xb1, 0x22, 0x7b, 0x1c, 0xe7, 0x8d, 0x09, 0x1d, 0x83, 0x76, 0x9c, 0xd3, 0x5a, 0xdd, 0x42, 0xd9, 0x2f, 0x2d, 0xba, 0x7a, 0xc2, 0xd9, 0x6b, 0xd4, 0x7a, 0xf1, 0xd5, 0x5f, 0x6b, 0x85, 0xbf, 0x0b, 0xf1},
- },
- { /* 47P*/
- addYX: fp.Elt{0xb2, 0x83, 0xfa, 0x1f, 0xd2, 0xce, 0xb6, 0xf2, 0x2d, 0xea, 0x1b, 0xe5, 0x29, 0xa5, 0x72, 0xf9, 0x25, 0x48, 0x4e, 0xf2, 0x50, 0x1b, 0x39, 0xda, 0x34, 0xc5, 0x16, 0x13, 0xb4, 0x0c, 0xa1, 0x00, 0x79, 0x7a, 0xf5, 0x8b, 0xf3, 0x70, 0x14, 0xb6, 0xfc, 0x9a, 0x47, 0x68, 0x1e, 0x42, 0x70, 0x64, 0x2a, 0x84, 0x3e, 0x3d, 0x20, 0x58, 0xf9, 0x6a},
- subYX: fp.Elt{0xd9, 0xee, 0xc0, 0xc4, 0xf5, 0xc2, 0x86, 0xaf, 0x45, 0xd2, 0xd2, 0x87, 0x1b, 0x64, 0xd5, 0xe0, 0x8c, 0x44, 0x00, 0x4f, 0x43, 0x89, 0x04, 0x48, 0x4a, 0x0b, 0xca, 0x94, 0x06, 0x2f, 0x23, 0x5b, 0x6c, 0x8d, 0x44, 0x66, 0x53, 0xf5, 0x5a, 0x20, 0x72, 0x28, 0x58, 0x84, 0xcc, 0x73, 0x22, 0x5e, 0xd1, 0x0b, 0x56, 0x5e, 0x6a, 0xa3, 0x11, 0x91},
- dt2: fp.Elt{0x6e, 0x9f, 0x88, 0xa8, 0x68, 0x2f, 0x12, 0x37, 0x88, 0xfc, 0x92, 0x8f, 0x24, 0xeb, 0x5b, 0x2a, 0x2a, 0xd0, 0x14, 0x40, 0x4c, 0xa9, 0xa4, 0x03, 0x0c, 0x45, 0x48, 0x13, 0xe8, 0xa6, 0x37, 0xab, 0xc0, 0x06, 0x38, 0x6c, 0x96, 0x73, 0x40, 0x6c, 0xc6, 0xea, 0x56, 0xc6, 0xe9, 0x1a, 0x69, 0xeb, 0x7a, 0xd1, 0x33, 0x69, 0x58, 0x2b, 0xea, 0x2f},
- },
- { /* 49P*/
- addYX: fp.Elt{0x58, 0xa8, 0x05, 0x41, 0x00, 0x9d, 0xaa, 0xd9, 0x98, 0xcf, 0xb9, 0x41, 0xb5, 0x4a, 0x8d, 0xe2, 0xe7, 0xc0, 0x72, 0xef, 0xc8, 0x28, 0x6b, 0x68, 0x9d, 0xc9, 0xdf, 0x05, 0x8b, 0xd0, 0x04, 0x74, 0x79, 0x45, 0x52, 0x05, 0xa3, 0x6e, 0x35, 0x3a, 0xe3, 0xef, 0xb2, 0xdc, 0x08, 0x6f, 0x4e, 0x76, 0x85, 0x67, 0xba, 0x23, 0x8f, 0xdd, 0xaf, 0x09},
- subYX: fp.Elt{0xb4, 0x38, 0xc8, 0xff, 0x4f, 0x65, 0x2a, 0x7e, 0xad, 0xb1, 0xc6, 0xb9, 0x3d, 0xd6, 0xf7, 0x14, 0xcf, 0xf6, 0x98, 0x75, 0xbb, 0x47, 0x83, 0x90, 0xe7, 0xe1, 0xf6, 0x14, 0x99, 0x7e, 0xfa, 0xe4, 0x77, 0x24, 0xe3, 0xe7, 0xf0, 0x1e, 0xdb, 0x27, 0x4e, 0x16, 0x04, 0xf2, 0x08, 0x52, 0xfc, 0xec, 0x55, 0xdb, 0x2e, 0x67, 0xe1, 0x94, 0x32, 0x89},
- dt2: fp.Elt{0x00, 0xad, 0x03, 0x35, 0x1a, 0xb1, 0x88, 0xf0, 0xc9, 0x11, 0xe4, 0x12, 0x52, 0x61, 0xfd, 0x8a, 0x1b, 0x6a, 0x0a, 0x4c, 0x42, 0x46, 0x22, 0x0e, 0xa5, 0xf9, 0xe2, 0x50, 0xf2, 0xb2, 0x1f, 0x20, 0x78, 0x10, 0xf6, 0xbf, 0x7f, 0x0c, 0x9c, 0xad, 0x40, 0x8b, 0x82, 0xd4, 0xba, 0x69, 0x09, 0xac, 0x4b, 0x6d, 0xc4, 0x49, 0x17, 0x81, 0x57, 0x3b},
- },
- { /* 51P*/
- addYX: fp.Elt{0x0d, 0xfe, 0xb4, 0x35, 0x11, 0xbd, 0x1d, 0x6b, 0xc2, 0xc5, 0x3b, 0xd2, 0x23, 0x2c, 0x72, 0xe3, 0x48, 0xb1, 0x48, 0x73, 0xfb, 0xa3, 0x21, 0x6e, 0xc0, 0x09, 0x69, 0xac, 0xe1, 0x60, 0xbc, 0x24, 0x03, 0x99, 0x63, 0x0a, 0x00, 0xf0, 0x75, 0xf6, 0x92, 0xc5, 0xd6, 0xdb, 0x51, 0xd4, 0x7d, 0xe6, 0xf4, 0x11, 0x79, 0xd7, 0xc3, 0xaf, 0x48, 0xd0},
- subYX: fp.Elt{0xf4, 0x4f, 0xaf, 0x31, 0xe3, 0x10, 0x89, 0x95, 0xf0, 0x8a, 0xf6, 0x31, 0x9f, 0x48, 0x02, 0xba, 0x42, 0x2b, 0x3c, 0x22, 0x8b, 0xcc, 0x12, 0x98, 0x6e, 0x7a, 0x64, 0x3a, 0xc4, 0xca, 0x32, 0x2a, 0x72, 0xf8, 0x2c, 0xcf, 0x78, 0x5e, 0x7a, 0x75, 0x6e, 0x72, 0x46, 0x48, 0x62, 0x28, 0xac, 0x58, 0x1a, 0xc6, 0x59, 0x88, 0x2a, 0x44, 0x9e, 0x83},
- dt2: fp.Elt{0xb3, 0xde, 0x36, 0xfd, 0xeb, 0x1b, 0xd4, 0x24, 0x1b, 0x08, 0x8c, 0xfe, 0xa9, 0x41, 0xa1, 0x64, 0xf2, 0x6d, 0xdb, 0xf9, 0x94, 0xae, 0x86, 0x71, 0xab, 0x10, 0xbf, 0xa3, 0xb2, 0xa0, 0xdf, 0x10, 0x8c, 0x74, 0xce, 0xb3, 0xfc, 0xdb, 0xba, 0x15, 0xf6, 0x91, 0x7a, 0x9c, 0x36, 0x1e, 0x45, 0x07, 0x3c, 0xec, 0x1a, 0x61, 0x26, 0x93, 0xe3, 0x50},
- },
- { /* 53P*/
- addYX: fp.Elt{0xc5, 0x50, 0xc5, 0x83, 0xb0, 0xbd, 0xd9, 0xf6, 0x6d, 0x15, 0x5e, 0xc1, 0x1a, 0x33, 0xa0, 0xce, 0x13, 0x70, 0x3b, 0xe1, 0x31, 0xc6, 0xc4, 0x02, 0xec, 0x8c, 0xd5, 0x9c, 0x97, 0xd3, 0x12, 0xc4, 0xa2, 0xf9, 0xd5, 0xfb, 0x22, 0x69, 0x94, 0x09, 0x2f, 0x59, 0xce, 0xdb, 0xf2, 0xf2, 0x00, 0xe0, 0xa9, 0x08, 0x44, 0x2e, 0x8b, 0x6b, 0xf5, 0xb3},
- subYX: fp.Elt{0x90, 0xdd, 0xec, 0xa2, 0x65, 0xb7, 0x61, 0xbc, 0xaa, 0x70, 0xa2, 0x15, 0xd8, 0xb0, 0xf8, 0x8e, 0x23, 0x3d, 0x9f, 0x46, 0xa3, 0x29, 0x20, 0xd1, 0xa1, 0x15, 0x81, 0xc6, 0xb6, 0xde, 0xbe, 0x60, 0x63, 0x24, 0xac, 0x15, 0xfb, 0xeb, 0xd3, 0xea, 0x57, 0x13, 0x86, 0x38, 0x1e, 0x22, 0xf4, 0x8c, 0x5d, 0xaf, 0x1b, 0x27, 0x21, 0x4f, 0xa3, 0x63},
- dt2: fp.Elt{0x07, 0x15, 0x87, 0xc4, 0xfd, 0xa1, 0x97, 0x7a, 0x07, 0x1f, 0x56, 0xcc, 0xe3, 0x6a, 0x01, 0x90, 0xce, 0xf9, 0xfa, 0x50, 0xb2, 0xe0, 0x87, 0x8b, 0x6c, 0x63, 0x6c, 0xf6, 0x2a, 0x09, 0xef, 0xef, 0xd2, 0x31, 0x40, 0x25, 0xf6, 0x84, 0xcb, 0xe0, 0xc4, 0x23, 0xc1, 0xcb, 0xe2, 0x02, 0x83, 0x2d, 0xed, 0x74, 0x74, 0x8b, 0xf8, 0x7c, 0x81, 0x18},
- },
- { /* 55P*/
- addYX: fp.Elt{0x9e, 0xe5, 0x59, 0x95, 0x63, 0x2e, 0xac, 0x8b, 0x03, 0x3c, 0xc1, 0x8e, 0xe1, 0x5b, 0x56, 0x3c, 0x16, 0x41, 0xe4, 0xc2, 0x60, 0x0c, 0x6d, 0x65, 0x9f, 0xfc, 0x27, 0x68, 0x43, 0x44, 0x05, 0x12, 0x6c, 0xda, 0x04, 0xef, 0xcf, 0xcf, 0xdc, 0x0a, 0x1a, 0x7f, 0x12, 0xd3, 0xeb, 0x02, 0xb6, 0x04, 0xca, 0xd6, 0xcb, 0xf0, 0x22, 0xba, 0x35, 0x6d},
- subYX: fp.Elt{0x09, 0x6d, 0xf9, 0x64, 0x4c, 0xe6, 0x41, 0xff, 0x01, 0x4d, 0xce, 0x1e, 0xfa, 0x38, 0xa2, 0x25, 0x62, 0xff, 0x03, 0x39, 0x18, 0x91, 0xbb, 0x9d, 0xce, 0x02, 0xf0, 0xf1, 0x3c, 0x55, 0x18, 0xa9, 0xab, 0x4d, 0xd2, 0x35, 0xfd, 0x8d, 0xa9, 0xb2, 0xad, 0xb7, 0x06, 0x6e, 0xc6, 0x69, 0x49, 0xd6, 0x98, 0x98, 0x0b, 0x22, 0x81, 0x6b, 0xbd, 0xa0},
- dt2: fp.Elt{0x22, 0xf4, 0x85, 0x5d, 0x2b, 0xf1, 0x55, 0xa5, 0xd6, 0x27, 0x86, 0x57, 0x12, 0x1f, 0x16, 0x0a, 0x5a, 0x9b, 0xf2, 0x38, 0xb6, 0x28, 0xd8, 0x99, 0x0c, 0x89, 0x1d, 0x7f, 0xca, 0x21, 0x17, 0x1a, 0x0b, 0x02, 0x5f, 0x77, 0x2f, 0x73, 0x30, 0x7c, 0xc8, 0xd7, 0x2b, 0xcc, 0xe7, 0xf3, 0x21, 0xac, 0x53, 0xa7, 0x11, 0x5d, 0xd8, 0x1d, 0x9b, 0xf5},
- },
- { /* 57P*/
- addYX: fp.Elt{0x94, 0x63, 0x5d, 0xef, 0xfd, 0x6d, 0x25, 0x4e, 0x6d, 0x29, 0x03, 0xed, 0x24, 0x28, 0x27, 0x57, 0x47, 0x3e, 0x6a, 0x1a, 0xfe, 0x37, 0xee, 0x5f, 0x83, 0x29, 0x14, 0xfd, 0x78, 0x25, 0x8a, 0xe1, 0x02, 0x38, 0xd8, 0xca, 0x65, 0x55, 0x40, 0x7d, 0x48, 0x2c, 0x7c, 0x7e, 0x60, 0xb6, 0x0c, 0x6d, 0xf7, 0xe8, 0xb3, 0x62, 0x53, 0xd6, 0x9c, 0x2b},
- subYX: fp.Elt{0x47, 0x25, 0x70, 0x62, 0xf5, 0x65, 0x93, 0x62, 0x08, 0xac, 0x59, 0x66, 0xdb, 0x08, 0xd9, 0x1a, 0x19, 0xaf, 0xf4, 0xef, 0x02, 0xa2, 0x78, 0xa9, 0x55, 0x1c, 0xfa, 0x08, 0x11, 0xcb, 0xa3, 0x71, 0x74, 0xb1, 0x62, 0xe7, 0xc7, 0xf3, 0x5a, 0xb5, 0x8b, 0xd4, 0xf6, 0x10, 0x57, 0x79, 0x72, 0x2f, 0x13, 0x86, 0x7b, 0x44, 0x5f, 0x48, 0xfd, 0x88},
- dt2: fp.Elt{0x10, 0x02, 0xcd, 0x05, 0x9a, 0xc3, 0x32, 0x6d, 0x10, 0x3a, 0x74, 0xba, 0x06, 0xc4, 0x3b, 0x34, 0xbc, 0x36, 0xed, 0xa3, 0xba, 0x9a, 0xdb, 0x6d, 0xd4, 0x69, 0x99, 0x97, 0xd0, 0xe4, 0xdd, 0xf5, 0xd4, 0x7c, 0xd3, 0x4e, 0xab, 0xd1, 0x3b, 0xbb, 0xe9, 0xc7, 0x6a, 0x94, 0x25, 0x61, 0xf0, 0x06, 0xc5, 0x12, 0xa8, 0x86, 0xe5, 0x35, 0x46, 0xeb},
- },
- { /* 59P*/
- addYX: fp.Elt{0x9e, 0x95, 0x11, 0xc6, 0xc7, 0xe8, 0xee, 0x5a, 0x26, 0xa0, 0x72, 0x72, 0x59, 0x91, 0x59, 0x16, 0x49, 0x99, 0x7e, 0xbb, 0xd7, 0x15, 0xb4, 0xf2, 0x40, 0xf9, 0x5a, 0x4d, 0xc8, 0xa0, 0xe2, 0x34, 0x7b, 0x34, 0xf3, 0x99, 0xbf, 0xa9, 0xf3, 0x79, 0xc1, 0x1a, 0x0c, 0xf4, 0x86, 0x74, 0x4e, 0xcb, 0xbc, 0x90, 0xad, 0xb6, 0x51, 0x6d, 0xaa, 0x33},
- subYX: fp.Elt{0x9f, 0xd1, 0xc5, 0xa2, 0x6c, 0x24, 0x88, 0x15, 0x71, 0x68, 0xf6, 0x07, 0x45, 0x02, 0xc4, 0x73, 0x7e, 0x75, 0x87, 0xca, 0x7c, 0xf0, 0x92, 0x00, 0x75, 0xd6, 0x5a, 0xdd, 0xe0, 0x64, 0x16, 0x9d, 0x62, 0x80, 0x33, 0x9f, 0xf4, 0x8e, 0x1a, 0x15, 0x1c, 0xd3, 0x0f, 0x4d, 0x4f, 0x62, 0x2d, 0xd7, 0xa5, 0x77, 0xe3, 0xea, 0xf0, 0xfb, 0x1a, 0xdb},
- dt2: fp.Elt{0x6a, 0xa2, 0xb1, 0xaa, 0xfb, 0x5a, 0x32, 0x4e, 0xff, 0x47, 0x06, 0xd5, 0x9a, 0x4f, 0xce, 0x83, 0x5b, 0x82, 0x34, 0x3e, 0x47, 0xb8, 0xf8, 0xe9, 0x7c, 0x67, 0x69, 0x8d, 0x9c, 0xb7, 0xde, 0x57, 0xf4, 0x88, 0x41, 0x56, 0x0c, 0x87, 0x1e, 0xc9, 0x2f, 0x54, 0xbf, 0x5c, 0x68, 0x2c, 0xd9, 0xc4, 0xef, 0x53, 0x73, 0x1e, 0xa6, 0x38, 0x02, 0x10},
- },
- { /* 61P*/
- addYX: fp.Elt{0x08, 0x80, 0x4a, 0xc9, 0xb7, 0xa8, 0x88, 0xd9, 0xfc, 0x6a, 0xc0, 0x3e, 0xc2, 0x33, 0x4d, 0x2b, 0x2a, 0xa3, 0x6d, 0x72, 0x3e, 0xdc, 0x34, 0x68, 0x08, 0xbf, 0x27, 0xef, 0xf4, 0xff, 0xe2, 0x0c, 0x31, 0x0c, 0xa2, 0x0a, 0x1f, 0x65, 0xc1, 0x4c, 0x61, 0xd3, 0x1b, 0xbc, 0x25, 0xb1, 0xd0, 0xd4, 0x89, 0xb2, 0x53, 0xfb, 0x43, 0xa5, 0xaf, 0x04},
- subYX: fp.Elt{0xe3, 0xe1, 0x37, 0xad, 0x58, 0xa9, 0x55, 0x81, 0xee, 0x64, 0x21, 0xb9, 0xf5, 0x4c, 0x35, 0xea, 0x4a, 0xd3, 0x26, 0xaa, 0x90, 0xd4, 0x60, 0x46, 0x09, 0x4b, 0x4a, 0x62, 0xf9, 0xcd, 0xe1, 0xee, 0xbb, 0xc2, 0x09, 0x0b, 0xb0, 0x96, 0x8e, 0x43, 0x77, 0xaf, 0x25, 0x20, 0x5e, 0x47, 0xe4, 0x1d, 0x50, 0x69, 0x74, 0x08, 0xd7, 0xb9, 0x90, 0x13},
- dt2: fp.Elt{0x51, 0x91, 0x95, 0x64, 0x03, 0x16, 0xfd, 0x6e, 0x26, 0x94, 0x6b, 0x61, 0xe7, 0xd9, 0xe0, 0x4a, 0x6d, 0x7c, 0xfa, 0xc0, 0xe2, 0x43, 0x23, 0x53, 0x70, 0xf5, 0x6f, 0x73, 0x8b, 0x81, 0xb0, 0x0c, 0xee, 0x2e, 0x46, 0xf2, 0x8d, 0xa6, 0xfb, 0xb5, 0x1c, 0x33, 0xbf, 0x90, 0x59, 0xc9, 0x7c, 0xb8, 0x6f, 0xad, 0x75, 0x02, 0x90, 0x8e, 0x59, 0x75},
- },
- { /* 63P*/
- addYX: fp.Elt{0x36, 0x4d, 0x77, 0x04, 0xb8, 0x7d, 0x4a, 0xd1, 0xc5, 0xbb, 0x7b, 0x50, 0x5f, 0x8d, 0x9d, 0x62, 0x0f, 0x66, 0x71, 0xec, 0x87, 0xc5, 0x80, 0x82, 0xc8, 0xf4, 0x6a, 0x94, 0x92, 0x5b, 0xb0, 0x16, 0x9b, 0xb2, 0xc9, 0x6f, 0x2b, 0x2d, 0xee, 0x95, 0x73, 0x2e, 0xc2, 0x1b, 0xc5, 0x55, 0x36, 0x86, 0x24, 0xf8, 0x20, 0x05, 0x0d, 0x93, 0xd7, 0x76},
- subYX: fp.Elt{0x7f, 0x01, 0xeb, 0x2e, 0x48, 0x4d, 0x1d, 0xf1, 0x06, 0x7e, 0x7c, 0x2a, 0x43, 0xbf, 0x28, 0xac, 0xe9, 0x58, 0x13, 0xc8, 0xbf, 0x8e, 0xc0, 0xef, 0xe8, 0x4f, 0x46, 0x8a, 0xe7, 0xc0, 0xf6, 0x0f, 0x0a, 0x03, 0x48, 0x91, 0x55, 0x39, 0x2a, 0xe3, 0xdc, 0xf6, 0x22, 0x9d, 0x4d, 0x71, 0x55, 0x68, 0x25, 0x6e, 0x95, 0x52, 0xee, 0x4c, 0xd9, 0x01},
- dt2: fp.Elt{0xac, 0x33, 0x3f, 0x7c, 0x27, 0x35, 0x15, 0x91, 0x33, 0x8d, 0xf9, 0xc4, 0xf4, 0xf3, 0x90, 0x09, 0x75, 0x69, 0x62, 0x9f, 0x61, 0x35, 0x83, 0x92, 0x04, 0xef, 0x96, 0x38, 0x80, 0x9e, 0x88, 0xb3, 0x67, 0x95, 0xbe, 0x79, 0x3c, 0x35, 0xd8, 0xdc, 0xb2, 0x3e, 0x2d, 0xe6, 0x46, 0xbe, 0x81, 0xf3, 0x32, 0x0e, 0x37, 0x23, 0x75, 0x2a, 0x3d, 0xa0},
- },
-}
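
The table deleted above stores odd multiples of the twist generator (the entries visible here run 9P through 63P) in a precomputed-affine form: addYX, subYX and dt2, which by their names hold y+x, y-x and 2*d*x*y. The fixed-base code below never indexes this table with a secret value; its Lookup scans every entry and keeps one with branch-free moves. A minimal, self-contained sketch of that pattern, with a single-uint64 point standing in for the real preTwistPointAffine struct:

package main

import (
	"crypto/subtle"
	"fmt"
)

// point stands in for the table's preTwistPointAffine entries
// (addYX, subYX, dt2 in the real code); one uint64 keeps the
// sketch self-contained.
type point struct{ v uint64 }

// cmov copies q into p when bit == 1, without branching.
func (p *point) cmov(q *point, bit uint64) {
	mask := -bit // all-zeros or all-ones
	p.v = (p.v &^ mask) | (q.v & mask)
}

// lookup touches every table entry and keeps only entry u, so the
// memory access pattern is independent of the secret index u.
func lookup(table []point, u int32) point {
	var r point
	for k := range table {
		r.cmov(&table[k], uint64(subtle.ConstantTimeEq(int32(k), u)))
	}
	return r
}

func main() {
	table := []point{{3}, {5}, {7}, {9}} // odd multiples, schematically
	fmt.Println(lookup(table, 2).v)      // 7
}

Scanning the whole table costs a few extra loads, but it keeps the selected index out of the cache and branch-predictor side channels.
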
diff --git a/vendor/github.com/cloudflare/circl/ecc/goldilocks/twist_basemult.go b/vendor/github.com/cloudflare/circl/ecc/goldilocks/twist_basemult.go
deleted file mode 100644
index f6ac5edb..00000000
--- a/vendor/github.com/cloudflare/circl/ecc/goldilocks/twist_basemult.go
+++ /dev/null
@@ -1,62 +0,0 @@
-package goldilocks
-
-import (
- "crypto/subtle"
-
- mlsb "github.com/cloudflare/circl/math/mlsbset"
-)
-
-const (
- // MLSBRecoding parameters
- fxT = 448
- fxV = 2
- fxW = 3
- fx2w1 = 1 << (uint(fxW) - 1)
-)
-
-// ScalarBaseMult returns kG where G is the generator point.
-func (e twistCurve) ScalarBaseMult(k *Scalar) *twistPoint {
- m, err := mlsb.New(fxT, fxV, fxW)
- if err != nil {
- panic(err)
- }
- if m.IsExtended() {
- panic("not extended")
- }
-
- var isZero int
- if k.IsZero() {
- isZero = 1
- }
- subtle.ConstantTimeCopy(isZero, k[:], order[:])
-
- minusK := *k
- isEven := 1 - int(k[0]&0x1)
- minusK.Neg()
- subtle.ConstantTimeCopy(isEven, k[:], minusK[:])
- c, err := m.Encode(k[:])
- if err != nil {
- panic(err)
- }
-
- gP := c.Exp(groupMLSB{})
- P := gP.(*twistPoint)
- P.cneg(uint(isEven))
- return P
-}
-
-type groupMLSB struct{}
-
-func (e groupMLSB) ExtendedEltP() mlsb.EltP { return nil }
-func (e groupMLSB) Sqr(x mlsb.EltG) { x.(*twistPoint).Double() }
-func (e groupMLSB) Mul(x mlsb.EltG, y mlsb.EltP) { x.(*twistPoint).mixAddZ1(y.(*preTwistPointAffine)) }
-func (e groupMLSB) Identity() mlsb.EltG { return twistCurve{}.Identity() }
-func (e groupMLSB) NewEltP() mlsb.EltP { return &preTwistPointAffine{} }
-func (e groupMLSB) Lookup(a mlsb.EltP, v uint, s, u int32) {
- Tabj := &tabFixMult[v]
- P := a.(*preTwistPointAffine)
- for k := range Tabj {
- P.cmov(&Tabj[k], uint(subtle.ConstantTimeEq(int32(k), u)))
- }
- P.cneg(int(s >> 31))
-}
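
ScalarBaseMult above leans on a parity trick: the MLSB-set encoding only accepts odd scalars, so an even k is swapped for -k mod the group order in constant time (the subtle.ConstantTimeCopy calls), and the final point is conditionally negated to compensate. A schematic sketch of that selection step; the little-endian schoolbook subtraction is a stand-in for the real Scalar.Neg, which reduces modulo the order:

package main

import (
	"crypto/subtle"
	"fmt"
)

// forceOdd replaces k with order-k when k is even, returning 1 if the
// caller must negate the final point. The parity of k never influences
// control flow; only constant-time copies are used. (order and the
// byte-slice layout are schematic stand-ins for the real Scalar type.)
func forceOdd(k, order []byte) (mustNegate int) {
	minusK := make([]byte, len(k))
	// minusK = order - k, little-endian schoolbook subtraction.
	var borrow int
	for i := range k {
		d := int(order[i]) - int(k[i]) - borrow
		borrow = (d >> 8) & 1
		minusK[i] = byte(d)
	}
	isEven := 1 - int(k[0]&1)
	subtle.ConstantTimeCopy(isEven, k, minusK)
	return isEven
}

func main() {
	k := []byte{4, 0}     // even scalar
	order := []byte{7, 0} // schematic group order
	neg := forceOdd(k, order)
	fmt.Println(k, neg) // [3 0] 1  -> compute (order-k)G, then negate
}
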
diff --git a/vendor/github.com/cloudflare/circl/internal/conv/conv.go b/vendor/github.com/cloudflare/circl/internal/conv/conv.go
deleted file mode 100644
index 649a8e93..00000000
--- a/vendor/github.com/cloudflare/circl/internal/conv/conv.go
+++ /dev/null
@@ -1,140 +0,0 @@
-package conv
-
-import (
- "encoding/binary"
- "fmt"
- "math/big"
- "strings"
-)
-
-// BytesLe2Hex returns a hexadecimal string of a number stored in a
-// little-endian order slice x.
-func BytesLe2Hex(x []byte) string {
- b := &strings.Builder{}
- b.Grow(2*len(x) + 2)
- fmt.Fprint(b, "0x")
- if len(x) == 0 {
- fmt.Fprint(b, "00")
- }
- for i := len(x) - 1; i >= 0; i-- {
- fmt.Fprintf(b, "%02x", x[i])
- }
- return b.String()
-}
-
-// BytesLe2BigInt converts a little-endian slice x into a big-endian
-// math/big.Int.
-func BytesLe2BigInt(x []byte) *big.Int {
- n := len(x)
- b := new(big.Int)
- if len(x) > 0 {
- y := make([]byte, n)
- for i := 0; i < n; i++ {
- y[n-1-i] = x[i]
- }
- b.SetBytes(y)
- }
- return b
-}
-
-// BytesBe2Uint64Le converts a big-endian slice x to a little-endian slice of uint64.
-func BytesBe2Uint64Le(x []byte) []uint64 {
- l := len(x)
- z := make([]uint64, (l+7)/8)
- blocks := l / 8
- for i := 0; i < blocks; i++ {
- z[i] = binary.BigEndian.Uint64(x[l-8*(i+1):])
- }
- remBytes := l % 8
- for i := 0; i < remBytes; i++ {
- z[blocks] |= uint64(x[l-1-8*blocks-i]) << uint(8*i)
- }
- return z
-}
-
-// BigInt2BytesLe stores a positive big.Int number x into a little-endian slice z.
-// The slice is modified if the bitlength of x <= 8*len(z) (padding with zeros).
-// If x does not fit in the slice or is negative, z is not modified.
-func BigInt2BytesLe(z []byte, x *big.Int) {
- xLen := (x.BitLen() + 7) >> 3
- zLen := len(z)
- if zLen >= xLen && x.Sign() >= 0 {
- y := x.Bytes()
- for i := 0; i < xLen; i++ {
- z[i] = y[xLen-1-i]
- }
- for i := xLen; i < zLen; i++ {
- z[i] = 0
- }
- }
-}
-
-// Uint64Le2BigInt converts a little-endian slice x into a big number.
-func Uint64Le2BigInt(x []uint64) *big.Int {
- n := len(x)
- b := new(big.Int)
- var bi big.Int
- for i := n - 1; i >= 0; i-- {
- bi.SetUint64(x[i])
- b.Lsh(b, 64)
- b.Add(b, &bi)
- }
- return b
-}
-
-// Uint64Le2BytesLe converts a little-endian slice x to a little-endian slice of bytes.
-func Uint64Le2BytesLe(x []uint64) []byte {
- b := make([]byte, 8*len(x))
- n := len(x)
- for i := 0; i < n; i++ {
- binary.LittleEndian.PutUint64(b[i*8:], x[i])
- }
- return b
-}
-
-// Uint64Le2BytesBe converts a little-endian slice x to a big-endian slice of bytes.
-func Uint64Le2BytesBe(x []uint64) []byte {
- b := make([]byte, 8*len(x))
- n := len(x)
- for i := 0; i < n; i++ {
- binary.BigEndian.PutUint64(b[i*8:], x[n-1-i])
- }
- return b
-}
-
-// Uint64Le2Hex returns a hexadecimal string of a number stored in a
-// little-endian order slice x.
-func Uint64Le2Hex(x []uint64) string {
- b := new(strings.Builder)
- b.Grow(16*len(x) + 2)
- fmt.Fprint(b, "0x")
- if len(x) == 0 {
- fmt.Fprint(b, "00")
- }
- for i := len(x) - 1; i >= 0; i-- {
- fmt.Fprintf(b, "%016x", x[i])
- }
- return b.String()
-}
-
-// BigInt2Uint64Le stores a positive big.Int number x into a little-endian slice z.
-// The slice is modified if the bitlength of x <= 64*len(z) (padding with zeros).
-// If x does not fit in the slice or is negative, z is not modified.
-func BigInt2Uint64Le(z []uint64, x *big.Int) {
- xLen := (x.BitLen() + 63) >> 6 // number of 64-bit words
- zLen := len(z)
- if zLen >= xLen && x.Sign() > 0 {
- var y, yi big.Int
- y.Set(x)
- two64 := big.NewInt(1)
- two64.Lsh(two64, 64).Sub(two64, big.NewInt(1))
- for i := 0; i < xLen; i++ {
- yi.And(&y, two64)
- z[i] = yi.Uint64()
- y.Rsh(&y, 64)
- }
- }
- for i := xLen; i < zLen; i++ {
- z[i] = 0
- }
-}
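
Everything in the deleted conv package is an endianness bridge: math/big speaks big-endian, while the field arithmetic stores bytes and limbs little-endian, so each helper is a reversal plus a SetBytes/Bytes (or binary.*Endian) call. The core move, which is all BytesLe2BigInt does internally:

package main

import (
	"fmt"
	"math/big"
)

// Reverse a little-endian byte slice into the big-endian form that
// math/big expects, then parse it.
func main() {
	le := []byte{0x0a, 0x00, 0x01} // 0x01000a stored little-endian
	be := make([]byte, len(le))
	for i, b := range le {
		be[len(le)-1-i] = b
	}
	n := new(big.Int).SetBytes(be)
	fmt.Printf("%#x\n", n) // 0x1000a
}
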
diff --git a/vendor/github.com/cloudflare/circl/internal/sha3/doc.go b/vendor/github.com/cloudflare/circl/internal/sha3/doc.go
deleted file mode 100644
index 7e023090..00000000
--- a/vendor/github.com/cloudflare/circl/internal/sha3/doc.go
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package sha3 implements the SHA-3 fixed-output-length hash functions and
-// the SHAKE variable-output-length hash functions defined by FIPS-202.
-//
-// Both types of hash function use the "sponge" construction and the Keccak
-// permutation. For a detailed specification see http://keccak.noekeon.org/
-//
-// # Guidance
-//
-// If you aren't sure what function you need, use SHAKE256 with at least 64
-// bytes of output. The SHAKE instances are faster than the SHA3 instances;
-// the latter have to allocate memory to conform to the hash.Hash interface.
-//
-// If you need a secret-key MAC (message authentication code), prepend the
-// secret key to the input, hash with SHAKE256 and read at least 32 bytes of
-// output.
-//
-// # Security strengths
-//
-// The SHA3-x (x equals 224, 256, 384, or 512) functions have a security
-// strength against preimage attacks of x bits. Since they only produce "x"
-// bits of output, their collision-resistance is only "x/2" bits.
-//
-// The SHAKE-256 and -128 functions have a generic security strength of 256 and
-// 128 bits against all attacks, provided that at least 2x bits of their output
-// is used. Requesting more than 64 or 32 bytes of output, respectively, does
-// not increase the collision-resistance of the SHAKE functions.
-//
-// # The sponge construction
-//
-// A sponge builds a pseudo-random function from a public pseudo-random
-// permutation, by applying the permutation to a state of "rate + capacity"
-// bytes, but hiding "capacity" of the bytes.
-//
-// A sponge starts out with a zero state. To hash an input using a sponge, up
-// to "rate" bytes of the input are XORed into the sponge's state. The sponge
-// is then "full" and the permutation is applied to "empty" it. This process is
-// repeated until all the input has been "absorbed". The input is then padded.
-// The digest is "squeezed" from the sponge in the same way, except that output
-// is copied out instead of input being XORed in.
-//
-// A sponge is parameterized by its generic security strength, which is equal
-// to half its capacity; capacity + rate is equal to the permutation's width.
-// Since the KeccakF-1600 permutation is 1600 bits (200 bytes) wide, this means
-// that the security strength of a sponge instance is equal to (1600 - bitrate) / 2.
-//
-// # Recommendations
-//
-// The SHAKE functions are recommended for most new uses. They can produce
-// output of arbitrary length. SHAKE256, with an output length of at least
-// 64 bytes, provides 256-bit security against all attacks. The Keccak team
-// recommends it for most applications upgrading from SHA2-512. (NIST chose a
-// much stronger, but much slower, sponge instance for SHA3-512.)
-//
-// The SHA-3 functions are "drop-in" replacements for the SHA-2 functions.
-// They produce output of the same length, with the same security strengths
-// against all attacks. This means, in particular, that SHA3-256 only has
-// 128-bit collision resistance, because its output length is 32 bytes.
-package sha3
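
The MAC guidance above (prepend the secret key, hash with SHAKE256, read at least 32 bytes of output) translates directly into code. A sketch against the public golang.org/x/crypto/sha3 package, since the copy being deleted here is an internal vendored package; the key and message values are placeholders:

package main

import (
	"fmt"

	"golang.org/x/crypto/sha3"
)

// A secret-key MAC per the package doc: key || message into SHAKE256,
// then squeeze a 32-byte tag.
func main() {
	key := []byte("0123456789abcdef0123456789abcdef")
	msg := []byte("attack at dawn")

	h := sha3.NewShake256()
	h.Write(key)
	h.Write(msg)

	tag := make([]byte, 32)
	h.Read(tag)
	fmt.Printf("%x\n", tag)
}
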
diff --git a/vendor/github.com/cloudflare/circl/internal/sha3/hashes.go b/vendor/github.com/cloudflare/circl/internal/sha3/hashes.go
deleted file mode 100644
index 7d2365a7..00000000
--- a/vendor/github.com/cloudflare/circl/internal/sha3/hashes.go
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package sha3
-
-// This file provides functions for creating instances of the SHA-3
-// and SHAKE hash functions, as well as utility functions for hashing
-// bytes.
-
-// New224 creates a new SHA3-224 hash.
-// Its generic security strength is 224 bits against preimage attacks,
-// and 112 bits against collision attacks.
-func New224() State {
- return State{rate: 144, outputLen: 28, dsbyte: 0x06}
-}
-
-// New256 creates a new SHA3-256 hash.
-// Its generic security strength is 256 bits against preimage attacks,
-// and 128 bits against collision attacks.
-func New256() State {
- return State{rate: 136, outputLen: 32, dsbyte: 0x06}
-}
-
-// New384 creates a new SHA3-384 hash.
-// Its generic security strength is 384 bits against preimage attacks,
-// and 192 bits against collision attacks.
-func New384() State {
- return State{rate: 104, outputLen: 48, dsbyte: 0x06}
-}
-
-// New512 creates a new SHA3-512 hash.
-// Its generic security strength is 512 bits against preimage attacks,
-// and 256 bits against collision attacks.
-func New512() State {
- return State{rate: 72, outputLen: 64, dsbyte: 0x06}
-}
-
-// Sum224 returns the SHA3-224 digest of the data.
-func Sum224(data []byte) (digest [28]byte) {
- h := New224()
- _, _ = h.Write(data)
- h.Sum(digest[:0])
- return
-}
-
-// Sum256 returns the SHA3-256 digest of the data.
-func Sum256(data []byte) (digest [32]byte) {
- h := New256()
- _, _ = h.Write(data)
- h.Sum(digest[:0])
- return
-}
-
-// Sum384 returns the SHA3-384 digest of the data.
-func Sum384(data []byte) (digest [48]byte) {
- h := New384()
- _, _ = h.Write(data)
- h.Sum(digest[:0])
- return
-}
-
-// Sum512 returns the SHA3-512 digest of the data.
-func Sum512(data []byte) (digest [64]byte) {
- h := New512()
- _, _ = h.Write(data)
- h.Sum(digest[:0])
- return
-}
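
The rates hard-coded above follow from the sponge arithmetic described in doc.go: the capacity of SHA3-x is 2x bits, so the rate is 200 - 2*outputLen bytes of the 200-byte Keccak state. A quick check that reproduces all four constants:

package main

import "fmt"

// For each SHA-3 instance the capacity is twice the digest length, so
// rate = 200 - 2*outputLen bytes, the generic preimage strength is
// outputLen*8 bits, and collision resistance is half that.
func main() {
	for _, outLen := range []int{28, 32, 48, 64} { // SHA3-224..SHA3-512
		rate := 200 - 2*outLen
		fmt.Printf("SHA3-%d: rate=%d bytes, capacity=%d bits, collision=%d bits\n",
			outLen*8, rate, 16*outLen, outLen*4)
	}
}

It prints rates 144, 136, 104 and 72, matching New224 through New512.
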
diff --git a/vendor/github.com/cloudflare/circl/internal/sha3/keccakf.go b/vendor/github.com/cloudflare/circl/internal/sha3/keccakf.go
deleted file mode 100644
index ab19d0ad..00000000
--- a/vendor/github.com/cloudflare/circl/internal/sha3/keccakf.go
+++ /dev/null
@@ -1,383 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package sha3
-
-// KeccakF1600 applies the Keccak permutation to a 1600-bit-wide
-// state represented as a slice of 25 uint64s.
-// nolint:funlen
-func KeccakF1600(a *[25]uint64) {
- // Implementation translated from Keccak-inplace.c
- // in the keccak reference code.
- var t, bc0, bc1, bc2, bc3, bc4, d0, d1, d2, d3, d4 uint64
-
- for i := 0; i < 24; i += 4 {
- // Combines the 5 steps in each round into 2 steps.
- // Unrolls 4 rounds per loop and spreads some steps across rounds.
-
- // Round 1
- bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20]
- bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21]
- bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22]
- bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23]
- bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24]
- d0 = bc4 ^ (bc1<<1 | bc1>>63)
- d1 = bc0 ^ (bc2<<1 | bc2>>63)
- d2 = bc1 ^ (bc3<<1 | bc3>>63)
- d3 = bc2 ^ (bc4<<1 | bc4>>63)
- d4 = bc3 ^ (bc0<<1 | bc0>>63)
-
- bc0 = a[0] ^ d0
- t = a[6] ^ d1
- bc1 = t<<44 | t>>(64-44)
- t = a[12] ^ d2
- bc2 = t<<43 | t>>(64-43)
- t = a[18] ^ d3
- bc3 = t<<21 | t>>(64-21)
- t = a[24] ^ d4
- bc4 = t<<14 | t>>(64-14)
- a[0] = bc0 ^ (bc2 &^ bc1) ^ RC[i]
- a[6] = bc1 ^ (bc3 &^ bc2)
- a[12] = bc2 ^ (bc4 &^ bc3)
- a[18] = bc3 ^ (bc0 &^ bc4)
- a[24] = bc4 ^ (bc1 &^ bc0)
-
- t = a[10] ^ d0
- bc2 = t<<3 | t>>(64-3)
- t = a[16] ^ d1
- bc3 = t<<45 | t>>(64-45)
- t = a[22] ^ d2
- bc4 = t<<61 | t>>(64-61)
- t = a[3] ^ d3
- bc0 = t<<28 | t>>(64-28)
- t = a[9] ^ d4
- bc1 = t<<20 | t>>(64-20)
- a[10] = bc0 ^ (bc2 &^ bc1)
- a[16] = bc1 ^ (bc3 &^ bc2)
- a[22] = bc2 ^ (bc4 &^ bc3)
- a[3] = bc3 ^ (bc0 &^ bc4)
- a[9] = bc4 ^ (bc1 &^ bc0)
-
- t = a[20] ^ d0
- bc4 = t<<18 | t>>(64-18)
- t = a[1] ^ d1
- bc0 = t<<1 | t>>(64-1)
- t = a[7] ^ d2
- bc1 = t<<6 | t>>(64-6)
- t = a[13] ^ d3
- bc2 = t<<25 | t>>(64-25)
- t = a[19] ^ d4
- bc3 = t<<8 | t>>(64-8)
- a[20] = bc0 ^ (bc2 &^ bc1)
- a[1] = bc1 ^ (bc3 &^ bc2)
- a[7] = bc2 ^ (bc4 &^ bc3)
- a[13] = bc3 ^ (bc0 &^ bc4)
- a[19] = bc4 ^ (bc1 &^ bc0)
-
- t = a[5] ^ d0
- bc1 = t<<36 | t>>(64-36)
- t = a[11] ^ d1
- bc2 = t<<10 | t>>(64-10)
- t = a[17] ^ d2
- bc3 = t<<15 | t>>(64-15)
- t = a[23] ^ d3
- bc4 = t<<56 | t>>(64-56)
- t = a[4] ^ d4
- bc0 = t<<27 | t>>(64-27)
- a[5] = bc0 ^ (bc2 &^ bc1)
- a[11] = bc1 ^ (bc3 &^ bc2)
- a[17] = bc2 ^ (bc4 &^ bc3)
- a[23] = bc3 ^ (bc0 &^ bc4)
- a[4] = bc4 ^ (bc1 &^ bc0)
-
- t = a[15] ^ d0
- bc3 = t<<41 | t>>(64-41)
- t = a[21] ^ d1
- bc4 = t<<2 | t>>(64-2)
- t = a[2] ^ d2
- bc0 = t<<62 | t>>(64-62)
- t = a[8] ^ d3
- bc1 = t<<55 | t>>(64-55)
- t = a[14] ^ d4
- bc2 = t<<39 | t>>(64-39)
- a[15] = bc0 ^ (bc2 &^ bc1)
- a[21] = bc1 ^ (bc3 &^ bc2)
- a[2] = bc2 ^ (bc4 &^ bc3)
- a[8] = bc3 ^ (bc0 &^ bc4)
- a[14] = bc4 ^ (bc1 &^ bc0)
-
- // Round 2
- bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20]
- bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21]
- bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22]
- bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23]
- bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24]
- d0 = bc4 ^ (bc1<<1 | bc1>>63)
- d1 = bc0 ^ (bc2<<1 | bc2>>63)
- d2 = bc1 ^ (bc3<<1 | bc3>>63)
- d3 = bc2 ^ (bc4<<1 | bc4>>63)
- d4 = bc3 ^ (bc0<<1 | bc0>>63)
-
- bc0 = a[0] ^ d0
- t = a[16] ^ d1
- bc1 = t<<44 | t>>(64-44)
- t = a[7] ^ d2
- bc2 = t<<43 | t>>(64-43)
- t = a[23] ^ d3
- bc3 = t<<21 | t>>(64-21)
- t = a[14] ^ d4
- bc4 = t<<14 | t>>(64-14)
- a[0] = bc0 ^ (bc2 &^ bc1) ^ RC[i+1]
- a[16] = bc1 ^ (bc3 &^ bc2)
- a[7] = bc2 ^ (bc4 &^ bc3)
- a[23] = bc3 ^ (bc0 &^ bc4)
- a[14] = bc4 ^ (bc1 &^ bc0)
-
- t = a[20] ^ d0
- bc2 = t<<3 | t>>(64-3)
- t = a[11] ^ d1
- bc3 = t<<45 | t>>(64-45)
- t = a[2] ^ d2
- bc4 = t<<61 | t>>(64-61)
- t = a[18] ^ d3
- bc0 = t<<28 | t>>(64-28)
- t = a[9] ^ d4
- bc1 = t<<20 | t>>(64-20)
- a[20] = bc0 ^ (bc2 &^ bc1)
- a[11] = bc1 ^ (bc3 &^ bc2)
- a[2] = bc2 ^ (bc4 &^ bc3)
- a[18] = bc3 ^ (bc0 &^ bc4)
- a[9] = bc4 ^ (bc1 &^ bc0)
-
- t = a[15] ^ d0
- bc4 = t<<18 | t>>(64-18)
- t = a[6] ^ d1
- bc0 = t<<1 | t>>(64-1)
- t = a[22] ^ d2
- bc1 = t<<6 | t>>(64-6)
- t = a[13] ^ d3
- bc2 = t<<25 | t>>(64-25)
- t = a[4] ^ d4
- bc3 = t<<8 | t>>(64-8)
- a[15] = bc0 ^ (bc2 &^ bc1)
- a[6] = bc1 ^ (bc3 &^ bc2)
- a[22] = bc2 ^ (bc4 &^ bc3)
- a[13] = bc3 ^ (bc0 &^ bc4)
- a[4] = bc4 ^ (bc1 &^ bc0)
-
- t = a[10] ^ d0
- bc1 = t<<36 | t>>(64-36)
- t = a[1] ^ d1
- bc2 = t<<10 | t>>(64-10)
- t = a[17] ^ d2
- bc3 = t<<15 | t>>(64-15)
- t = a[8] ^ d3
- bc4 = t<<56 | t>>(64-56)
- t = a[24] ^ d4
- bc0 = t<<27 | t>>(64-27)
- a[10] = bc0 ^ (bc2 &^ bc1)
- a[1] = bc1 ^ (bc3 &^ bc2)
- a[17] = bc2 ^ (bc4 &^ bc3)
- a[8] = bc3 ^ (bc0 &^ bc4)
- a[24] = bc4 ^ (bc1 &^ bc0)
-
- t = a[5] ^ d0
- bc3 = t<<41 | t>>(64-41)
- t = a[21] ^ d1
- bc4 = t<<2 | t>>(64-2)
- t = a[12] ^ d2
- bc0 = t<<62 | t>>(64-62)
- t = a[3] ^ d3
- bc1 = t<<55 | t>>(64-55)
- t = a[19] ^ d4
- bc2 = t<<39 | t>>(64-39)
- a[5] = bc0 ^ (bc2 &^ bc1)
- a[21] = bc1 ^ (bc3 &^ bc2)
- a[12] = bc2 ^ (bc4 &^ bc3)
- a[3] = bc3 ^ (bc0 &^ bc4)
- a[19] = bc4 ^ (bc1 &^ bc0)
-
- // Round 3
- bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20]
- bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21]
- bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22]
- bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23]
- bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24]
- d0 = bc4 ^ (bc1<<1 | bc1>>63)
- d1 = bc0 ^ (bc2<<1 | bc2>>63)
- d2 = bc1 ^ (bc3<<1 | bc3>>63)
- d3 = bc2 ^ (bc4<<1 | bc4>>63)
- d4 = bc3 ^ (bc0<<1 | bc0>>63)
-
- bc0 = a[0] ^ d0
- t = a[11] ^ d1
- bc1 = t<<44 | t>>(64-44)
- t = a[22] ^ d2
- bc2 = t<<43 | t>>(64-43)
- t = a[8] ^ d3
- bc3 = t<<21 | t>>(64-21)
- t = a[19] ^ d4
- bc4 = t<<14 | t>>(64-14)
- a[0] = bc0 ^ (bc2 &^ bc1) ^ RC[i+2]
- a[11] = bc1 ^ (bc3 &^ bc2)
- a[22] = bc2 ^ (bc4 &^ bc3)
- a[8] = bc3 ^ (bc0 &^ bc4)
- a[19] = bc4 ^ (bc1 &^ bc0)
-
- t = a[15] ^ d0
- bc2 = t<<3 | t>>(64-3)
- t = a[1] ^ d1
- bc3 = t<<45 | t>>(64-45)
- t = a[12] ^ d2
- bc4 = t<<61 | t>>(64-61)
- t = a[23] ^ d3
- bc0 = t<<28 | t>>(64-28)
- t = a[9] ^ d4
- bc1 = t<<20 | t>>(64-20)
- a[15] = bc0 ^ (bc2 &^ bc1)
- a[1] = bc1 ^ (bc3 &^ bc2)
- a[12] = bc2 ^ (bc4 &^ bc3)
- a[23] = bc3 ^ (bc0 &^ bc4)
- a[9] = bc4 ^ (bc1 &^ bc0)
-
- t = a[5] ^ d0
- bc4 = t<<18 | t>>(64-18)
- t = a[16] ^ d1
- bc0 = t<<1 | t>>(64-1)
- t = a[2] ^ d2
- bc1 = t<<6 | t>>(64-6)
- t = a[13] ^ d3
- bc2 = t<<25 | t>>(64-25)
- t = a[24] ^ d4
- bc3 = t<<8 | t>>(64-8)
- a[5] = bc0 ^ (bc2 &^ bc1)
- a[16] = bc1 ^ (bc3 &^ bc2)
- a[2] = bc2 ^ (bc4 &^ bc3)
- a[13] = bc3 ^ (bc0 &^ bc4)
- a[24] = bc4 ^ (bc1 &^ bc0)
-
- t = a[20] ^ d0
- bc1 = t<<36 | t>>(64-36)
- t = a[6] ^ d1
- bc2 = t<<10 | t>>(64-10)
- t = a[17] ^ d2
- bc3 = t<<15 | t>>(64-15)
- t = a[3] ^ d3
- bc4 = t<<56 | t>>(64-56)
- t = a[14] ^ d4
- bc0 = t<<27 | t>>(64-27)
- a[20] = bc0 ^ (bc2 &^ bc1)
- a[6] = bc1 ^ (bc3 &^ bc2)
- a[17] = bc2 ^ (bc4 &^ bc3)
- a[3] = bc3 ^ (bc0 &^ bc4)
- a[14] = bc4 ^ (bc1 &^ bc0)
-
- t = a[10] ^ d0
- bc3 = t<<41 | t>>(64-41)
- t = a[21] ^ d1
- bc4 = t<<2 | t>>(64-2)
- t = a[7] ^ d2
- bc0 = t<<62 | t>>(64-62)
- t = a[18] ^ d3
- bc1 = t<<55 | t>>(64-55)
- t = a[4] ^ d4
- bc2 = t<<39 | t>>(64-39)
- a[10] = bc0 ^ (bc2 &^ bc1)
- a[21] = bc1 ^ (bc3 &^ bc2)
- a[7] = bc2 ^ (bc4 &^ bc3)
- a[18] = bc3 ^ (bc0 &^ bc4)
- a[4] = bc4 ^ (bc1 &^ bc0)
-
- // Round 4
- bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20]
- bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21]
- bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22]
- bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23]
- bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24]
- d0 = bc4 ^ (bc1<<1 | bc1>>63)
- d1 = bc0 ^ (bc2<<1 | bc2>>63)
- d2 = bc1 ^ (bc3<<1 | bc3>>63)
- d3 = bc2 ^ (bc4<<1 | bc4>>63)
- d4 = bc3 ^ (bc0<<1 | bc0>>63)
-
- bc0 = a[0] ^ d0
- t = a[1] ^ d1
- bc1 = t<<44 | t>>(64-44)
- t = a[2] ^ d2
- bc2 = t<<43 | t>>(64-43)
- t = a[3] ^ d3
- bc3 = t<<21 | t>>(64-21)
- t = a[4] ^ d4
- bc4 = t<<14 | t>>(64-14)
- a[0] = bc0 ^ (bc2 &^ bc1) ^ RC[i+3]
- a[1] = bc1 ^ (bc3 &^ bc2)
- a[2] = bc2 ^ (bc4 &^ bc3)
- a[3] = bc3 ^ (bc0 &^ bc4)
- a[4] = bc4 ^ (bc1 &^ bc0)
-
- t = a[5] ^ d0
- bc2 = t<<3 | t>>(64-3)
- t = a[6] ^ d1
- bc3 = t<<45 | t>>(64-45)
- t = a[7] ^ d2
- bc4 = t<<61 | t>>(64-61)
- t = a[8] ^ d3
- bc0 = t<<28 | t>>(64-28)
- t = a[9] ^ d4
- bc1 = t<<20 | t>>(64-20)
- a[5] = bc0 ^ (bc2 &^ bc1)
- a[6] = bc1 ^ (bc3 &^ bc2)
- a[7] = bc2 ^ (bc4 &^ bc3)
- a[8] = bc3 ^ (bc0 &^ bc4)
- a[9] = bc4 ^ (bc1 &^ bc0)
-
- t = a[10] ^ d0
- bc4 = t<<18 | t>>(64-18)
- t = a[11] ^ d1
- bc0 = t<<1 | t>>(64-1)
- t = a[12] ^ d2
- bc1 = t<<6 | t>>(64-6)
- t = a[13] ^ d3
- bc2 = t<<25 | t>>(64-25)
- t = a[14] ^ d4
- bc3 = t<<8 | t>>(64-8)
- a[10] = bc0 ^ (bc2 &^ bc1)
- a[11] = bc1 ^ (bc3 &^ bc2)
- a[12] = bc2 ^ (bc4 &^ bc3)
- a[13] = bc3 ^ (bc0 &^ bc4)
- a[14] = bc4 ^ (bc1 &^ bc0)
-
- t = a[15] ^ d0
- bc1 = t<<36 | t>>(64-36)
- t = a[16] ^ d1
- bc2 = t<<10 | t>>(64-10)
- t = a[17] ^ d2
- bc3 = t<<15 | t>>(64-15)
- t = a[18] ^ d3
- bc4 = t<<56 | t>>(64-56)
- t = a[19] ^ d4
- bc0 = t<<27 | t>>(64-27)
- a[15] = bc0 ^ (bc2 &^ bc1)
- a[16] = bc1 ^ (bc3 &^ bc2)
- a[17] = bc2 ^ (bc4 &^ bc3)
- a[18] = bc3 ^ (bc0 &^ bc4)
- a[19] = bc4 ^ (bc1 &^ bc0)
-
- t = a[20] ^ d0
- bc3 = t<<41 | t>>(64-41)
- t = a[21] ^ d1
- bc4 = t<<2 | t>>(64-2)
- t = a[22] ^ d2
- bc0 = t<<62 | t>>(64-62)
- t = a[23] ^ d3
- bc1 = t<<55 | t>>(64-55)
- t = a[24] ^ d4
- bc2 = t<<39 | t>>(64-39)
- a[20] = bc0 ^ (bc2 &^ bc1)
- a[21] = bc1 ^ (bc3 &^ bc2)
- a[22] = bc2 ^ (bc4 &^ bc3)
- a[23] = bc3 ^ (bc0 &^ bc4)
- a[24] = bc4 ^ (bc1 &^ bc0)
- }
-}
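
The unrolled, in-place loop above is fast but hard to map back to the specification. Below is a compact, unoptimized sketch of the same permutation in its textbook five-step form (theta, rho, pi, chi, iota); the rotation offsets and round constants are the standard Keccak values, and rc is the same table that rc.go defines next:

package main

import (
	"fmt"
	"math/bits"
)

// Round constants; identical to RC in rc.go below.
var rc = [24]uint64{
	0x0000000000000001, 0x0000000000008082, 0x800000000000808A, 0x8000000080008000,
	0x000000000000808B, 0x0000000080000001, 0x8000000080008081, 0x8000000000008009,
	0x000000000000008A, 0x0000000000000088, 0x0000000080008009, 0x000000008000000A,
	0x000000008000808B, 0x800000000000008B, 0x8000000000008089, 0x8000000000008003,
	0x8000000000008002, 0x8000000000000080, 0x000000000000800A, 0x800000008000000A,
	0x8000000080008081, 0x8000000000008080, 0x0000000080000001, 0x8000000080008008,
}

// rho rotation offsets for lane (x, y), stored at index x+5y.
var rot = [25]int{
	0, 1, 62, 28, 27,
	36, 44, 6, 55, 20,
	3, 10, 43, 25, 39,
	41, 45, 15, 21, 8,
	18, 2, 61, 56, 14,
}

// keccakF1600 is the textbook form of the permutation the unrolled
// code above computes; lane (x, y) lives at a[x+5y].
func keccakF1600(a *[25]uint64) {
	for round := 0; round < 24; round++ {
		// theta: XOR each lane with the parity of two columns.
		var c [5]uint64
		for x := 0; x < 5; x++ {
			c[x] = a[x] ^ a[x+5] ^ a[x+10] ^ a[x+15] ^ a[x+20]
		}
		for x := 0; x < 5; x++ {
			d := c[(x+4)%5] ^ bits.RotateLeft64(c[(x+1)%5], 1)
			for y := 0; y < 25; y += 5 {
				a[y+x] ^= d
			}
		}
		// rho and pi: rotate each lane, move it to (y, 2x+3y).
		var b [25]uint64
		for x := 0; x < 5; x++ {
			for y := 0; y < 5; y++ {
				b[y+5*((2*x+3*y)%5)] = bits.RotateLeft64(a[x+5*y], rot[x+5*y])
			}
		}
		// chi: non-linear mixing within each row.
		for y := 0; y < 25; y += 5 {
			for x := 0; x < 5; x++ {
				a[y+x] = b[y+x] ^ (^b[y+(x+1)%5] & b[y+(x+2)%5])
			}
		}
		// iota: break symmetry with the round constant.
		a[0] ^= rc[round]
	}
}

func main() {
	var state [25]uint64
	keccakF1600(&state)
	fmt.Printf("%#016x\n", state[0]) // permutation of the all-zero state
}

The production version fuses theta into the rho/pi loads, spreads the lane shuffle across four unrolled rounds to avoid the temporary b array, and writes every result back in place, as its opening comment notes; the output is identical.
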
diff --git a/vendor/github.com/cloudflare/circl/internal/sha3/rc.go b/vendor/github.com/cloudflare/circl/internal/sha3/rc.go
deleted file mode 100644
index 6a3df42f..00000000
--- a/vendor/github.com/cloudflare/circl/internal/sha3/rc.go
+++ /dev/null
@@ -1,29 +0,0 @@
-package sha3
-
-// RC stores the round constants for use in the ι step.
-var RC = [24]uint64{
- 0x0000000000000001,
- 0x0000000000008082,
- 0x800000000000808A,
- 0x8000000080008000,
- 0x000000000000808B,
- 0x0000000080000001,
- 0x8000000080008081,
- 0x8000000000008009,
- 0x000000000000008A,
- 0x0000000000000088,
- 0x0000000080008009,
- 0x000000008000000A,
- 0x000000008000808B,
- 0x800000000000008B,
- 0x8000000000008089,
- 0x8000000000008003,
- 0x8000000000008002,
- 0x8000000000000080,
- 0x000000000000800A,
- 0x800000008000000A,
- 0x8000000080008081,
- 0x8000000000008080,
- 0x0000000080000001,
- 0x8000000080008008,
-}
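
The constants above are not magic numbers: per the Keccak specification, bit (2^j - 1) of round constant t is the output of a degree-8 LFSR over GF(2) with feedback polynomial x^8 + x^6 + x^5 + x^4 + 1. A sketch that regenerates the table:

package main

import "fmt"

// Regenerate the 24 iota round constants from the reference LFSR.
func main() {
	lfsr := byte(1)
	for t := 0; t < 24; t++ {
		var rc uint64
		for j := 0; j <= 6; j++ {
			if lfsr&1 != 0 {
				rc |= 1 << ((1 << j) - 1) // bits 0, 1, 3, 7, 15, 31, 63
			}
			if lfsr&0x80 != 0 {
				lfsr = lfsr<<1 ^ 0x71 // reduce by x^8+x^6+x^5+x^4+1
			} else {
				lfsr <<= 1
			}
		}
		fmt.Printf("0x%016X\n", rc) // matches RC[t] above
	}
}
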
diff --git a/vendor/github.com/cloudflare/circl/internal/sha3/sha3.go b/vendor/github.com/cloudflare/circl/internal/sha3/sha3.go
deleted file mode 100644
index b35cd006..00000000
--- a/vendor/github.com/cloudflare/circl/internal/sha3/sha3.go
+++ /dev/null
@@ -1,195 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package sha3
-
-// spongeDirection indicates the direction bytes are flowing through the sponge.
-type spongeDirection int
-
-const (
- // spongeAbsorbing indicates that the sponge is absorbing input.
- spongeAbsorbing spongeDirection = iota
- // spongeSqueezing indicates that the sponge is being squeezed.
- spongeSqueezing
-)
-
-const (
-	// maxRate is the maximum size of the internal buffer. SHAKE-128,
-	// whose rate is 168 bytes, currently needs the largest buffer.
- maxRate = 168
-)
-
-func (d *State) buf() []byte {
- return d.storage.asBytes()[d.bufo:d.bufe]
-}
-
-type State struct {
- // Generic sponge components.
- a [25]uint64 // main state of the hash
- rate int // the number of bytes of state to use
-
- bufo int // offset of buffer in storage
- bufe int // end of buffer in storage
-
- // dsbyte contains the "domain separation" bits and the first bit of
- // the padding. Sections 6.1 and 6.2 of [1] separate the outputs of the
- // SHA-3 and SHAKE functions by appending bitstrings to the message.
- // Using a little-endian bit-ordering convention, these are "01" for SHA-3
- // and "1111" for SHAKE, or 00000010b and 00001111b, respectively. Then the
- // padding rule from section 5.1 is applied to pad the message to a multiple
- // of the rate, which involves adding a "1" bit, zero or more "0" bits, and
- // a final "1" bit. We merge the first "1" bit from the padding into dsbyte,
- // giving 00000110b (0x06) and 00011111b (0x1f).
- // [1] http://csrc.nist.gov/publications/drafts/fips-202/fips_202_draft.pdf
- // "Draft FIPS 202: SHA-3 Standard: Permutation-Based Hash and
- // Extendable-Output Functions (May 2014)"
- dsbyte byte
-
- storage storageBuf
-
- // Specific to SHA-3 and SHAKE.
- outputLen int // the default output size in bytes
- state spongeDirection // whether the sponge is absorbing or squeezing
-}
-
-// BlockSize returns the rate of the sponge underlying this hash function.
-func (d *State) BlockSize() int { return d.rate }
-
-// Size returns the output size of the hash function in bytes.
-func (d *State) Size() int { return d.outputLen }
-
-// Reset clears the internal state by zeroing the sponge state and
-// the byte buffer, and setting Sponge.state to absorbing.
-func (d *State) Reset() {
- // Zero the permutation's state.
- for i := range d.a {
- d.a[i] = 0
- }
- d.state = spongeAbsorbing
- d.bufo = 0
- d.bufe = 0
-}
-
-func (d *State) clone() *State {
- ret := *d
- return &ret
-}
-
-// permute applies the KeccakF-1600 permutation. It handles
-// any input-output buffering.
-func (d *State) permute() {
- switch d.state {
- case spongeAbsorbing:
- // If we're absorbing, we need to xor the input into the state
- // before applying the permutation.
- xorIn(d, d.buf())
- d.bufe = 0
- d.bufo = 0
- KeccakF1600(&d.a)
- case spongeSqueezing:
- // If we're squeezing, we need to apply the permutation before
- // copying more output.
- KeccakF1600(&d.a)
- d.bufe = d.rate
- d.bufo = 0
- copyOut(d, d.buf())
- }
-}
-
-// padAndPermute appends the domain separation bits in dsbyte, applies
-// the multi-bitrate 10..1 padding rule, and permutes the state.
-func (d *State) padAndPermute(dsbyte byte) {
- // Pad with this instance's domain-separator bits. We know that there's
- // at least one byte of space in d.buf() because, if it were full,
- // permute would have been called to empty it. dsbyte also contains the
- // first one bit for the padding. See the comment in the state struct.
- zerosStart := d.bufe + 1
- d.bufe = d.rate
- buf := d.buf()
- buf[zerosStart-1] = dsbyte
- for i := zerosStart; i < d.rate; i++ {
- buf[i] = 0
- }
- // This adds the final one bit for the padding. Because of the way that
- // bits are numbered from the LSB upwards, the final bit is the MSB of
- // the last byte.
- buf[d.rate-1] ^= 0x80
- // Apply the permutation
- d.permute()
- d.state = spongeSqueezing
- d.bufe = d.rate
- copyOut(d, buf)
-}
-
-// Write absorbs more data into the hash's state. It panics if more
-// data is written to the ShakeHash after output has been read.
-func (d *State) Write(p []byte) (written int, err error) {
- if d.state != spongeAbsorbing {
- panic("sha3: write to sponge after read")
- }
- written = len(p)
-
- for len(p) > 0 {
- bufl := d.bufe - d.bufo
- if bufl == 0 && len(p) >= d.rate {
- // The fast path; absorb a full "rate" bytes of input and apply the permutation.
- xorIn(d, p[:d.rate])
- p = p[d.rate:]
- KeccakF1600(&d.a)
- } else {
- // The slow path; buffer the input until we can fill the sponge, and then xor it in.
- todo := d.rate - bufl
- if todo > len(p) {
- todo = len(p)
- }
- d.bufe += todo
- buf := d.buf()
- copy(buf[bufl:], p[:todo])
- p = p[todo:]
-
- // If the sponge is full, apply the permutation.
- if d.bufe == d.rate {
- d.permute()
- }
- }
- }
-
- return written, nil
-}
-
-// Read squeezes an arbitrary number of bytes from the sponge.
-func (d *State) Read(out []byte) (n int, err error) {
- // If we're still absorbing, pad and apply the permutation.
- if d.state == spongeAbsorbing {
- d.padAndPermute(d.dsbyte)
- }
-
- n = len(out)
-
- // Now, do the squeezing.
- for len(out) > 0 {
- buf := d.buf()
- n := copy(out, buf)
- d.bufo += n
- out = out[n:]
-
- // Apply the permutation if we've squeezed the sponge dry.
- if d.bufo == d.bufe {
- d.permute()
- }
- }
-
- return
-}
-
-// Sum applies padding to the hash state and then squeezes out the desired
-// number of output bytes.
-func (d *State) Sum(in []byte) []byte {
- // Make a copy of the original hash so that caller can keep writing
- // and summing.
- dup := d.clone()
- hash := make([]byte, dup.outputLen)
- _, _ = dup.Read(hash)
- return append(in, hash...)
-}
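
The long dsbyte comment in the State struct above can be verified in two lines: write the domain-separation suffix LSB-first, then OR in the leading 1 of the pad10*1 rule just above it:

package main

import "fmt"

// dsbyte merges a suffix of suffixLen bits (LSB-first) with the first
// "1" bit of the pad10*1 padding rule.
func dsbyte(suffix byte, suffixLen uint) byte {
	return suffix | 1<<suffixLen
}

func main() {
	fmt.Printf("SHA-3: %#04x\n", dsbyte(0b10, 2))   // "01" -> 0x06
	fmt.Printf("SHAKE: %#04x\n", dsbyte(0b1111, 4)) // "1111" -> 0x1f
}
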
diff --git a/vendor/github.com/cloudflare/circl/internal/sha3/sha3_s390x.s b/vendor/github.com/cloudflare/circl/internal/sha3/sha3_s390x.s
deleted file mode 100644
index 8a4458f6..00000000
--- a/vendor/github.com/cloudflare/circl/internal/sha3/sha3_s390x.s
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !gccgo,!appengine
-
-#include "textflag.h"
-
-// func kimd(function code, chain *[200]byte, src []byte)
-TEXT ·kimd(SB), NOFRAME|NOSPLIT, $0-40
- MOVD function+0(FP), R0
- MOVD chain+8(FP), R1
- LMG src+16(FP), R2, R3 // R2=base, R3=len
-
-continue:
- WORD $0xB93E0002 // KIMD --, R2
- BVS continue // continue if interrupted
- MOVD $0, R0 // reset R0 for pre-go1.8 compilers
- RET
-
-// func klmd(function code, chain *[200]byte, dst, src []byte)
-TEXT ·klmd(SB), NOFRAME|NOSPLIT, $0-64
- // TODO: SHAKE support
- MOVD function+0(FP), R0
- MOVD chain+8(FP), R1
- LMG dst+16(FP), R2, R3 // R2=base, R3=len
- LMG src+40(FP), R4, R5 // R4=base, R5=len
-
-continue:
- WORD $0xB93F0024 // KLMD R2, R4
- BVS continue // continue if interrupted
- MOVD $0, R0 // reset R0 for pre-go1.8 compilers
- RET
diff --git a/vendor/github.com/cloudflare/circl/internal/sha3/shake.go b/vendor/github.com/cloudflare/circl/internal/sha3/shake.go
deleted file mode 100644
index b92c5b7d..00000000
--- a/vendor/github.com/cloudflare/circl/internal/sha3/shake.go
+++ /dev/null
@@ -1,79 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package sha3
-
-// This file defines the ShakeHash interface, and provides
-// functions for creating SHAKE and cSHAKE instances, as well as utility
-// functions for hashing bytes to arbitrary-length output.
-//
-// The SHAKE implementation is based on FIPS PUB 202 [1].
-// The cSHAKE implementation is based on NIST SP 800-185 [2].
-//
-// [1] https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf
-// [2] https://doi.org/10.6028/NIST.SP.800-185
-
-import (
- "io"
-)
-
-// ShakeHash defines the interface to hash functions that
-// support arbitrary-length output.
-type ShakeHash interface {
- // Write absorbs more data into the hash's state. It panics if input is
- // written to it after output has been read from it.
- io.Writer
-
- // Read reads more output from the hash; reading affects the hash's
- // state. (ShakeHash.Read is thus very different from Hash.Sum)
- // It never returns an error.
- io.Reader
-
- // Clone returns a copy of the ShakeHash in its current state.
- Clone() ShakeHash
-
- // Reset resets the ShakeHash to its initial state.
- Reset()
-}
-
-// Consts for configuring initial SHA-3 state
-const (
- dsbyteShake = 0x1f
- rate128 = 168
- rate256 = 136
-)
-
-// Clone returns a copy of the SHAKE context in its current state.
-func (d *State) Clone() ShakeHash {
- return d.clone()
-}
-
-// NewShake128 creates a new SHAKE128 variable-output-length ShakeHash.
-// Its generic security strength is 128 bits against all attacks if at
-// least 32 bytes of its output are used.
-func NewShake128() State {
- return State{rate: rate128, dsbyte: dsbyteShake}
-}
-
-// NewShake256 creates a new SHAKE256 variable-output-length ShakeHash.
-// Its generic security strength is 256 bits against all attacks if
-// at least 64 bytes of its output are used.
-func NewShake256() State {
- return State{rate: rate256, dsbyte: dsbyteShake}
-}
-
-// ShakeSum128 writes an arbitrary-length digest of data into hash.
-func ShakeSum128(hash, data []byte) {
- h := NewShake128()
- _, _ = h.Write(data)
- _, _ = h.Read(hash)
-}
-
-// ShakeSum256 writes an arbitrary-length digest of data into hash.
-func ShakeSum256(hash, data []byte) {
- h := NewShake256()
- _, _ = h.Write(data)
- _, _ = h.Read(hash)
-}
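For callers that only need a single digest, the one-shot helpers deleted above wrap the Write/Read dance; golang.org/x/crypto/sha3 exposes helpers under the same names. A short sketch, where the caller picks the output length (64 bytes would give the full 256-bit strength noted above):

package main

import (
	"fmt"

	"golang.org/x/crypto/sha3"
)

func main() {
	digest := make([]byte, 32) // arbitrary caller-chosen length
	sha3.ShakeSum256(digest, []byte("some data"))
	fmt.Printf("%x\n", digest)
}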
diff --git a/vendor/github.com/cloudflare/circl/internal/sha3/xor.go b/vendor/github.com/cloudflare/circl/internal/sha3/xor.go
deleted file mode 100644
index 1e213374..00000000
--- a/vendor/github.com/cloudflare/circl/internal/sha3/xor.go
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build (!amd64 && !386 && !ppc64le) || appengine
-// +build !amd64,!386,!ppc64le appengine
-
-package sha3
-
-// A storageBuf is an aligned array of maxRate bytes.
-type storageBuf [maxRate]byte
-
-func (b *storageBuf) asBytes() *[maxRate]byte {
- return (*[maxRate]byte)(b)
-}
diff --git a/vendor/github.com/cloudflare/circl/internal/sha3/xor_generic.go b/vendor/github.com/cloudflare/circl/internal/sha3/xor_generic.go
deleted file mode 100644
index 2b0c6617..00000000
--- a/vendor/github.com/cloudflare/circl/internal/sha3/xor_generic.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build (!amd64 || appengine) && (!386 || appengine) && (!ppc64le || appengine)
-// +build !amd64 appengine
-// +build !386 appengine
-// +build !ppc64le appengine
-
-package sha3
-
-import "encoding/binary"
-
-// xorIn xors the bytes in buf into the state; it
-// makes no non-portable assumptions about memory layout
-// or alignment.
-func xorIn(d *State, buf []byte) {
- n := len(buf) / 8
-
- for i := 0; i < n; i++ {
- a := binary.LittleEndian.Uint64(buf)
- d.a[i] ^= a
- buf = buf[8:]
- }
-}
-
-// copyOut copies uint64s to a byte buffer.
-func copyOut(d *State, b []byte) {
- for i := 0; len(b) >= 8; i++ {
- binary.LittleEndian.PutUint64(b, d.a[i])
- b = b[8:]
- }
-}
diff --git a/vendor/github.com/cloudflare/circl/internal/sha3/xor_unaligned.go b/vendor/github.com/cloudflare/circl/internal/sha3/xor_unaligned.go
deleted file mode 100644
index 052fc8d3..00000000
--- a/vendor/github.com/cloudflare/circl/internal/sha3/xor_unaligned.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build (amd64 || 386 || ppc64le) && !appengine
-// +build amd64 386 ppc64le
-// +build !appengine
-
-package sha3
-
-import "unsafe"
-
-// A storageBuf is an aligned array of maxRate bytes, stored as uint64s for alignment.
-type storageBuf [maxRate / 8]uint64
-
-func (b *storageBuf) asBytes() *[maxRate]byte {
- return (*[maxRate]byte)(unsafe.Pointer(b))
-}
-
-// xorIn uses unaligned reads and writes to update d.a to contain d.a
-// XOR buf.
-func xorIn(d *State, buf []byte) {
- n := len(buf)
- bw := (*[maxRate / 8]uint64)(unsafe.Pointer(&buf[0]))[: n/8 : n/8]
- if n >= 72 {
- d.a[0] ^= bw[0]
- d.a[1] ^= bw[1]
- d.a[2] ^= bw[2]
- d.a[3] ^= bw[3]
- d.a[4] ^= bw[4]
- d.a[5] ^= bw[5]
- d.a[6] ^= bw[6]
- d.a[7] ^= bw[7]
- d.a[8] ^= bw[8]
- }
- if n >= 104 {
- d.a[9] ^= bw[9]
- d.a[10] ^= bw[10]
- d.a[11] ^= bw[11]
- d.a[12] ^= bw[12]
- }
- if n >= 136 {
- d.a[13] ^= bw[13]
- d.a[14] ^= bw[14]
- d.a[15] ^= bw[15]
- d.a[16] ^= bw[16]
- }
- if n >= 144 {
- d.a[17] ^= bw[17]
- }
- if n >= 168 {
- d.a[18] ^= bw[18]
- d.a[19] ^= bw[19]
- d.a[20] ^= bw[20]
- }
-}
-
-func copyOut(d *State, buf []byte) {
- ab := (*[maxRate]uint8)(unsafe.Pointer(&d.a[0]))
- copy(buf, ab[:])
-}
diff --git a/vendor/github.com/cloudflare/circl/math/fp25519/fp.go b/vendor/github.com/cloudflare/circl/math/fp25519/fp.go
deleted file mode 100644
index 57a50ff5..00000000
--- a/vendor/github.com/cloudflare/circl/math/fp25519/fp.go
+++ /dev/null
@@ -1,205 +0,0 @@
-// Package fp25519 provides prime field arithmetic over GF(2^255-19).
-package fp25519
-
-import (
- "errors"
-
- "github.com/cloudflare/circl/internal/conv"
-)
-
-// Size in bytes of an element.
-const Size = 32
-
-// Elt is a prime field element.
-type Elt [Size]byte
-
-func (e Elt) String() string { return conv.BytesLe2Hex(e[:]) }
-
-// p is the prime modulus 2^255-19.
-var p = Elt{
- 0xed, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f,
-}
-
-// P returns the prime modulus 2^255-19.
-func P() Elt { return p }
-
-// ToBytes stores in b the little-endian byte representation of x.
-func ToBytes(b []byte, x *Elt) error {
- if len(b) != Size {
- return errors.New("wrong size")
- }
- Modp(x)
- copy(b, x[:])
- return nil
-}
-
-// IsZero returns true if x is equal to 0.
-func IsZero(x *Elt) bool { Modp(x); return *x == Elt{} }
-
-// SetOne assigns x=1.
-func SetOne(x *Elt) { *x = Elt{}; x[0] = 1 }
-
-// Neg calculates z = -x.
-func Neg(z, x *Elt) { Sub(z, &p, x) }
-
-// InvSqrt calculates z = sqrt(x/y) iff x/y is a quadratic-residue, which is
-// indicated by returning isQR = true. Otherwise, when x/y is a quadratic
-// non-residue, z will have an undetermined value and isQR = false.
-func InvSqrt(z, x, y *Elt) (isQR bool) {
- sqrtMinusOne := &Elt{
- 0xb0, 0xa0, 0x0e, 0x4a, 0x27, 0x1b, 0xee, 0xc4,
- 0x78, 0xe4, 0x2f, 0xad, 0x06, 0x18, 0x43, 0x2f,
- 0xa7, 0xd7, 0xfb, 0x3d, 0x99, 0x00, 0x4d, 0x2b,
- 0x0b, 0xdf, 0xc1, 0x4f, 0x80, 0x24, 0x83, 0x2b,
- }
- t0, t1, t2, t3 := &Elt{}, &Elt{}, &Elt{}, &Elt{}
-
- Mul(t0, x, y) // t0 = u*v
- Sqr(t1, y) // t1 = v^2
- Mul(t2, t0, t1) // t2 = u*v^3
- Sqr(t0, t1) // t0 = v^4
- Mul(t1, t0, t2) // t1 = u*v^7
-
- var Tab [4]*Elt
- Tab[0] = &Elt{}
- Tab[1] = &Elt{}
- Tab[2] = t3
- Tab[3] = t1
-
- *Tab[0] = *t1
- Sqr(Tab[0], Tab[0])
- Sqr(Tab[1], Tab[0])
- Sqr(Tab[1], Tab[1])
- Mul(Tab[1], Tab[1], Tab[3])
- Mul(Tab[0], Tab[0], Tab[1])
- Sqr(Tab[0], Tab[0])
- Mul(Tab[0], Tab[0], Tab[1])
- Sqr(Tab[1], Tab[0])
- for i := 0; i < 4; i++ {
- Sqr(Tab[1], Tab[1])
- }
- Mul(Tab[1], Tab[1], Tab[0])
- Sqr(Tab[2], Tab[1])
- for i := 0; i < 4; i++ {
- Sqr(Tab[2], Tab[2])
- }
- Mul(Tab[2], Tab[2], Tab[0])
- Sqr(Tab[1], Tab[2])
- for i := 0; i < 14; i++ {
- Sqr(Tab[1], Tab[1])
- }
- Mul(Tab[1], Tab[1], Tab[2])
- Sqr(Tab[2], Tab[1])
- for i := 0; i < 29; i++ {
- Sqr(Tab[2], Tab[2])
- }
- Mul(Tab[2], Tab[2], Tab[1])
- Sqr(Tab[1], Tab[2])
- for i := 0; i < 59; i++ {
- Sqr(Tab[1], Tab[1])
- }
- Mul(Tab[1], Tab[1], Tab[2])
- for i := 0; i < 5; i++ {
- Sqr(Tab[1], Tab[1])
- }
- Mul(Tab[1], Tab[1], Tab[0])
- Sqr(Tab[2], Tab[1])
- for i := 0; i < 124; i++ {
- Sqr(Tab[2], Tab[2])
- }
- Mul(Tab[2], Tab[2], Tab[1])
- Sqr(Tab[2], Tab[2])
- Sqr(Tab[2], Tab[2])
- Mul(Tab[2], Tab[2], Tab[3])
-
- Mul(z, t3, t2) // z = xy^(p+3)/8 = xy^3*(xy^7)^(p-5)/8
- // Checking whether y z^2 == x
- Sqr(t0, z) // t0 = z^2
- Mul(t0, t0, y) // t0 = yz^2
- Sub(t1, t0, x) // t1 = t0-u
- Add(t2, t0, x) // t2 = t0+u
- if IsZero(t1) {
- return true
- } else if IsZero(t2) {
- Mul(z, z, sqrtMinusOne) // z = z*sqrt(-1)
- return true
- } else {
- return false
- }
-}
-
-// Inv calculates z = 1/x mod p.
-func Inv(z, x *Elt) {
- x0, x1, x2 := &Elt{}, &Elt{}, &Elt{}
- Sqr(x1, x)
- Sqr(x0, x1)
- Sqr(x0, x0)
- Mul(x0, x0, x)
- Mul(z, x0, x1)
- Sqr(x1, z)
- Mul(x0, x0, x1)
- Sqr(x1, x0)
- for i := 0; i < 4; i++ {
- Sqr(x1, x1)
- }
- Mul(x0, x0, x1)
- Sqr(x1, x0)
- for i := 0; i < 9; i++ {
- Sqr(x1, x1)
- }
- Mul(x1, x1, x0)
- Sqr(x2, x1)
- for i := 0; i < 19; i++ {
- Sqr(x2, x2)
- }
- Mul(x2, x2, x1)
- for i := 0; i < 10; i++ {
- Sqr(x2, x2)
- }
- Mul(x2, x2, x0)
- Sqr(x0, x2)
- for i := 0; i < 49; i++ {
- Sqr(x0, x0)
- }
- Mul(x0, x0, x2)
- Sqr(x1, x0)
- for i := 0; i < 99; i++ {
- Sqr(x1, x1)
- }
- Mul(x1, x1, x0)
- for i := 0; i < 50; i++ {
- Sqr(x1, x1)
- }
- Mul(x1, x1, x2)
- for i := 0; i < 5; i++ {
- Sqr(x1, x1)
- }
- Mul(z, z, x1)
-}
-
-// Cmov assigns y to x if n is 1.
-func Cmov(x, y *Elt, n uint) { cmov(x, y, n) }
-
-// Cswap interchanges x and y if n is 1.
-func Cswap(x, y *Elt, n uint) { cswap(x, y, n) }
-
-// Add calculates z = x+y mod p.
-func Add(z, x, y *Elt) { add(z, x, y) }
-
-// Sub calculates z = x-y mod p.
-func Sub(z, x, y *Elt) { sub(z, x, y) }
-
-// AddSub calculates (x,y) = (x+y mod p, x-y mod p).
-func AddSub(x, y *Elt) { addsub(x, y) }
-
-// Mul calculates z = x*y mod p.
-func Mul(z, x, y *Elt) { mul(z, x, y) }
-
-// Sqr calculates z = x^2 mod p.
-func Sqr(z, x *Elt) { sqr(z, x) }
-
-// Modp ensures that z is between [0,p-1].
-func Modp(z *Elt) { modp(z) }
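The package trailer above is a thin, constant-time API over architecture-specific backends. A quick sanity sketch of the Mul/Inv round trip, assuming the upstream github.com/cloudflare/circl/math/fp25519 module (no longer vendored here) is still importable with this API:

package main

import (
	"fmt"

	"github.com/cloudflare/circl/math/fp25519"
)

func main() {
	x := &fp25519.Elt{3} // the field element 3, little-endian
	inv, prod := &fp25519.Elt{}, &fp25519.Elt{}
	fp25519.Inv(inv, x)       // inv = 3^-1 mod 2^255-19
	fp25519.Mul(prod, x, inv) // prod ≡ 1 (mod p)
	fp25519.Modp(prod)        // canonicalize before comparing
	fmt.Println(*prod == fp25519.Elt{1}) // true
}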
diff --git a/vendor/github.com/cloudflare/circl/math/fp25519/fp_amd64.go b/vendor/github.com/cloudflare/circl/math/fp25519/fp_amd64.go
deleted file mode 100644
index 057f0d28..00000000
--- a/vendor/github.com/cloudflare/circl/math/fp25519/fp_amd64.go
+++ /dev/null
@@ -1,45 +0,0 @@
-//go:build amd64 && !purego
-// +build amd64,!purego
-
-package fp25519
-
-import (
- "golang.org/x/sys/cpu"
-)
-
-var hasBmi2Adx = cpu.X86.HasBMI2 && cpu.X86.HasADX
-
-var _ = hasBmi2Adx
-
-func cmov(x, y *Elt, n uint) { cmovAmd64(x, y, n) }
-func cswap(x, y *Elt, n uint) { cswapAmd64(x, y, n) }
-func add(z, x, y *Elt) { addAmd64(z, x, y) }
-func sub(z, x, y *Elt) { subAmd64(z, x, y) }
-func addsub(x, y *Elt) { addsubAmd64(x, y) }
-func mul(z, x, y *Elt) { mulAmd64(z, x, y) }
-func sqr(z, x *Elt) { sqrAmd64(z, x) }
-func modp(z *Elt) { modpAmd64(z) }
-
-//go:noescape
-func cmovAmd64(x, y *Elt, n uint)
-
-//go:noescape
-func cswapAmd64(x, y *Elt, n uint)
-
-//go:noescape
-func addAmd64(z, x, y *Elt)
-
-//go:noescape
-func subAmd64(z, x, y *Elt)
-
-//go:noescape
-func addsubAmd64(x, y *Elt)
-
-//go:noescape
-func mulAmd64(z, x, y *Elt)
-
-//go:noescape
-func sqrAmd64(z, x *Elt)
-
-//go:noescape
-func modpAmd64(z *Elt)
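The deleted dispatcher chooses between the legacy and BMI2/ADX assembly once at package init via golang.org/x/sys/cpu (the package the file itself imports). The same feature-probe idiom in isolation, mirroring the hasBmi2Adx variable above:

package main

import (
	"fmt"

	"golang.org/x/sys/cpu"
)

func main() {
	if cpu.X86.HasBMI2 && cpu.X86.HasADX {
		fmt.Println("MULX/ADCX/ADOX fast path available")
	} else {
		fmt.Println("falling back to the legacy MULQ/ADCQ path")
	}
}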
diff --git a/vendor/github.com/cloudflare/circl/math/fp25519/fp_amd64.h b/vendor/github.com/cloudflare/circl/math/fp25519/fp_amd64.h
deleted file mode 100644
index b884b584..00000000
--- a/vendor/github.com/cloudflare/circl/math/fp25519/fp_amd64.h
+++ /dev/null
@@ -1,351 +0,0 @@
-// This code was imported from https://github.com/armfazh/rfc7748_precomputed
-
-// CHECK_BMI2ADX runs the bmi2adx code path if supported,
-// otherwise it falls back to the legacy code path.
-#define CHECK_BMI2ADX(label, legacy, bmi2adx) \
- CMPB ·hasBmi2Adx(SB), $0 \
- JE label \
- bmi2adx \
- RET \
- label: \
- legacy \
- RET
-
-// cselect is a conditional move
-// if b=1: it copies y into x;
-// if b=0: x remains with the same value;
-// if b<> 0,1: undefined.
-// Uses: AX, DX, FLAGS
-// Instr: x86_64, cmov
-#define cselect(x,y,b) \
- TESTQ b, b \
- MOVQ 0+x, AX; MOVQ 0+y, DX; CMOVQNE DX, AX; MOVQ AX, 0+x; \
- MOVQ 8+x, AX; MOVQ 8+y, DX; CMOVQNE DX, AX; MOVQ AX, 8+x; \
- MOVQ 16+x, AX; MOVQ 16+y, DX; CMOVQNE DX, AX; MOVQ AX, 16+x; \
- MOVQ 24+x, AX; MOVQ 24+y, DX; CMOVQNE DX, AX; MOVQ AX, 24+x;
-
-// cswap is a conditional swap
-// if b=1: x,y <- y,x;
-// if b=0: x,y remain with the same values;
-// if b<> 0,1: undefined.
-// Uses: AX, DX, R8, FLAGS
-// Instr: x86_64, cmov
-#define cswap(x,y,b) \
- TESTQ b, b \
- MOVQ 0+x, AX; MOVQ AX, R8; MOVQ 0+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 0+x; MOVQ DX, 0+y; \
- MOVQ 8+x, AX; MOVQ AX, R8; MOVQ 8+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 8+x; MOVQ DX, 8+y; \
- MOVQ 16+x, AX; MOVQ AX, R8; MOVQ 16+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 16+x; MOVQ DX, 16+y; \
- MOVQ 24+x, AX; MOVQ AX, R8; MOVQ 24+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 24+x; MOVQ DX, 24+y;
-
-// additionLeg adds x and y and stores in z
-// Uses: AX, DX, R8-R11, FLAGS
-// Instr: x86_64, cmov
-#define additionLeg(z,x,y) \
- MOVL $38, AX; \
- MOVL $0, DX; \
- MOVQ 0+x, R8; ADDQ 0+y, R8; \
- MOVQ 8+x, R9; ADCQ 8+y, R9; \
- MOVQ 16+x, R10; ADCQ 16+y, R10; \
- MOVQ 24+x, R11; ADCQ 24+y, R11; \
- CMOVQCS AX, DX; \
- ADDQ DX, R8; \
- ADCQ $0, R9; MOVQ R9, 8+z; \
- ADCQ $0, R10; MOVQ R10, 16+z; \
- ADCQ $0, R11; MOVQ R11, 24+z; \
- MOVL $0, DX; \
- CMOVQCS AX, DX; \
- ADDQ DX, R8; MOVQ R8, 0+z;
-
-// additionAdx adds x and y and stores in z
-// Uses: AX, DX, R8-R11, FLAGS
-// Instr: x86_64, cmov, adx
-#define additionAdx(z,x,y) \
- MOVL $38, AX; \
- XORL DX, DX; \
- MOVQ 0+x, R8; ADCXQ 0+y, R8; \
- MOVQ 8+x, R9; ADCXQ 8+y, R9; \
- MOVQ 16+x, R10; ADCXQ 16+y, R10; \
- MOVQ 24+x, R11; ADCXQ 24+y, R11; \
- CMOVQCS AX, DX ; \
- XORL AX, AX; \
- ADCXQ DX, R8; \
- ADCXQ AX, R9; MOVQ R9, 8+z; \
- ADCXQ AX, R10; MOVQ R10, 16+z; \
- ADCXQ AX, R11; MOVQ R11, 24+z; \
- MOVL $38, DX; \
- CMOVQCS DX, AX; \
- ADDQ AX, R8; MOVQ R8, 0+z;
-
-// subtraction subtracts y from x and stores in z
-// Uses: AX, DX, R8-R11, FLAGS
-// Instr: x86_64, cmov
-#define subtraction(z,x,y) \
- MOVL $38, AX; \
- MOVQ 0+x, R8; SUBQ 0+y, R8; \
- MOVQ 8+x, R9; SBBQ 8+y, R9; \
- MOVQ 16+x, R10; SBBQ 16+y, R10; \
- MOVQ 24+x, R11; SBBQ 24+y, R11; \
- MOVL $0, DX; \
- CMOVQCS AX, DX; \
- SUBQ DX, R8; \
- SBBQ $0, R9; MOVQ R9, 8+z; \
- SBBQ $0, R10; MOVQ R10, 16+z; \
- SBBQ $0, R11; MOVQ R11, 24+z; \
- MOVL $0, DX; \
- CMOVQCS AX, DX; \
- SUBQ DX, R8; MOVQ R8, 0+z;
-
-// integerMulAdx multiplies x and y and stores in z
-// Uses: AX, DX, R8-R15, FLAGS
-// Instr: x86_64, bmi2, adx
-#define integerMulAdx(z,x,y) \
- MOVL $0,R15; \
- MOVQ 0+y, DX; XORL AX, AX; \
- MULXQ 0+x, AX, R8; MOVQ AX, 0+z; \
- MULXQ 8+x, AX, R9; ADCXQ AX, R8; \
- MULXQ 16+x, AX, R10; ADCXQ AX, R9; \
- MULXQ 24+x, AX, R11; ADCXQ AX, R10; \
- MOVL $0, AX;;;;;;;;; ADCXQ AX, R11; \
- MOVQ 8+y, DX; XORL AX, AX; \
- MULXQ 0+x, AX, R12; ADCXQ R8, AX; MOVQ AX, 8+z; \
- MULXQ 8+x, AX, R13; ADCXQ R9, R12; ADOXQ AX, R12; \
- MULXQ 16+x, AX, R14; ADCXQ R10, R13; ADOXQ AX, R13; \
- MULXQ 24+x, AX, R15; ADCXQ R11, R14; ADOXQ AX, R14; \
- MOVL $0, AX;;;;;;;;; ADCXQ AX, R15; ADOXQ AX, R15; \
- MOVQ 16+y, DX; XORL AX, AX; \
- MULXQ 0+x, AX, R8; ADCXQ R12, AX; MOVQ AX, 16+z; \
- MULXQ 8+x, AX, R9; ADCXQ R13, R8; ADOXQ AX, R8; \
- MULXQ 16+x, AX, R10; ADCXQ R14, R9; ADOXQ AX, R9; \
- MULXQ 24+x, AX, R11; ADCXQ R15, R10; ADOXQ AX, R10; \
- MOVL $0, AX;;;;;;;;; ADCXQ AX, R11; ADOXQ AX, R11; \
- MOVQ 24+y, DX; XORL AX, AX; \
- MULXQ 0+x, AX, R12; ADCXQ R8, AX; MOVQ AX, 24+z; \
- MULXQ 8+x, AX, R13; ADCXQ R9, R12; ADOXQ AX, R12; MOVQ R12, 32+z; \
- MULXQ 16+x, AX, R14; ADCXQ R10, R13; ADOXQ AX, R13; MOVQ R13, 40+z; \
- MULXQ 24+x, AX, R15; ADCXQ R11, R14; ADOXQ AX, R14; MOVQ R14, 48+z; \
- MOVL $0, AX;;;;;;;;; ADCXQ AX, R15; ADOXQ AX, R15; MOVQ R15, 56+z;
-
-// integerMulLeg multiplies x and y and stores in z
-// Uses: AX, DX, R8-R15, FLAGS
-// Instr: x86_64
-#define integerMulLeg(z,x,y) \
- MOVQ 0+y, R8; \
- MOVQ 0+x, AX; MULQ R8; MOVQ AX, 0+z; MOVQ DX, R15; \
- MOVQ 8+x, AX; MULQ R8; MOVQ AX, R13; MOVQ DX, R10; \
- MOVQ 16+x, AX; MULQ R8; MOVQ AX, R14; MOVQ DX, R11; \
- MOVQ 24+x, AX; MULQ R8; \
- ADDQ R13, R15; \
- ADCQ R14, R10; MOVQ R10, 16+z; \
- ADCQ AX, R11; MOVQ R11, 24+z; \
- ADCQ $0, DX; MOVQ DX, 32+z; \
- MOVQ 8+y, R8; \
- MOVQ 0+x, AX; MULQ R8; MOVQ AX, R12; MOVQ DX, R9; \
- MOVQ 8+x, AX; MULQ R8; MOVQ AX, R13; MOVQ DX, R10; \
- MOVQ 16+x, AX; MULQ R8; MOVQ AX, R14; MOVQ DX, R11; \
- MOVQ 24+x, AX; MULQ R8; \
- ADDQ R12, R15; MOVQ R15, 8+z; \
- ADCQ R13, R9; \
- ADCQ R14, R10; \
- ADCQ AX, R11; \
- ADCQ $0, DX; \
- ADCQ 16+z, R9; MOVQ R9, R15; \
- ADCQ 24+z, R10; MOVQ R10, 24+z; \
- ADCQ 32+z, R11; MOVQ R11, 32+z; \
- ADCQ $0, DX; MOVQ DX, 40+z; \
- MOVQ 16+y, R8; \
- MOVQ 0+x, AX; MULQ R8; MOVQ AX, R12; MOVQ DX, R9; \
- MOVQ 8+x, AX; MULQ R8; MOVQ AX, R13; MOVQ DX, R10; \
- MOVQ 16+x, AX; MULQ R8; MOVQ AX, R14; MOVQ DX, R11; \
- MOVQ 24+x, AX; MULQ R8; \
- ADDQ R12, R15; MOVQ R15, 16+z; \
- ADCQ R13, R9; \
- ADCQ R14, R10; \
- ADCQ AX, R11; \
- ADCQ $0, DX; \
- ADCQ 24+z, R9; MOVQ R9, R15; \
- ADCQ 32+z, R10; MOVQ R10, 32+z; \
- ADCQ 40+z, R11; MOVQ R11, 40+z; \
- ADCQ $0, DX; MOVQ DX, 48+z; \
- MOVQ 24+y, R8; \
- MOVQ 0+x, AX; MULQ R8; MOVQ AX, R12; MOVQ DX, R9; \
- MOVQ 8+x, AX; MULQ R8; MOVQ AX, R13; MOVQ DX, R10; \
- MOVQ 16+x, AX; MULQ R8; MOVQ AX, R14; MOVQ DX, R11; \
- MOVQ 24+x, AX; MULQ R8; \
- ADDQ R12, R15; MOVQ R15, 24+z; \
- ADCQ R13, R9; \
- ADCQ R14, R10; \
- ADCQ AX, R11; \
- ADCQ $0, DX; \
- ADCQ 32+z, R9; MOVQ R9, 32+z; \
- ADCQ 40+z, R10; MOVQ R10, 40+z; \
- ADCQ 48+z, R11; MOVQ R11, 48+z; \
- ADCQ $0, DX; MOVQ DX, 56+z;
-
-// integerSqrLeg squares x and stores in z
-// Uses: AX, CX, DX, R8-R15, FLAGS
-// Instr: x86_64
-#define integerSqrLeg(z,x) \
- MOVQ 0+x, R8; \
- MOVQ 8+x, AX; MULQ R8; MOVQ AX, R9; MOVQ DX, R10; /* A[0]*A[1] */ \
- MOVQ 16+x, AX; MULQ R8; MOVQ AX, R14; MOVQ DX, R11; /* A[0]*A[2] */ \
- MOVQ 24+x, AX; MULQ R8; MOVQ AX, R15; MOVQ DX, R12; /* A[0]*A[3] */ \
- MOVQ 24+x, R8; \
- MOVQ 8+x, AX; MULQ R8; MOVQ AX, CX; MOVQ DX, R13; /* A[3]*A[1] */ \
- MOVQ 16+x, AX; MULQ R8; /* A[3]*A[2] */ \
- \
- ADDQ R14, R10;\
- ADCQ R15, R11; MOVL $0, R15;\
- ADCQ CX, R12;\
- ADCQ AX, R13;\
- ADCQ $0, DX; MOVQ DX, R14;\
- MOVQ 8+x, AX; MULQ 16+x;\
- \
- ADDQ AX, R11;\
- ADCQ DX, R12;\
- ADCQ $0, R13;\
- ADCQ $0, R14;\
- ADCQ $0, R15;\
- \
- SHLQ $1, R14, R15; MOVQ R15, 56+z;\
- SHLQ $1, R13, R14; MOVQ R14, 48+z;\
- SHLQ $1, R12, R13; MOVQ R13, 40+z;\
- SHLQ $1, R11, R12; MOVQ R12, 32+z;\
- SHLQ $1, R10, R11; MOVQ R11, 24+z;\
- SHLQ $1, R9, R10; MOVQ R10, 16+z;\
- SHLQ $1, R9; MOVQ R9, 8+z;\
- \
- MOVQ 0+x,AX; MULQ AX; MOVQ AX, 0+z; MOVQ DX, R9;\
- MOVQ 8+x,AX; MULQ AX; MOVQ AX, R10; MOVQ DX, R11;\
- MOVQ 16+x,AX; MULQ AX; MOVQ AX, R12; MOVQ DX, R13;\
- MOVQ 24+x,AX; MULQ AX; MOVQ AX, R14; MOVQ DX, R15;\
- \
- ADDQ 8+z, R9; MOVQ R9, 8+z;\
- ADCQ 16+z, R10; MOVQ R10, 16+z;\
- ADCQ 24+z, R11; MOVQ R11, 24+z;\
- ADCQ 32+z, R12; MOVQ R12, 32+z;\
- ADCQ 40+z, R13; MOVQ R13, 40+z;\
- ADCQ 48+z, R14; MOVQ R14, 48+z;\
- ADCQ 56+z, R15; MOVQ R15, 56+z;
-
-// integerSqrAdx squares x and stores in z
-// Uses: AX, CX, DX, R8-R15, FLAGS
-// Instr: x86_64, bmi2, adx
-#define integerSqrAdx(z,x) \
- MOVQ 0+x, DX; /* A[0] */ \
- MULXQ 8+x, R8, R14; /* A[1]*A[0] */ XORL R15, R15; \
- MULXQ 16+x, R9, R10; /* A[2]*A[0] */ ADCXQ R14, R9; \
- MULXQ 24+x, AX, CX; /* A[3]*A[0] */ ADCXQ AX, R10; \
- MOVQ 24+x, DX; /* A[3] */ \
- MULXQ 8+x, R11, R12; /* A[1]*A[3] */ ADCXQ CX, R11; \
- MULXQ 16+x, AX, R13; /* A[2]*A[3] */ ADCXQ AX, R12; \
- MOVQ 8+x, DX; /* A[1] */ ADCXQ R15, R13; \
- MULXQ 16+x, AX, CX; /* A[2]*A[1] */ MOVL $0, R14; \
- ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ADCXQ R15, R14; \
- XORL R15, R15; \
- ADOXQ AX, R10; ADCXQ R8, R8; \
- ADOXQ CX, R11; ADCXQ R9, R9; \
- ADOXQ R15, R12; ADCXQ R10, R10; \
- ADOXQ R15, R13; ADCXQ R11, R11; \
- ADOXQ R15, R14; ADCXQ R12, R12; \
- ;;;;;;;;;;;;;;; ADCXQ R13, R13; \
- ;;;;;;;;;;;;;;; ADCXQ R14, R14; \
- MOVQ 0+x, DX; MULXQ DX, AX, CX; /* A[0]^2 */ \
- ;;;;;;;;;;;;;;; MOVQ AX, 0+z; \
- ADDQ CX, R8; MOVQ R8, 8+z; \
- MOVQ 8+x, DX; MULXQ DX, AX, CX; /* A[1]^2 */ \
- ADCQ AX, R9; MOVQ R9, 16+z; \
- ADCQ CX, R10; MOVQ R10, 24+z; \
- MOVQ 16+x, DX; MULXQ DX, AX, CX; /* A[2]^2 */ \
- ADCQ AX, R11; MOVQ R11, 32+z; \
- ADCQ CX, R12; MOVQ R12, 40+z; \
- MOVQ 24+x, DX; MULXQ DX, AX, CX; /* A[3]^2 */ \
- ADCQ AX, R13; MOVQ R13, 48+z; \
- ADCQ CX, R14; MOVQ R14, 56+z;
-
-// reduceFromDouble finds z congruent to x modulo p such that 0 <= z < 2^256
-
-	// CX = C[255] ? 0 : 19
-	cx := uint64(19) &^ (-(x3 >> 63))
- // PUT BIT 255 IN CARRY FLAG AND CLEAR
- x3 &^= 1 << 63
-
- x0, c0 := bits.Add64(x0, cx, 0)
- x1, c1 := bits.Add64(x1, 0, c0)
- x2, c2 := bits.Add64(x2, 0, c1)
- x3, _ = bits.Add64(x3, 0, c2)
-
- // TEST FOR BIT 255 AGAIN; ONLY TRIGGERED ON OVERFLOW MODULO 2^255-19
- // cx = C[255] ? 0 : 19
- cx = uint64(19) &^ (-(x3 >> 63))
- // CLEAR BIT 255
- x3 &^= 1 << 63
-
- x0, c0 = bits.Sub64(x0, cx, 0)
- x1, c1 = bits.Sub64(x1, 0, c0)
- x2, c2 = bits.Sub64(x2, 0, c1)
- x3, _ = bits.Sub64(x3, 0, c2)
-
- binary.LittleEndian.PutUint64(x[0*8:1*8], x0)
- binary.LittleEndian.PutUint64(x[1*8:2*8], x1)
- binary.LittleEndian.PutUint64(x[2*8:3*8], x2)
- binary.LittleEndian.PutUint64(x[3*8:4*8], x3)
-}
-
-func red64(z *Elt, x0, x1, x2, x3, x4, x5, x6, x7 uint64) {
- h0, l0 := bits.Mul64(x4, 38)
- h1, l1 := bits.Mul64(x5, 38)
- h2, l2 := bits.Mul64(x6, 38)
- h3, l3 := bits.Mul64(x7, 38)
-
- l1, c0 := bits.Add64(h0, l1, 0)
- l2, c1 := bits.Add64(h1, l2, c0)
- l3, c2 := bits.Add64(h2, l3, c1)
- l4, _ := bits.Add64(h3, 0, c2)
-
- l0, c0 = bits.Add64(l0, x0, 0)
- l1, c1 = bits.Add64(l1, x1, c0)
- l2, c2 = bits.Add64(l2, x2, c1)
- l3, c3 := bits.Add64(l3, x3, c2)
- l4, _ = bits.Add64(l4, 0, c3)
-
- _, l4 = bits.Mul64(l4, 38)
- l0, c0 = bits.Add64(l0, l4, 0)
- z1, c1 := bits.Add64(l1, 0, c0)
- z2, c2 := bits.Add64(l2, 0, c1)
- z3, c3 := bits.Add64(l3, 0, c2)
- z0, _ := bits.Add64(l0, (-c3)&38, 0)
-
- binary.LittleEndian.PutUint64(z[0*8:1*8], z0)
- binary.LittleEndian.PutUint64(z[1*8:2*8], z1)
- binary.LittleEndian.PutUint64(z[2*8:3*8], z2)
- binary.LittleEndian.PutUint64(z[3*8:4*8], z3)
-}
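red64 folds the upper four limbs back into the lower four by multiplying them by 38, which works because p = 2^255 - 19 makes 2^256 ≡ 2·19 = 38 (mod p). A math/big check of that folding constant:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	one := big.NewInt(1)
	p := new(big.Int).Sub(new(big.Int).Lsh(one, 255), big.NewInt(19)) // p = 2^255 - 19
	r := new(big.Int).Mod(new(big.Int).Lsh(one, 256), p)             // 2^256 mod p
	fmt.Println(r) // 38, the constant red64 multiplies the high limbs by
}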
diff --git a/vendor/github.com/cloudflare/circl/math/fp25519/fp_noasm.go b/vendor/github.com/cloudflare/circl/math/fp25519/fp_noasm.go
deleted file mode 100644
index 26ca4d01..00000000
--- a/vendor/github.com/cloudflare/circl/math/fp25519/fp_noasm.go
+++ /dev/null
@@ -1,13 +0,0 @@
-//go:build !amd64 || purego
-// +build !amd64 purego
-
-package fp25519
-
-func cmov(x, y *Elt, n uint) { cmovGeneric(x, y, n) }
-func cswap(x, y *Elt, n uint) { cswapGeneric(x, y, n) }
-func add(z, x, y *Elt) { addGeneric(z, x, y) }
-func sub(z, x, y *Elt) { subGeneric(z, x, y) }
-func addsub(x, y *Elt) { addsubGeneric(x, y) }
-func mul(z, x, y *Elt) { mulGeneric(z, x, y) }
-func sqr(z, x *Elt) { sqrGeneric(z, x) }
-func modp(z *Elt) { modpGeneric(z) }
diff --git a/vendor/github.com/cloudflare/circl/math/fp448/fp.go b/vendor/github.com/cloudflare/circl/math/fp448/fp.go
deleted file mode 100644
index a5e36600..00000000
--- a/vendor/github.com/cloudflare/circl/math/fp448/fp.go
+++ /dev/null
@@ -1,164 +0,0 @@
-// Package fp448 provides prime field arithmetic over GF(2^448-2^224-1).
-package fp448
-
-import (
- "errors"
-
- "github.com/cloudflare/circl/internal/conv"
-)
-
-// Size in bytes of an element.
-const Size = 56
-
-// Elt is a prime field element.
-type Elt [Size]byte
-
-func (e Elt) String() string { return conv.BytesLe2Hex(e[:]) }
-
-// p is the prime modulus 2^448-2^224-1.
-var p = Elt{
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-}
-
-// P returns the prime modulus 2^448-2^224-1.
-func P() Elt { return p }
-
-// ToBytes stores in b the little-endian byte representation of x.
-func ToBytes(b []byte, x *Elt) error {
- if len(b) != Size {
- return errors.New("wrong size")
- }
- Modp(x)
- copy(b, x[:])
- return nil
-}
-
-// IsZero returns true if x is equal to 0.
-func IsZero(x *Elt) bool { Modp(x); return *x == Elt{} }
-
-// IsOne returns true if x is equal to 1.
-func IsOne(x *Elt) bool { Modp(x); return *x == Elt{1} }
-
-// SetOne assigns x=1.
-func SetOne(x *Elt) { *x = Elt{1} }
-
-// One returns the 1 element.
-func One() (x Elt) { x = Elt{1}; return }
-
-// Neg calculates z = -x.
-func Neg(z, x *Elt) { Sub(z, &p, x) }
-
-// Modp ensures that z is between [0,p-1].
-func Modp(z *Elt) { Sub(z, z, &p) }
-
-// InvSqrt calculates z = sqrt(x/y) iff x/y is a quadratic-residue. If so,
-// isQR = true; otherwise, isQR = false, since x/y is a quadratic non-residue,
-// and z = sqrt(-x/y).
-func InvSqrt(z, x, y *Elt) (isQR bool) {
- // First note that x^(2(k+1)) = x^(p-1)/2 * x = legendre(x) * x
- // so that's x if x is a quadratic residue and -x otherwise.
- // Next, y^(6k+3) = y^(4k+2) * y^(2k+1) = y^(p-1) * y^((p-1)/2) = legendre(y).
- // So the z we compute satisfies z^2 y = x^(2(k+1)) y^(6k+3) = legendre(x)*legendre(y).
- // Thus if x and y are quadratic residues, then z is indeed sqrt(x/y).
- t0, t1 := &Elt{}, &Elt{}
- Mul(t0, x, y) // x*y
- Sqr(t1, y) // y^2
- Mul(t1, t0, t1) // x*y^3
- powPminus3div4(z, t1) // (x*y^3)^k
- Mul(z, z, t0) // z = x*y*(x*y^3)^k = x^(k+1) * y^(3k+1)
-
- // Check if x/y is a quadratic residue
- Sqr(t0, z) // z^2
- Mul(t0, t0, y) // y*z^2
- Sub(t0, t0, x) // y*z^2-x
- return IsZero(t0)
-}
-
-// Inv calculates z = 1/x mod p.
-func Inv(z, x *Elt) {
- // Calculates z = x^(4k+1) = x^(p-3+1) = x^(p-2) = x^-1, where k = (p-3)/4.
- t := &Elt{}
- powPminus3div4(t, x) // t = x^k
- Sqr(t, t) // t = x^2k
- Sqr(t, t) // t = x^4k
- Mul(z, t, x) // z = x^(4k+1)
-}
-
-// powPminus3div4 calculates z = x^k mod p, where k = (p-3)/4.
-func powPminus3div4(z, x *Elt) {
- x0, x1 := &Elt{}, &Elt{}
- Sqr(z, x)
- Mul(z, z, x)
- Sqr(x0, z)
- Mul(x0, x0, x)
- Sqr(z, x0)
- Sqr(z, z)
- Sqr(z, z)
- Mul(z, z, x0)
- Sqr(x1, z)
- for i := 0; i < 5; i++ {
- Sqr(x1, x1)
- }
- Mul(x1, x1, z)
- Sqr(z, x1)
- for i := 0; i < 11; i++ {
- Sqr(z, z)
- }
- Mul(z, z, x1)
- Sqr(z, z)
- Sqr(z, z)
- Sqr(z, z)
- Mul(z, z, x0)
- Sqr(x1, z)
- for i := 0; i < 26; i++ {
- Sqr(x1, x1)
- }
- Mul(x1, x1, z)
- Sqr(z, x1)
- for i := 0; i < 53; i++ {
- Sqr(z, z)
- }
- Mul(z, z, x1)
- Sqr(z, z)
- Sqr(z, z)
- Sqr(z, z)
- Mul(z, z, x0)
- Sqr(x1, z)
- for i := 0; i < 110; i++ {
- Sqr(x1, x1)
- }
- Mul(x1, x1, z)
- Sqr(z, x1)
- Mul(z, z, x)
- for i := 0; i < 223; i++ {
- Sqr(z, z)
- }
- Mul(z, z, x1)
-}
-
-// Cmov assigns y to x if n is 1.
-func Cmov(x, y *Elt, n uint) { cmov(x, y, n) }
-
-// Cswap interchanges x and y if n is 1.
-func Cswap(x, y *Elt, n uint) { cswap(x, y, n) }
-
-// Add calculates z = x+y mod p.
-func Add(z, x, y *Elt) { add(z, x, y) }
-
-// Sub calculates z = x-y mod p.
-func Sub(z, x, y *Elt) { sub(z, x, y) }
-
-// AddSub calculates (x,y) = (x+y mod p, x-y mod p).
-func AddSub(x, y *Elt) { addsub(x, y) }
-
-// Mul calculates z = x*y mod p.
-func Mul(z, x, y *Elt) { mul(z, x, y) }
-
-// Sqr calculates z = x^2 mod p.
-func Sqr(z, x *Elt) { sqr(z, x) }
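Inv's comment relies on the identity 4k+1 = p-2 for k = (p-3)/4, which holds because p = 2^448 - 2^224 - 1 is congruent to 3 (mod 4). A math/big sketch confirming it:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	one := big.NewInt(1)
	p := new(big.Int).Lsh(one, 448)
	p.Sub(p, new(big.Int).Lsh(one, 224))
	p.Sub(p, one) // p = 2^448 - 2^224 - 1

	k := new(big.Int).Sub(p, big.NewInt(3))
	k.Rsh(k, 2) // k = (p-3)/4, exact because p ≡ 3 (mod 4)

	lhs := new(big.Int).Lsh(k, 2)
	lhs.Add(lhs, one)                         // 4k + 1
	rhs := new(big.Int).Sub(p, big.NewInt(2)) // p - 2
	fmt.Println(lhs.Cmp(rhs) == 0)            // true, so x^(4k+1) = x^(p-2) = x^-1
}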
diff --git a/vendor/github.com/cloudflare/circl/math/fp448/fp_amd64.go b/vendor/github.com/cloudflare/circl/math/fp448/fp_amd64.go
deleted file mode 100644
index 6a12209a..00000000
--- a/vendor/github.com/cloudflare/circl/math/fp448/fp_amd64.go
+++ /dev/null
@@ -1,43 +0,0 @@
-//go:build amd64 && !purego
-// +build amd64,!purego
-
-package fp448
-
-import (
- "golang.org/x/sys/cpu"
-)
-
-var hasBmi2Adx = cpu.X86.HasBMI2 && cpu.X86.HasADX
-
-var _ = hasBmi2Adx
-
-func cmov(x, y *Elt, n uint) { cmovAmd64(x, y, n) }
-func cswap(x, y *Elt, n uint) { cswapAmd64(x, y, n) }
-func add(z, x, y *Elt) { addAmd64(z, x, y) }
-func sub(z, x, y *Elt) { subAmd64(z, x, y) }
-func addsub(x, y *Elt) { addsubAmd64(x, y) }
-func mul(z, x, y *Elt) { mulAmd64(z, x, y) }
-func sqr(z, x *Elt) { sqrAmd64(z, x) }
-
-/* Functions defined in fp_amd64.s */
-
-//go:noescape
-func cmovAmd64(x, y *Elt, n uint)
-
-//go:noescape
-func cswapAmd64(x, y *Elt, n uint)
-
-//go:noescape
-func addAmd64(z, x, y *Elt)
-
-//go:noescape
-func subAmd64(z, x, y *Elt)
-
-//go:noescape
-func addsubAmd64(x, y *Elt)
-
-//go:noescape
-func mulAmd64(z, x, y *Elt)
-
-//go:noescape
-func sqrAmd64(z, x *Elt)
diff --git a/vendor/github.com/cloudflare/circl/math/fp448/fp_amd64.h b/vendor/github.com/cloudflare/circl/math/fp448/fp_amd64.h
deleted file mode 100644
index 536fe5bd..00000000
--- a/vendor/github.com/cloudflare/circl/math/fp448/fp_amd64.h
+++ /dev/null
@@ -1,591 +0,0 @@
-// This code was imported from https://github.com/armfazh/rfc7748_precomputed
-
-// CHECK_BMI2ADX runs the bmi2adx code path if supported,
-// otherwise it falls back to the legacy code path.
-#define CHECK_BMI2ADX(label, legacy, bmi2adx) \
- CMPB ·hasBmi2Adx(SB), $0 \
- JE label \
- bmi2adx \
- RET \
- label: \
- legacy \
- RET
-
-// cselect is a conditional move
-// if b=1: it copies y into x;
-// if b=0: x remains with the same value;
-// if b<> 0,1: undefined.
-// Uses: AX, DX, FLAGS
-// Instr: x86_64, cmov
-#define cselect(x,y,b) \
- TESTQ b, b \
- MOVQ 0+x, AX; MOVQ 0+y, DX; CMOVQNE DX, AX; MOVQ AX, 0+x; \
- MOVQ 8+x, AX; MOVQ 8+y, DX; CMOVQNE DX, AX; MOVQ AX, 8+x; \
- MOVQ 16+x, AX; MOVQ 16+y, DX; CMOVQNE DX, AX; MOVQ AX, 16+x; \
- MOVQ 24+x, AX; MOVQ 24+y, DX; CMOVQNE DX, AX; MOVQ AX, 24+x; \
- MOVQ 32+x, AX; MOVQ 32+y, DX; CMOVQNE DX, AX; MOVQ AX, 32+x; \
- MOVQ 40+x, AX; MOVQ 40+y, DX; CMOVQNE DX, AX; MOVQ AX, 40+x; \
- MOVQ 48+x, AX; MOVQ 48+y, DX; CMOVQNE DX, AX; MOVQ AX, 48+x;
-
-// cswap is a conditional swap
-// if b=1: x,y <- y,x;
-// if b=0: x,y remain with the same values;
-// if b<> 0,1: undefined.
-// Uses: AX, DX, R8, FLAGS
-// Instr: x86_64, cmov
-#define cswap(x,y,b) \
- TESTQ b, b \
- MOVQ 0+x, AX; MOVQ AX, R8; MOVQ 0+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 0+x; MOVQ DX, 0+y; \
- MOVQ 8+x, AX; MOVQ AX, R8; MOVQ 8+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 8+x; MOVQ DX, 8+y; \
- MOVQ 16+x, AX; MOVQ AX, R8; MOVQ 16+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 16+x; MOVQ DX, 16+y; \
- MOVQ 24+x, AX; MOVQ AX, R8; MOVQ 24+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 24+x; MOVQ DX, 24+y; \
- MOVQ 32+x, AX; MOVQ AX, R8; MOVQ 32+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 32+x; MOVQ DX, 32+y; \
- MOVQ 40+x, AX; MOVQ AX, R8; MOVQ 40+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 40+x; MOVQ DX, 40+y; \
- MOVQ 48+x, AX; MOVQ AX, R8; MOVQ 48+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 48+x; MOVQ DX, 48+y;
-
-// additionLeg adds x and y and stores in z
-// Uses: AX, DX, R8-R14, FLAGS
-// Instr: x86_64
-#define additionLeg(z,x,y) \
- MOVQ 0+x, R8; ADDQ 0+y, R8; \
- MOVQ 8+x, R9; ADCQ 8+y, R9; \
- MOVQ 16+x, R10; ADCQ 16+y, R10; \
- MOVQ 24+x, R11; ADCQ 24+y, R11; \
- MOVQ 32+x, R12; ADCQ 32+y, R12; \
- MOVQ 40+x, R13; ADCQ 40+y, R13; \
- MOVQ 48+x, R14; ADCQ 48+y, R14; \
- MOVQ $0, AX; ADCQ $0, AX; \
- MOVQ AX, DX; \
- SHLQ $32, DX; \
- ADDQ AX, R8; MOVQ $0, AX; \
- ADCQ $0, R9; \
- ADCQ $0, R10; \
- ADCQ DX, R11; \
- ADCQ $0, R12; \
- ADCQ $0, R13; \
- ADCQ $0, R14; \
- ADCQ $0, AX; \
- MOVQ AX, DX; \
- SHLQ $32, DX; \
- ADDQ AX, R8; MOVQ R8, 0+z; \
- ADCQ $0, R9; MOVQ R9, 8+z; \
- ADCQ $0, R10; MOVQ R10, 16+z; \
- ADCQ DX, R11; MOVQ R11, 24+z; \
- ADCQ $0, R12; MOVQ R12, 32+z; \
- ADCQ $0, R13; MOVQ R13, 40+z; \
- ADCQ $0, R14; MOVQ R14, 48+z;
-
-
-// additionAdx adds x and y and stores in z
-// Uses: AX, DX, R8-R15, FLAGS
-// Instr: x86_64, adx
-#define additionAdx(z,x,y) \
- MOVL $32, R15; \
- XORL DX, DX; \
- MOVQ 0+x, R8; ADCXQ 0+y, R8; \
- MOVQ 8+x, R9; ADCXQ 8+y, R9; \
- MOVQ 16+x, R10; ADCXQ 16+y, R10; \
- MOVQ 24+x, R11; ADCXQ 24+y, R11; \
- MOVQ 32+x, R12; ADCXQ 32+y, R12; \
- MOVQ 40+x, R13; ADCXQ 40+y, R13; \
- MOVQ 48+x, R14; ADCXQ 48+y, R14; \
- ;;;;;;;;;;;;;;; ADCXQ DX, DX; \
- XORL AX, AX; \
- ADCXQ DX, R8; SHLXQ R15, DX, DX; \
- ADCXQ AX, R9; \
- ADCXQ AX, R10; \
- ADCXQ DX, R11; \
- ADCXQ AX, R12; \
- ADCXQ AX, R13; \
- ADCXQ AX, R14; \
- ADCXQ AX, AX; \
- XORL DX, DX; \
- ADCXQ AX, R8; MOVQ R8, 0+z; SHLXQ R15, AX, AX; \
- ADCXQ DX, R9; MOVQ R9, 8+z; \
- ADCXQ DX, R10; MOVQ R10, 16+z; \
- ADCXQ AX, R11; MOVQ R11, 24+z; \
- ADCXQ DX, R12; MOVQ R12, 32+z; \
- ADCXQ DX, R13; MOVQ R13, 40+z; \
- ADCXQ DX, R14; MOVQ R14, 48+z;
-
-// subtraction subtracts y from x and stores in z
-// Uses: AX, DX, R8-R14, FLAGS
-// Instr: x86_64
-#define subtraction(z,x,y) \
- MOVQ 0+x, R8; SUBQ 0+y, R8; \
- MOVQ 8+x, R9; SBBQ 8+y, R9; \
- MOVQ 16+x, R10; SBBQ 16+y, R10; \
- MOVQ 24+x, R11; SBBQ 24+y, R11; \
- MOVQ 32+x, R12; SBBQ 32+y, R12; \
- MOVQ 40+x, R13; SBBQ 40+y, R13; \
- MOVQ 48+x, R14; SBBQ 48+y, R14; \
- MOVQ $0, AX; SETCS AX; \
- MOVQ AX, DX; \
- SHLQ $32, DX; \
- SUBQ AX, R8; MOVQ $0, AX; \
- SBBQ $0, R9; \
- SBBQ $0, R10; \
- SBBQ DX, R11; \
- SBBQ $0, R12; \
- SBBQ $0, R13; \
- SBBQ $0, R14; \
- SETCS AX; \
- MOVQ AX, DX; \
- SHLQ $32, DX; \
- SUBQ AX, R8; MOVQ R8, 0+z; \
- SBBQ $0, R9; MOVQ R9, 8+z; \
- SBBQ $0, R10; MOVQ R10, 16+z; \
- SBBQ DX, R11; MOVQ R11, 24+z; \
- SBBQ $0, R12; MOVQ R12, 32+z; \
- SBBQ $0, R13; MOVQ R13, 40+z; \
- SBBQ $0, R14; MOVQ R14, 48+z;
-
-// maddBmi2Adx multiplies x and y and accumulates in z
-// Uses: AX, DX, R15, FLAGS
-// Instr: x86_64, bmi2, adx
-#define maddBmi2Adx(z,x,y,i,r0,r1,r2,r3,r4,r5,r6) \
- MOVQ i+y, DX; XORL AX, AX; \
- MULXQ 0+x, AX, R8; ADOXQ AX, r0; ADCXQ R8, r1; MOVQ r0,i+z; \
- MULXQ 8+x, AX, r0; ADOXQ AX, r1; ADCXQ r0, r2; MOVQ $0, R8; \
- MULXQ 16+x, AX, r0; ADOXQ AX, r2; ADCXQ r0, r3; \
- MULXQ 24+x, AX, r0; ADOXQ AX, r3; ADCXQ r0, r4; \
- MULXQ 32+x, AX, r0; ADOXQ AX, r4; ADCXQ r0, r5; \
- MULXQ 40+x, AX, r0; ADOXQ AX, r5; ADCXQ r0, r6; \
- MULXQ 48+x, AX, r0; ADOXQ AX, r6; ADCXQ R8, r0; \
- ;;;;;;;;;;;;;;;;;;; ADOXQ R8, r0;
-
-// integerMulAdx multiplies x and y and stores in z
-// Uses: AX, DX, R8-R15, FLAGS
-// Instr: x86_64, bmi2, adx
-#define integerMulAdx(z,x,y) \
- MOVL $0,R15; \
- MOVQ 0+y, DX; XORL AX, AX; MOVQ $0, R8; \
- MULXQ 0+x, AX, R9; MOVQ AX, 0+z; \
- MULXQ 8+x, AX, R10; ADCXQ AX, R9; \
- MULXQ 16+x, AX, R11; ADCXQ AX, R10; \
- MULXQ 24+x, AX, R12; ADCXQ AX, R11; \
- MULXQ 32+x, AX, R13; ADCXQ AX, R12; \
- MULXQ 40+x, AX, R14; ADCXQ AX, R13; \
- MULXQ 48+x, AX, R15; ADCXQ AX, R14; \
- ;;;;;;;;;;;;;;;;;;;; ADCXQ R8, R15; \
- maddBmi2Adx(z,x,y, 8, R9,R10,R11,R12,R13,R14,R15) \
- maddBmi2Adx(z,x,y,16,R10,R11,R12,R13,R14,R15, R9) \
- maddBmi2Adx(z,x,y,24,R11,R12,R13,R14,R15, R9,R10) \
- maddBmi2Adx(z,x,y,32,R12,R13,R14,R15, R9,R10,R11) \
- maddBmi2Adx(z,x,y,40,R13,R14,R15, R9,R10,R11,R12) \
- maddBmi2Adx(z,x,y,48,R14,R15, R9,R10,R11,R12,R13) \
- MOVQ R15, 56+z; \
- MOVQ R9, 64+z; \
- MOVQ R10, 72+z; \
- MOVQ R11, 80+z; \
- MOVQ R12, 88+z; \
- MOVQ R13, 96+z; \
- MOVQ R14, 104+z;
-
-// maddLegacy multiplies x and y and accumulates in z
-// Uses: AX, DX, R15, FLAGS
-// Instr: x86_64
-#define maddLegacy(z,x,y,i) \
- MOVQ i+y, R15; \
- MOVQ 0+x, AX; MULQ R15; MOVQ AX, R8; ;;;;;;;;;;;; MOVQ DX, R9; \
- MOVQ 8+x, AX; MULQ R15; ADDQ AX, R9; ADCQ $0, DX; MOVQ DX, R10; \
- MOVQ 16+x, AX; MULQ R15; ADDQ AX, R10; ADCQ $0, DX; MOVQ DX, R11; \
- MOVQ 24+x, AX; MULQ R15; ADDQ AX, R11; ADCQ $0, DX; MOVQ DX, R12; \
- MOVQ 32+x, AX; MULQ R15; ADDQ AX, R12; ADCQ $0, DX; MOVQ DX, R13; \
- MOVQ 40+x, AX; MULQ R15; ADDQ AX, R13; ADCQ $0, DX; MOVQ DX, R14; \
- MOVQ 48+x, AX; MULQ R15; ADDQ AX, R14; ADCQ $0, DX; \
- ADDQ 0+i+z, R8; MOVQ R8, 0+i+z; \
- ADCQ 8+i+z, R9; MOVQ R9, 8+i+z; \
- ADCQ 16+i+z, R10; MOVQ R10, 16+i+z; \
- ADCQ 24+i+z, R11; MOVQ R11, 24+i+z; \
- ADCQ 32+i+z, R12; MOVQ R12, 32+i+z; \
- ADCQ 40+i+z, R13; MOVQ R13, 40+i+z; \
- ADCQ 48+i+z, R14; MOVQ R14, 48+i+z; \
- ADCQ $0, DX; MOVQ DX, 56+i+z;
-
-// integerMulLeg multiplies x and y and stores in z
-// Uses: AX, DX, R8-R15, FLAGS
-// Instr: x86_64
-#define integerMulLeg(z,x,y) \
- MOVQ 0+y, R15; \
- MOVQ 0+x, AX; MULQ R15; MOVQ AX, 0+z; ;;;;;;;;;;;; MOVQ DX, R8; \
- MOVQ 8+x, AX; MULQ R15; ADDQ AX, R8; ADCQ $0, DX; MOVQ DX, R9; MOVQ R8, 8+z; \
- MOVQ 16+x, AX; MULQ R15; ADDQ AX, R9; ADCQ $0, DX; MOVQ DX, R10; MOVQ R9, 16+z; \
- MOVQ 24+x, AX; MULQ R15; ADDQ AX, R10; ADCQ $0, DX; MOVQ DX, R11; MOVQ R10, 24+z; \
- MOVQ 32+x, AX; MULQ R15; ADDQ AX, R11; ADCQ $0, DX; MOVQ DX, R12; MOVQ R11, 32+z; \
- MOVQ 40+x, AX; MULQ R15; ADDQ AX, R12; ADCQ $0, DX; MOVQ DX, R13; MOVQ R12, 40+z; \
- MOVQ 48+x, AX; MULQ R15; ADDQ AX, R13; ADCQ $0, DX; MOVQ DX,56+z; MOVQ R13, 48+z; \
- maddLegacy(z,x,y, 8) \
- maddLegacy(z,x,y,16) \
- maddLegacy(z,x,y,24) \
- maddLegacy(z,x,y,32) \
- maddLegacy(z,x,y,40) \
- maddLegacy(z,x,y,48)
-
-// integerSqrLeg squares x and stores in z
-// Uses: AX, CX, DX, R8-R15, FLAGS
-// Instr: x86_64
-#define integerSqrLeg(z,x) \
- XORL R15, R15; \
- MOVQ 0+x, CX; \
- MOVQ CX, AX; MULQ CX; MOVQ AX, 0+z; MOVQ DX, R8; \
- ADDQ CX, CX; ADCQ $0, R15; \
- MOVQ 8+x, AX; MULQ CX; ADDQ AX, R8; ADCQ $0, DX; MOVQ DX, R9; MOVQ R8, 8+z; \
- MOVQ 16+x, AX; MULQ CX; ADDQ AX, R9; ADCQ $0, DX; MOVQ DX, R10; \
- MOVQ 24+x, AX; MULQ CX; ADDQ AX, R10; ADCQ $0, DX; MOVQ DX, R11; \
- MOVQ 32+x, AX; MULQ CX; ADDQ AX, R11; ADCQ $0, DX; MOVQ DX, R12; \
- MOVQ 40+x, AX; MULQ CX; ADDQ AX, R12; ADCQ $0, DX; MOVQ DX, R13; \
- MOVQ 48+x, AX; MULQ CX; ADDQ AX, R13; ADCQ $0, DX; MOVQ DX, R14; \
- \
- MOVQ 8+x, CX; \
- MOVQ CX, AX; ADDQ R15, CX; MOVQ $0, R15; ADCQ $0, R15; \
- ;;;;;;;;;;;;;; MULQ CX; ADDQ AX, R9; ADCQ $0, DX; MOVQ R9,16+z; \
- MOVQ R15, AX; NEGQ AX; ANDQ 8+x, AX; ADDQ AX, DX; ADCQ $0, R11; MOVQ DX, R8; \
- ADDQ 8+x, CX; ADCQ $0, R15; \
- MOVQ 16+x, AX; MULQ CX; ADDQ AX, R10; ADCQ $0, DX; ADDQ R8, R10; ADCQ $0, DX; MOVQ DX, R8; MOVQ R10, 24+z; \
- MOVQ 24+x, AX; MULQ CX; ADDQ AX, R11; ADCQ $0, DX; ADDQ R8, R11; ADCQ $0, DX; MOVQ DX, R8; \
- MOVQ 32+x, AX; MULQ CX; ADDQ AX, R12; ADCQ $0, DX; ADDQ R8, R12; ADCQ $0, DX; MOVQ DX, R8; \
- MOVQ 40+x, AX; MULQ CX; ADDQ AX, R13; ADCQ $0, DX; ADDQ R8, R13; ADCQ $0, DX; MOVQ DX, R8; \
- MOVQ 48+x, AX; MULQ CX; ADDQ AX, R14; ADCQ $0, DX; ADDQ R8, R14; ADCQ $0, DX; MOVQ DX, R9; \
- \
- MOVQ 16+x, CX; \
- MOVQ CX, AX; ADDQ R15, CX; MOVQ $0, R15; ADCQ $0, R15; \
- ;;;;;;;;;;;;;; MULQ CX; ADDQ AX, R11; ADCQ $0, DX; MOVQ R11, 32+z; \
- MOVQ R15, AX; NEGQ AX; ANDQ 16+x,AX; ADDQ AX, DX; ADCQ $0, R13; MOVQ DX, R8; \
- ADDQ 16+x, CX; ADCQ $0, R15; \
- MOVQ 24+x, AX; MULQ CX; ADDQ AX, R12; ADCQ $0, DX; ADDQ R8, R12; ADCQ $0, DX; MOVQ DX, R8; MOVQ R12, 40+z; \
- MOVQ 32+x, AX; MULQ CX; ADDQ AX, R13; ADCQ $0, DX; ADDQ R8, R13; ADCQ $0, DX; MOVQ DX, R8; \
- MOVQ 40+x, AX; MULQ CX; ADDQ AX, R14; ADCQ $0, DX; ADDQ R8, R14; ADCQ $0, DX; MOVQ DX, R8; \
- MOVQ 48+x, AX; MULQ CX; ADDQ AX, R9; ADCQ $0, DX; ADDQ R8, R9; ADCQ $0, DX; MOVQ DX,R10; \
- \
- MOVQ 24+x, CX; \
- MOVQ CX, AX; ADDQ R15, CX; MOVQ $0, R15; ADCQ $0, R15; \
- ;;;;;;;;;;;;;; MULQ CX; ADDQ AX, R13; ADCQ $0, DX; MOVQ R13, 48+z; \
- MOVQ R15, AX; NEGQ AX; ANDQ 24+x,AX; ADDQ AX, DX; ADCQ $0, R9; MOVQ DX, R8; \
- ADDQ 24+x, CX; ADCQ $0, R15; \
- MOVQ 32+x, AX; MULQ CX; ADDQ AX, R14; ADCQ $0, DX; ADDQ R8, R14; ADCQ $0, DX; MOVQ DX, R8; MOVQ R14, 56+z; \
- MOVQ 40+x, AX; MULQ CX; ADDQ AX, R9; ADCQ $0, DX; ADDQ R8, R9; ADCQ $0, DX; MOVQ DX, R8; \
- MOVQ 48+x, AX; MULQ CX; ADDQ AX, R10; ADCQ $0, DX; ADDQ R8, R10; ADCQ $0, DX; MOVQ DX,R11; \
- \
- MOVQ 32+x, CX; \
- MOVQ CX, AX; ADDQ R15, CX; MOVQ $0, R15; ADCQ $0, R15; \
- ;;;;;;;;;;;;;; MULQ CX; ADDQ AX, R9; ADCQ $0, DX; MOVQ R9, 64+z; \
- MOVQ R15, AX; NEGQ AX; ANDQ 32+x,AX; ADDQ AX, DX; ADCQ $0, R11; MOVQ DX, R8; \
- ADDQ 32+x, CX; ADCQ $0, R15; \
- MOVQ 40+x, AX; MULQ CX; ADDQ AX, R10; ADCQ $0, DX; ADDQ R8, R10; ADCQ $0, DX; MOVQ DX, R8; MOVQ R10, 72+z; \
- MOVQ 48+x, AX; MULQ CX; ADDQ AX, R11; ADCQ $0, DX; ADDQ R8, R11; ADCQ $0, DX; MOVQ DX,R12; \
- \
- XORL R13, R13; \
- XORL R14, R14; \
- MOVQ 40+x, CX; \
- MOVQ CX, AX; ADDQ R15, CX; MOVQ $0, R15; ADCQ $0, R15; \
- ;;;;;;;;;;;;;; MULQ CX; ADDQ AX, R11; ADCQ $0, DX; MOVQ R11, 80+z; \
- MOVQ R15, AX; NEGQ AX; ANDQ 40+x,AX; ADDQ AX, DX; ADCQ $0, R13; MOVQ DX, R8; \
- ADDQ 40+x, CX; ADCQ $0, R15; \
- MOVQ 48+x, AX; MULQ CX; ADDQ AX, R12; ADCQ $0, DX; ADDQ R8, R12; ADCQ $0, DX; MOVQ DX, R8; MOVQ R12, 88+z; \
- ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ADDQ R8, R13; ADCQ $0,R14; \
- \
- XORL R9, R9; \
- MOVQ 48+x, CX; \
- MOVQ CX, AX; ADDQ R15, CX; MOVQ $0, R15; ADCQ $0, R15; \
- ;;;;;;;;;;;;;; MULQ CX; ADDQ AX, R13; ADCQ $0, DX; MOVQ R13, 96+z; \
- MOVQ R15, AX; NEGQ AX; ANDQ 48+x,AX; ADDQ AX, DX; ADCQ $0, R9; MOVQ DX, R8; \
- ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ADDQ R8,R14; ADCQ $0, R9; MOVQ R14, 104+z;
-
-
-// integerSqrAdx squares x and stores in z
-// Uses: AX, CX, DX, R8-R15, FLAGS
-// Instr: x86_64, bmi2, adx
-#define integerSqrAdx(z,x) \
- XORL R15, R15; \
- MOVQ 0+x, DX; \
- ;;;;;;;;;;;;;; MULXQ DX, AX, R8; MOVQ AX, 0+z; \
- ADDQ DX, DX; ADCQ $0, R15; CLC; \
- MULXQ 8+x, AX, R9; ADCXQ AX, R8; MOVQ R8, 8+z; \
- MULXQ 16+x, AX, R10; ADCXQ AX, R9; MOVQ $0, R8;\
- MULXQ 24+x, AX, R11; ADCXQ AX, R10; \
- MULXQ 32+x, AX, R12; ADCXQ AX, R11; \
- MULXQ 40+x, AX, R13; ADCXQ AX, R12; \
- MULXQ 48+x, AX, R14; ADCXQ AX, R13; \
- ;;;;;;;;;;;;;;;;;;;; ADCXQ R8, R14; \
- \
- MOVQ 8+x, DX; \
- MOVQ DX, AX; ADDQ R15, DX; MOVQ $0, R15; ADCQ $0, R15; \
- MULXQ AX, AX, CX; \
- MOVQ R15, R8; NEGQ R8; ANDQ 8+x, R8; \
- ADDQ AX, R9; MOVQ R9, 16+z; \
- ADCQ CX, R8; \
- ADCQ $0, R11; \
- ADDQ 8+x, DX; \
- ADCQ $0, R15; \
- XORL R9, R9; ;;;;;;;;;;;;;;;;;;;;; ADOXQ R8, R10; \
- MULXQ 16+x, AX, CX; ADCXQ AX, R10; ADOXQ CX, R11; MOVQ R10, 24+z; \
- MULXQ 24+x, AX, CX; ADCXQ AX, R11; ADOXQ CX, R12; MOVQ $0, R10; \
- MULXQ 32+x, AX, CX; ADCXQ AX, R12; ADOXQ CX, R13; \
- MULXQ 40+x, AX, CX; ADCXQ AX, R13; ADOXQ CX, R14; \
- MULXQ 48+x, AX, CX; ADCXQ AX, R14; ADOXQ CX, R9; \
- ;;;;;;;;;;;;;;;;;;; ADCXQ R10, R9; \
- \
- MOVQ 16+x, DX; \
- MOVQ DX, AX; ADDQ R15, DX; MOVQ $0, R15; ADCQ $0, R15; \
- MULXQ AX, AX, CX; \
- MOVQ R15, R8; NEGQ R8; ANDQ 16+x, R8; \
- ADDQ AX, R11; MOVQ R11, 32+z; \
- ADCQ CX, R8; \
- ADCQ $0, R13; \
- ADDQ 16+x, DX; \
- ADCQ $0, R15; \
- XORL R11, R11; ;;;;;;;;;;;;;;;;;;; ADOXQ R8, R12; \
- MULXQ 24+x, AX, CX; ADCXQ AX, R12; ADOXQ CX, R13; MOVQ R12, 40+z; \
- MULXQ 32+x, AX, CX; ADCXQ AX, R13; ADOXQ CX, R14; MOVQ $0, R12; \
- MULXQ 40+x, AX, CX; ADCXQ AX, R14; ADOXQ CX, R9; \
- MULXQ 48+x, AX, CX; ADCXQ AX, R9; ADOXQ CX, R10; \
- ;;;;;;;;;;;;;;;;;;; ADCXQ R11,R10; \
- \
- MOVQ 24+x, DX; \
- MOVQ DX, AX; ADDQ R15, DX; MOVQ $0, R15; ADCQ $0, R15; \
- MULXQ AX, AX, CX; \
- MOVQ R15, R8; NEGQ R8; ANDQ 24+x, R8; \
- ADDQ AX, R13; MOVQ R13, 48+z; \
- ADCQ CX, R8; \
- ADCQ $0, R9; \
- ADDQ 24+x, DX; \
- ADCQ $0, R15; \
- XORL R13, R13; ;;;;;;;;;;;;;;;;;;; ADOXQ R8, R14; \
- MULXQ 32+x, AX, CX; ADCXQ AX, R14; ADOXQ CX, R9; MOVQ R14, 56+z; \
- MULXQ 40+x, AX, CX; ADCXQ AX, R9; ADOXQ CX, R10; MOVQ $0, R14; \
- MULXQ 48+x, AX, CX; ADCXQ AX, R10; ADOXQ CX, R11; \
- ;;;;;;;;;;;;;;;;;;; ADCXQ R12,R11; \
- \
- MOVQ 32+x, DX; \
- MOVQ DX, AX; ADDQ R15, DX; MOVQ $0, R15; ADCQ $0, R15; \
- MULXQ AX, AX, CX; \
- MOVQ R15, R8; NEGQ R8; ANDQ 32+x, R8; \
- ADDQ AX, R9; MOVQ R9, 64+z; \
- ADCQ CX, R8; \
- ADCQ $0, R11; \
- ADDQ 32+x, DX; \
- ADCQ $0, R15; \
- XORL R9, R9; ;;;;;;;;;;;;;;;;;;;;; ADOXQ R8, R10; \
- MULXQ 40+x, AX, CX; ADCXQ AX, R10; ADOXQ CX, R11; MOVQ R10, 72+z; \
- MULXQ 48+x, AX, CX; ADCXQ AX, R11; ADOXQ CX, R12; \
- ;;;;;;;;;;;;;;;;;;; ADCXQ R13,R12; \
- \
- MOVQ 40+x, DX; \
- MOVQ DX, AX; ADDQ R15, DX; MOVQ $0, R15; ADCQ $0, R15; \
- MULXQ AX, AX, CX; \
- MOVQ R15, R8; NEGQ R8; ANDQ 40+x, R8; \
- ADDQ AX, R11; MOVQ R11, 80+z; \
- ADCQ CX, R8; \
- ADCQ $0, R13; \
- ADDQ 40+x, DX; \
- ADCQ $0, R15; \
- XORL R11, R11; ;;;;;;;;;;;;;;;;;;; ADOXQ R8, R12; \
- MULXQ 48+x, AX, CX; ADCXQ AX, R12; ADOXQ CX, R13; MOVQ R12, 88+z; \
- ;;;;;;;;;;;;;;;;;;; ADCXQ R14,R13; \
- \
- MOVQ 48+x, DX; \
- MOVQ DX, AX; ADDQ R15, DX; MOVQ $0, R15; ADCQ $0, R15; \
- MULXQ AX, AX, CX; \
- MOVQ R15, R8; NEGQ R8; ANDQ 48+x, R8; \
- XORL R10, R10; ;;;;;;;;;;;;;; ADOXQ CX, R14; \
- ;;;;;;;;;;;;;; ADCXQ AX, R13; ;;;;;;;;;;;;;; MOVQ R13, 96+z; \
- ;;;;;;;;;;;;;; ADCXQ R8, R14; MOVQ R14, 104+z;
-
-// reduceFromDoubleLeg finds z congruent to x modulo p such that z < 2^448 and stores it in z
-// Uses: AX, R8-R15, FLAGS
-// Instr: x86_64
-#define reduceFromDoubleLeg(z,x) \
- /* ( ,2C13,2C12,2C11,2C10|C10,C9,C8, C7) + (C6,...,C0) */ \
- /* (r14, r13, r12, r11, r10,r9,r8,r15) */ \
- MOVQ 80+x,AX; MOVQ AX,R10; \
- MOVQ $0xFFFFFFFF00000000, R8; \
- ANDQ R8,R10; \
- \
- MOVQ $0,R14; \
- MOVQ 104+x,R13; SHLQ $1,R13,R14; \
- MOVQ 96+x,R12; SHLQ $1,R12,R13; \
- MOVQ 88+x,R11; SHLQ $1,R11,R12; \
- MOVQ 72+x, R9; SHLQ $1,R10,R11; \
- MOVQ 64+x, R8; SHLQ $1,R10; \
- MOVQ $0xFFFFFFFF,R15; ANDQ R15,AX; ORQ AX,R10; \
- MOVQ 56+x,R15; \
- \
- ADDQ 0+x,R15; MOVQ R15, 0+z; MOVQ 56+x,R15; \
- ADCQ 8+x, R8; MOVQ R8, 8+z; MOVQ 64+x, R8; \
- ADCQ 16+x, R9; MOVQ R9,16+z; MOVQ 72+x, R9; \
- ADCQ 24+x,R10; MOVQ R10,24+z; MOVQ 80+x,R10; \
- ADCQ 32+x,R11; MOVQ R11,32+z; MOVQ 88+x,R11; \
- ADCQ 40+x,R12; MOVQ R12,40+z; MOVQ 96+x,R12; \
- ADCQ 48+x,R13; MOVQ R13,48+z; MOVQ 104+x,R13; \
- ADCQ $0,R14; \
- /* (c10c9,c9c8,c8c7,c7c13,c13c12,c12c11,c11c10) + (c6,...,c0) */ \
- /* ( r9, r8, r15, r13, r12, r11, r10) */ \
- MOVQ R10, AX; \
- SHRQ $32,R11,R10; \
- SHRQ $32,R12,R11; \
- SHRQ $32,R13,R12; \
- SHRQ $32,R15,R13; \
- SHRQ $32, R8,R15; \
- SHRQ $32, R9, R8; \
- SHRQ $32, AX, R9; \
- \
- ADDQ 0+z,R10; \
- ADCQ 8+z,R11; \
- ADCQ 16+z,R12; \
- ADCQ 24+z,R13; \
- ADCQ 32+z,R15; \
- ADCQ 40+z, R8; \
- ADCQ 48+z, R9; \
- ADCQ $0,R14; \
- /* ( c7) + (c6,...,c0) */ \
- /* (r14) */ \
- MOVQ R14, AX; SHLQ $32, AX; \
- ADDQ R14,R10; MOVQ $0,R14; \
- ADCQ $0,R11; \
- ADCQ $0,R12; \
- ADCQ AX,R13; \
- ADCQ $0,R15; \
- ADCQ $0, R8; \
- ADCQ $0, R9; \
- ADCQ $0,R14; \
- /* ( c7) + (c6,...,c0) */ \
- /* (r14) */ \
- MOVQ R14, AX; SHLQ $32,AX; \
- ADDQ R14,R10; MOVQ R10, 0+z; \
- ADCQ $0,R11; MOVQ R11, 8+z; \
- ADCQ $0,R12; MOVQ R12,16+z; \
- ADCQ AX,R13; MOVQ R13,24+z; \
- ADCQ $0,R15; MOVQ R15,32+z; \
- ADCQ $0, R8; MOVQ R8,40+z; \
- ADCQ $0, R9; MOVQ R9,48+z;
-
-// reduceFromDoubleAdx finds z congruent to x modulo p such that z < 2^448 and stores it in z
-// Uses: AX, R8-R15, FLAGS
-// Instr: x86_64, adx
-#define reduceFromDoubleAdx(z,x) \
- /* ( ,2C13,2C12,2C11,2C10|C10,C9,C8, C7) + (C6,...,C0) */ \
- /* (r14, r13, r12, r11, r10,r9,r8,r15) */ \
- MOVQ 80+x,AX; MOVQ AX,R10; \
- MOVQ $0xFFFFFFFF00000000, R8; \
- ANDQ R8,R10; \
- \
- MOVQ $0,R14; \
- MOVQ 104+x,R13; SHLQ $1,R13,R14; \
- MOVQ 96+x,R12; SHLQ $1,R12,R13; \
- MOVQ 88+x,R11; SHLQ $1,R11,R12; \
- MOVQ 72+x, R9; SHLQ $1,R10,R11; \
- MOVQ 64+x, R8; SHLQ $1,R10; \
- MOVQ $0xFFFFFFFF,R15; ANDQ R15,AX; ORQ AX,R10; \
- MOVQ 56+x,R15; \
- \
- XORL AX,AX; \
- ADCXQ 0+x,R15; MOVQ R15, 0+z; MOVQ 56+x,R15; \
- ADCXQ 8+x, R8; MOVQ R8, 8+z; MOVQ 64+x, R8; \
- ADCXQ 16+x, R9; MOVQ R9,16+z; MOVQ 72+x, R9; \
- ADCXQ 24+x,R10; MOVQ R10,24+z; MOVQ 80+x,R10; \
- ADCXQ 32+x,R11; MOVQ R11,32+z; MOVQ 88+x,R11; \
- ADCXQ 40+x,R12; MOVQ R12,40+z; MOVQ 96+x,R12; \
- ADCXQ 48+x,R13; MOVQ R13,48+z; MOVQ 104+x,R13; \
- ADCXQ AX,R14; \
- /* (c10c9,c9c8,c8c7,c7c13,c13c12,c12c11,c11c10) + (c6,...,c0) */ \
- /* ( r9, r8, r15, r13, r12, r11, r10) */ \
- MOVQ R10, AX; \
- SHRQ $32,R11,R10; \
- SHRQ $32,R12,R11; \
- SHRQ $32,R13,R12; \
- SHRQ $32,R15,R13; \
- SHRQ $32, R8,R15; \
- SHRQ $32, R9, R8; \
- SHRQ $32, AX, R9; \
- \
- XORL AX,AX; \
- ADCXQ 0+z,R10; \
- ADCXQ 8+z,R11; \
- ADCXQ 16+z,R12; \
- ADCXQ 24+z,R13; \
- ADCXQ 32+z,R15; \
- ADCXQ 40+z, R8; \
- ADCXQ 48+z, R9; \
- ADCXQ AX,R14; \
- /* ( c7) + (c6,...,c0) */ \
- /* (r14) */ \
- MOVQ R14, AX; SHLQ $32, AX; \
- CLC; \
- ADCXQ R14,R10; MOVQ $0,R14; \
- ADCXQ R14,R11; \
- ADCXQ R14,R12; \
- ADCXQ AX,R13; \
- ADCXQ R14,R15; \
- ADCXQ R14, R8; \
- ADCXQ R14, R9; \
- ADCXQ R14,R14; \
- /* ( c7) + (c6,...,c0) */ \
- /* (r14) */ \
- MOVQ R14, AX; SHLQ $32, AX; \
- CLC; \
- ADCXQ R14,R10; MOVQ R10, 0+z; MOVQ $0,R14; \
- ADCXQ R14,R11; MOVQ R11, 8+z; \
- ADCXQ R14,R12; MOVQ R12,16+z; \
- ADCXQ AX,R13; MOVQ R13,24+z; \
- ADCXQ R14,R15; MOVQ R15,32+z; \
- ADCXQ R14, R8; MOVQ R8,40+z; \
- ADCXQ R14, R9; MOVQ R9,48+z;
-
-// addSub calculates two operations: x,y = x+y,x-y
-// Uses: AX, DX, R8-R15, FLAGS
-#define addSub(x,y) \
- MOVQ 0+x, R8; ADDQ 0+y, R8; \
- MOVQ 8+x, R9; ADCQ 8+y, R9; \
- MOVQ 16+x, R10; ADCQ 16+y, R10; \
- MOVQ 24+x, R11; ADCQ 24+y, R11; \
- MOVQ 32+x, R12; ADCQ 32+y, R12; \
- MOVQ 40+x, R13; ADCQ 40+y, R13; \
- MOVQ 48+x, R14; ADCQ 48+y, R14; \
- MOVQ $0, AX; ADCQ $0, AX; \
- MOVQ AX, DX; \
- SHLQ $32, DX; \
- ADDQ AX, R8; MOVQ $0, AX; \
- ADCQ $0, R9; \
- ADCQ $0, R10; \
- ADCQ DX, R11; \
- ADCQ $0, R12; \
- ADCQ $0, R13; \
- ADCQ $0, R14; \
- ADCQ $0, AX; \
- MOVQ AX, DX; \
- SHLQ $32, DX; \
- ADDQ AX, R8; MOVQ 0+x,AX; MOVQ R8, 0+x; MOVQ AX, R8; \
- ADCQ $0, R9; MOVQ 8+x,AX; MOVQ R9, 8+x; MOVQ AX, R9; \
- ADCQ $0, R10; MOVQ 16+x,AX; MOVQ R10, 16+x; MOVQ AX, R10; \
- ADCQ DX, R11; MOVQ 24+x,AX; MOVQ R11, 24+x; MOVQ AX, R11; \
- ADCQ $0, R12; MOVQ 32+x,AX; MOVQ R12, 32+x; MOVQ AX, R12; \
- ADCQ $0, R13; MOVQ 40+x,AX; MOVQ R13, 40+x; MOVQ AX, R13; \
- ADCQ $0, R14; MOVQ 48+x,AX; MOVQ R14, 48+x; MOVQ AX, R14; \
- SUBQ 0+y, R8; \
- SBBQ 8+y, R9; \
- SBBQ 16+y, R10; \
- SBBQ 24+y, R11; \
- SBBQ 32+y, R12; \
- SBBQ 40+y, R13; \
- SBBQ 48+y, R14; \
- MOVQ $0, AX; SETCS AX; \
- MOVQ AX, DX; \
- SHLQ $32, DX; \
- SUBQ AX, R8; MOVQ $0, AX; \
- SBBQ $0, R9; \
- SBBQ $0, R10; \
- SBBQ DX, R11; \
- SBBQ $0, R12; \
- SBBQ $0, R13; \
- SBBQ $0, R14; \
- SETCS AX; \
- MOVQ AX, DX; \
- SHLQ $32, DX; \
- SUBQ AX, R8; MOVQ R8, 0+y; \
- SBBQ $0, R9; MOVQ R9, 8+y; \
- SBBQ $0, R10; MOVQ R10, 16+y; \
- SBBQ DX, R11; MOVQ R11, 24+y; \
- SBBQ $0, R12; MOVQ R12, 32+y; \
- SBBQ $0, R13; MOVQ R13, 40+y; \
- SBBQ $0, R14; MOVQ R14, 48+y;
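Both reduceFromDouble macros above exploit the Solinas shape of the Goldilocks prime: since p = 2^448 - 2^224 - 1, we have 2^448 ≡ 2^224 + 1 (mod p), so each high limb folds back in at two positions, which is what the interleaved 32-bit shifts implement. A math/big check of the identity:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	one := big.NewInt(1)
	p := new(big.Int).Lsh(one, 448)
	p.Sub(p, new(big.Int).Lsh(one, 224))
	p.Sub(p, one) // p = 2^448 - 2^224 - 1

	got := new(big.Int).Mod(new(big.Int).Lsh(one, 448), p)
	want := new(big.Int).Add(new(big.Int).Lsh(one, 224), one)
	fmt.Println(got.Cmp(want) == 0) // true: 2^448 ≡ 2^224 + 1 (mod p)
}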
diff --git a/vendor/github.com/cloudflare/circl/math/fp448/fp_amd64.s b/vendor/github.com/cloudflare/circl/math/fp448/fp_amd64.s
deleted file mode 100644
index 435addf5..00000000
--- a/vendor/github.com/cloudflare/circl/math/fp448/fp_amd64.s
+++ /dev/null
@@ -1,74 +0,0 @@
-// +build amd64
-
-#include "textflag.h"
-#include "fp_amd64.h"
-
-// func cmovAmd64(x, y *Elt, n uint)
-TEXT ·cmovAmd64(SB),NOSPLIT,$0-24
- MOVQ x+0(FP), DI
- MOVQ y+8(FP), SI
- MOVQ n+16(FP), BX
- cselect(0(DI),0(SI),BX)
- RET
-
-// func cswapAmd64(x, y *Elt, n uint)
-TEXT ·cswapAmd64(SB),NOSPLIT,$0-24
- MOVQ x+0(FP), DI
- MOVQ y+8(FP), SI
- MOVQ n+16(FP), BX
- cswap(0(DI),0(SI),BX)
- RET
-
-// func subAmd64(z, x, y *Elt)
-TEXT ·subAmd64(SB),NOSPLIT,$0-24
- MOVQ z+0(FP), DI
- MOVQ x+8(FP), SI
- MOVQ y+16(FP), BX
- subtraction(0(DI),0(SI),0(BX))
- RET
-
-// func addsubAmd64(x, y *Elt)
-TEXT ·addsubAmd64(SB),NOSPLIT,$0-16
- MOVQ x+0(FP), DI
- MOVQ y+8(FP), SI
- addSub(0(DI),0(SI))
- RET
-
-#define addLegacy \
- additionLeg(0(DI),0(SI),0(BX))
-#define addBmi2Adx \
- additionAdx(0(DI),0(SI),0(BX))
-
-#define mulLegacy \
- integerMulLeg(0(SP),0(SI),0(BX)) \
- reduceFromDoubleLeg(0(DI),0(SP))
-#define mulBmi2Adx \
- integerMulAdx(0(SP),0(SI),0(BX)) \
- reduceFromDoubleAdx(0(DI),0(SP))
-
-#define sqrLegacy \
- integerSqrLeg(0(SP),0(SI)) \
- reduceFromDoubleLeg(0(DI),0(SP))
-#define sqrBmi2Adx \
- integerSqrAdx(0(SP),0(SI)) \
- reduceFromDoubleAdx(0(DI),0(SP))
-
-// func addAmd64(z, x, y *Elt)
-TEXT ·addAmd64(SB),NOSPLIT,$0-24
- MOVQ z+0(FP), DI
- MOVQ x+8(FP), SI
- MOVQ y+16(FP), BX
- CHECK_BMI2ADX(LADD, addLegacy, addBmi2Adx)
-
-// func mulAmd64(z, x, y *Elt)
-TEXT ·mulAmd64(SB),NOSPLIT,$112-24
- MOVQ z+0(FP), DI
- MOVQ x+8(FP), SI
- MOVQ y+16(FP), BX
- CHECK_BMI2ADX(LMUL, mulLegacy, mulBmi2Adx)
-
-// func sqrAmd64(z, x *Elt)
-TEXT ·sqrAmd64(SB),NOSPLIT,$112-16
- MOVQ z+0(FP), DI
- MOVQ x+8(FP), SI
- CHECK_BMI2ADX(LSQR, sqrLegacy, sqrBmi2Adx)
diff --git a/vendor/github.com/cloudflare/circl/math/fp448/fp_generic.go b/vendor/github.com/cloudflare/circl/math/fp448/fp_generic.go
deleted file mode 100644
index 47a0b632..00000000
--- a/vendor/github.com/cloudflare/circl/math/fp448/fp_generic.go
+++ /dev/null
@@ -1,339 +0,0 @@
-package fp448
-
-import (
- "encoding/binary"
- "math/bits"
-)
-
-func cmovGeneric(x, y *Elt, n uint) {
- m := -uint64(n & 0x1)
- x0 := binary.LittleEndian.Uint64(x[0*8 : 1*8])
- x1 := binary.LittleEndian.Uint64(x[1*8 : 2*8])
- x2 := binary.LittleEndian.Uint64(x[2*8 : 3*8])
- x3 := binary.LittleEndian.Uint64(x[3*8 : 4*8])
- x4 := binary.LittleEndian.Uint64(x[4*8 : 5*8])
- x5 := binary.LittleEndian.Uint64(x[5*8 : 6*8])
- x6 := binary.LittleEndian.Uint64(x[6*8 : 7*8])
-
- y0 := binary.LittleEndian.Uint64(y[0*8 : 1*8])
- y1 := binary.LittleEndian.Uint64(y[1*8 : 2*8])
- y2 := binary.LittleEndian.Uint64(y[2*8 : 3*8])
- y3 := binary.LittleEndian.Uint64(y[3*8 : 4*8])
- y4 := binary.LittleEndian.Uint64(y[4*8 : 5*8])
- y5 := binary.LittleEndian.Uint64(y[5*8 : 6*8])
- y6 := binary.LittleEndian.Uint64(y[6*8 : 7*8])
-
- x0 = (x0 &^ m) | (y0 & m)
- x1 = (x1 &^ m) | (y1 & m)
- x2 = (x2 &^ m) | (y2 & m)
- x3 = (x3 &^ m) | (y3 & m)
- x4 = (x4 &^ m) | (y4 & m)
- x5 = (x5 &^ m) | (y5 & m)
- x6 = (x6 &^ m) | (y6 & m)
-
- binary.LittleEndian.PutUint64(x[0*8:1*8], x0)
- binary.LittleEndian.PutUint64(x[1*8:2*8], x1)
- binary.LittleEndian.PutUint64(x[2*8:3*8], x2)
- binary.LittleEndian.PutUint64(x[3*8:4*8], x3)
- binary.LittleEndian.PutUint64(x[4*8:5*8], x4)
- binary.LittleEndian.PutUint64(x[5*8:6*8], x5)
- binary.LittleEndian.PutUint64(x[6*8:7*8], x6)
-}
-
-func cswapGeneric(x, y *Elt, n uint) {
- m := -uint64(n & 0x1)
- x0 := binary.LittleEndian.Uint64(x[0*8 : 1*8])
- x1 := binary.LittleEndian.Uint64(x[1*8 : 2*8])
- x2 := binary.LittleEndian.Uint64(x[2*8 : 3*8])
- x3 := binary.LittleEndian.Uint64(x[3*8 : 4*8])
- x4 := binary.LittleEndian.Uint64(x[4*8 : 5*8])
- x5 := binary.LittleEndian.Uint64(x[5*8 : 6*8])
- x6 := binary.LittleEndian.Uint64(x[6*8 : 7*8])
-
- y0 := binary.LittleEndian.Uint64(y[0*8 : 1*8])
- y1 := binary.LittleEndian.Uint64(y[1*8 : 2*8])
- y2 := binary.LittleEndian.Uint64(y[2*8 : 3*8])
- y3 := binary.LittleEndian.Uint64(y[3*8 : 4*8])
- y4 := binary.LittleEndian.Uint64(y[4*8 : 5*8])
- y5 := binary.LittleEndian.Uint64(y[5*8 : 6*8])
- y6 := binary.LittleEndian.Uint64(y[6*8 : 7*8])
-
- t0 := m & (x0 ^ y0)
- t1 := m & (x1 ^ y1)
- t2 := m & (x2 ^ y2)
- t3 := m & (x3 ^ y3)
- t4 := m & (x4 ^ y4)
- t5 := m & (x5 ^ y5)
- t6 := m & (x6 ^ y6)
- x0 ^= t0
- x1 ^= t1
- x2 ^= t2
- x3 ^= t3
- x4 ^= t4
- x5 ^= t5
- x6 ^= t6
- y0 ^= t0
- y1 ^= t1
- y2 ^= t2
- y3 ^= t3
- y4 ^= t4
- y5 ^= t5
- y6 ^= t6
-
- binary.LittleEndian.PutUint64(x[0*8:1*8], x0)
- binary.LittleEndian.PutUint64(x[1*8:2*8], x1)
- binary.LittleEndian.PutUint64(x[2*8:3*8], x2)
- binary.LittleEndian.PutUint64(x[3*8:4*8], x3)
- binary.LittleEndian.PutUint64(x[4*8:5*8], x4)
- binary.LittleEndian.PutUint64(x[5*8:6*8], x5)
- binary.LittleEndian.PutUint64(x[6*8:7*8], x6)
-
- binary.LittleEndian.PutUint64(y[0*8:1*8], y0)
- binary.LittleEndian.PutUint64(y[1*8:2*8], y1)
- binary.LittleEndian.PutUint64(y[2*8:3*8], y2)
- binary.LittleEndian.PutUint64(y[3*8:4*8], y3)
- binary.LittleEndian.PutUint64(y[4*8:5*8], y4)
- binary.LittleEndian.PutUint64(y[5*8:6*8], y5)
- binary.LittleEndian.PutUint64(y[6*8:7*8], y6)
-}
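cmovGeneric and cswapGeneric both hinge on the branch-free mask m = -uint64(n & 1): all ones when n is 1, all zeros when n is 0, so the selection costs the same regardless of the secret bit. The trick in isolation (ctSelect is a hypothetical helper, not part of the deleted package's API):

package main

import "fmt"

// ctSelect returns a when n == 0 and b when n == 1, without branching.
func ctSelect(a, b uint64, n uint) uint64 {
	m := -uint64(n & 1) // all-ones mask if n == 1, zero if n == 0
	return (a &^ m) | (b & m)
}

func main() {
	fmt.Println(ctSelect(7, 9, 0), ctSelect(7, 9, 1)) // 7 9
}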
-
-func addGeneric(z, x, y *Elt) {
- x0 := binary.LittleEndian.Uint64(x[0*8 : 1*8])
- x1 := binary.LittleEndian.Uint64(x[1*8 : 2*8])
- x2 := binary.LittleEndian.Uint64(x[2*8 : 3*8])
- x3 := binary.LittleEndian.Uint64(x[3*8 : 4*8])
- x4 := binary.LittleEndian.Uint64(x[4*8 : 5*8])
- x5 := binary.LittleEndian.Uint64(x[5*8 : 6*8])
- x6 := binary.LittleEndian.Uint64(x[6*8 : 7*8])
-
- y0 := binary.LittleEndian.Uint64(y[0*8 : 1*8])
- y1 := binary.LittleEndian.Uint64(y[1*8 : 2*8])
- y2 := binary.LittleEndian.Uint64(y[2*8 : 3*8])
- y3 := binary.LittleEndian.Uint64(y[3*8 : 4*8])
- y4 := binary.LittleEndian.Uint64(y[4*8 : 5*8])
- y5 := binary.LittleEndian.Uint64(y[5*8 : 6*8])
- y6 := binary.LittleEndian.Uint64(y[6*8 : 7*8])
-
- z0, c0 := bits.Add64(x0, y0, 0)
- z1, c1 := bits.Add64(x1, y1, c0)
- z2, c2 := bits.Add64(x2, y2, c1)
- z3, c3 := bits.Add64(x3, y3, c2)
- z4, c4 := bits.Add64(x4, y4, c3)
- z5, c5 := bits.Add64(x5, y5, c4)
- z6, z7 := bits.Add64(x6, y6, c5)
-
- z0, c0 = bits.Add64(z0, z7, 0)
- z1, c1 = bits.Add64(z1, 0, c0)
- z2, c2 = bits.Add64(z2, 0, c1)
- z3, c3 = bits.Add64(z3, z7<<32, c2)
- z4, c4 = bits.Add64(z4, 0, c3)
- z5, c5 = bits.Add64(z5, 0, c4)
- z6, z7 = bits.Add64(z6, 0, c5)
-
- z0, c0 = bits.Add64(z0, z7, 0)
- z1, c1 = bits.Add64(z1, 0, c0)
- z2, c2 = bits.Add64(z2, 0, c1)
- z3, c3 = bits.Add64(z3, z7<<32, c2)
- z4, c4 = bits.Add64(z4, 0, c3)
- z5, c5 = bits.Add64(z5, 0, c4)
- z6, _ = bits.Add64(z6, 0, c5)
-
- binary.LittleEndian.PutUint64(z[0*8:1*8], z0)
- binary.LittleEndian.PutUint64(z[1*8:2*8], z1)
- binary.LittleEndian.PutUint64(z[2*8:3*8], z2)
- binary.LittleEndian.PutUint64(z[3*8:4*8], z3)
- binary.LittleEndian.PutUint64(z[4*8:5*8], z4)
- binary.LittleEndian.PutUint64(z[5*8:6*8], z5)
- binary.LittleEndian.PutUint64(z[6*8:7*8], z6)
-}
-
-func subGeneric(z, x, y *Elt) {
- x0 := binary.LittleEndian.Uint64(x[0*8 : 1*8])
- x1 := binary.LittleEndian.Uint64(x[1*8 : 2*8])
- x2 := binary.LittleEndian.Uint64(x[2*8 : 3*8])
- x3 := binary.LittleEndian.Uint64(x[3*8 : 4*8])
- x4 := binary.LittleEndian.Uint64(x[4*8 : 5*8])
- x5 := binary.LittleEndian.Uint64(x[5*8 : 6*8])
- x6 := binary.LittleEndian.Uint64(x[6*8 : 7*8])
-
- y0 := binary.LittleEndian.Uint64(y[0*8 : 1*8])
- y1 := binary.LittleEndian.Uint64(y[1*8 : 2*8])
- y2 := binary.LittleEndian.Uint64(y[2*8 : 3*8])
- y3 := binary.LittleEndian.Uint64(y[3*8 : 4*8])
- y4 := binary.LittleEndian.Uint64(y[4*8 : 5*8])
- y5 := binary.LittleEndian.Uint64(y[5*8 : 6*8])
- y6 := binary.LittleEndian.Uint64(y[6*8 : 7*8])
-
- z0, c0 := bits.Sub64(x0, y0, 0)
- z1, c1 := bits.Sub64(x1, y1, c0)
- z2, c2 := bits.Sub64(x2, y2, c1)
- z3, c3 := bits.Sub64(x3, y3, c2)
- z4, c4 := bits.Sub64(x4, y4, c3)
- z5, c5 := bits.Sub64(x5, y5, c4)
- z6, z7 := bits.Sub64(x6, y6, c5)
-
- z0, c0 = bits.Sub64(z0, z7, 0)
- z1, c1 = bits.Sub64(z1, 0, c0)
- z2, c2 = bits.Sub64(z2, 0, c1)
- z3, c3 = bits.Sub64(z3, z7<<32, c2)
- z4, c4 = bits.Sub64(z4, 0, c3)
- z5, c5 = bits.Sub64(z5, 0, c4)
- z6, z7 = bits.Sub64(z6, 0, c5)
-
- z0, c0 = bits.Sub64(z0, z7, 0)
- z1, c1 = bits.Sub64(z1, 0, c0)
- z2, c2 = bits.Sub64(z2, 0, c1)
- z3, c3 = bits.Sub64(z3, z7<<32, c2)
- z4, c4 = bits.Sub64(z4, 0, c3)
- z5, c5 = bits.Sub64(z5, 0, c4)
- z6, _ = bits.Sub64(z6, 0, c5)
-
- binary.LittleEndian.PutUint64(z[0*8:1*8], z0)
- binary.LittleEndian.PutUint64(z[1*8:2*8], z1)
- binary.LittleEndian.PutUint64(z[2*8:3*8], z2)
- binary.LittleEndian.PutUint64(z[3*8:4*8], z3)
- binary.LittleEndian.PutUint64(z[4*8:5*8], z4)
- binary.LittleEndian.PutUint64(z[5*8:6*8], z5)
- binary.LittleEndian.PutUint64(z[6*8:7*8], z6)
-}
-
-func addsubGeneric(x, y *Elt) {
- z := &Elt{}
- addGeneric(z, x, y)
- subGeneric(y, x, y)
- *x = *z
-}
-
-func mulGeneric(z, x, y *Elt) {
- x0 := binary.LittleEndian.Uint64(x[0*8 : 1*8])
- x1 := binary.LittleEndian.Uint64(x[1*8 : 2*8])
- x2 := binary.LittleEndian.Uint64(x[2*8 : 3*8])
- x3 := binary.LittleEndian.Uint64(x[3*8 : 4*8])
- x4 := binary.LittleEndian.Uint64(x[4*8 : 5*8])
- x5 := binary.LittleEndian.Uint64(x[5*8 : 6*8])
- x6 := binary.LittleEndian.Uint64(x[6*8 : 7*8])
-
- y0 := binary.LittleEndian.Uint64(y[0*8 : 1*8])
- y1 := binary.LittleEndian.Uint64(y[1*8 : 2*8])
- y2 := binary.LittleEndian.Uint64(y[2*8 : 3*8])
- y3 := binary.LittleEndian.Uint64(y[3*8 : 4*8])
- y4 := binary.LittleEndian.Uint64(y[4*8 : 5*8])
- y5 := binary.LittleEndian.Uint64(y[5*8 : 6*8])
- y6 := binary.LittleEndian.Uint64(y[6*8 : 7*8])
-
- yy := [7]uint64{y0, y1, y2, y3, y4, y5, y6}
- zz := [7]uint64{}
-
- yi := yy[0]
- h0, l0 := bits.Mul64(x0, yi)
- h1, l1 := bits.Mul64(x1, yi)
- h2, l2 := bits.Mul64(x2, yi)
- h3, l3 := bits.Mul64(x3, yi)
- h4, l4 := bits.Mul64(x4, yi)
- h5, l5 := bits.Mul64(x5, yi)
- h6, l6 := bits.Mul64(x6, yi)
-
- zz[0] = l0
- a0, c0 := bits.Add64(h0, l1, 0)
- a1, c1 := bits.Add64(h1, l2, c0)
- a2, c2 := bits.Add64(h2, l3, c1)
- a3, c3 := bits.Add64(h3, l4, c2)
- a4, c4 := bits.Add64(h4, l5, c3)
- a5, c5 := bits.Add64(h5, l6, c4)
- a6, _ := bits.Add64(h6, 0, c5)
-
- for i := 1; i < 7; i++ {
- yi = yy[i]
- h0, l0 = bits.Mul64(x0, yi)
- h1, l1 = bits.Mul64(x1, yi)
- h2, l2 = bits.Mul64(x2, yi)
- h3, l3 = bits.Mul64(x3, yi)
- h4, l4 = bits.Mul64(x4, yi)
- h5, l5 = bits.Mul64(x5, yi)
- h6, l6 = bits.Mul64(x6, yi)
-
- zz[i], c0 = bits.Add64(a0, l0, 0)
- a0, c1 = bits.Add64(a1, l1, c0)
- a1, c2 = bits.Add64(a2, l2, c1)
- a2, c3 = bits.Add64(a3, l3, c2)
- a3, c4 = bits.Add64(a4, l4, c3)
- a4, c5 = bits.Add64(a5, l5, c4)
- a5, a6 = bits.Add64(a6, l6, c5)
-
- a0, c0 = bits.Add64(a0, h0, 0)
- a1, c1 = bits.Add64(a1, h1, c0)
- a2, c2 = bits.Add64(a2, h2, c1)
- a3, c3 = bits.Add64(a3, h3, c2)
- a4, c4 = bits.Add64(a4, h4, c3)
- a5, c5 = bits.Add64(a5, h5, c4)
- a6, _ = bits.Add64(a6, h6, c5)
- }
- red64(z, &zz, &[7]uint64{a0, a1, a2, a3, a4, a5, a6})
-}
-
-func sqrGeneric(z, x *Elt) { mulGeneric(z, x, x) }
-
-func red64(z *Elt, l, h *[7]uint64) {
- /* (2C13, 2C12, 2C11, 2C10|C10, C9, C8, C7) + (C6,...,C0) */
- h0 := h[0]
- h1 := h[1]
- h2 := h[2]
- h3 := ((h[3] & (0xFFFFFFFF << 32)) << 1) | (h[3] & 0xFFFFFFFF)
- h4 := (h[3] >> 63) | (h[4] << 1)
- h5 := (h[4] >> 63) | (h[5] << 1)
- h6 := (h[5] >> 63) | (h[6] << 1)
- h7 := (h[6] >> 63)
-
- l0, c0 := bits.Add64(h0, l[0], 0)
- l1, c1 := bits.Add64(h1, l[1], c0)
- l2, c2 := bits.Add64(h2, l[2], c1)
- l3, c3 := bits.Add64(h3, l[3], c2)
- l4, c4 := bits.Add64(h4, l[4], c3)
- l5, c5 := bits.Add64(h5, l[5], c4)
- l6, c6 := bits.Add64(h6, l[6], c5)
- l7, _ := bits.Add64(h7, 0, c6)
-
- /* (C10C9, C9C8,C8C7,C7C13,C13C12,C12C11,C11C10) + (C6,...,C0) */
- h0 = (h[3] >> 32) | (h[4] << 32)
- h1 = (h[4] >> 32) | (h[5] << 32)
- h2 = (h[5] >> 32) | (h[6] << 32)
- h3 = (h[6] >> 32) | (h[0] << 32)
- h4 = (h[0] >> 32) | (h[1] << 32)
- h5 = (h[1] >> 32) | (h[2] << 32)
- h6 = (h[2] >> 32) | (h[3] << 32)
-
- l0, c0 = bits.Add64(l0, h0, 0)
- l1, c1 = bits.Add64(l1, h1, c0)
- l2, c2 = bits.Add64(l2, h2, c1)
- l3, c3 = bits.Add64(l3, h3, c2)
- l4, c4 = bits.Add64(l4, h4, c3)
- l5, c5 = bits.Add64(l5, h5, c4)
- l6, c6 = bits.Add64(l6, h6, c5)
- l7, _ = bits.Add64(l7, 0, c6)
-
- /* (C7) + (C6,...,C0) */
- l0, c0 = bits.Add64(l0, l7, 0)
- l1, c1 = bits.Add64(l1, 0, c0)
- l2, c2 = bits.Add64(l2, 0, c1)
- l3, c3 = bits.Add64(l3, l7<<32, c2)
- l4, c4 = bits.Add64(l4, 0, c3)
- l5, c5 = bits.Add64(l5, 0, c4)
- l6, l7 = bits.Add64(l6, 0, c5)
-
- /* (C7) + (C6,...,C0) */
- l0, c0 = bits.Add64(l0, l7, 0)
- l1, c1 = bits.Add64(l1, 0, c0)
- l2, c2 = bits.Add64(l2, 0, c1)
- l3, c3 = bits.Add64(l3, l7<<32, c2)
- l4, c4 = bits.Add64(l4, 0, c3)
- l5, c5 = bits.Add64(l5, 0, c4)
- l6, _ = bits.Add64(l6, 0, c5)
-
- binary.LittleEndian.PutUint64(z[0*8:1*8], l0)
- binary.LittleEndian.PutUint64(z[1*8:2*8], l1)
- binary.LittleEndian.PutUint64(z[2*8:3*8], l2)
- binary.LittleEndian.PutUint64(z[3*8:4*8], l3)
- binary.LittleEndian.PutUint64(z[4*8:5*8], l4)
- binary.LittleEndian.PutUint64(z[5*8:6*8], l5)
- binary.LittleEndian.PutUint64(z[6*8:7*8], l6)
-}
diff --git a/vendor/github.com/cloudflare/circl/math/fp448/fp_noasm.go b/vendor/github.com/cloudflare/circl/math/fp448/fp_noasm.go
deleted file mode 100644
index a62225d2..00000000
--- a/vendor/github.com/cloudflare/circl/math/fp448/fp_noasm.go
+++ /dev/null
@@ -1,12 +0,0 @@
-//go:build !amd64 || purego
-// +build !amd64 purego
-
-package fp448
-
-func cmov(x, y *Elt, n uint) { cmovGeneric(x, y, n) }
-func cswap(x, y *Elt, n uint) { cswapGeneric(x, y, n) }
-func add(z, x, y *Elt) { addGeneric(z, x, y) }
-func sub(z, x, y *Elt) { subGeneric(z, x, y) }
-func addsub(x, y *Elt) { addsubGeneric(x, y) }
-func mul(z, x, y *Elt) { mulGeneric(z, x, y) }
-func sqr(z, x *Elt) { sqrGeneric(z, x) }
diff --git a/vendor/github.com/cloudflare/circl/math/fp448/fuzzer.go b/vendor/github.com/cloudflare/circl/math/fp448/fuzzer.go
deleted file mode 100644
index 2d7afc80..00000000
--- a/vendor/github.com/cloudflare/circl/math/fp448/fuzzer.go
+++ /dev/null
@@ -1,75 +0,0 @@
-//go:build gofuzz
-// +build gofuzz
-
-// How to run the fuzzer:
-//
-// $ go get -u github.com/dvyukov/go-fuzz/go-fuzz
-// $ go get -u github.com/dvyukov/go-fuzz/go-fuzz-build
-// $ go-fuzz-build -libfuzzer -func FuzzReduction -o lib.a
-// $ clang -fsanitize=fuzzer lib.a -o fu.exe
-// $ ./fu.exe
-package fp448
-
-import (
- "encoding/binary"
- "fmt"
- "math/big"
-
- "github.com/cloudflare/circl/internal/conv"
-)
-
-// FuzzReduction is a fuzzer target for the red64 function, which reduces t
-// (112 bytes) to a number t' (56 bytes) congruent to t modulo p448.
-func FuzzReduction(data []byte) int {
- if len(data) != 2*Size {
- return -1
- }
- var got, want Elt
- var lo, hi [7]uint64
- a := data[:Size]
- b := data[Size:]
- lo[0] = binary.LittleEndian.Uint64(a[0*8 : 1*8])
- lo[1] = binary.LittleEndian.Uint64(a[1*8 : 2*8])
- lo[2] = binary.LittleEndian.Uint64(a[2*8 : 3*8])
- lo[3] = binary.LittleEndian.Uint64(a[3*8 : 4*8])
- lo[4] = binary.LittleEndian.Uint64(a[4*8 : 5*8])
- lo[5] = binary.LittleEndian.Uint64(a[5*8 : 6*8])
- lo[6] = binary.LittleEndian.Uint64(a[6*8 : 7*8])
-
- hi[0] = binary.LittleEndian.Uint64(b[0*8 : 1*8])
- hi[1] = binary.LittleEndian.Uint64(b[1*8 : 2*8])
- hi[2] = binary.LittleEndian.Uint64(b[2*8 : 3*8])
- hi[3] = binary.LittleEndian.Uint64(b[3*8 : 4*8])
- hi[4] = binary.LittleEndian.Uint64(b[4*8 : 5*8])
- hi[5] = binary.LittleEndian.Uint64(b[5*8 : 6*8])
- hi[6] = binary.LittleEndian.Uint64(b[6*8 : 7*8])
-
- red64(&got, &lo, &hi)
-
- t := conv.BytesLe2BigInt(data[:2*Size])
-
- two448 := big.NewInt(1)
- two448.Lsh(two448, 448) // 2^448
- mask448 := big.NewInt(1)
- mask448.Sub(two448, mask448) // 2^448-1
- two224plus1 := big.NewInt(1)
- two224plus1.Lsh(two224plus1, 224)
- two224plus1.Add(two224plus1, big.NewInt(1)) // 2^224+1
-
- var loBig, hiBig big.Int
- for t.Cmp(two448) >= 0 {
- loBig.And(t, mask448)
- hiBig.Rsh(t, 448)
- t.Mul(&hiBig, two224plus1)
- t.Add(t, &loBig)
- }
- conv.BigInt2BytesLe(want[:], t)
-
- if got != want {
- fmt.Printf("in: %v\n", conv.BytesLe2BigInt(data[:2*Size]))
- fmt.Printf("got: %v\n", got)
- fmt.Printf("want: %v\n", want)
- panic("error found")
- }
- return 1
-}
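The reference loop in the fuzzer above relies on the shape of the prime: since p448 = 2^448 - 2^224 - 1, the congruence 2^448 ≡ 2^224 + 1 (mod p448) lets a double-width value t = hi·2^448 + lo be folded to hi·(2^224+1) + lo. The following standalone sketch (standard library only, not circl code) checks that folding identity with math/big:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	one := big.NewInt(1)
	two448 := new(big.Int).Lsh(one, 448)
	p448 := new(big.Int).Sub(two448, new(big.Int).Lsh(one, 224))
	p448.Sub(p448, one) // p448 = 2^448 - 2^224 - 1
	fold := new(big.Int).Add(new(big.Int).Lsh(one, 224), one) // 2^224 + 1

	// An arbitrary double-width (~900-bit) input.
	t := new(big.Int).Lsh(big.NewInt(0xDEADBEEF), 860)
	t.Add(t, big.NewInt(12345))

	// Fold the high half, exactly like the fuzzer's reference loop.
	mask := new(big.Int).Sub(two448, one)
	r := new(big.Int).Set(t)
	for r.Cmp(two448) >= 0 {
		lo := new(big.Int).And(r, mask)
		hi := new(big.Int).Rsh(r, 448)
		r.Add(lo, hi.Mul(hi, fold)) // r = lo + hi*(2^224+1)
	}

	// r is congruent to t modulo p448 (and fits in 448 bits).
	same := new(big.Int).Mod(r, p448).Cmp(new(big.Int).Mod(t, p448)) == 0
	fmt.Println(same) // true
}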
diff --git a/vendor/github.com/cloudflare/circl/math/mlsbset/mlsbset.go b/vendor/github.com/cloudflare/circl/math/mlsbset/mlsbset.go
deleted file mode 100644
index a43851b8..00000000
--- a/vendor/github.com/cloudflare/circl/math/mlsbset/mlsbset.go
+++ /dev/null
@@ -1,122 +0,0 @@
-// Package mlsbset provides a constant-time exponentiation method with precomputation.
-//
-// References: "Efficient and secure algorithms for GLV-based scalar
-// multiplication and their implementation on GLV–GLS curves" by Faz-Hernandez et al.
-// - https://doi.org/10.1007/s13389-014-0085-7
-// - https://eprint.iacr.org/2013/158
-package mlsbset
-
-import (
- "errors"
- "fmt"
- "math/big"
-
- "github.com/cloudflare/circl/internal/conv"
-)
-
-// EltG is a group element.
-type EltG interface{}
-
-// EltP is a precomputed group element.
-type EltP interface{}
-
-// Group defines the operations required by MLSBSet exponentiation method.
-type Group interface {
- Identity() EltG // Returns the identity of the group.
- Sqr(x EltG) // Calculates x = x^2.
- Mul(x EltG, y EltP) // Calculates x = x*y.
- NewEltP() EltP // Returns an arbitrary precomputed element.
- ExtendedEltP() EltP // Returns the precomputed element x^(2^(w*d)).
- Lookup(a EltP, v uint, s, u int32) // Sets a = s*T[v][u].
-}
-
-// Params contains the parameters of the encoding.
-type Params struct {
- T uint // T is the maximum size (in bits) of exponents.
- V uint // V is the number of tables.
- W uint // W is the window size.
- E uint // E is the number of digits per table.
- D uint // D is the number of digits in total.
- L uint // L is the length of the code.
-}
-
-// Encoder converts integers into valid powers.
-type Encoder struct{ p Params }
-
-// New produces an encoder of the MLSBSet algorithm.
-func New(t, v, w uint) (Encoder, error) {
- if !(t > 1 && v >= 1 && w >= 2) {
- return Encoder{}, errors.New("t>1, v>=1, w>=2")
- }
- e := (t + w*v - 1) / (w * v)
- d := e * v
- l := d * w
- return Encoder{Params{t, v, w, e, d, l}}, nil
-}
-
-// Encode converts an odd integer k into a valid power for exponentiation.
-func (m Encoder) Encode(k []byte) (*Power, error) {
- if len(k) == 0 {
- return nil, errors.New("empty slice")
- }
- if !(len(k) <= int(m.p.L+7)>>3) {
- return nil, errors.New("k too big")
- }
- if k[0]%2 == 0 {
- return nil, errors.New("k must be odd")
- }
- ap := int((m.p.L+7)/8) - len(k)
- k = append(k, make([]byte, ap)...)
- s := m.signs(k)
- b := make([]int32, m.p.L-m.p.D)
- c := conv.BytesLe2BigInt(k)
- c.Rsh(c, m.p.D)
- var bi big.Int
- for i := m.p.D; i < m.p.L; i++ {
- c0 := int32(c.Bit(0))
- b[i-m.p.D] = s[i%m.p.D] * c0
- bi.SetInt64(int64(b[i-m.p.D] >> 1))
- c.Rsh(c, 1)
- c.Sub(c, &bi)
- }
- carry := int(c.Int64())
- return &Power{m, s, b, carry}, nil
-}
-
-// signs calculates the set of signs.
-func (m Encoder) signs(k []byte) []int32 {
- s := make([]int32, m.p.D)
- s[m.p.D-1] = 1
- for i := uint(1); i < m.p.D; i++ {
- ki := int32((k[i>>3] >> (i & 0x7)) & 0x1)
- s[i-1] = 2*ki - 1
- }
- return s
-}
-
-// GetParams returns the complementary parameters of the encoding.
-func (m Encoder) GetParams() Params { return m.p }
-
-// tableSize returns the size of each table.
-func (m Encoder) tableSize() uint { return 1 << (m.p.W - 1) }
-
-// Elts returns the total number of elements that must be precomputed.
-func (m Encoder) Elts() uint { return m.p.V * m.tableSize() }
-
-// IsExtended returns true if the element x^(2^(wd)) must be calculated.
-func (m Encoder) IsExtended() bool { q := m.p.T / (m.p.V * m.p.W); return m.p.T == q*m.p.V*m.p.W }
-
-// Ops returns the number of squares and multiplications executed during an exponentiation.
-func (m Encoder) Ops() (S uint, M uint) {
- S = m.p.E
- M = m.p.E * m.p.V
- if m.IsExtended() {
- M++
- }
- return
-}
-
-func (m Encoder) String() string {
- return fmt.Sprintf("T: %v W: %v V: %v e: %v d: %v l: %v wv|t: %v",
- m.p.T, m.p.W, m.p.V, m.p.E, m.p.D, m.p.L, m.IsExtended())
-}
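For concreteness, the parameter derivation in New can be checked by hand. The sketch below (deriveParams is a hypothetical helper mirroring New, not part of the package) reproduces it for t=257, v=2, w=3, the values the deleted ed25519 code later in this diff hard-codes as fxT, fxV, fxW:

package main

import "fmt"

// deriveParams mirrors the arithmetic in New: e = ceil(t/(w*v)) digits per
// table, d = e*v digits in total, and a code of length l = d*w.
func deriveParams(t, v, w uint) (e, d, l uint) {
	e = (t + w*v - 1) / (w * v)
	d = e * v
	l = d * w
	return
}

func main() {
	e, d, l := deriveParams(257, 2, 3)
	fmt.Println(e, d, l) // 43 86 258, so the recoding needs l+1 = 259 digits.
}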
diff --git a/vendor/github.com/cloudflare/circl/math/mlsbset/power.go b/vendor/github.com/cloudflare/circl/math/mlsbset/power.go
deleted file mode 100644
index 3f214c30..00000000
--- a/vendor/github.com/cloudflare/circl/math/mlsbset/power.go
+++ /dev/null
@@ -1,64 +0,0 @@
-package mlsbset
-
-import "fmt"
-
-// Power is a valid exponent produced by the MLSBSet encoding algorithm.
-type Power struct {
- set Encoder // parameters of code.
- s []int32 // set of signs.
- b []int32 // set of digits.
- c int // carry is {0,1}.
-}
-
-// Exp calculates x^k, where x is a predetermined element of a group G.
-func (p *Power) Exp(G Group) EltG {
- a, b := G.Identity(), G.NewEltP()
- for e := int(p.set.p.E - 1); e >= 0; e-- {
- G.Sqr(a)
- for v := uint(0); v < p.set.p.V; v++ {
- sgnElt, idElt := p.Digit(v, uint(e))
- G.Lookup(b, v, sgnElt, idElt)
- G.Mul(a, b)
- }
- }
- if p.set.IsExtended() && p.c == 1 {
- G.Mul(a, G.ExtendedEltP())
- }
- return a
-}
-
-// Digit returns the (v,e)-th digit and its sign.
-func (p *Power) Digit(v, e uint) (sgn, dig int32) {
- sgn = p.bit(0, v, e)
- dig = 0
- for i := p.set.p.W - 1; i > 0; i-- {
- dig = 2*dig + p.bit(i, v, e)
- }
- mask := dig >> 31
- dig = (dig + mask) ^ mask
- return sgn, dig
-}
-
-// bit returns the (w,v,e)-th bit of the code.
-func (p *Power) bit(w, v, e uint) int32 {
- if !(w < p.set.p.W &&
- v < p.set.p.V &&
- e < p.set.p.E) {
- panic(fmt.Errorf("indexes outside (%v,%v,%v)", w, v, e))
- }
- if w == 0 {
- return p.s[p.set.p.E*v+e]
- }
- return p.b[p.set.p.D*(w-1)+p.set.p.E*v+e]
-}
-
-func (p *Power) String() string {
- dig := ""
- for j := uint(0); j < p.set.p.V; j++ {
- for i := uint(0); i < p.set.p.E; i++ {
- s, d := p.Digit(j, i)
- dig += fmt.Sprintf("(%2v,%2v) = %+2v %+2v\n", j, i, s, d)
- }
- }
- return fmt.Sprintf("len: %v\ncarry: %v\ndigits:\n%v", len(p.b)+len(p.s), p.c, dig)
-}
diff --git a/vendor/github.com/cloudflare/circl/math/wnaf.go b/vendor/github.com/cloudflare/circl/math/wnaf.go
deleted file mode 100644
index 94a1ec50..00000000
--- a/vendor/github.com/cloudflare/circl/math/wnaf.go
+++ /dev/null
@@ -1,84 +0,0 @@
-// Package math provides some utility functions for big integers.
-package math
-
-import "math/big"
-
-// SignedDigit obtains the signed-digit recoding of n and returns a list L of
-// digits such that n = sum( L[i]*2^(i*(w-1)) ), and each L[i] is an odd number
-// in the set {±1, ±3, ..., ±2^(w-1)-1}. The third parameter ensures that the
-// output has ceil(l/(w-1)) digits.
-//
-// Restrictions:
-// - n is odd and n > 0.
-// - 1 < w < 32.
-// - l >= bit length of n.
-//
-// References:
-// - Alg.6 in "Exponent Recoding and Regular Exponentiation Algorithms"
-// by Joye-Tunstall. http://doi.org/10.1007/978-3-642-02384-2_21
-// - Alg.6 in "Selecting Elliptic Curves for Cryptography: An Efficiency and
-// Security Analysis" by Bos et al. http://doi.org/10.1007/s13389-015-0097-y
-func SignedDigit(n *big.Int, w, l uint) []int32 {
- if n.Sign() <= 0 || n.Bit(0) == 0 {
- panic("n must be non-zero, odd, and positive")
- }
- if w <= 1 || w >= 32 {
- panic("Verify that 1 < w < 32")
- }
- if uint(n.BitLen()) > l {
- panic("n is too big to fit in l digits")
- }
- lenN := (l + (w - 1) - 1) / (w - 1) // ceil(l/(w-1))
- L := make([]int32, lenN+1)
- var k, v big.Int
- k.Set(n)
-
- var i uint
- for i = 0; i < lenN; i++ {
- words := k.Bits()
- value := int32(words[0] & ((1 << w) - 1))
- value -= int32(1) << (w - 1)
- L[i] = value
- v.SetInt64(int64(value))
- k.Sub(&k, &v)
- k.Rsh(&k, w-1)
- }
- L[i] = int32(k.Int64())
- return L
-}
-
-// OmegaNAF obtains the window-w Non-Adjacent Form of a positive number n,
-// where 1 < w < 32. The returned slice L holds n = sum( L[i]*2^i ).
-//
-// Reference:
-// - Alg.9 "Efficient arithmetic on Koblitz curves" by Solinas.
-// http://doi.org/10.1023/A:1008306223194
-func OmegaNAF(n *big.Int, w uint) (L []int32) {
- if n.Sign() < 0 {
- panic("n must be positive")
- }
- if w <= 1 || w >= 32 {
- panic("Verify that 1 < w < 32")
- }
-
- L = make([]int32, n.BitLen()+1)
- var k, v big.Int
- k.Set(n)
-
- i := 0
- for ; k.Sign() > 0; i++ {
- value := int32(0)
- if k.Bit(0) == 1 {
- words := k.Bits()
- value = int32(words[0] & ((1 << w) - 1))
- if value >= (int32(1) << (w - 1)) {
- value -= int32(1) << w
- }
- v.SetInt64(int64(value))
- k.Sub(&k, &v)
- }
- L[i] = value
- k.Rsh(&k, 1)
- }
- return L[:i]
-}
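The invariant documented for OmegaNAF is easy to test independently. The following self-contained sketch (standard library only; omegaNAF is an illustrative re-implementation, not the circl function) recodes a small integer and reassembles it from its digits:

package main

import (
	"fmt"
	"math/big"
)

// omegaNAF recodes n so that each digit is zero or an odd value in the
// range (-2^(w-1), 2^(w-1)), matching the documented invariant.
func omegaNAF(n *big.Int, w uint) []int32 {
	k := new(big.Int).Set(n)
	mask := big.NewInt(int64(1)<<w - 1)
	var L []int32
	for k.Sign() > 0 {
		var d int32
		if k.Bit(0) == 1 {
			d = int32(new(big.Int).And(k, mask).Int64())
			if d >= int32(1)<<(w-1) {
				d -= int32(1) << w
			}
			k.Sub(k, big.NewInt(int64(d)))
		}
		L = append(L, d)
		k.Rsh(k, 1)
	}
	return L
}

func main() {
	n := big.NewInt(46803) // 0xB6D3
	L := omegaNAF(n, 5)
	acc := new(big.Int)
	for i := len(L) - 1; i >= 0; i-- {
		acc.Lsh(acc, 1).Add(acc, big.NewInt(int64(L[i])))
	}
	fmt.Println(acc.Cmp(n) == 0) // true: n == sum(L[i] * 2^i)
}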
diff --git a/vendor/github.com/cloudflare/circl/sign/ed25519/ed25519.go b/vendor/github.com/cloudflare/circl/sign/ed25519/ed25519.go
deleted file mode 100644
index 08ca65d7..00000000
--- a/vendor/github.com/cloudflare/circl/sign/ed25519/ed25519.go
+++ /dev/null
@@ -1,453 +0,0 @@
-// Package ed25519 implements the Ed25519 signature scheme as described in RFC-8032.
-//
-// This package provides optimized implementations of the three signature
-// variants while maintaining close compatibility with crypto/ed25519.
-//
-// | Scheme Name | Sign Function | Verification | Context |
-// |-------------|-------------------|---------------|-------------------|
-// | Ed25519 | Sign | Verify | None |
-// | Ed25519Ph | SignPh | VerifyPh | Yes, can be empty |
-// | Ed25519Ctx | SignWithCtx | VerifyWithCtx | Yes, non-empty |
-// | All above | (PrivateKey).Sign | VerifyAny | As above |
-//
-// Specific functions for signing and verifying are defined. A generic signing
-// function for all schemes is available through the crypto.Signer interface,
-// which is implemented by the PrivateKey type. A corresponding all-in-one
-// verification method is provided by the VerifyAny function.
-//
-// Signing with Ed25519Ph or Ed25519Ctx requires a context string for domain
-// separation. This parameter is passed using a SignerOptions struct defined
-// in this package. While Ed25519Ph accepts an empty context, Ed25519Ctx
-// enforces non-empty context strings.
-//
-// # Compatibility with crypto/ed25519
-//
-// These functions are compatible with the “Ed25519” function defined in
-// RFC-8032. However, unlike RFC 8032's formulation, this package's private
-// key representation includes a public key suffix to make multiple signing
-// operations with the same key more efficient. This package refers to the
-// RFC-8032 private key as the “seed”.
-//
-// References
-//
-// - RFC-8032: https://rfc-editor.org/rfc/rfc8032.txt
-// - Ed25519: https://ed25519.cr.yp.to/
-// - EdDSA: High-speed high-security signatures. https://doi.org/10.1007/s13389-012-0027-1
-package ed25519
-
-import (
- "bytes"
- "crypto"
- cryptoRand "crypto/rand"
- "crypto/sha512"
- "crypto/subtle"
- "errors"
- "fmt"
- "io"
- "strconv"
-
- "github.com/cloudflare/circl/sign"
-)
-
-const (
- // ContextMaxSize is the maximum length (in bytes) allowed for context.
- ContextMaxSize = 255
- // PublicKeySize is the size, in bytes, of public keys as used in this package.
- PublicKeySize = 32
- // PrivateKeySize is the size, in bytes, of private keys as used in this package.
- PrivateKeySize = 64
- // SignatureSize is the size, in bytes, of signatures generated and verified by this package.
- SignatureSize = 64
- // SeedSize is the size, in bytes, of private key seeds. These are the private key representations used by RFC 8032.
- SeedSize = 32
-)
-
-const (
- paramB = 256 / 8 // Size of keys in bytes.
-)
-
-// SignerOptions implements crypto.SignerOpts and augments it with parameters
-// specific to the Ed25519 signature schemes.
-type SignerOptions struct {
- // Hash must be crypto.Hash(0) for Ed25519/Ed25519ctx, or crypto.SHA512
- // for Ed25519ph.
- crypto.Hash
-
- // Context is an optional domain separation string for Ed25519ph and is
- // required for Ed25519ctx. Its length must be at most 255 bytes.
- Context string
-
- // Scheme is an identifier for choosing a signature scheme. The zero value
- // is ED25519.
- Scheme SchemeID
-}
-
-// SchemeID is an identifier for each signature scheme.
-type SchemeID uint
-
-const (
- ED25519 SchemeID = iota
- ED25519Ph
- ED25519Ctx
-)
-
-// PrivateKey is the type of Ed25519 private keys. It implements crypto.Signer.
-type PrivateKey []byte
-
-// Equal reports whether priv and x have the same value.
-func (priv PrivateKey) Equal(x crypto.PrivateKey) bool {
- xx, ok := x.(PrivateKey)
- return ok && subtle.ConstantTimeCompare(priv, xx) == 1
-}
-
-// Public returns the PublicKey corresponding to priv.
-func (priv PrivateKey) Public() crypto.PublicKey {
- publicKey := make(PublicKey, PublicKeySize)
- copy(publicKey, priv[SeedSize:])
- return publicKey
-}
-
-// Seed returns the private key seed corresponding to priv. It is provided for
-// interoperability with RFC 8032. RFC 8032's private keys correspond to seeds
-// in this package.
-func (priv PrivateKey) Seed() []byte {
- seed := make([]byte, SeedSize)
- copy(seed, priv[:SeedSize])
- return seed
-}
-
-func (priv PrivateKey) Scheme() sign.Scheme { return sch }
-
-func (pub PublicKey) Scheme() sign.Scheme { return sch }
-
-func (priv PrivateKey) MarshalBinary() (data []byte, err error) {
- privateKey := make(PrivateKey, PrivateKeySize)
- copy(privateKey, priv)
- return privateKey, nil
-}
-
-func (pub PublicKey) MarshalBinary() (data []byte, err error) {
- publicKey := make(PublicKey, PublicKeySize)
- copy(publicKey, pub)
- return publicKey, nil
-}
-
-// Equal reports whether pub and x have the same value.
-func (pub PublicKey) Equal(x crypto.PublicKey) bool {
- xx, ok := x.(PublicKey)
- return ok && bytes.Equal(pub, xx)
-}
-
-// Sign creates a signature of a message with priv key.
-// This function is compatible with crypto/ed25519 and also supports the
-// three signature variants defined in RFC-8032, namely Ed25519 (or pure
-// EdDSA), Ed25519Ph, and Ed25519Ctx.
-// The opts.HashFunc() must return zero to specify either Ed25519 or Ed25519Ctx
-// variant. This can be achieved by passing crypto.Hash(0) as the value for
-// opts.
-// The opts.HashFunc() must return SHA512 to specify the Ed25519Ph variant.
-// This can be achieved by passing crypto.SHA512 as the value for opts.
-// Use a SignerOptions struct (defined in this package) to pass a context
-// string for signing.
-func (priv PrivateKey) Sign(
- rand io.Reader,
- message []byte,
- opts crypto.SignerOpts,
-) (signature []byte, err error) {
- var ctx string
- var scheme SchemeID
- if o, ok := opts.(SignerOptions); ok {
- ctx = o.Context
- scheme = o.Scheme
- }
-
- switch {
- case scheme == ED25519 && opts.HashFunc() == crypto.Hash(0):
- return Sign(priv, message), nil
- case scheme == ED25519Ph && opts.HashFunc() == crypto.SHA512:
- return SignPh(priv, message, ctx), nil
- case scheme == ED25519Ctx && opts.HashFunc() == crypto.Hash(0) && len(ctx) > 0:
- return SignWithCtx(priv, message, ctx), nil
- default:
- return nil, errors.New("ed25519: bad hash algorithm")
- }
-}
-
-// GenerateKey generates a public/private key pair using entropy from rand.
-// If rand is nil, crypto/rand.Reader will be used.
-func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) {
- if rand == nil {
- rand = cryptoRand.Reader
- }
-
- seed := make([]byte, SeedSize)
- if _, err := io.ReadFull(rand, seed); err != nil {
- return nil, nil, err
- }
-
- privateKey := NewKeyFromSeed(seed)
- publicKey := make(PublicKey, PublicKeySize)
- copy(publicKey, privateKey[SeedSize:])
-
- return publicKey, privateKey, nil
-}
-
-// NewKeyFromSeed calculates a private key from a seed. It will panic if
-// len(seed) is not SeedSize. This function is provided for interoperability
-// with RFC 8032. RFC 8032's private keys correspond to seeds in this
-// package.
-func NewKeyFromSeed(seed []byte) PrivateKey {
- privateKey := make(PrivateKey, PrivateKeySize)
- newKeyFromSeed(privateKey, seed)
- return privateKey
-}
-
-func newKeyFromSeed(privateKey, seed []byte) {
- if l := len(seed); l != SeedSize {
- panic("ed25519: bad seed length: " + strconv.Itoa(l))
- }
- var P pointR1
- k := sha512.Sum512(seed)
- clamp(k[:])
- reduceModOrder(k[:paramB], false)
- P.fixedMult(k[:paramB])
- copy(privateKey[:SeedSize], seed)
- _ = P.ToBytes(privateKey[SeedSize:])
-}
-
-func signAll(signature []byte, privateKey PrivateKey, message, ctx []byte, preHash bool) {
- if l := len(privateKey); l != PrivateKeySize {
- panic("ed25519: bad private key length: " + strconv.Itoa(l))
- }
-
- H := sha512.New()
- var PHM []byte
-
- if preHash {
- _, _ = H.Write(message)
- PHM = H.Sum(nil)
- H.Reset()
- } else {
- PHM = message
- }
-
- // 1. Hash the 32-byte private key using SHA-512.
- _, _ = H.Write(privateKey[:SeedSize])
- h := H.Sum(nil)
- clamp(h[:])
- prefix, s := h[paramB:], h[:paramB]
-
- // 2. Compute SHA-512(dom2(F, C) || prefix || PH(M))
- H.Reset()
-
- writeDom(H, ctx, preHash)
-
- _, _ = H.Write(prefix)
- _, _ = H.Write(PHM)
- r := H.Sum(nil)
- reduceModOrder(r[:], true)
-
- // 3. Compute the point [r]B.
- var P pointR1
- P.fixedMult(r[:paramB])
- R := (&[paramB]byte{})[:]
- if err := P.ToBytes(R); err != nil {
- panic(err)
- }
-
- // 4. Compute SHA512(dom2(F, C) || R || A || PH(M)).
- H.Reset()
-
- writeDom(H, ctx, preHash)
-
- _, _ = H.Write(R)
- _, _ = H.Write(privateKey[SeedSize:])
- _, _ = H.Write(PHM)
- hRAM := H.Sum(nil)
-
- reduceModOrder(hRAM[:], true)
-
- // 5. Compute S = (r + k * s) mod order.
- S := (&[paramB]byte{})[:]
- calculateS(S, r[:paramB], hRAM[:paramB], s)
-
- // 6. The signature is the concatenation of R and S.
- copy(signature[:paramB], R[:])
- copy(signature[paramB:], S[:])
-}
-
-// Sign signs the message with privateKey and returns a signature.
-// This function supports the signature variant defined in RFC-8032: Ed25519,
-// also known as the pure version of EdDSA.
-// It will panic if len(privateKey) is not PrivateKeySize.
-func Sign(privateKey PrivateKey, message []byte) []byte {
- signature := make([]byte, SignatureSize)
- signAll(signature, privateKey, message, []byte(""), false)
- return signature
-}
-
-// SignPh creates a signature of a message with private key and context.
-// This function supports the signature variant defined in RFC-8032: Ed25519ph,
-// meaning it internally hashes the message using SHA-512, and optionally
-// accepts a context string.
-// It will panic if len(privateKey) is not PrivateKeySize.
-// A context string of at most ContextMaxSize=255 bytes may be passed to this
-// function. It can be empty.
-func SignPh(privateKey PrivateKey, message []byte, ctx string) []byte {
- if len(ctx) > ContextMaxSize {
- panic(fmt.Errorf("ed25519: bad context length: %v", len(ctx)))
- }
-
- signature := make([]byte, SignatureSize)
- signAll(signature, privateKey, message, []byte(ctx), true)
- return signature
-}
-
-// SignWithCtx creates a signature of a message with private key and context.
-// This function supports the signature variant defined in RFC-8032: Ed25519ctx,
-// meaning it accepts a non-empty context string.
-// It will panic if len(privateKey) is not PrivateKeySize.
-// A context string must be passed to this function; it cannot be empty and
-// its length must be at most ContextMaxSize=255 bytes.
-func SignWithCtx(privateKey PrivateKey, message []byte, ctx string) []byte {
- if len(ctx) == 0 || len(ctx) > ContextMaxSize {
- panic(fmt.Errorf("ed25519: bad context length: %v > %v", len(ctx), ContextMaxSize))
- }
-
- signature := make([]byte, SignatureSize)
- signAll(signature, privateKey, message, []byte(ctx), false)
- return signature
-}
-
-func verify(public PublicKey, message, signature, ctx []byte, preHash bool) bool {
- if len(public) != PublicKeySize ||
- len(signature) != SignatureSize ||
- !isLessThanOrder(signature[paramB:]) {
- return false
- }
-
- var P pointR1
- if ok := P.FromBytes(public); !ok {
- return false
- }
-
- H := sha512.New()
- var PHM []byte
-
- if preHash {
- _, _ = H.Write(message)
- PHM = H.Sum(nil)
- H.Reset()
- } else {
- PHM = message
- }
-
- R := signature[:paramB]
-
- writeDom(H, ctx, preHash)
-
- _, _ = H.Write(R)
- _, _ = H.Write(public)
- _, _ = H.Write(PHM)
- hRAM := H.Sum(nil)
- reduceModOrder(hRAM[:], true)
-
- var Q pointR1
- encR := (&[paramB]byte{})[:]
- P.neg()
- Q.doubleMult(&P, signature[paramB:], hRAM[:paramB])
- _ = Q.ToBytes(encR)
- return bytes.Equal(R, encR)
-}
-
-// VerifyAny returns true if the signature is valid. Failure cases are invalid
-// signature, or when the public key cannot be decoded.
-// This function supports all the three signature variants defined in RFC-8032,
-// namely Ed25519 (or pure EdDSA), Ed25519Ph, and Ed25519Ctx.
-// The opts.HashFunc() must return zero to specify either Ed25519 or Ed25519Ctx
-// variant. This can be achieved by passing crypto.Hash(0) as the value for opts.
-// The opts.HashFunc() must return SHA512 to specify the Ed25519Ph variant.
-// This can be achieved by passing crypto.SHA512 as the value for opts.
-// Use a SignerOptions struct to pass a context string for verification.
-func VerifyAny(public PublicKey, message, signature []byte, opts crypto.SignerOpts) bool {
- var ctx string
- var scheme SchemeID
- if o, ok := opts.(SignerOptions); ok {
- ctx = o.Context
- scheme = o.Scheme
- }
-
- switch {
- case scheme == ED25519 && opts.HashFunc() == crypto.Hash(0):
- return Verify(public, message, signature)
- case scheme == ED25519Ph && opts.HashFunc() == crypto.SHA512:
- return VerifyPh(public, message, signature, ctx)
- case scheme == ED25519Ctx && opts.HashFunc() == crypto.Hash(0) && len(ctx) > 0:
- return VerifyWithCtx(public, message, signature, ctx)
- default:
- return false
- }
-}
-
-// Verify returns true if the signature is valid. Failure cases are invalid
-// signature, or when the public key cannot be decoded.
-// This function supports the signature variant defined in RFC-8032: Ed25519,
-// also known as the pure version of EdDSA.
-func Verify(public PublicKey, message, signature []byte) bool {
- return verify(public, message, signature, []byte(""), false)
-}
-
-// VerifyPh returns true if the signature is valid. Failure cases are invalid
-// signature, or when the public key cannot be decoded.
-// This function supports the signature variant defined in RFC-8032: Ed25519ph,
-// meaning it internally hashes the message using SHA-512.
-// A context string of at most 255 bytes may be passed to this function. It
-// can be empty.
-func VerifyPh(public PublicKey, message, signature []byte, ctx string) bool {
- return verify(public, message, signature, []byte(ctx), true)
-}
-
-// VerifyWithCtx returns true if the signature is valid. Failure cases are invalid
-// signature, or when the public key cannot be decoded, or when context is
-// not provided.
-// This function supports the signature variant defined in RFC-8032: Ed25519ctx,
-// meaning it does not handle prehashed messages. A non-empty context string of
-// at most 255 bytes must be provided.
-func VerifyWithCtx(public PublicKey, message, signature []byte, ctx string) bool {
- if len(ctx) == 0 || len(ctx) > ContextMaxSize {
- return false
- }
-
- return verify(public, message, signature, []byte(ctx), false)
-}
-
-func clamp(k []byte) {
- k[0] &= 248
- k[paramB-1] = (k[paramB-1] & 127) | 64
-}
-
-// isLessThanOrder returns true if 0 <= x < order.
-func isLessThanOrder(x []byte) bool {
- i := len(order) - 1
- for i > 0 && x[i] == order[i] {
- i--
- }
- return x[i] < order[i]
-}
-
-func writeDom(h io.Writer, ctx []byte, preHash bool) {
- dom2 := "SigEd25519 no Ed25519 collisions"
-
- if len(ctx) > 0 {
- _, _ = h.Write([]byte(dom2))
- if preHash {
- _, _ = h.Write([]byte{byte(0x01), byte(len(ctx))})
- } else {
- _, _ = h.Write([]byte{byte(0x00), byte(len(ctx))})
- }
- _, _ = h.Write(ctx)
- } else if preHash {
- _, _ = h.Write([]byte(dom2))
- _, _ = h.Write([]byte{0x01, 0x00})
- }
-}
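Taken together, the API deleted above can be exercised as follows. This is an illustrative sketch grounded in the doc comments of this file; the import path assumes the vendored module that this diff removes:

package main

import (
	"crypto"
	"crypto/rand"
	"fmt"

	"github.com/cloudflare/circl/sign/ed25519"
)

func main() {
	pub, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}
	msg := []byte("hello")

	// Pure Ed25519 via the dedicated functions.
	sig := ed25519.Sign(priv, msg)
	fmt.Println(ed25519.Verify(pub, msg, sig)) // true

	// Ed25519ph via crypto.Signer: HashFunc must report SHA-512. The rand
	// argument is unused, since signing is deterministic per the code above.
	opts := ed25519.SignerOptions{Hash: crypto.SHA512, Context: "example", Scheme: ed25519.ED25519Ph}
	sigPh, err := priv.Sign(nil, msg, opts)
	if err != nil {
		panic(err)
	}
	fmt.Println(ed25519.VerifyAny(pub, msg, sigPh, opts)) // true

	// Ed25519ctx requires a non-empty context string.
	sigCtx := ed25519.SignWithCtx(priv, msg, "example")
	fmt.Println(ed25519.VerifyWithCtx(pub, msg, sigCtx, "example")) // true
}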
diff --git a/vendor/github.com/cloudflare/circl/sign/ed25519/modular.go b/vendor/github.com/cloudflare/circl/sign/ed25519/modular.go
deleted file mode 100644
index 10efafdc..00000000
--- a/vendor/github.com/cloudflare/circl/sign/ed25519/modular.go
+++ /dev/null
@@ -1,175 +0,0 @@
-package ed25519
-
-import (
- "encoding/binary"
- "math/bits"
-)
-
-var order = [paramB]byte{
- 0xed, 0xd3, 0xf5, 0x5c, 0x1a, 0x63, 0x12, 0x58,
- 0xd6, 0x9c, 0xf7, 0xa2, 0xde, 0xf9, 0xde, 0x14,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10,
-}
-
-// isLessThan returns true if 0 <= x < y, and assumes that slices have the same length.
-func isLessThan(x, y []byte) bool {
- i := len(x) - 1
- for i > 0 && x[i] == y[i] {
- i--
- }
- return x[i] < y[i]
-}
-
-// reduceModOrder calculates k = k mod (order of the curve).
-func reduceModOrder(k []byte, is512Bit bool) {
- var X [((2 * paramB) * 8) / 64]uint64
- numWords := len(k) >> 3
- for i := 0; i < numWords; i++ {
- X[i] = binary.LittleEndian.Uint64(k[i*8 : (i+1)*8])
- }
- red512(&X, is512Bit)
- for i := 0; i < numWords; i++ {
- binary.LittleEndian.PutUint64(k[i*8:(i+1)*8], X[i])
- }
-}
-
-// red512 calculates x = x mod (order of the curve).
-func red512(x *[8]uint64, full bool) {
- // Implementation of Algs.(14.47)+(14.52) of Handbook of Applied
- // Cryptography, by A. Menezes, P. van Oorschot, and S. Vanstone.
- const (
- ell0 = uint64(0x5812631a5cf5d3ed)
- ell1 = uint64(0x14def9dea2f79cd6)
- ell160 = uint64(0x812631a5cf5d3ed0)
- ell161 = uint64(0x4def9dea2f79cd65)
- ell162 = uint64(0x0000000000000001)
- )
-
- var c0, c1, c2, c3 uint64
- r0, r1, r2, r3, r4 := x[0], x[1], x[2], x[3], uint64(0)
-
- if full {
- q0, q1, q2, q3 := x[4], x[5], x[6], x[7]
-
- for i := 0; i < 3; i++ {
- h0, s0 := bits.Mul64(q0, ell160)
- h1, s1 := bits.Mul64(q1, ell160)
- h2, s2 := bits.Mul64(q2, ell160)
- h3, s3 := bits.Mul64(q3, ell160)
-
- s1, c0 = bits.Add64(h0, s1, 0)
- s2, c1 = bits.Add64(h1, s2, c0)
- s3, c2 = bits.Add64(h2, s3, c1)
- s4, _ := bits.Add64(h3, 0, c2)
-
- h0, l0 := bits.Mul64(q0, ell161)
- h1, l1 := bits.Mul64(q1, ell161)
- h2, l2 := bits.Mul64(q2, ell161)
- h3, l3 := bits.Mul64(q3, ell161)
-
- l1, c0 = bits.Add64(h0, l1, 0)
- l2, c1 = bits.Add64(h1, l2, c0)
- l3, c2 = bits.Add64(h2, l3, c1)
- l4, _ := bits.Add64(h3, 0, c2)
-
- s1, c0 = bits.Add64(s1, l0, 0)
- s2, c1 = bits.Add64(s2, l1, c0)
- s3, c2 = bits.Add64(s3, l2, c1)
- s4, c3 = bits.Add64(s4, l3, c2)
- s5, s6 := bits.Add64(l4, 0, c3)
-
- s2, c0 = bits.Add64(s2, q0, 0)
- s3, c1 = bits.Add64(s3, q1, c0)
- s4, c2 = bits.Add64(s4, q2, c1)
- s5, c3 = bits.Add64(s5, q3, c2)
- s6, s7 := bits.Add64(s6, 0, c3)
-
- q := q0 | q1 | q2 | q3
- m := -((q | -q) >> 63) // if q=0 then m=0...0 else m=1..1
- s0 &= m
- s1 &= m
- s2 &= m
- s3 &= m
- q0, q1, q2, q3 = s4, s5, s6, s7
-
- if (i+1)%2 == 0 {
- r0, c0 = bits.Add64(r0, s0, 0)
- r1, c1 = bits.Add64(r1, s1, c0)
- r2, c2 = bits.Add64(r2, s2, c1)
- r3, c3 = bits.Add64(r3, s3, c2)
- r4, _ = bits.Add64(r4, 0, c3)
- } else {
- r0, c0 = bits.Sub64(r0, s0, 0)
- r1, c1 = bits.Sub64(r1, s1, c0)
- r2, c2 = bits.Sub64(r2, s2, c1)
- r3, c3 = bits.Sub64(r3, s3, c2)
- r4, _ = bits.Sub64(r4, 0, c3)
- }
- }
-
- m := -(r4 >> 63)
- r0, c0 = bits.Add64(r0, m&ell160, 0)
- r1, c1 = bits.Add64(r1, m&ell161, c0)
- r2, c2 = bits.Add64(r2, m&ell162, c1)
- r3, c3 = bits.Add64(r3, 0, c2)
- r4, _ = bits.Add64(r4, m&1, c3)
- x[4], x[5], x[6], x[7] = 0, 0, 0, 0
- }
-
- q0 := (r4 << 4) | (r3 >> 60)
- r3 &= (uint64(1) << 60) - 1
-
- h0, s0 := bits.Mul64(ell0, q0)
- h1, s1 := bits.Mul64(ell1, q0)
- s1, c0 = bits.Add64(h0, s1, 0)
- s2, _ := bits.Add64(h1, 0, c0)
-
- r0, c0 = bits.Sub64(r0, s0, 0)
- r1, c1 = bits.Sub64(r1, s1, c0)
- r2, c2 = bits.Sub64(r2, s2, c1)
- r3, _ = bits.Sub64(r3, 0, c2)
-
- x[0], x[1], x[2], x[3] = r0, r1, r2, r3
-}
-
-// calculateS computes s = r + k*a mod (order of the curve).
-func calculateS(s, r, k, a []byte) {
- K := [4]uint64{
- binary.LittleEndian.Uint64(k[0*8 : 1*8]),
- binary.LittleEndian.Uint64(k[1*8 : 2*8]),
- binary.LittleEndian.Uint64(k[2*8 : 3*8]),
- binary.LittleEndian.Uint64(k[3*8 : 4*8]),
- }
- S := [8]uint64{
- binary.LittleEndian.Uint64(r[0*8 : 1*8]),
- binary.LittleEndian.Uint64(r[1*8 : 2*8]),
- binary.LittleEndian.Uint64(r[2*8 : 3*8]),
- binary.LittleEndian.Uint64(r[3*8 : 4*8]),
- }
- var c3 uint64
- for i := range K {
- ai := binary.LittleEndian.Uint64(a[i*8 : (i+1)*8])
-
- h0, l0 := bits.Mul64(K[0], ai)
- h1, l1 := bits.Mul64(K[1], ai)
- h2, l2 := bits.Mul64(K[2], ai)
- h3, l3 := bits.Mul64(K[3], ai)
-
- l1, c0 := bits.Add64(h0, l1, 0)
- l2, c1 := bits.Add64(h1, l2, c0)
- l3, c2 := bits.Add64(h2, l3, c1)
- l4, _ := bits.Add64(h3, 0, c2)
-
- S[i+0], c0 = bits.Add64(S[i+0], l0, 0)
- S[i+1], c1 = bits.Add64(S[i+1], l1, c0)
- S[i+2], c2 = bits.Add64(S[i+2], l2, c1)
- S[i+3], c3 = bits.Add64(S[i+3], l3, c2)
- S[i+4], _ = bits.Add64(S[i+4], l4, c3)
- }
- red512(&S, true)
- binary.LittleEndian.PutUint64(s[0*8:1*8], S[0])
- binary.LittleEndian.PutUint64(s[1*8:2*8], S[1])
- binary.LittleEndian.PutUint64(s[2*8:3*8], S[2])
- binary.LittleEndian.PutUint64(s[3*8:4*8], S[3])
-}
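The unexplained constants in red512 decompose the group order ℓ = 2^252 + 27742317777372353535851937790883648493: ell0 and ell1 are its two low 64-bit words, and ell160..ell162 come from 16·ℓ = 2^256 + ell162·2^128 + ell161·2^64 + ell160. A standalone sanity check (math/big only, not circl code):

package main

import (
	"fmt"
	"math/big"
)

func main() {
	ell, _ := new(big.Int).SetString("27742317777372353535851937790883648493", 10)
	ell.Add(ell, new(big.Int).Lsh(big.NewInt(1), 252)) // the group order ℓ

	mask64 := new(big.Int).SetUint64(^uint64(0))
	word := func(x *big.Int, i uint) uint64 { // i-th little-endian 64-bit word
		return new(big.Int).And(new(big.Int).Rsh(x, 64*i), mask64).Uint64()
	}

	fmt.Printf("ell0   = %#x\n", word(ell, 0)) // 0x5812631a5cf5d3ed
	fmt.Printf("ell1   = %#x\n", word(ell, 1)) // 0x14def9dea2f79cd6

	ell16 := new(big.Int).Lsh(ell, 4)           // 16*ℓ = 2^256 + ...
	fmt.Printf("ell160 = %#x\n", word(ell16, 0)) // 0x812631a5cf5d3ed0
	fmt.Printf("ell161 = %#x\n", word(ell16, 1)) // 0x4def9dea2f79cd65
	fmt.Printf("ell162 = %#x\n", word(ell16, 2)) // 0x1
}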
diff --git a/vendor/github.com/cloudflare/circl/sign/ed25519/mult.go b/vendor/github.com/cloudflare/circl/sign/ed25519/mult.go
deleted file mode 100644
index 3216aae3..00000000
--- a/vendor/github.com/cloudflare/circl/sign/ed25519/mult.go
+++ /dev/null
@@ -1,180 +0,0 @@
-package ed25519
-
-import (
- "crypto/subtle"
- "encoding/binary"
- "math/bits"
-
- "github.com/cloudflare/circl/internal/conv"
- "github.com/cloudflare/circl/math"
- fp "github.com/cloudflare/circl/math/fp25519"
-)
-
-var paramD = fp.Elt{
- 0xa3, 0x78, 0x59, 0x13, 0xca, 0x4d, 0xeb, 0x75,
- 0xab, 0xd8, 0x41, 0x41, 0x4d, 0x0a, 0x70, 0x00,
- 0x98, 0xe8, 0x79, 0x77, 0x79, 0x40, 0xc7, 0x8c,
- 0x73, 0xfe, 0x6f, 0x2b, 0xee, 0x6c, 0x03, 0x52,
-}
-
-// mLSBRecoding parameters.
-const (
- fxT = 257
- fxV = 2
- fxW = 3
- fx2w1 = 1 << (uint(fxW) - 1)
- numWords64 = (paramB * 8 / 64)
-)
-
-// mLSBRecoding is the odd-only modified LSB-set recoding.
-//
-// Reference:
-//
-// "Efficient and secure algorithms for GLV-based scalar multiplication and
-// their implementation on GLV–GLS curves" by Faz-Hernandez et al.
-// http://doi.org/10.1007/s13389-014-0085-7.
-func mLSBRecoding(L []int8, k []byte) {
- const ee = (fxT + fxW*fxV - 1) / (fxW * fxV)
- const dd = ee * fxV
- const ll = dd * fxW
- if len(L) == (ll + 1) {
- var m [numWords64 + 1]uint64
- for i := 0; i < numWords64; i++ {
- m[i] = binary.LittleEndian.Uint64(k[8*i : 8*i+8])
- }
- condAddOrderN(&m)
- L[dd-1] = 1
- for i := 0; i < dd-1; i++ {
- kip1 := (m[(i+1)/64] >> (uint(i+1) % 64)) & 0x1
- L[i] = int8(kip1<<1) - 1
- }
- { // right-shift by d
- right := uint(dd % 64)
- left := uint(64) - right
- lim := ((numWords64+1)*64 - dd) / 64
- j := dd / 64
- for i := 0; i < lim; i++ {
- m[i] = (m[i+j] >> right) | (m[i+j+1] << left)
- }
- m[lim] = m[lim+j] >> right
- }
- for i := dd; i < ll; i++ {
- L[i] = L[i%dd] * int8(m[0]&0x1)
- div2subY(m[:], int64(L[i]>>1), numWords64)
- }
- L[ll] = int8(m[0])
- }
-}
-
-// absolute returns the absolute value of x without branching.
-func absolute(x int32) int32 {
- mask := x >> 31
- return (x + mask) ^ mask
-}
-
-// condAddOrderN updates x = x+order if x is even, otherwise x remains unchanged.
-func condAddOrderN(x *[numWords64 + 1]uint64) {
- isOdd := (x[0] & 0x1) - 1
- c := uint64(0)
- for i := 0; i < numWords64; i++ {
- orderWord := binary.LittleEndian.Uint64(order[8*i : 8*i+8])
- o := isOdd & orderWord
- x0, c0 := bits.Add64(x[i], o, c)
- x[i] = x0
- c = c0
- }
- x[numWords64], _ = bits.Add64(x[numWords64], 0, c)
-}
-
-// div2subY updates x = (x/2) - y.
-func div2subY(x []uint64, y int64, l int) {
- s := uint64(y >> 63)
- for i := 0; i < l-1; i++ {
- x[i] = (x[i] >> 1) | (x[i+1] << 63)
- }
- x[l-1] = (x[l-1] >> 1)
-
- b := uint64(0)
- x0, b0 := bits.Sub64(x[0], uint64(y), b)
- x[0] = x0
- b = b0
- for i := 1; i < l-1; i++ {
- x0, b0 := bits.Sub64(x[i], s, b)
- x[i] = x0
- b = b0
- }
- x[l-1], _ = bits.Sub64(x[l-1], s, b)
-}
-
-func (P *pointR1) fixedMult(scalar []byte) {
- if len(scalar) != paramB {
- panic("wrong scalar size")
- }
- const ee = (fxT + fxW*fxV - 1) / (fxW * fxV)
- const dd = ee * fxV
- const ll = dd * fxW
-
- L := make([]int8, ll+1)
- mLSBRecoding(L[:], scalar)
- S := &pointR3{}
- P.SetIdentity()
- for ii := ee - 1; ii >= 0; ii-- {
- P.double()
- for j := 0; j < fxV; j++ {
- dig := L[fxW*dd-j*ee+ii-ee]
- for i := (fxW-1)*dd - j*ee + ii - ee; i >= (2*dd - j*ee + ii - ee); i = i - dd {
- dig = 2*dig + L[i]
- }
- idx := absolute(int32(dig))
- sig := L[dd-j*ee+ii-ee]
- Tabj := &tabSign[fxV-j-1]
- for k := 0; k < fx2w1; k++ {
- S.cmov(&Tabj[k], subtle.ConstantTimeEq(int32(k), idx))
- }
- S.cneg(subtle.ConstantTimeEq(int32(sig), -1))
- P.mixAdd(S)
- }
- }
-}
-
-const (
- omegaFix = 7
- omegaVar = 5
-)
-
-// doubleMult computes P = mG + nQ.
-func (P *pointR1) doubleMult(Q *pointR1, m, n []byte) {
- nafFix := math.OmegaNAF(conv.BytesLe2BigInt(m), omegaFix)
- nafVar := math.OmegaNAF(conv.BytesLe2BigInt(n), omegaVar)
-
- if len(nafFix) > len(nafVar) {
- nafVar = append(nafVar, make([]int32, len(nafFix)-len(nafVar))...)
- } else if len(nafFix) < len(nafVar) {
- nafFix = append(nafFix, make([]int32, len(nafVar)-len(nafFix))...)
- }
-
- var TabQ [1 << (omegaVar - 2)]pointR2
- Q.oddMultiples(TabQ[:])
- P.SetIdentity()
- for i := len(nafFix) - 1; i >= 0; i-- {
- P.double()
- // Generator point
- if nafFix[i] != 0 {
- idxM := absolute(nafFix[i]) >> 1
- R := tabVerif[idxM]
- if nafFix[i] < 0 {
- R.neg()
- }
- P.mixAdd(&R)
- }
- // Variable input point
- if nafVar[i] != 0 {
- idxN := absolute(nafVar[i]) >> 1
- S := TabQ[idxN]
- if nafVar[i] < 0 {
- S.neg()
- }
- P.add(&S)
- }
- }
-}
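absolute above, and the digit extraction in Power.Digit earlier in this diff, use the same branch-free trick, which is worth seeing in isolation because the constant-time table lookups depend on it. abs32 below is a hypothetical standalone copy for illustration:

package main

import "fmt"

// abs32 computes |x| without a data-dependent branch: mask is all ones
// exactly when x is negative (arithmetic right shift), so (x+mask)^mask
// performs two's-complement negation only in that case.
func abs32(x int32) int32 {
	mask := x >> 31          // 0 if x >= 0, -1 if x < 0
	return (x + mask) ^ mask // x if x >= 0, -x if x < 0
}

func main() {
	fmt.Println(abs32(5), abs32(-5), abs32(0)) // 5 5 0
}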
diff --git a/vendor/github.com/cloudflare/circl/sign/ed25519/point.go b/vendor/github.com/cloudflare/circl/sign/ed25519/point.go
deleted file mode 100644
index 374a6950..00000000
--- a/vendor/github.com/cloudflare/circl/sign/ed25519/point.go
+++ /dev/null
@@ -1,195 +0,0 @@
-package ed25519
-
-import fp "github.com/cloudflare/circl/math/fp25519"
-
-type (
- pointR1 struct{ x, y, z, ta, tb fp.Elt }
- pointR2 struct {
- pointR3
- z2 fp.Elt
- }
-)
-type pointR3 struct{ addYX, subYX, dt2 fp.Elt }
-
-func (P *pointR1) neg() {
- fp.Neg(&P.x, &P.x)
- fp.Neg(&P.ta, &P.ta)
-}
-
-func (P *pointR1) SetIdentity() {
- P.x = fp.Elt{}
- fp.SetOne(&P.y)
- fp.SetOne(&P.z)
- P.ta = fp.Elt{}
- P.tb = fp.Elt{}
-}
-
-func (P *pointR1) toAffine() {
- fp.Inv(&P.z, &P.z)
- fp.Mul(&P.x, &P.x, &P.z)
- fp.Mul(&P.y, &P.y, &P.z)
- fp.Modp(&P.x)
- fp.Modp(&P.y)
- fp.SetOne(&P.z)
- P.ta = P.x
- P.tb = P.y
-}
-
-func (P *pointR1) ToBytes(k []byte) error {
- P.toAffine()
- var x [fp.Size]byte
- err := fp.ToBytes(k[:fp.Size], &P.y)
- if err != nil {
- return err
- }
- err = fp.ToBytes(x[:], &P.x)
- if err != nil {
- return err
- }
- b := x[0] & 1
- k[paramB-1] = k[paramB-1] | (b << 7)
- return nil
-}
-
-func (P *pointR1) FromBytes(k []byte) bool {
- if len(k) != paramB {
- panic("wrong size")
- }
- signX := k[paramB-1] >> 7
- copy(P.y[:], k[:fp.Size])
- P.y[fp.Size-1] &= 0x7F
- p := fp.P()
- if !isLessThan(P.y[:], p[:]) {
- return false
- }
-
- one, u, v := &fp.Elt{}, &fp.Elt{}, &fp.Elt{}
- fp.SetOne(one)
- fp.Sqr(u, &P.y) // u = y^2
- fp.Mul(v, u, ¶mD) // v = dy^2
- fp.Sub(u, u, one) // u = y^2-1
- fp.Add(v, v, one) // v = dy^2+1
- isQR := fp.InvSqrt(&P.x, u, v) // x = sqrt(u/v)
- if !isQR {
- return false
- }
- fp.Modp(&P.x) // x = x mod p
- if fp.IsZero(&P.x) && signX == 1 {
- return false
- }
- if signX != (P.x[0] & 1) {
- fp.Neg(&P.x, &P.x)
- }
- P.ta = P.x
- P.tb = P.y
- fp.SetOne(&P.z)
- return true
-}
-
-// double calculates 2P for curves with A=-1.
-func (P *pointR1) double() {
- Px, Py, Pz, Pta, Ptb := &P.x, &P.y, &P.z, &P.ta, &P.tb
- a, b, c, e, f, g, h := Px, Py, Pz, Pta, Px, Py, Ptb
- fp.Add(e, Px, Py) // x+y
- fp.Sqr(a, Px) // A = x^2
- fp.Sqr(b, Py) // B = y^2
- fp.Sqr(c, Pz) // z^2
- fp.Add(c, c, c) // C = 2*z^2
- fp.Add(h, a, b) // H = A+B
- fp.Sqr(e, e) // (x+y)^2
- fp.Sub(e, e, h) // E = (x+y)^2-A-B
- fp.Sub(g, b, a) // G = B-A
- fp.Sub(f, c, g) // F = C-G
- fp.Mul(Pz, f, g) // Z = F * G
- fp.Mul(Px, e, f) // X = E * F
- fp.Mul(Py, g, h) // Y = G * H, T = E * H
-}
-
-func (P *pointR1) mixAdd(Q *pointR3) {
- fp.Add(&P.z, &P.z, &P.z) // D = 2*z1
- P.coreAddition(Q)
-}
-
-func (P *pointR1) add(Q *pointR2) {
- fp.Mul(&P.z, &P.z, &Q.z2) // D = 2*z1*z2
- P.coreAddition(&Q.pointR3)
-}
-
-// coreAddition calculates P=P+Q for curves with A=-1.
-func (P *pointR1) coreAddition(Q *pointR3) {
- Px, Py, Pz, Pta, Ptb := &P.x, &P.y, &P.z, &P.ta, &P.tb
- addYX2, subYX2, dt2 := &Q.addYX, &Q.subYX, &Q.dt2
- a, b, c, d, e, f, g, h := Px, Py, &fp.Elt{}, Pz, Pta, Px, Py, Ptb
- fp.Mul(c, Pta, Ptb) // t1 = ta*tb
- fp.Sub(h, Py, Px) // y1-x1
- fp.Add(b, Py, Px) // y1+x1
- fp.Mul(a, h, subYX2) // A = (y1-x1)*(y2-x2)
- fp.Mul(b, b, addYX2) // B = (y1+x1)*(y2+x2)
- fp.Mul(c, c, dt2) // C = 2*D*t1*t2
- fp.Sub(e, b, a) // E = B-A
- fp.Add(h, b, a) // H = B+A
- fp.Sub(f, d, c) // F = D-C
- fp.Add(g, d, c) // G = D+C
- fp.Mul(Pz, f, g) // Z = F * G
- fp.Mul(Px, e, f) // X = E * F
- fp.Mul(Py, g, h) // Y = G * H, T = E * H
-}
-
-func (P *pointR1) oddMultiples(T []pointR2) {
- var R pointR2
- n := len(T)
- T[0].fromR1(P)
- _2P := *P
- _2P.double()
- R.fromR1(&_2P)
- for i := 1; i < n; i++ {
- P.add(&R)
- T[i].fromR1(P)
- }
-}
-
-func (P *pointR1) isEqual(Q *pointR1) bool {
- l, r := &fp.Elt{}, &fp.Elt{}
- fp.Mul(l, &P.x, &Q.z)
- fp.Mul(r, &Q.x, &P.z)
- fp.Sub(l, l, r)
- b := fp.IsZero(l)
- fp.Mul(l, &P.y, &Q.z)
- fp.Mul(r, &Q.y, &P.z)
- fp.Sub(l, l, r)
- b = b && fp.IsZero(l)
- fp.Mul(l, &P.ta, &P.tb)
- fp.Mul(l, l, &Q.z)
- fp.Mul(r, &Q.ta, &Q.tb)
- fp.Mul(r, r, &P.z)
- fp.Sub(l, l, r)
- b = b && fp.IsZero(l)
- return b
-}
-
-func (P *pointR3) neg() {
- P.addYX, P.subYX = P.subYX, P.addYX
- fp.Neg(&P.dt2, &P.dt2)
-}
-
-func (P *pointR2) fromR1(Q *pointR1) {
- fp.Add(&P.addYX, &Q.y, &Q.x)
- fp.Sub(&P.subYX, &Q.y, &Q.x)
- fp.Mul(&P.dt2, &Q.ta, &Q.tb)
- fp.Mul(&P.dt2, &P.dt2, ¶mD)
- fp.Add(&P.dt2, &P.dt2, &P.dt2)
- fp.Add(&P.z2, &Q.z, &Q.z)
-}
-
-func (P *pointR3) cneg(b int) {
- t := &fp.Elt{}
- fp.Cswap(&P.addYX, &P.subYX, uint(b))
- fp.Neg(t, &P.dt2)
- fp.Cmov(&P.dt2, t, uint(b))
-}
-
-func (P *pointR3) cmov(Q *pointR3, b int) {
- fp.Cmov(&P.addYX, &Q.addYX, uint(b))
- fp.Cmov(&P.subYX, &Q.subYX, uint(b))
- fp.Cmov(&P.dt2, &Q.dt2, uint(b))
-}
diff --git a/vendor/github.com/cloudflare/circl/sign/ed25519/pubkey.go b/vendor/github.com/cloudflare/circl/sign/ed25519/pubkey.go
deleted file mode 100644
index c3505b67..00000000
--- a/vendor/github.com/cloudflare/circl/sign/ed25519/pubkey.go
+++ /dev/null
@@ -1,9 +0,0 @@
-//go:build go1.13
-// +build go1.13
-
-package ed25519
-
-import cryptoEd25519 "crypto/ed25519"
-
-// PublicKey is the type of Ed25519 public keys.
-type PublicKey cryptoEd25519.PublicKey
diff --git a/vendor/github.com/cloudflare/circl/sign/ed25519/pubkey112.go b/vendor/github.com/cloudflare/circl/sign/ed25519/pubkey112.go
deleted file mode 100644
index d57d86ef..00000000
--- a/vendor/github.com/cloudflare/circl/sign/ed25519/pubkey112.go
+++ /dev/null
@@ -1,7 +0,0 @@
-//go:build !go1.13
-// +build !go1.13
-
-package ed25519
-
-// PublicKey is the type of Ed25519 public keys.
-type PublicKey []byte
diff --git a/vendor/github.com/cloudflare/circl/sign/ed25519/signapi.go b/vendor/github.com/cloudflare/circl/sign/ed25519/signapi.go
deleted file mode 100644
index e4520f52..00000000
--- a/vendor/github.com/cloudflare/circl/sign/ed25519/signapi.go
+++ /dev/null
@@ -1,87 +0,0 @@
-package ed25519
-
-import (
- "crypto/rand"
- "encoding/asn1"
-
- "github.com/cloudflare/circl/sign"
-)
-
-var sch sign.Scheme = &scheme{}
-
-// Scheme returns a signature interface.
-func Scheme() sign.Scheme { return sch }
-
-type scheme struct{}
-
-func (*scheme) Name() string { return "Ed25519" }
-func (*scheme) PublicKeySize() int { return PublicKeySize }
-func (*scheme) PrivateKeySize() int { return PrivateKeySize }
-func (*scheme) SignatureSize() int { return SignatureSize }
-func (*scheme) SeedSize() int { return SeedSize }
-func (*scheme) TLSIdentifier() uint { return 0x0807 }
-func (*scheme) SupportsContext() bool { return false }
-func (*scheme) Oid() asn1.ObjectIdentifier {
- return asn1.ObjectIdentifier{1, 3, 101, 112}
-}
-
-func (*scheme) GenerateKey() (sign.PublicKey, sign.PrivateKey, error) {
- return GenerateKey(rand.Reader)
-}
-
-func (*scheme) Sign(
- sk sign.PrivateKey,
- message []byte,
- opts *sign.SignatureOpts,
-) []byte {
- priv, ok := sk.(PrivateKey)
- if !ok {
- panic(sign.ErrTypeMismatch)
- }
- if opts != nil && opts.Context != "" {
- panic(sign.ErrContextNotSupported)
- }
- return Sign(priv, message)
-}
-
-func (*scheme) Verify(
- pk sign.PublicKey,
- message, signature []byte,
- opts *sign.SignatureOpts,
-) bool {
- pub, ok := pk.(PublicKey)
- if !ok {
- panic(sign.ErrTypeMismatch)
- }
- if opts != nil {
- if opts.Context != "" {
- panic(sign.ErrContextNotSupported)
- }
- }
- return Verify(pub, message, signature)
-}
-
-func (*scheme) DeriveKey(seed []byte) (sign.PublicKey, sign.PrivateKey) {
- privateKey := NewKeyFromSeed(seed)
- publicKey := make(PublicKey, PublicKeySize)
- copy(publicKey, privateKey[SeedSize:])
- return publicKey, privateKey
-}
-
-func (*scheme) UnmarshalBinaryPublicKey(buf []byte) (sign.PublicKey, error) {
- if len(buf) < PublicKeySize {
- return nil, sign.ErrPubKeySize
- }
- pub := make(PublicKey, PublicKeySize)
- copy(pub, buf[:PublicKeySize])
- return pub, nil
-}
-
-func (*scheme) UnmarshalBinaryPrivateKey(buf []byte) (sign.PrivateKey, error) {
- if len(buf) < PrivateKeySize {
- return nil, sign.ErrPrivKeySize
- }
- priv := make(PrivateKey, PrivateKeySize)
- copy(priv, buf[:PrivateKeySize])
- return priv, nil
-}
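The point of this file is that callers can treat Ed25519 as just another sign.Scheme and stay agnostic of the concrete algorithm. A minimal usage sketch, assuming the vendored import path removed by this diff:

package main

import (
	"fmt"

	"github.com/cloudflare/circl/sign/ed25519"
)

func main() {
	scheme := ed25519.Scheme()
	pk, sk, err := scheme.GenerateKey()
	if err != nil {
		panic(err)
	}
	msg := []byte("hello")
	sig := scheme.Sign(sk, msg, nil) // nil opts: plain Ed25519, no context
	fmt.Println(scheme.Name(), scheme.SignatureSize(), scheme.Verify(pk, msg, sig, nil)) // Ed25519 64 true
}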
diff --git a/vendor/github.com/cloudflare/circl/sign/ed25519/tables.go b/vendor/github.com/cloudflare/circl/sign/ed25519/tables.go
deleted file mode 100644
index 8763b426..00000000
--- a/vendor/github.com/cloudflare/circl/sign/ed25519/tables.go
+++ /dev/null
@@ -1,213 +0,0 @@
-package ed25519
-
-import fp "github.com/cloudflare/circl/math/fp25519"
-
-var tabSign = [fxV][fx2w1]pointR3{
- {
- pointR3{
- addYX: fp.Elt{0x85, 0x3b, 0x8c, 0xf5, 0xc6, 0x93, 0xbc, 0x2f, 0x19, 0x0e, 0x8c, 0xfb, 0xc6, 0x2d, 0x93, 0xcf, 0xc2, 0x42, 0x3d, 0x64, 0x98, 0x48, 0x0b, 0x27, 0x65, 0xba, 0xd4, 0x33, 0x3a, 0x9d, 0xcf, 0x07},
- subYX: fp.Elt{0x3e, 0x91, 0x40, 0xd7, 0x05, 0x39, 0x10, 0x9d, 0xb3, 0xbe, 0x40, 0xd1, 0x05, 0x9f, 0x39, 0xfd, 0x09, 0x8a, 0x8f, 0x68, 0x34, 0x84, 0xc1, 0xa5, 0x67, 0x12, 0xf8, 0x98, 0x92, 0x2f, 0xfd, 0x44},
- dt2: fp.Elt{0x68, 0xaa, 0x7a, 0x87, 0x05, 0x12, 0xc9, 0xab, 0x9e, 0xc4, 0xaa, 0xcc, 0x23, 0xe8, 0xd9, 0x26, 0x8c, 0x59, 0x43, 0xdd, 0xcb, 0x7d, 0x1b, 0x5a, 0xa8, 0x65, 0x0c, 0x9f, 0x68, 0x7b, 0x11, 0x6f},
- },
- {
- addYX: fp.Elt{0x7c, 0xb0, 0x9e, 0xe6, 0xc5, 0xbf, 0xfa, 0x13, 0x8e, 0x0d, 0x22, 0xde, 0xc8, 0xd1, 0xce, 0x52, 0x02, 0xd5, 0x62, 0x31, 0x71, 0x0e, 0x8e, 0x9d, 0xb0, 0xd6, 0x00, 0xa5, 0x5a, 0x0e, 0xce, 0x72},
- subYX: fp.Elt{0x1a, 0x8e, 0x5c, 0xdc, 0xa4, 0xb3, 0x6c, 0x51, 0x18, 0xa0, 0x09, 0x80, 0x9a, 0x46, 0x33, 0xd5, 0xe0, 0x3c, 0x4d, 0x3b, 0xfc, 0x49, 0xa2, 0x43, 0x29, 0xe1, 0x29, 0xa9, 0x93, 0xea, 0x7c, 0x35},
- dt2: fp.Elt{0x08, 0x46, 0x6f, 0x68, 0x7f, 0x0b, 0x7c, 0x9e, 0xad, 0xba, 0x07, 0x61, 0x74, 0x83, 0x2f, 0xfc, 0x26, 0xd6, 0x09, 0xb9, 0x00, 0x34, 0x36, 0x4f, 0x01, 0xf3, 0x48, 0xdb, 0x43, 0xba, 0x04, 0x44},
- },
- {
- addYX: fp.Elt{0x4c, 0xda, 0x0d, 0x13, 0x66, 0xfd, 0x82, 0x84, 0x9f, 0x75, 0x5b, 0xa2, 0x17, 0xfe, 0x34, 0xbf, 0x1f, 0xcb, 0xba, 0x90, 0x55, 0x80, 0x83, 0xfd, 0x63, 0xb9, 0x18, 0xf8, 0x5b, 0x5d, 0x94, 0x1e},
- subYX: fp.Elt{0xb9, 0xdb, 0x6c, 0x04, 0x88, 0x22, 0xd8, 0x79, 0x83, 0x2f, 0x8d, 0x65, 0x6b, 0xd2, 0xab, 0x1b, 0xdd, 0x65, 0xe5, 0x93, 0x63, 0xf8, 0xa2, 0xd8, 0x3c, 0xf1, 0x4b, 0xc5, 0x99, 0xd1, 0xf2, 0x12},
- dt2: fp.Elt{0x05, 0x4c, 0xb8, 0x3b, 0xfe, 0xf5, 0x9f, 0x2e, 0xd1, 0xb2, 0xb8, 0xff, 0xfe, 0x6d, 0xd9, 0x37, 0xe0, 0xae, 0xb4, 0x5a, 0x51, 0x80, 0x7e, 0x9b, 0x1d, 0xd1, 0x8d, 0x8c, 0x56, 0xb1, 0x84, 0x35},
- },
- {
- addYX: fp.Elt{0x39, 0x71, 0x43, 0x34, 0xe3, 0x42, 0x45, 0xa1, 0xf2, 0x68, 0x71, 0xa7, 0xe8, 0x23, 0xfd, 0x9f, 0x86, 0x48, 0xff, 0xe5, 0x96, 0x74, 0xcf, 0x05, 0x49, 0xe2, 0xb3, 0x6c, 0x17, 0x77, 0x2f, 0x6d},
- subYX: fp.Elt{0x73, 0x3f, 0xc1, 0xc7, 0x6a, 0x66, 0xa1, 0x20, 0xdd, 0x11, 0xfb, 0x7a, 0x6e, 0xa8, 0x51, 0xb8, 0x3f, 0x9d, 0xa2, 0x97, 0x84, 0xb5, 0xc7, 0x90, 0x7c, 0xab, 0x48, 0xd6, 0x84, 0xa3, 0xd5, 0x1a},
- dt2: fp.Elt{0x63, 0x27, 0x3c, 0x49, 0x4b, 0xfc, 0x22, 0xf2, 0x0b, 0x50, 0xc2, 0x0f, 0xb4, 0x1f, 0x31, 0x0c, 0x2f, 0x53, 0xab, 0xaa, 0x75, 0x6f, 0xe0, 0x69, 0x39, 0x56, 0xe0, 0x3b, 0xb7, 0xa8, 0xbf, 0x45},
- },
- },
- {
- {
- addYX: fp.Elt{0x00, 0x45, 0xd9, 0x0d, 0x58, 0x03, 0xfc, 0x29, 0x93, 0xec, 0xbb, 0x6f, 0xa4, 0x7a, 0xd2, 0xec, 0xf8, 0xa7, 0xe2, 0xc2, 0x5f, 0x15, 0x0a, 0x13, 0xd5, 0xa1, 0x06, 0xb7, 0x1a, 0x15, 0x6b, 0x41},
- subYX: fp.Elt{0x85, 0x8c, 0xb2, 0x17, 0xd6, 0x3b, 0x0a, 0xd3, 0xea, 0x3b, 0x77, 0x39, 0xb7, 0x77, 0xd3, 0xc5, 0xbf, 0x5c, 0x6a, 0x1e, 0x8c, 0xe7, 0xc6, 0xc6, 0xc4, 0xb7, 0x2a, 0x8b, 0xf7, 0xb8, 0x61, 0x0d},
- dt2: fp.Elt{0xb0, 0x36, 0xc1, 0xe9, 0xef, 0xd7, 0xa8, 0x56, 0x20, 0x4b, 0xe4, 0x58, 0xcd, 0xe5, 0x07, 0xbd, 0xab, 0xe0, 0x57, 0x1b, 0xda, 0x2f, 0xe6, 0xaf, 0xd2, 0xe8, 0x77, 0x42, 0xf7, 0x2a, 0x1a, 0x19},
- },
- {
- addYX: fp.Elt{0x6a, 0x6d, 0x6d, 0xd1, 0xfa, 0xf5, 0x03, 0x30, 0xbd, 0x6d, 0xc2, 0xc8, 0xf5, 0x38, 0x80, 0x4f, 0xb2, 0xbe, 0xa1, 0x76, 0x50, 0x1a, 0x73, 0xf2, 0x78, 0x2b, 0x8e, 0x3a, 0x1e, 0x34, 0x47, 0x7b},
- subYX: fp.Elt{0xc3, 0x2c, 0x36, 0xdc, 0xc5, 0x45, 0xbc, 0xef, 0x1b, 0x64, 0xd6, 0x65, 0x28, 0xe9, 0xda, 0x84, 0x13, 0xbe, 0x27, 0x8e, 0x3f, 0x98, 0x2a, 0x37, 0xee, 0x78, 0x97, 0xd6, 0xc0, 0x6f, 0xb4, 0x53},
- dt2: fp.Elt{0x58, 0x5d, 0xa7, 0xa3, 0x68, 0xbb, 0x20, 0x30, 0x2e, 0x03, 0xe9, 0xb1, 0xd4, 0x90, 0x72, 0xe3, 0x71, 0xb2, 0x36, 0x3e, 0x73, 0xa0, 0x2e, 0x3d, 0xd1, 0x85, 0x33, 0x62, 0x4e, 0xa7, 0x7b, 0x31},
- },
- {
- addYX: fp.Elt{0xbf, 0xc4, 0x38, 0x53, 0xfb, 0x68, 0xa9, 0x77, 0xce, 0x55, 0xf9, 0x05, 0xcb, 0xeb, 0xfb, 0x8c, 0x46, 0xc2, 0x32, 0x7c, 0xf0, 0xdb, 0xd7, 0x2c, 0x62, 0x8e, 0xdd, 0x54, 0x75, 0xcf, 0x3f, 0x33},
- subYX: fp.Elt{0x49, 0x50, 0x1f, 0x4e, 0x6e, 0x55, 0x55, 0xde, 0x8c, 0x4e, 0x77, 0x96, 0x38, 0x3b, 0xfe, 0xb6, 0x43, 0x3c, 0x86, 0x69, 0xc2, 0x72, 0x66, 0x1f, 0x6b, 0xf9, 0x87, 0xbc, 0x4f, 0x37, 0x3e, 0x3c},
- dt2: fp.Elt{0xd2, 0x2f, 0x06, 0x6b, 0x08, 0x07, 0x69, 0x77, 0xc0, 0x94, 0xcc, 0xae, 0x43, 0x00, 0x59, 0x6e, 0xa3, 0x63, 0xa8, 0xdd, 0xfa, 0x24, 0x18, 0xd0, 0x35, 0xc7, 0x78, 0xf7, 0x0d, 0xd4, 0x5a, 0x1e},
- },
- {
- addYX: fp.Elt{0x45, 0xc1, 0x17, 0x51, 0xf8, 0xed, 0x7e, 0xc7, 0xa9, 0x1a, 0x11, 0x6e, 0x2d, 0xef, 0x0b, 0xd5, 0x3f, 0x98, 0xb0, 0xa3, 0x9d, 0x65, 0xf1, 0xcd, 0x53, 0x4a, 0x8a, 0x18, 0x70, 0x0a, 0x7f, 0x23},
- subYX: fp.Elt{0xdd, 0xef, 0xbe, 0x3a, 0x31, 0xe0, 0xbc, 0xbe, 0x6d, 0x5d, 0x79, 0x87, 0xd6, 0xbe, 0x68, 0xe3, 0x59, 0x76, 0x8c, 0x86, 0x0e, 0x7a, 0x92, 0x13, 0x14, 0x8f, 0x67, 0xb3, 0xcb, 0x1a, 0x76, 0x76},
- dt2: fp.Elt{0x56, 0x7a, 0x1c, 0x9d, 0xca, 0x96, 0xf9, 0xf9, 0x03, 0x21, 0xd4, 0xe8, 0xb3, 0xd5, 0xe9, 0x52, 0xc8, 0x54, 0x1e, 0x1b, 0x13, 0xb6, 0xfd, 0x47, 0x7d, 0x02, 0x32, 0x33, 0x27, 0xe2, 0x1f, 0x19},
- },
- },
-}
-
-var tabVerif = [1 << (omegaFix - 2)]pointR3{
- { /* 1P */
- addYX: fp.Elt{0x85, 0x3b, 0x8c, 0xf5, 0xc6, 0x93, 0xbc, 0x2f, 0x19, 0x0e, 0x8c, 0xfb, 0xc6, 0x2d, 0x93, 0xcf, 0xc2, 0x42, 0x3d, 0x64, 0x98, 0x48, 0x0b, 0x27, 0x65, 0xba, 0xd4, 0x33, 0x3a, 0x9d, 0xcf, 0x07},
- subYX: fp.Elt{0x3e, 0x91, 0x40, 0xd7, 0x05, 0x39, 0x10, 0x9d, 0xb3, 0xbe, 0x40, 0xd1, 0x05, 0x9f, 0x39, 0xfd, 0x09, 0x8a, 0x8f, 0x68, 0x34, 0x84, 0xc1, 0xa5, 0x67, 0x12, 0xf8, 0x98, 0x92, 0x2f, 0xfd, 0x44},
- dt2: fp.Elt{0x68, 0xaa, 0x7a, 0x87, 0x05, 0x12, 0xc9, 0xab, 0x9e, 0xc4, 0xaa, 0xcc, 0x23, 0xe8, 0xd9, 0x26, 0x8c, 0x59, 0x43, 0xdd, 0xcb, 0x7d, 0x1b, 0x5a, 0xa8, 0x65, 0x0c, 0x9f, 0x68, 0x7b, 0x11, 0x6f},
- },
- { /* 3P */
- addYX: fp.Elt{0x30, 0x97, 0xee, 0x4c, 0xa8, 0xb0, 0x25, 0xaf, 0x8a, 0x4b, 0x86, 0xe8, 0x30, 0x84, 0x5a, 0x02, 0x32, 0x67, 0x01, 0x9f, 0x02, 0x50, 0x1b, 0xc1, 0xf4, 0xf8, 0x80, 0x9a, 0x1b, 0x4e, 0x16, 0x7a},
- subYX: fp.Elt{0x65, 0xd2, 0xfc, 0xa4, 0xe8, 0x1f, 0x61, 0x56, 0x7d, 0xba, 0xc1, 0xe5, 0xfd, 0x53, 0xd3, 0x3b, 0xbd, 0xd6, 0x4b, 0x21, 0x1a, 0xf3, 0x31, 0x81, 0x62, 0xda, 0x5b, 0x55, 0x87, 0x15, 0xb9, 0x2a},
- dt2: fp.Elt{0x89, 0xd8, 0xd0, 0x0d, 0x3f, 0x93, 0xae, 0x14, 0x62, 0xda, 0x35, 0x1c, 0x22, 0x23, 0x94, 0x58, 0x4c, 0xdb, 0xf2, 0x8c, 0x45, 0xe5, 0x70, 0xd1, 0xc6, 0xb4, 0xb9, 0x12, 0xaf, 0x26, 0x28, 0x5a},
- },
- { /* 5P */
- addYX: fp.Elt{0x33, 0xbb, 0xa5, 0x08, 0x44, 0xbc, 0x12, 0xa2, 0x02, 0xed, 0x5e, 0xc7, 0xc3, 0x48, 0x50, 0x8d, 0x44, 0xec, 0xbf, 0x5a, 0x0c, 0xeb, 0x1b, 0xdd, 0xeb, 0x06, 0xe2, 0x46, 0xf1, 0xcc, 0x45, 0x29},
- subYX: fp.Elt{0xba, 0xd6, 0x47, 0xa4, 0xc3, 0x82, 0x91, 0x7f, 0xb7, 0x29, 0x27, 0x4b, 0xd1, 0x14, 0x00, 0xd5, 0x87, 0xa0, 0x64, 0xb8, 0x1c, 0xf1, 0x3c, 0xe3, 0xf3, 0x55, 0x1b, 0xeb, 0x73, 0x7e, 0x4a, 0x15},
- dt2: fp.Elt{0x85, 0x82, 0x2a, 0x81, 0xf1, 0xdb, 0xbb, 0xbc, 0xfc, 0xd1, 0xbd, 0xd0, 0x07, 0x08, 0x0e, 0x27, 0x2d, 0xa7, 0xbd, 0x1b, 0x0b, 0x67, 0x1b, 0xb4, 0x9a, 0xb6, 0x3b, 0x6b, 0x69, 0xbe, 0xaa, 0x43},
- },
- { /* 7P */
- addYX: fp.Elt{0xbf, 0xa3, 0x4e, 0x94, 0xd0, 0x5c, 0x1a, 0x6b, 0xd2, 0xc0, 0x9d, 0xb3, 0x3a, 0x35, 0x70, 0x74, 0x49, 0x2e, 0x54, 0x28, 0x82, 0x52, 0xb2, 0x71, 0x7e, 0x92, 0x3c, 0x28, 0x69, 0xea, 0x1b, 0x46},
- subYX: fp.Elt{0xb1, 0x21, 0x32, 0xaa, 0x9a, 0x2c, 0x6f, 0xba, 0xa7, 0x23, 0xba, 0x3b, 0x53, 0x21, 0xa0, 0x6c, 0x3a, 0x2c, 0x19, 0x92, 0x4f, 0x76, 0xea, 0x9d, 0xe0, 0x17, 0x53, 0x2e, 0x5d, 0xdd, 0x6e, 0x1d},
- dt2: fp.Elt{0xa2, 0xb3, 0xb8, 0x01, 0xc8, 0x6d, 0x83, 0xf1, 0x9a, 0xa4, 0x3e, 0x05, 0x47, 0x5f, 0x03, 0xb3, 0xf3, 0xad, 0x77, 0x58, 0xba, 0x41, 0x9c, 0x52, 0xa7, 0x90, 0x0f, 0x6a, 0x1c, 0xbb, 0x9f, 0x7a},
- },
- { /* 9P */
- addYX: fp.Elt{0x2f, 0x63, 0xa8, 0xa6, 0x8a, 0x67, 0x2e, 0x9b, 0xc5, 0x46, 0xbc, 0x51, 0x6f, 0x9e, 0x50, 0xa6, 0xb5, 0xf5, 0x86, 0xc6, 0xc9, 0x33, 0xb2, 0xce, 0x59, 0x7f, 0xdd, 0x8a, 0x33, 0xed, 0xb9, 0x34},
- subYX: fp.Elt{0x64, 0x80, 0x9d, 0x03, 0x7e, 0x21, 0x6e, 0xf3, 0x9b, 0x41, 0x20, 0xf5, 0xb6, 0x81, 0xa0, 0x98, 0x44, 0xb0, 0x5e, 0xe7, 0x08, 0xc6, 0xcb, 0x96, 0x8f, 0x9c, 0xdc, 0xfa, 0x51, 0x5a, 0xc0, 0x49},
- dt2: fp.Elt{0x1b, 0xaf, 0x45, 0x90, 0xbf, 0xe8, 0xb4, 0x06, 0x2f, 0xd2, 0x19, 0xa7, 0xe8, 0x83, 0xff, 0xe2, 0x16, 0xcf, 0xd4, 0x93, 0x29, 0xfc, 0xf6, 0xaa, 0x06, 0x8b, 0x00, 0x1b, 0x02, 0x72, 0xc1, 0x73},
- },
- { /* 11P */
- addYX: fp.Elt{0xde, 0x2a, 0x80, 0x8a, 0x84, 0x00, 0xbf, 0x2f, 0x27, 0x2e, 0x30, 0x02, 0xcf, 0xfe, 0xd9, 0xe5, 0x06, 0x34, 0x70, 0x17, 0x71, 0x84, 0x3e, 0x11, 0xaf, 0x8f, 0x6d, 0x54, 0xe2, 0xaa, 0x75, 0x42},
- subYX: fp.Elt{0x48, 0x43, 0x86, 0x49, 0x02, 0x5b, 0x5f, 0x31, 0x81, 0x83, 0x08, 0x77, 0x69, 0xb3, 0xd6, 0x3e, 0x95, 0xeb, 0x8d, 0x6a, 0x55, 0x75, 0xa0, 0xa3, 0x7f, 0xc7, 0xd5, 0x29, 0x80, 0x59, 0xab, 0x18},
- dt2: fp.Elt{0xe9, 0x89, 0x60, 0xfd, 0xc5, 0x2c, 0x2b, 0xd8, 0xa4, 0xe4, 0x82, 0x32, 0xa1, 0xb4, 0x1e, 0x03, 0x22, 0x86, 0x1a, 0xb5, 0x99, 0x11, 0x31, 0x44, 0x48, 0xf9, 0x3d, 0xb5, 0x22, 0x55, 0xc6, 0x3d},
- },
- { /* 13P */
- addYX: fp.Elt{0x6d, 0x7f, 0x00, 0xa2, 0x22, 0xc2, 0x70, 0xbf, 0xdb, 0xde, 0xbc, 0xb5, 0x9a, 0xb3, 0x84, 0xbf, 0x07, 0xba, 0x07, 0xfb, 0x12, 0x0e, 0x7a, 0x53, 0x41, 0xf2, 0x46, 0xc3, 0xee, 0xd7, 0x4f, 0x23},
- subYX: fp.Elt{0x93, 0xbf, 0x7f, 0x32, 0x3b, 0x01, 0x6f, 0x50, 0x6b, 0x6f, 0x77, 0x9b, 0xc9, 0xeb, 0xfc, 0xae, 0x68, 0x59, 0xad, 0xaa, 0x32, 0xb2, 0x12, 0x9d, 0xa7, 0x24, 0x60, 0x17, 0x2d, 0x88, 0x67, 0x02},
- dt2: fp.Elt{0x78, 0xa3, 0x2e, 0x73, 0x19, 0xa1, 0x60, 0x53, 0x71, 0xd4, 0x8d, 0xdf, 0xb1, 0xe6, 0x37, 0x24, 0x33, 0xe5, 0xa7, 0x91, 0xf8, 0x37, 0xef, 0xa2, 0x63, 0x78, 0x09, 0xaa, 0xfd, 0xa6, 0x7b, 0x49},
- },
- { /* 15P */
- addYX: fp.Elt{0xa0, 0xea, 0xcf, 0x13, 0x03, 0xcc, 0xce, 0x24, 0x6d, 0x24, 0x9c, 0x18, 0x8d, 0xc2, 0x48, 0x86, 0xd0, 0xd4, 0xf2, 0xc1, 0xfa, 0xbd, 0xbd, 0x2d, 0x2b, 0xe7, 0x2d, 0xf1, 0x17, 0x29, 0xe2, 0x61},
- subYX: fp.Elt{0x0b, 0xcf, 0x8c, 0x46, 0x86, 0xcd, 0x0b, 0x04, 0xd6, 0x10, 0x99, 0x2a, 0xa4, 0x9b, 0x82, 0xd3, 0x92, 0x51, 0xb2, 0x07, 0x08, 0x30, 0x08, 0x75, 0xbf, 0x5e, 0xd0, 0x18, 0x42, 0xcd, 0xb5, 0x43},
- dt2: fp.Elt{0x16, 0xb5, 0xd0, 0x9b, 0x2f, 0x76, 0x9a, 0x5d, 0xee, 0xde, 0x3f, 0x37, 0x4e, 0xaf, 0x38, 0xeb, 0x70, 0x42, 0xd6, 0x93, 0x7d, 0x5a, 0x2e, 0x03, 0x42, 0xd8, 0xe4, 0x0a, 0x21, 0x61, 0x1d, 0x51},
- },
- { /* 17P */
- addYX: fp.Elt{0x81, 0x9d, 0x0e, 0x95, 0xef, 0x76, 0xc6, 0x92, 0x4f, 0x04, 0xd7, 0xc0, 0xcd, 0x20, 0x46, 0xa5, 0x48, 0x12, 0x8f, 0x6f, 0x64, 0x36, 0x9b, 0xaa, 0xe3, 0x55, 0xb8, 0xdd, 0x24, 0x59, 0x32, 0x6d},
- subYX: fp.Elt{0x87, 0xde, 0x20, 0x44, 0x48, 0x86, 0x13, 0x08, 0xb4, 0xed, 0x92, 0xb5, 0x16, 0xf0, 0x1c, 0x8a, 0x25, 0x2d, 0x94, 0x29, 0x27, 0x4e, 0xfa, 0x39, 0x10, 0x28, 0x48, 0xe2, 0x6f, 0xfe, 0xa7, 0x71},
- dt2: fp.Elt{0x54, 0xc8, 0xc8, 0xa5, 0xb8, 0x82, 0x71, 0x6c, 0x03, 0x2a, 0x5f, 0xfe, 0x79, 0x14, 0xfd, 0x33, 0x0c, 0x8d, 0x77, 0x83, 0x18, 0x59, 0xcf, 0x72, 0xa9, 0xea, 0x9e, 0x55, 0xb6, 0xc4, 0x46, 0x47},
- },
- { /* 19P */
- addYX: fp.Elt{0x2b, 0x9a, 0xc6, 0x6d, 0x3c, 0x7b, 0x77, 0xd3, 0x17, 0xf6, 0x89, 0x6f, 0x27, 0xb2, 0xfa, 0xde, 0xb5, 0x16, 0x3a, 0xb5, 0xf7, 0x1c, 0x65, 0x45, 0xb7, 0x9f, 0xfe, 0x34, 0xde, 0x51, 0x9a, 0x5c},
- subYX: fp.Elt{0x47, 0x11, 0x74, 0x64, 0xc8, 0x46, 0x85, 0x34, 0x49, 0xc8, 0xfc, 0x0e, 0xdd, 0xae, 0x35, 0x7d, 0x32, 0xa3, 0x72, 0x06, 0x76, 0x9a, 0x93, 0xff, 0xd6, 0xe6, 0xb5, 0x7d, 0x49, 0x63, 0x96, 0x21},
- dt2: fp.Elt{0x67, 0x0e, 0xf1, 0x79, 0xcf, 0xf1, 0x10, 0xf5, 0x5b, 0x51, 0x58, 0xe6, 0xa1, 0xda, 0xdd, 0xff, 0x77, 0x22, 0x14, 0x10, 0x17, 0xa7, 0xc3, 0x09, 0xbb, 0x23, 0x82, 0x60, 0x3c, 0x50, 0x04, 0x48},
- },
- { /* 21P */
- addYX: fp.Elt{0xc7, 0x7f, 0xa3, 0x2c, 0xd0, 0x9e, 0x24, 0xc4, 0xab, 0xac, 0x15, 0xa6, 0xe3, 0xa0, 0x59, 0xa0, 0x23, 0x0e, 0x6e, 0xc9, 0xd7, 0x6e, 0xa9, 0x88, 0x6d, 0x69, 0x50, 0x16, 0xa5, 0x98, 0x33, 0x55},
- subYX: fp.Elt{0x75, 0xd1, 0x36, 0x3a, 0xd2, 0x21, 0x68, 0x3b, 0x32, 0x9e, 0x9b, 0xe9, 0xa7, 0x0a, 0xb4, 0xbb, 0x47, 0x8a, 0x83, 0x20, 0xe4, 0x5c, 0x9e, 0x5d, 0x5e, 0x4c, 0xde, 0x58, 0x88, 0x09, 0x1e, 0x77},
- dt2: fp.Elt{0xdf, 0x1e, 0x45, 0x78, 0xd2, 0xf5, 0x12, 0x9a, 0xcb, 0x9c, 0x89, 0x85, 0x79, 0x5d, 0xda, 0x3a, 0x08, 0x95, 0xa5, 0x9f, 0x2d, 0x4a, 0x7f, 0x47, 0x11, 0xa6, 0xf5, 0x8f, 0xd6, 0xd1, 0x5e, 0x5a},
- },
- { /* 23P */
- addYX: fp.Elt{0x83, 0x0e, 0x15, 0xfe, 0x2a, 0x12, 0x95, 0x11, 0xd8, 0x35, 0x4b, 0x7e, 0x25, 0x9a, 0x20, 0xcf, 0x20, 0x1e, 0x71, 0x1e, 0x29, 0xf8, 0x87, 0x73, 0xf0, 0x92, 0xbf, 0xd8, 0x97, 0xb8, 0xac, 0x44},
- subYX: fp.Elt{0x59, 0x73, 0x52, 0x58, 0xc5, 0xe0, 0xe5, 0xba, 0x7e, 0x9d, 0xdb, 0xca, 0x19, 0x5c, 0x2e, 0x39, 0xe9, 0xab, 0x1c, 0xda, 0x1e, 0x3c, 0x65, 0x28, 0x44, 0xdc, 0xef, 0x5f, 0x13, 0x60, 0x9b, 0x01},
- dt2: fp.Elt{0x83, 0x4b, 0x13, 0x5e, 0x14, 0x68, 0x60, 0x1e, 0x16, 0x4c, 0x30, 0x24, 0x4f, 0xe6, 0xf5, 0xc4, 0xd7, 0x3e, 0x1a, 0xfc, 0xa8, 0x88, 0x6e, 0x50, 0x92, 0x2f, 0xad, 0xe6, 0xfd, 0x49, 0x0c, 0x15},
- },
- { /* 25P */
- addYX: fp.Elt{0x38, 0x11, 0x47, 0x09, 0x95, 0xf2, 0x7b, 0x8e, 0x51, 0xa6, 0x75, 0x4f, 0x39, 0xef, 0x6f, 0x5d, 0xad, 0x08, 0xa7, 0x25, 0xc4, 0x79, 0xaf, 0x10, 0x22, 0x99, 0xb9, 0x5b, 0x07, 0x5a, 0x2b, 0x6b},
- subYX: fp.Elt{0x68, 0xa8, 0xdc, 0x9c, 0x3c, 0x86, 0x49, 0xb8, 0xd0, 0x4a, 0x71, 0xb8, 0xdb, 0x44, 0x3f, 0xc8, 0x8d, 0x16, 0x36, 0x0c, 0x56, 0xe3, 0x3e, 0xfe, 0xc1, 0xfb, 0x05, 0x1e, 0x79, 0xd7, 0xa6, 0x78},
- dt2: fp.Elt{0x76, 0xb9, 0xa0, 0x47, 0x4b, 0x70, 0xbf, 0x58, 0xd5, 0x48, 0x17, 0x74, 0x55, 0xb3, 0x01, 0xa6, 0x90, 0xf5, 0x42, 0xd5, 0xb1, 0x1f, 0x2b, 0xaa, 0x00, 0x5d, 0xd5, 0x4a, 0xfc, 0x7f, 0x5c, 0x72},
- },
- { /* 27P */
- addYX: fp.Elt{0xb2, 0x99, 0xcf, 0xd1, 0x15, 0x67, 0x42, 0xe4, 0x34, 0x0d, 0xa2, 0x02, 0x11, 0xd5, 0x52, 0x73, 0x9f, 0x10, 0x12, 0x8b, 0x7b, 0x15, 0xd1, 0x23, 0xa3, 0xf3, 0xb1, 0x7c, 0x27, 0xc9, 0x4c, 0x79},
- subYX: fp.Elt{0xc0, 0x98, 0xd0, 0x1c, 0xf7, 0x2b, 0x80, 0x91, 0x66, 0x63, 0x5e, 0xed, 0xa4, 0x6c, 0x41, 0xfe, 0x4c, 0x99, 0x02, 0x49, 0x71, 0x5d, 0x58, 0xdf, 0xe7, 0xfa, 0x55, 0xf8, 0x25, 0x46, 0xd5, 0x4c},
- dt2: fp.Elt{0x53, 0x50, 0xac, 0xc2, 0x26, 0xc4, 0xf6, 0x4a, 0x58, 0x72, 0xf6, 0x32, 0xad, 0xed, 0x9a, 0xbc, 0x21, 0x10, 0x31, 0x0a, 0xf1, 0x32, 0xd0, 0x2a, 0x85, 0x8e, 0xcc, 0x6f, 0x7b, 0x35, 0x08, 0x70},
- },
- { /* 29P */
- addYX: fp.Elt{0x01, 0x3f, 0x77, 0x38, 0x27, 0x67, 0x88, 0x0b, 0xfb, 0xcc, 0xfb, 0x95, 0xfa, 0xc8, 0xcc, 0xb8, 0xb6, 0x29, 0xad, 0xb9, 0xa3, 0xd5, 0x2d, 0x8d, 0x6a, 0x0f, 0xad, 0x51, 0x98, 0x7e, 0xef, 0x06},
- subYX: fp.Elt{0x34, 0x4a, 0x58, 0x82, 0xbb, 0x9f, 0x1b, 0xd0, 0x2b, 0x79, 0xb4, 0xd2, 0x63, 0x64, 0xab, 0x47, 0x02, 0x62, 0x53, 0x48, 0x9c, 0x63, 0x31, 0xb6, 0x28, 0xd4, 0xd6, 0x69, 0x36, 0x2a, 0xa9, 0x13},
- dt2: fp.Elt{0xe5, 0x7d, 0x57, 0xc0, 0x1c, 0x77, 0x93, 0xca, 0x5c, 0xdc, 0x35, 0x50, 0x1e, 0xe4, 0x40, 0x75, 0x71, 0xe0, 0x02, 0xd8, 0x01, 0x0f, 0x68, 0x24, 0x6a, 0xf8, 0x2a, 0x8a, 0xdf, 0x6d, 0x29, 0x3c},
- },
- { /* 31P */
- addYX: fp.Elt{0x13, 0xa7, 0x14, 0xd9, 0xf9, 0x15, 0xad, 0xae, 0x12, 0xf9, 0x8f, 0x8c, 0xf9, 0x7b, 0x2f, 0xa9, 0x30, 0xd7, 0x53, 0x9f, 0x17, 0x23, 0xf8, 0xaf, 0xba, 0x77, 0x0c, 0x49, 0x93, 0xd3, 0x99, 0x7a},
- subYX: fp.Elt{0x41, 0x25, 0x1f, 0xbb, 0x2e, 0x4d, 0xeb, 0xfc, 0x1f, 0xb9, 0xad, 0x40, 0xc7, 0x10, 0x95, 0xb8, 0x05, 0xad, 0xa1, 0xd0, 0x7d, 0xa3, 0x71, 0xfc, 0x7b, 0x71, 0x47, 0x07, 0x70, 0x2c, 0x89, 0x0a},
- dt2: fp.Elt{0xe8, 0xa3, 0xbd, 0x36, 0x24, 0xed, 0x52, 0x8f, 0x94, 0x07, 0xe8, 0x57, 0x41, 0xc8, 0xa8, 0x77, 0xe0, 0x9c, 0x2f, 0x26, 0x63, 0x65, 0xa9, 0xa5, 0xd2, 0xf7, 0x02, 0x83, 0xd2, 0x62, 0x67, 0x28},
- },
- { /* 33P */
- addYX: fp.Elt{0x25, 0x5b, 0xe3, 0x3c, 0x09, 0x36, 0x78, 0x4e, 0x97, 0xaa, 0x6b, 0xb2, 0x1d, 0x18, 0xe1, 0x82, 0x3f, 0xb8, 0xc7, 0xcb, 0xd3, 0x92, 0xc1, 0x0c, 0x3a, 0x9d, 0x9d, 0x6a, 0x04, 0xda, 0xf1, 0x32},
- subYX: fp.Elt{0xbd, 0xf5, 0x2e, 0xce, 0x2b, 0x8e, 0x55, 0x7c, 0x63, 0xbc, 0x47, 0x67, 0xb4, 0x6c, 0x98, 0xe4, 0xb8, 0x89, 0xbb, 0x3b, 0x9f, 0x17, 0x4a, 0x15, 0x7a, 0x76, 0xf1, 0xd6, 0xa3, 0xf2, 0x86, 0x76},
- dt2: fp.Elt{0x6a, 0x7c, 0x59, 0x6d, 0xa6, 0x12, 0x8d, 0xaa, 0x2b, 0x85, 0xd3, 0x04, 0x03, 0x93, 0x11, 0x8f, 0x22, 0xb0, 0x09, 0xc2, 0x73, 0xdc, 0x91, 0x3f, 0xa6, 0x28, 0xad, 0xa9, 0xf8, 0x05, 0x13, 0x56},
- },
- { /* 35P */
- addYX: fp.Elt{0xd1, 0xae, 0x92, 0xec, 0x8d, 0x97, 0x0c, 0x10, 0xe5, 0x73, 0x6d, 0x4d, 0x43, 0xd5, 0x43, 0xca, 0x48, 0xba, 0x47, 0xd8, 0x22, 0x1b, 0x13, 0x83, 0x2c, 0x4d, 0x5d, 0xe3, 0x53, 0xec, 0xaa},
- subYX: fp.Elt{0xd5, 0xc0, 0xb0, 0xe7, 0x28, 0xcc, 0x22, 0x67, 0x53, 0x5c, 0x07, 0xdb, 0xbb, 0xe9, 0x9d, 0x70, 0x61, 0x0a, 0x01, 0xd7, 0xa7, 0x8d, 0xf6, 0xca, 0x6c, 0xcc, 0x57, 0x2c, 0xef, 0x1a, 0x0a, 0x03},
- dt2: fp.Elt{0xaa, 0xd2, 0x3a, 0x00, 0x73, 0xf7, 0xb1, 0x7b, 0x08, 0x66, 0x21, 0x2b, 0x80, 0x29, 0x3f, 0x0b, 0x3e, 0xd2, 0x0e, 0x52, 0x86, 0xdc, 0x21, 0x78, 0x80, 0x54, 0x06, 0x24, 0x1c, 0x9c, 0xbe, 0x20},
- },
- { /* 37P */
- addYX: fp.Elt{0xa6, 0x73, 0x96, 0x24, 0xd8, 0x87, 0x53, 0xe1, 0x93, 0xe4, 0x46, 0xf5, 0x2d, 0xbc, 0x43, 0x59, 0xb5, 0x63, 0x6f, 0xc3, 0x81, 0x9a, 0x7f, 0x1c, 0xde, 0xc1, 0x0a, 0x1f, 0x36, 0xb3, 0x0a, 0x75},
- subYX: fp.Elt{0x60, 0x5e, 0x02, 0xe2, 0x4a, 0xe4, 0xe0, 0x20, 0x38, 0xb9, 0xdc, 0xcb, 0x2f, 0x3b, 0x3b, 0xb0, 0x1c, 0x0d, 0x5a, 0xf9, 0x9c, 0x63, 0x5d, 0x10, 0x11, 0xe3, 0x67, 0x50, 0x54, 0x4c, 0x76, 0x69},
- dt2: fp.Elt{0x37, 0x10, 0xf8, 0xa2, 0x83, 0x32, 0x8a, 0x1e, 0xf1, 0xcb, 0x7f, 0xbd, 0x23, 0xda, 0x2e, 0x6f, 0x63, 0x25, 0x2e, 0xac, 0x5b, 0xd1, 0x2f, 0xb7, 0x40, 0x50, 0x07, 0xb7, 0x3f, 0x6b, 0xf9, 0x54},
- },
- { /* 39P */
- addYX: fp.Elt{0x79, 0x92, 0x66, 0x29, 0x04, 0xf2, 0xad, 0x0f, 0x4a, 0x72, 0x7d, 0x7d, 0x04, 0xa2, 0xdd, 0x3a, 0xf1, 0x60, 0x57, 0x8c, 0x82, 0x94, 0x3d, 0x6f, 0x9e, 0x53, 0xb7, 0x2b, 0xc5, 0xe9, 0x7f, 0x3d},
- subYX: fp.Elt{0xcd, 0x1e, 0xb1, 0x16, 0xc6, 0xaf, 0x7d, 0x17, 0x79, 0x64, 0x57, 0xfa, 0x9c, 0x4b, 0x76, 0x89, 0x85, 0xe7, 0xec, 0xe6, 0x10, 0xa1, 0xa8, 0xb7, 0xf0, 0xdb, 0x85, 0xbe, 0x9f, 0x83, 0xe6, 0x78},
- dt2: fp.Elt{0x6b, 0x85, 0xb8, 0x37, 0xf7, 0x2d, 0x33, 0x70, 0x8a, 0x17, 0x1a, 0x04, 0x43, 0x5d, 0xd0, 0x75, 0x22, 0x9e, 0xe5, 0xa0, 0x4a, 0xf7, 0x0f, 0x32, 0x42, 0x82, 0x08, 0x50, 0xf3, 0x68, 0xf2, 0x70},
- },
- { /* 41P */
- addYX: fp.Elt{0x47, 0x5f, 0x80, 0xb1, 0x83, 0x45, 0x86, 0x66, 0x19, 0x7c, 0xdd, 0x60, 0xd1, 0xc5, 0x35, 0xf5, 0x06, 0xb0, 0x4c, 0x1e, 0xb7, 0x4e, 0x87, 0xe9, 0xd9, 0x89, 0xd8, 0xfa, 0x5c, 0x34, 0x0d, 0x7c},
- subYX: fp.Elt{0x55, 0xf3, 0xdc, 0x70, 0x20, 0x11, 0x24, 0x23, 0x17, 0xe1, 0xfc, 0xe7, 0x7e, 0xc9, 0x0c, 0x38, 0x98, 0xb6, 0x52, 0x35, 0xed, 0xde, 0x1d, 0xb3, 0xb9, 0xc4, 0xb8, 0x39, 0xc0, 0x56, 0x4e, 0x40},
- dt2: fp.Elt{0x8a, 0x33, 0x78, 0x8c, 0x4b, 0x1f, 0x1f, 0x59, 0xe1, 0xb5, 0xe0, 0x67, 0xb1, 0x6a, 0x36, 0xa0, 0x44, 0x3d, 0x5f, 0xb4, 0x52, 0x41, 0xbc, 0x5c, 0x77, 0xc7, 0xae, 0x2a, 0x76, 0x54, 0xd7, 0x20},
- },
- { /* 43P */
- addYX: fp.Elt{0x58, 0xb7, 0x3b, 0xc7, 0x6f, 0xc3, 0x8f, 0x5e, 0x9a, 0xbb, 0x3c, 0x36, 0xa5, 0x43, 0xe5, 0xac, 0x22, 0xc9, 0x3b, 0x90, 0x7d, 0x4a, 0x93, 0xa9, 0x62, 0xec, 0xce, 0xf3, 0x46, 0x1e, 0x8f, 0x2b},
- subYX: fp.Elt{0x43, 0xf5, 0xb9, 0x35, 0xb1, 0xfe, 0x74, 0x9d, 0x6c, 0x95, 0x8c, 0xde, 0xf1, 0x7d, 0xb3, 0x84, 0xa9, 0x8b, 0x13, 0x57, 0x07, 0x2b, 0x32, 0xe9, 0xe1, 0x4c, 0x0b, 0x79, 0xa8, 0xad, 0xb8, 0x38},
- dt2: fp.Elt{0x5d, 0xf9, 0x51, 0xdf, 0x9c, 0x4a, 0xc0, 0xb5, 0xac, 0xde, 0x1f, 0xcb, 0xae, 0x52, 0x39, 0x2b, 0xda, 0x66, 0x8b, 0x32, 0x8b, 0x6d, 0x10, 0x1d, 0x53, 0x19, 0xba, 0xce, 0x32, 0xeb, 0x9a, 0x04},
- },
- { /* 45P */
- addYX: fp.Elt{0x31, 0x79, 0xfc, 0x75, 0x0b, 0x7d, 0x50, 0xaa, 0xd3, 0x25, 0x67, 0x7a, 0x4b, 0x92, 0xef, 0x0f, 0x30, 0x39, 0x6b, 0x39, 0x2b, 0x54, 0x82, 0x1d, 0xfc, 0x74, 0xf6, 0x30, 0x75, 0xe1, 0x5e, 0x79},
- subYX: fp.Elt{0x7e, 0xfe, 0xdc, 0x63, 0x3c, 0x7d, 0x76, 0xd7, 0x40, 0x6e, 0x85, 0x97, 0x48, 0x59, 0x9c, 0x20, 0x13, 0x7c, 0x4f, 0xe1, 0x61, 0x68, 0x67, 0xb6, 0xfc, 0x25, 0xd6, 0xc8, 0xe0, 0x65, 0xc6, 0x51},
- dt2: fp.Elt{0x81, 0xbd, 0xec, 0x52, 0x0a, 0x5b, 0x4a, 0x25, 0xe7, 0xaf, 0x34, 0xe0, 0x6e, 0x1f, 0x41, 0x5d, 0x31, 0x4a, 0xee, 0xca, 0x0d, 0x4d, 0xa2, 0xe6, 0x77, 0x44, 0xc5, 0x9d, 0xf4, 0x9b, 0xd1, 0x6c},
- },
- { /* 47P */
- addYX: fp.Elt{0x86, 0xc3, 0xaf, 0x65, 0x21, 0x61, 0xfe, 0x1f, 0x10, 0x1b, 0xd5, 0xb8, 0x88, 0x2a, 0x2a, 0x08, 0xaa, 0x0b, 0x99, 0x20, 0x7e, 0x62, 0xf6, 0x76, 0xe7, 0x43, 0x9e, 0x42, 0xa7, 0xb3, 0x01, 0x5e},
- subYX: fp.Elt{0xa3, 0x9c, 0x17, 0x52, 0x90, 0x61, 0x87, 0x7e, 0x85, 0x9f, 0x2c, 0x0b, 0x06, 0x0a, 0x1d, 0x57, 0x1e, 0x71, 0x99, 0x84, 0xa8, 0xba, 0xa2, 0x80, 0x38, 0xe6, 0xb2, 0x40, 0xdb, 0xf3, 0x20, 0x75},
- dt2: fp.Elt{0xa1, 0x57, 0x93, 0xd3, 0xe3, 0x0b, 0xb5, 0x3d, 0xa5, 0x94, 0x9e, 0x59, 0xdd, 0x6c, 0x7b, 0x96, 0x6e, 0x1e, 0x31, 0xdf, 0x64, 0x9a, 0x30, 0x1a, 0x86, 0xc9, 0xf3, 0xce, 0x9c, 0x2c, 0x09, 0x71},
- },
- { /* 49P */
- addYX: fp.Elt{0xcf, 0x1d, 0x05, 0x74, 0xac, 0xd8, 0x6b, 0x85, 0x1e, 0xaa, 0xb7, 0x55, 0x08, 0xa4, 0xf6, 0x03, 0xeb, 0x3c, 0x74, 0xc9, 0xcb, 0xe7, 0x4a, 0x3a, 0xde, 0xab, 0x37, 0x71, 0xbb, 0xa5, 0x73, 0x41},
- subYX: fp.Elt{0x8c, 0x91, 0x64, 0x03, 0x3f, 0x52, 0xd8, 0x53, 0x1c, 0x6b, 0xab, 0x3f, 0xf4, 0x04, 0xb4, 0xa2, 0xa4, 0xe5, 0x81, 0x66, 0x9e, 0x4a, 0x0b, 0x08, 0xa7, 0x7b, 0x25, 0xd0, 0x03, 0x5b, 0xa1, 0x0e},
- dt2: fp.Elt{0x8a, 0x21, 0xf9, 0xf0, 0x31, 0x6e, 0xc5, 0x17, 0x08, 0x47, 0xfc, 0x1a, 0x2b, 0x6e, 0x69, 0x5a, 0x76, 0xf1, 0xb2, 0xf4, 0x68, 0x16, 0x93, 0xf7, 0x67, 0x3a, 0x4e, 0x4a, 0x61, 0x65, 0xc5, 0x5f},
- },
- { /* 51P */
- addYX: fp.Elt{0x8e, 0x98, 0x90, 0x77, 0xe6, 0xe1, 0x92, 0x48, 0x22, 0xd7, 0x5c, 0x1c, 0x0f, 0x95, 0xd5, 0x01, 0xed, 0x3e, 0x92, 0xe5, 0x9a, 0x81, 0xb0, 0xe3, 0x1b, 0x65, 0x46, 0x9d, 0x40, 0xc7, 0x14, 0x32},
- subYX: fp.Elt{0xe5, 0x7a, 0x6d, 0xc4, 0x0d, 0x57, 0x6e, 0x13, 0x8f, 0xdc, 0xf8, 0x54, 0xcc, 0xaa, 0xd0, 0x0f, 0x86, 0xad, 0x0d, 0x31, 0x03, 0x9f, 0x54, 0x59, 0xa1, 0x4a, 0x45, 0x4c, 0x41, 0x1c, 0x71, 0x62},
- dt2: fp.Elt{0x70, 0x17, 0x65, 0x06, 0x74, 0x82, 0x29, 0x13, 0x36, 0x94, 0x27, 0x8a, 0x66, 0xa0, 0xa4, 0x3b, 0x3c, 0x22, 0x5d, 0x18, 0xec, 0xb8, 0xb6, 0xd9, 0x3c, 0x83, 0xcb, 0x3e, 0x07, 0x94, 0xea, 0x5b},
- },
- { /* 53P */
- addYX: fp.Elt{0xf8, 0xd2, 0x43, 0xf3, 0x63, 0xce, 0x70, 0xb4, 0xf1, 0xe8, 0x43, 0x05, 0x8f, 0xba, 0x67, 0x00, 0x6f, 0x7b, 0x11, 0xa2, 0xa1, 0x51, 0xda, 0x35, 0x2f, 0xbd, 0xf1, 0x44, 0x59, 0x78, 0xd0, 0x4a},
- subYX: fp.Elt{0xe4, 0x9b, 0xc8, 0x12, 0x09, 0xbf, 0x1d, 0x64, 0x9c, 0x57, 0x6e, 0x7d, 0x31, 0x8b, 0xf3, 0xac, 0x65, 0xb0, 0x97, 0xf6, 0x02, 0x9e, 0xfe, 0xab, 0xec, 0x1e, 0xf6, 0x48, 0xc1, 0xd5, 0xac, 0x3a},
- dt2: fp.Elt{0x01, 0x83, 0x31, 0xc3, 0x34, 0x3b, 0x8e, 0x85, 0x26, 0x68, 0x31, 0x07, 0x47, 0xc0, 0x99, 0xdc, 0x8c, 0xa8, 0x9d, 0xd3, 0x2e, 0x5b, 0x08, 0x34, 0x3d, 0x85, 0x02, 0xd9, 0xb1, 0x0c, 0xff, 0x3a},
- },
- { /* 55P */
- addYX: fp.Elt{0x05, 0x35, 0xc5, 0xf4, 0x0b, 0x43, 0x26, 0x92, 0x83, 0x22, 0x1f, 0x26, 0x13, 0x9c, 0xe4, 0x68, 0xc6, 0x27, 0xd3, 0x8f, 0x78, 0x33, 0xef, 0x09, 0x7f, 0x9e, 0xd9, 0x2b, 0x73, 0x9f, 0xcf, 0x2c},
- subYX: fp.Elt{0x5e, 0x40, 0x20, 0x3a, 0xeb, 0xc7, 0xc5, 0x87, 0xc9, 0x56, 0xad, 0xed, 0xef, 0x11, 0xe3, 0x8e, 0xf9, 0xd5, 0x29, 0xad, 0x48, 0x2e, 0x25, 0x29, 0x1d, 0x25, 0xcd, 0xf4, 0x86, 0x7e, 0x0e, 0x11},
- dt2: fp.Elt{0xe4, 0xf5, 0x03, 0xd6, 0x9e, 0xd8, 0xc0, 0x57, 0x0c, 0x20, 0xb0, 0xf0, 0x28, 0x86, 0x88, 0x12, 0xb7, 0x3b, 0x2e, 0xa0, 0x09, 0x27, 0x17, 0x53, 0x37, 0x3a, 0x69, 0xb9, 0xe0, 0x57, 0xc5, 0x05},
- },
- { /* 57P */
- addYX: fp.Elt{0xb0, 0x0e, 0xc2, 0x89, 0xb0, 0xbb, 0x76, 0xf7, 0x5c, 0xd8, 0x0f, 0xfa, 0xf6, 0x5b, 0xf8, 0x61, 0xfb, 0x21, 0x44, 0x63, 0x4e, 0x3f, 0xb9, 0xb6, 0x05, 0x12, 0x86, 0x41, 0x08, 0xef, 0x9f, 0x28},
- subYX: fp.Elt{0x6f, 0x7e, 0xc9, 0x1f, 0x31, 0xce, 0xf9, 0xd8, 0xae, 0xfd, 0xf9, 0x11, 0x30, 0x26, 0x3f, 0x7a, 0xdd, 0x25, 0xed, 0x8b, 0xa0, 0x7e, 0x5b, 0xe1, 0x5a, 0x87, 0xe9, 0x8f, 0x17, 0x4c, 0x15, 0x6e},
- dt2: fp.Elt{0xbf, 0x9a, 0xd6, 0xfe, 0x36, 0x63, 0x61, 0xcf, 0x4f, 0xc9, 0x35, 0x83, 0xe7, 0xe4, 0x16, 0x9b, 0xe7, 0x7f, 0x3a, 0x75, 0x65, 0x97, 0x78, 0x13, 0x19, 0xa3, 0x5c, 0xa9, 0x42, 0xf6, 0xfb, 0x6a},
- },
- { /* 59P */
- addYX: fp.Elt{0xcc, 0xa8, 0x13, 0xf9, 0x70, 0x50, 0xe5, 0x5d, 0x61, 0xf5, 0x0c, 0x2b, 0x7b, 0x16, 0x1d, 0x7d, 0x89, 0xd4, 0xea, 0x90, 0xb6, 0x56, 0x29, 0xda, 0xd9, 0x1e, 0x80, 0xdb, 0xce, 0x93, 0xc0, 0x12},
- subYX: fp.Elt{0xc1, 0xd2, 0xf5, 0x62, 0x0c, 0xde, 0xa8, 0x7d, 0x9a, 0x7b, 0x0e, 0xb0, 0xa4, 0x3d, 0xfc, 0x98, 0xe0, 0x70, 0xad, 0x0d, 0xda, 0x6a, 0xeb, 0x7d, 0xc4, 0x38, 0x50, 0xb9, 0x51, 0xb8, 0xb4, 0x0d},
- dt2: fp.Elt{0x0f, 0x19, 0xb8, 0x08, 0x93, 0x7f, 0x14, 0xfc, 0x10, 0xe3, 0x1a, 0xa1, 0xa0, 0x9d, 0x96, 0x06, 0xfd, 0xd7, 0xc7, 0xda, 0x72, 0x55, 0xe7, 0xce, 0xe6, 0x5c, 0x63, 0xc6, 0x99, 0x87, 0xaa, 0x33},
- },
- { /* 61P */
- addYX: fp.Elt{0xb1, 0x6c, 0x15, 0xfc, 0x88, 0xf5, 0x48, 0x83, 0x27, 0x6d, 0x0a, 0x1a, 0x9b, 0xba, 0xa2, 0x6d, 0xb6, 0x5a, 0xca, 0x87, 0x5c, 0x2d, 0x26, 0xe2, 0xa6, 0x89, 0xd5, 0xc8, 0xc1, 0xd0, 0x2c, 0x21},
- subYX: fp.Elt{0xf2, 0x5c, 0x08, 0xbd, 0x1e, 0xf5, 0x0f, 0xaf, 0x1f, 0x3f, 0xd3, 0x67, 0x89, 0x1a, 0xf5, 0x78, 0x3c, 0x03, 0x60, 0x50, 0xe1, 0xbf, 0xc2, 0x6e, 0x86, 0x1a, 0xe2, 0xe8, 0x29, 0x6f, 0x3c, 0x23},
- dt2: fp.Elt{0x81, 0xc7, 0x18, 0x7f, 0x10, 0xd5, 0xf4, 0xd2, 0x28, 0x9d, 0x7e, 0x52, 0xf2, 0xcd, 0x2e, 0x12, 0x41, 0x33, 0x3d, 0x3d, 0x2a, 0x86, 0x0a, 0xa7, 0xe3, 0x4c, 0x91, 0x11, 0x89, 0x77, 0xb7, 0x1d},
- },
- { /* 63P */
- addYX: fp.Elt{0xb6, 0x1a, 0x70, 0xdd, 0x69, 0x47, 0x39, 0xb3, 0xa5, 0x8d, 0xcf, 0x19, 0xd4, 0xde, 0xb8, 0xe2, 0x52, 0xc8, 0x2a, 0xfd, 0x61, 0x41, 0xdf, 0x15, 0xbe, 0x24, 0x7d, 0x01, 0x8a, 0xca, 0xe2, 0x7a},
- subYX: fp.Elt{0x6f, 0xc2, 0x6b, 0x7c, 0x39, 0x52, 0xf3, 0xdd, 0x13, 0x01, 0xd5, 0x53, 0xcc, 0xe2, 0x97, 0x7a, 0x30, 0xa3, 0x79, 0xbf, 0x3a, 0xf4, 0x74, 0x7c, 0xfc, 0xad, 0xe2, 0x26, 0xad, 0x97, 0xad, 0x31},
- dt2: fp.Elt{0x62, 0xb9, 0x20, 0x09, 0xed, 0x17, 0xe8, 0xb7, 0x9d, 0xda, 0x19, 0x3f, 0xcc, 0x18, 0x85, 0x1e, 0x64, 0x0a, 0x56, 0x25, 0x4f, 0xc1, 0x91, 0xe4, 0x83, 0x2c, 0x62, 0xa6, 0x53, 0xfc, 0xd1, 0x1e},
- },
-}
diff --git a/vendor/github.com/cloudflare/circl/sign/ed448/ed448.go b/vendor/github.com/cloudflare/circl/sign/ed448/ed448.go
deleted file mode 100644
index 324bd8f3..00000000
--- a/vendor/github.com/cloudflare/circl/sign/ed448/ed448.go
+++ /dev/null
@@ -1,411 +0,0 @@
-// Package ed448 implements the Ed448 signature scheme as described in RFC-8032.
-//
-// This package implements two signature variants.
-//
-// | Scheme Name | Sign Function | Verification | Context |
-// |-------------|-------------------|---------------|-------------------|
-// | Ed448 | Sign | Verify | Yes, can be empty |
-// | Ed448Ph | SignPh | VerifyPh | Yes, can be empty |
-// | All above | (PrivateKey).Sign | VerifyAny | As above |
-//
-// Specific functions for sign and verify are defined. A generic signing
-// function for all schemes is available through the crypto.Signer interface,
-// which is implemented by the PrivateKey type. A corresponding all-in-one
-// verification method is provided by the VerifyAny function.
-//
-// Both schemes require a context string for domain separation. This parameter
-// is passed using a SignerOptions struct defined in this package.
-//
-// References:
-//
-// - RFC8032: https://rfc-editor.org/rfc/rfc8032.txt
-// - EdDSA for more curves: https://eprint.iacr.org/2015/677
-// - High-speed high-security signatures: https://doi.org/10.1007/s13389-012-0027-1
-package ed448
-
-import (
- "bytes"
- "crypto"
- cryptoRand "crypto/rand"
- "crypto/subtle"
- "errors"
- "fmt"
- "io"
- "strconv"
-
- "github.com/cloudflare/circl/ecc/goldilocks"
- "github.com/cloudflare/circl/internal/sha3"
- "github.com/cloudflare/circl/sign"
-)
-
-const (
- // ContextMaxSize is the maximum length (in bytes) allowed for context.
- ContextMaxSize = 255
- // PublicKeySize is the length in bytes of Ed448 public keys.
- PublicKeySize = 57
- // PrivateKeySize is the length in bytes of Ed448 private keys.
- PrivateKeySize = 114
- // SignatureSize is the length in bytes of signatures.
- SignatureSize = 114
- // SeedSize is the size, in bytes, of private key seeds. These are the private key representations used by RFC 8032.
- SeedSize = 57
-)
-
-const (
- paramB = 456 / 8 // Size of keys in bytes.
- hashSize = 2 * paramB // Size of the hash function's output.
-)
-
-// SignerOptions implements crypto.SignerOpts and augments with parameters
-// that are specific to the Ed448 signature schemes.
-type SignerOptions struct {
- // Hash must be crypto.Hash(0) for both Ed448 and Ed448Ph.
- crypto.Hash
-
- // Context is an optional domain separation string for signing.
-	// Its length must be at most 255 bytes (ContextMaxSize).
- Context string
-
- // Scheme is an identifier for choosing a signature scheme.
- Scheme SchemeID
-}
-
-// SchemeID is an identifier for each signature scheme.
-type SchemeID uint
-
-const (
- ED448 SchemeID = iota
- ED448Ph
-)
-
-// PublicKey is the type of Ed448 public keys.
-type PublicKey []byte
-
-// Equal reports whether pub and x have the same value.
-func (pub PublicKey) Equal(x crypto.PublicKey) bool {
- xx, ok := x.(PublicKey)
- return ok && bytes.Equal(pub, xx)
-}
-
-// PrivateKey is the type of Ed448 private keys. It implements crypto.Signer.
-type PrivateKey []byte
-
-// Equal reports whether priv and x have the same value.
-func (priv PrivateKey) Equal(x crypto.PrivateKey) bool {
- xx, ok := x.(PrivateKey)
- return ok && subtle.ConstantTimeCompare(priv, xx) == 1
-}
-
-// Public returns the PublicKey corresponding to priv.
-func (priv PrivateKey) Public() crypto.PublicKey {
- publicKey := make([]byte, PublicKeySize)
- copy(publicKey, priv[SeedSize:])
- return PublicKey(publicKey)
-}
-
-// Seed returns the private key seed corresponding to priv. It is provided for
-// interoperability with RFC 8032. RFC 8032's private keys correspond to seeds
-// in this package.
-func (priv PrivateKey) Seed() []byte {
- seed := make([]byte, SeedSize)
- copy(seed, priv[:SeedSize])
- return seed
-}
-
-func (priv PrivateKey) Scheme() sign.Scheme { return sch }
-
-func (pub PublicKey) Scheme() sign.Scheme { return sch }
-
-func (priv PrivateKey) MarshalBinary() (data []byte, err error) {
- privateKey := make(PrivateKey, PrivateKeySize)
- copy(privateKey, priv)
- return privateKey, nil
-}
-
-func (pub PublicKey) MarshalBinary() (data []byte, err error) {
- publicKey := make(PublicKey, PublicKeySize)
- copy(publicKey, pub)
- return publicKey, nil
-}
-
-// Sign creates a signature of a message given a key pair.
-// This function supports both signature variants defined in RFC-8032,
-// namely Ed448 (or pure EdDSA) and Ed448Ph.
-// The opts.HashFunc() must return zero to select the Ed448 variant. This can
-// be achieved by passing crypto.Hash(0) as the value for opts.
-// Use a SignerOptions struct with Scheme set to ED448Ph to select the Ed448Ph
-// variant; the struct can also carry an optional context string for signing.
-func (priv PrivateKey) Sign(
- rand io.Reader,
- message []byte,
- opts crypto.SignerOpts,
-) (signature []byte, err error) {
- var ctx string
- var scheme SchemeID
-
- if o, ok := opts.(SignerOptions); ok {
- ctx = o.Context
- scheme = o.Scheme
- }
-
-	switch {
- case scheme == ED448 && opts.HashFunc() == crypto.Hash(0):
- return Sign(priv, message, ctx), nil
- case scheme == ED448Ph && opts.HashFunc() == crypto.Hash(0):
- return SignPh(priv, message, ctx), nil
- default:
- return nil, errors.New("ed448: bad hash algorithm")
- }
-}
-
-// GenerateKey generates a public/private key pair using entropy from rand.
-// If rand is nil, crypto/rand.Reader will be used.
-func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) {
- if rand == nil {
- rand = cryptoRand.Reader
- }
-
- seed := make(PrivateKey, SeedSize)
- if _, err := io.ReadFull(rand, seed); err != nil {
- return nil, nil, err
- }
-
- privateKey := NewKeyFromSeed(seed)
- publicKey := make([]byte, PublicKeySize)
- copy(publicKey, privateKey[SeedSize:])
-
- return publicKey, privateKey, nil
-}
-
-// NewKeyFromSeed calculates a private key from a seed. It will panic if
-// len(seed) is not SeedSize. This function is provided for interoperability
-// with RFC 8032. RFC 8032's private keys correspond to seeds in this
-// package.
-func NewKeyFromSeed(seed []byte) PrivateKey {
- privateKey := make([]byte, PrivateKeySize)
- newKeyFromSeed(privateKey, seed)
- return privateKey
-}
-
-func newKeyFromSeed(privateKey, seed []byte) {
- if l := len(seed); l != SeedSize {
- panic("ed448: bad seed length: " + strconv.Itoa(l))
- }
-
- var h [hashSize]byte
- H := sha3.NewShake256()
- _, _ = H.Write(seed)
- _, _ = H.Read(h[:])
- s := &goldilocks.Scalar{}
- deriveSecretScalar(s, h[:paramB])
-
- copy(privateKey[:SeedSize], seed)
- _ = goldilocks.Curve{}.ScalarBaseMult(s).ToBytes(privateKey[SeedSize:])
-}
-
-func signAll(signature []byte, privateKey PrivateKey, message, ctx []byte, preHash bool) {
- if len(ctx) > ContextMaxSize {
-		panic(fmt.Errorf("ed448: bad context length: %d", len(ctx)))
- }
-
- H := sha3.NewShake256()
- var PHM []byte
-
- if preHash {
- var h [64]byte
- _, _ = H.Write(message)
- _, _ = H.Read(h[:])
- PHM = h[:]
- H.Reset()
- } else {
- PHM = message
- }
-
- // 1. Hash the 57-byte private key using SHAKE256(x, 114).
- var h [hashSize]byte
- _, _ = H.Write(privateKey[:SeedSize])
- _, _ = H.Read(h[:])
- s := &goldilocks.Scalar{}
- deriveSecretScalar(s, h[:paramB])
- prefix := h[paramB:]
-
- // 2. Compute SHAKE256(dom4(F, C) || prefix || PH(M), 114).
- var rPM [hashSize]byte
- H.Reset()
-
- writeDom(&H, ctx, preHash)
-
- _, _ = H.Write(prefix)
- _, _ = H.Write(PHM)
- _, _ = H.Read(rPM[:])
-
- // 3. Compute the point [r]B.
- r := &goldilocks.Scalar{}
- r.FromBytes(rPM[:])
- R := (&[paramB]byte{})[:]
- if err := (goldilocks.Curve{}.ScalarBaseMult(r).ToBytes(R)); err != nil {
- panic(err)
- }
- // 4. Compute SHAKE256(dom4(F, C) || R || A || PH(M), 114)
- var hRAM [hashSize]byte
- H.Reset()
-
- writeDom(&H, ctx, preHash)
-
- _, _ = H.Write(R)
- _, _ = H.Write(privateKey[SeedSize:])
- _, _ = H.Write(PHM)
- _, _ = H.Read(hRAM[:])
-
- // 5. Compute S = (r + k * s) mod order.
- k := &goldilocks.Scalar{}
- k.FromBytes(hRAM[:])
- S := &goldilocks.Scalar{}
- S.Mul(k, s)
- S.Add(S, r)
-
- // 6. The signature is the concatenation of R and S.
- copy(signature[:paramB], R[:])
- copy(signature[paramB:], S[:])
-}
-
-// Sign signs the message with privateKey and returns a signature.
-// This function supports the signature variant defined in RFC-8032: Ed448,
-// also known as the pure version of EdDSA.
-// It will panic if len(privateKey) is not PrivateKeySize.
-func Sign(priv PrivateKey, message []byte, ctx string) []byte {
- signature := make([]byte, SignatureSize)
- signAll(signature, priv, message, []byte(ctx), false)
- return signature
-}
-
-// SignPh creates a signature of a message given a keypair.
-// This function supports the signature variant defined in RFC-8032: Ed448ph,
-// meaning it internally hashes the message using SHAKE-256.
-// A context string may be passed to this function; its length must be at most
-// 255 bytes. It may be empty.
-func SignPh(priv PrivateKey, message []byte, ctx string) []byte {
- signature := make([]byte, SignatureSize)
- signAll(signature, priv, message, []byte(ctx), true)
- return signature
-}
-
-func verify(public PublicKey, message, signature, ctx []byte, preHash bool) bool {
- if len(public) != PublicKeySize ||
- len(signature) != SignatureSize ||
- len(ctx) > ContextMaxSize ||
- !isLessThanOrder(signature[paramB:]) {
- return false
- }
-
- P, err := goldilocks.FromBytes(public)
- if err != nil {
- return false
- }
-
- H := sha3.NewShake256()
- var PHM []byte
-
- if preHash {
- var h [64]byte
- _, _ = H.Write(message)
- _, _ = H.Read(h[:])
- PHM = h[:]
- H.Reset()
- } else {
- PHM = message
- }
-
- var hRAM [hashSize]byte
- R := signature[:paramB]
-
- writeDom(&H, ctx, preHash)
-
- _, _ = H.Write(R)
- _, _ = H.Write(public)
- _, _ = H.Write(PHM)
- _, _ = H.Read(hRAM[:])
-
- k := &goldilocks.Scalar{}
- k.FromBytes(hRAM[:])
- S := &goldilocks.Scalar{}
- S.FromBytes(signature[paramB:])
-
- encR := (&[paramB]byte{})[:]
- P.Neg()
- _ = goldilocks.Curve{}.CombinedMult(S, k, P).ToBytes(encR)
- return bytes.Equal(R, encR)
-}
-
-// VerifyAny returns true if the signature is valid. Failure cases are an
-// invalid signature or a public key that cannot be decoded.
-// This function supports both signature variants defined in RFC-8032,
-// namely Ed448 (or pure EdDSA) and Ed448Ph.
-// The opts.HashFunc() must return zero; this can be achieved by passing
-// crypto.Hash(0) as the value for opts.
-// Use a SignerOptions struct to pass a context string for verification.
-func VerifyAny(public PublicKey, message, signature []byte, opts crypto.SignerOpts) bool {
- var ctx string
- var scheme SchemeID
- if o, ok := opts.(SignerOptions); ok {
- ctx = o.Context
- scheme = o.Scheme
- }
-
-	switch {
- case scheme == ED448 && opts.HashFunc() == crypto.Hash(0):
- return Verify(public, message, signature, ctx)
- case scheme == ED448Ph && opts.HashFunc() == crypto.Hash(0):
- return VerifyPh(public, message, signature, ctx)
- default:
- return false
- }
-}
-
-// Verify returns true if the signature is valid. Failure cases are an
-// invalid signature or a public key that cannot be decoded.
-// This function supports the signature variant defined in RFC-8032: Ed448,
-// also known as the pure version of EdDSA.
-func Verify(public PublicKey, message, signature []byte, ctx string) bool {
- return verify(public, message, signature, []byte(ctx), false)
-}
-
-// VerifyPh returns true if the signature is valid. Failure cases are an
-// invalid signature or a public key that cannot be decoded.
-// This function supports the signature variant defined in RFC-8032: Ed448ph,
-// meaning it internally hashes the message using SHAKE-256.
-// A context string may be passed to this function; its length must be at most
-// 255 bytes. It may be empty.
-func VerifyPh(public PublicKey, message, signature []byte, ctx string) bool {
- return verify(public, message, signature, []byte(ctx), true)
-}
-
-func deriveSecretScalar(s *goldilocks.Scalar, h []byte) {
- h[0] &= 0xFC // The two least significant bits of the first octet are cleared,
-	h[paramB-1] = 0x00 // all eight bits of the last octet are cleared, and
- h[paramB-2] |= 0x80 // the highest bit of the second to last octet is set.
- s.FromBytes(h[:paramB])
-}
-
-// isLessThanOrder returns true if 0 <= x < order and if the last byte of x is zero.
-func isLessThanOrder(x []byte) bool {
- order := goldilocks.Curve{}.Order()
- i := len(order) - 1
- for i > 0 && x[i] == order[i] {
- i--
- }
- return x[paramB-1] == 0 && x[i] < order[i]
-}
-
-func writeDom(h io.Writer, ctx []byte, preHash bool) {
- dom4 := "SigEd448"
- _, _ = h.Write([]byte(dom4))
-
- if preHash {
- _, _ = h.Write([]byte{byte(0x01), byte(len(ctx))})
- } else {
- _, _ = h.Write([]byte{byte(0x00), byte(len(ctx))})
- }
- _, _ = h.Write(ctx)
-}
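
For reference, a minimal sketch of how the API deleted above is consumed, using only identifiers visible in this hunk (GenerateKey, Sign, Verify, SignerOptions, VerifyAny); illustrative only, not part of the patch:

```go
package main

import (
	"crypto"
	"crypto/rand"
	"fmt"

	"github.com/cloudflare/circl/sign/ed448"
)

func main() {
	// Generate a fresh key pair; a nil reader falls back to crypto/rand.Reader.
	pub, priv, err := ed448.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}

	msg := []byte("hello")

	// Pure Ed448 with an empty context string.
	sig := ed448.Sign(priv, msg, "")
	fmt.Println(ed448.Verify(pub, msg, sig, "")) // true

	// The same operation through the generic crypto.Signer interface.
	opts := ed448.SignerOptions{Hash: crypto.Hash(0), Scheme: ed448.ED448}
	sig2, err := priv.Sign(nil, msg, opts)
	if err != nil {
		panic(err)
	}
	fmt.Println(ed448.VerifyAny(pub, msg, sig2, opts)) // true
}
```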
diff --git a/vendor/github.com/cloudflare/circl/sign/ed448/signapi.go b/vendor/github.com/cloudflare/circl/sign/ed448/signapi.go
deleted file mode 100644
index 22da8bc0..00000000
--- a/vendor/github.com/cloudflare/circl/sign/ed448/signapi.go
+++ /dev/null
@@ -1,87 +0,0 @@
-package ed448
-
-import (
- "crypto/rand"
- "encoding/asn1"
-
- "github.com/cloudflare/circl/sign"
-)
-
-var sch sign.Scheme = &scheme{}
-
-// Scheme returns the Ed448 signature scheme as a sign.Scheme.
-func Scheme() sign.Scheme { return sch }
-
-type scheme struct{}
-
-func (*scheme) Name() string { return "Ed448" }
-func (*scheme) PublicKeySize() int { return PublicKeySize }
-func (*scheme) PrivateKeySize() int { return PrivateKeySize }
-func (*scheme) SignatureSize() int { return SignatureSize }
-func (*scheme) SeedSize() int { return SeedSize }
-func (*scheme) TLSIdentifier() uint { return 0x0808 }
-func (*scheme) SupportsContext() bool { return true }
-func (*scheme) Oid() asn1.ObjectIdentifier {
- return asn1.ObjectIdentifier{1, 3, 101, 113}
-}
-
-func (*scheme) GenerateKey() (sign.PublicKey, sign.PrivateKey, error) {
- return GenerateKey(rand.Reader)
-}
-
-func (*scheme) Sign(
- sk sign.PrivateKey,
- message []byte,
- opts *sign.SignatureOpts,
-) []byte {
- priv, ok := sk.(PrivateKey)
- if !ok {
- panic(sign.ErrTypeMismatch)
- }
- ctx := ""
- if opts != nil {
- ctx = opts.Context
- }
- return Sign(priv, message, ctx)
-}
-
-func (*scheme) Verify(
- pk sign.PublicKey,
- message, signature []byte,
- opts *sign.SignatureOpts,
-) bool {
- pub, ok := pk.(PublicKey)
- if !ok {
- panic(sign.ErrTypeMismatch)
- }
- ctx := ""
- if opts != nil {
- ctx = opts.Context
- }
- return Verify(pub, message, signature, ctx)
-}
-
-func (*scheme) DeriveKey(seed []byte) (sign.PublicKey, sign.PrivateKey) {
- privateKey := NewKeyFromSeed(seed)
- publicKey := make(PublicKey, PublicKeySize)
- copy(publicKey, privateKey[SeedSize:])
- return publicKey, privateKey
-}
-
-func (*scheme) UnmarshalBinaryPublicKey(buf []byte) (sign.PublicKey, error) {
- if len(buf) < PublicKeySize {
- return nil, sign.ErrPubKeySize
- }
- pub := make(PublicKey, PublicKeySize)
- copy(pub, buf[:PublicKeySize])
- return pub, nil
-}
-
-func (*scheme) UnmarshalBinaryPrivateKey(buf []byte) (sign.PrivateKey, error) {
- if len(buf) < PrivateKeySize {
- return nil, sign.ErrPrivKeySize
- }
- priv := make(PrivateKey, PrivateKeySize)
- copy(priv, buf[:PrivateKeySize])
- return priv, nil
-}
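
The deleted signapi.go adapter above plugs Ed448 into CIRCL's generic sign.Scheme interface; a minimal sketch of driving it through that interface, using only identifiers from the two deleted files:

```go
package main

import (
	"fmt"

	"github.com/cloudflare/circl/sign"
	"github.com/cloudflare/circl/sign/ed448"
)

func main() {
	var s sign.Scheme = ed448.Scheme()

	pub, priv, err := s.GenerateKey()
	if err != nil {
		panic(err)
	}

	msg := []byte("hello")
	// Contexts are supported (SupportsContext returns true); the string
	// must be at most 255 bytes long.
	opts := &sign.SignatureOpts{Context: "example"}

	sig := s.Sign(priv, msg, opts)
	fmt.Println(s.Name(), s.Verify(pub, msg, sig, opts)) // Ed448 true
}
```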
diff --git a/vendor/github.com/cloudflare/circl/sign/sign.go b/vendor/github.com/cloudflare/circl/sign/sign.go
deleted file mode 100644
index 13b20fa4..00000000
--- a/vendor/github.com/cloudflare/circl/sign/sign.go
+++ /dev/null
@@ -1,110 +0,0 @@
-// Package sign provides unified interfaces for signature schemes.
-//
-// A registry of schemes is available in the package
-//
-// github.com/cloudflare/circl/sign/schemes
-package sign
-
-import (
- "crypto"
- "encoding"
- "errors"
-)
-
-type SignatureOpts struct {
-	// If non-empty, the given context is included in the signature if the
-	// scheme supports it; otherwise signing results in an error.
- Context string
-}
-
-// A public key is used to verify a signature created with the corresponding
-// private key.
-type PublicKey interface {
- // Returns the signature scheme for this public key.
- Scheme() Scheme
- Equal(crypto.PublicKey) bool
- encoding.BinaryMarshaler
- crypto.PublicKey
-}
-
-// A private key allows one to create signatures.
-type PrivateKey interface {
- // Returns the signature scheme for this private key.
- Scheme() Scheme
- Equal(crypto.PrivateKey) bool
-	// For compatibility with the Go standard library.
- crypto.Signer
- crypto.PrivateKey
- encoding.BinaryMarshaler
-}
-
-// A Scheme represents a specific instance of a signature scheme.
-type Scheme interface {
- // Name of the scheme.
- Name() string
-
- // GenerateKey creates a new key-pair.
- GenerateKey() (PublicKey, PrivateKey, error)
-
- // Creates a signature using the PrivateKey on the given message and
- // returns the signature. opts are additional options which can be nil.
- //
- // Panics if key is nil or wrong type or opts context is not supported.
- Sign(sk PrivateKey, message []byte, opts *SignatureOpts) []byte
-
-	// Checks whether the given signature is a valid signature created by
-	// the private key corresponding to the given public key on the
- // given message. opts are additional options which can be nil.
- //
- // Panics if key is nil or wrong type or opts context is not supported.
- Verify(pk PublicKey, message []byte, signature []byte, opts *SignatureOpts) bool
-
- // Deterministically derives a keypair from a seed. If you're unsure,
- // you're better off using GenerateKey().
- //
- // Panics if seed is not of length SeedSize().
- DeriveKey(seed []byte) (PublicKey, PrivateKey)
-
- // Unmarshals a PublicKey from the provided buffer.
- UnmarshalBinaryPublicKey([]byte) (PublicKey, error)
-
-	// Unmarshals a PrivateKey from the provided buffer.
- UnmarshalBinaryPrivateKey([]byte) (PrivateKey, error)
-
- // Size of binary marshalled public keys.
- PublicKeySize() int
-
-	// Size of binary marshalled private keys.
- PrivateKeySize() int
-
- // Size of signatures.
- SignatureSize() int
-
- // Size of seeds.
- SeedSize() int
-
- // Returns whether contexts are supported.
- SupportsContext() bool
-}
-
-var (
- // ErrTypeMismatch is the error used if types of, for instance, private
- // and public keys don't match.
- ErrTypeMismatch = errors.New("types mismatch")
-
- // ErrSeedSize is the error used if the provided seed is of the wrong
- // size.
- ErrSeedSize = errors.New("wrong seed size")
-
- // ErrPubKeySize is the error used if the provided public key is of
- // the wrong size.
- ErrPubKeySize = errors.New("wrong size for public key")
-
- // ErrPrivKeySize is the error used if the provided private key is of
- // the wrong size.
- ErrPrivKeySize = errors.New("wrong size for private key")
-
- // ErrContextNotSupported is the error used if a context is not
- // supported.
- ErrContextNotSupported = errors.New("context not supported")
-)
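
The interface removed above is deliberately scheme-agnostic; below is a hedged sketch of a helper written against any sign.Scheme, exercising DeriveKey and the binary (un)marshalling round trip. The roundTrip name is hypothetical, not part of the package:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/cloudflare/circl/sign"
	"github.com/cloudflare/circl/sign/ed448"
)

// roundTrip derives a key pair from a seed, marshals both keys, restores
// them via the scheme's Unmarshal functions, and verifies a signature made
// with the restored private key. It works for any sign.Scheme.
func roundTrip(s sign.Scheme, seed, msg []byte) (bool, error) {
	pub, priv := s.DeriveKey(seed) // panics if len(seed) != s.SeedSize()

	pubRaw, err := pub.MarshalBinary()
	if err != nil {
		return false, err
	}
	privRaw, err := priv.MarshalBinary()
	if err != nil {
		return false, err
	}

	pub2, err := s.UnmarshalBinaryPublicKey(pubRaw)
	if err != nil {
		return false, err
	}
	priv2, err := s.UnmarshalBinaryPrivateKey(privRaw)
	if err != nil {
		return false, err
	}

	sig := s.Sign(priv2, msg, nil) // nil opts means an empty context
	return s.Verify(pub2, msg, sig, nil), nil
}

func main() {
	s := ed448.Scheme()
	seed := bytes.Repeat([]byte{0x42}, s.SeedSize())
	ok, err := roundTrip(s, seed, []byte("hello"))
	fmt.Println(ok, err) // true <nil>
}
```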
diff --git a/vendor/github.com/felixge/httpsnoop/.travis.yml b/vendor/github.com/felixge/httpsnoop/.travis.yml
deleted file mode 100644
index bfc42120..00000000
--- a/vendor/github.com/felixge/httpsnoop/.travis.yml
+++ /dev/null
@@ -1,6 +0,0 @@
-language: go
-
-go:
- - 1.6
- - 1.7
- - 1.8
diff --git a/vendor/github.com/felixge/httpsnoop/Makefile b/vendor/github.com/felixge/httpsnoop/Makefile
index 2d84889a..4e12afdd 100644
--- a/vendor/github.com/felixge/httpsnoop/Makefile
+++ b/vendor/github.com/felixge/httpsnoop/Makefile
@@ -1,7 +1,7 @@
.PHONY: ci generate clean
ci: clean generate
- go test -v ./...
+ go test -race -v ./...
generate:
go generate .
diff --git a/vendor/github.com/felixge/httpsnoop/README.md b/vendor/github.com/felixge/httpsnoop/README.md
index ddcecd13..cf6b42f3 100644
--- a/vendor/github.com/felixge/httpsnoop/README.md
+++ b/vendor/github.com/felixge/httpsnoop/README.md
@@ -7,8 +7,8 @@ http.Handlers.
Doing this requires non-trivial wrapping of the http.ResponseWriter interface,
which is also exposed for users interested in a more low-level API.
-[](https://godoc.org/github.com/felixge/httpsnoop)
-[](https://travis-ci.org/felixge/httpsnoop)
+[](https://pkg.go.dev/github.com/felixge/httpsnoop)
+[](https://github.com/felixge/httpsnoop/actions/workflows/main.yaml)
## Usage Example
diff --git a/vendor/github.com/felixge/httpsnoop/capture_metrics.go b/vendor/github.com/felixge/httpsnoop/capture_metrics.go
index b77cc7c0..bec7b71b 100644
--- a/vendor/github.com/felixge/httpsnoop/capture_metrics.go
+++ b/vendor/github.com/felixge/httpsnoop/capture_metrics.go
@@ -52,7 +52,7 @@ func (m *Metrics) CaptureMetrics(w http.ResponseWriter, fn func(http.ResponseWri
return func(code int) {
next(code)
- if !headerWritten {
+ if !(code >= 100 && code <= 199) && !headerWritten {
m.Code = code
headerWritten = true
}
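
The amended condition stops 1xx informational responses (e.g. 103 Early Hints) from being latched as the final Metrics.Code; only the first non-informational WriteHeader is recorded. A sketch of how this surfaces to callers, assuming the package-level httpsnoop.CaptureMetrics(handler, w, r) helper and the Metrics fields (Code, Duration, Written) documented in the project README, and a Go release that can emit 1xx responses (Go 1.19+):

```go
package main

import (
	"log"
	"net/http"

	"github.com/felixge/httpsnoop"
)

func main() {
	app := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusEarlyHints) // 103: informational, no longer latched
		w.WriteHeader(http.StatusOK)         // 200: recorded as Metrics.Code
		_, _ = w.Write([]byte("done"))
	})

	wrapped := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		m := httpsnoop.CaptureMetrics(app, w, r)
		log.Printf("code=%d duration=%s bytes=%d", m.Code, m.Duration, m.Written)
	})

	log.Fatal(http.ListenAndServe(":8080", wrapped))
}
```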
diff --git a/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go b/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go
index 31cbdfb8..101cedde 100644
--- a/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go
+++ b/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go
@@ -1,5 +1,5 @@
// +build go1.8
-// Code generated by "httpsnoop/codegen"; DO NOT EDIT
+// Code generated by "httpsnoop/codegen"; DO NOT EDIT.
package httpsnoop
diff --git a/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go b/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go
index ab99c07c..e0951df1 100644
--- a/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go
+++ b/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go
@@ -1,5 +1,5 @@
// +build !go1.8
-// Code generated by "httpsnoop/codegen"; DO NOT EDIT
+// Code generated by "httpsnoop/codegen"; DO NOT EDIT.
package httpsnoop
diff --git a/vendor/github.com/flosch/pongo2/.gitattributes b/vendor/github.com/flosch/pongo2/.gitattributes
deleted file mode 100644
index fcadb2cf..00000000
--- a/vendor/github.com/flosch/pongo2/.gitattributes
+++ /dev/null
@@ -1 +0,0 @@
-* text eol=lf
diff --git a/vendor/github.com/flosch/pongo2/.gitignore b/vendor/github.com/flosch/pongo2/.gitignore
deleted file mode 100644
index 1346be55..00000000
--- a/vendor/github.com/flosch/pongo2/.gitignore
+++ /dev/null
@@ -1,41 +0,0 @@
-# Compiled Object files, Static and Dynamic libs (Shared Objects)
-*.o
-*.a
-*.so
-
-# Folders
-_obj
-_test
-.idea
-.vscode
-
-# Architecture specific extensions/prefixes
-*.[568vq]
-[568vq].out
-
-*.cgo1.go
-*.cgo2.c
-_cgo_defun.c
-_cgo_gotypes.go
-_cgo_export.*
-
-_testmain.go
-
-*.exe
-
-.project
-EBNF.txt
-test1.tpl
-pongo2_internal_test.go
-tpl-error.out
-/count.out
-/cover.out
-*.swp
-*.iml
-/cpu.out
-/mem.out
-/pongo2.test
-*.error
-/profile
-/coverage.out
-/pongo2_internal_test.ignore
diff --git a/vendor/github.com/flosch/pongo2/.travis.yml b/vendor/github.com/flosch/pongo2/.travis.yml
deleted file mode 100644
index e39e5d05..00000000
--- a/vendor/github.com/flosch/pongo2/.travis.yml
+++ /dev/null
@@ -1,8 +0,0 @@
-language: go
-os:
- - linux
- - osx
-go:
- - 1.12
-script:
- - go test -v
diff --git a/vendor/github.com/flosch/pongo2/AUTHORS b/vendor/github.com/flosch/pongo2/AUTHORS
deleted file mode 100644
index 601697cf..00000000
--- a/vendor/github.com/flosch/pongo2/AUTHORS
+++ /dev/null
@@ -1,11 +0,0 @@
-Main author and maintainer of pongo2:
-
-* Florian Schlachter
-
-Contributors (in no specific order):
-
-* @romanoaugusto88
-* @vitalbh
-* @blaubaer
-
-Feel free to add yourself to the list or to modify your entry if you did a contribution.
diff --git a/vendor/github.com/flosch/pongo2/LICENSE b/vendor/github.com/flosch/pongo2/LICENSE
deleted file mode 100644
index e876f869..00000000
--- a/vendor/github.com/flosch/pongo2/LICENSE
+++ /dev/null
@@ -1,20 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2013-2014 Florian Schlachter
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software is furnished to do so,
-subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/flosch/pongo2/README.md b/vendor/github.com/flosch/pongo2/README.md
deleted file mode 100644
index e59694e2..00000000
--- a/vendor/github.com/flosch/pongo2/README.md
+++ /dev/null
@@ -1,167 +0,0 @@
-# [pongo](https://en.wikipedia.org/wiki/Pongo_%28genus%29)2
-
-[](https://pkg.go.dev/flosch/pongo2)
-[](https://travis-ci.org/flosch/pongo2)
-
-pongo2 is a Django-syntax-like templating language.
-
-Install/update using `go get` (no dependencies required by pongo2):
-
-```sh
-go get -u github.com/flosch/pongo2
-```
-
-Please use the [issue tracker](https://github.com/flosch/pongo2/issues) if you're encountering any problems with pongo2 or if you need help with implementing tags or filters ([create a ticket!](https://github.com/flosch/pongo2/issues/new)).
-
-## First impression of a template
-
-```django
-<html>
-  <head>
-    <title>Our admins and users</title>
-  </head>
-
-  {# This is a short example to give you a quick overview of pongo2's syntax. #}
-  {% macro user_details(user, is_admin=false) %}
-    <div class="user_item">
-      <h2 {% if (user.karma >= 40) || (user.karma > calc_avg_karma(userlist)+5) %} class="karma-good"{%
-      endif %}>
-        {{ user }}
-      </h2>
-
-      <p>This user registered {{ user.register_date|naturaltime }}.</p>
-
-      <p>The user's biography:</p>
-      <p>
-        {{ user.biography|markdown|truncatewords_html:15 }}
-        <a href="/user/{{ user.id }}/">read more</a>
-      </p>
-
-      {% if is_admin %}
-        <p>This user is an admin!</p>
-      {% endif %}
-    </div>
-  {% endmacro %}
-
-  <body>
-    <h1>Our admins</h1>
-    {% for admin in adminlist %} {{ user_details(admin, true) }} {% endfor %}
-
-    <h1>Our members</h1>
-    {% for user in userlist %} {{ user_details(user) }} {% endfor %}
-  </body>
-</html>
-```
-
-## Features
-
-- Syntax- and feature-set-compatible with [Django 1.7](https://django.readthedocs.io/en/1.7.x/topics/templates.html)
-- [Advanced C-like expressions](https://github.com/flosch/pongo2/blob/master/template_tests/expressions.tpl).
-- [Complex function calls within expressions](https://github.com/flosch/pongo2/blob/master/template_tests/function_calls_wrapper.tpl).
-- [Easy API to create new filters and tags](http://godoc.org/github.com/flosch/pongo2#RegisterFilter) ([including parsing arguments](http://godoc.org/github.com/flosch/pongo2#Parser))
-- Additional features:
- - Macros including importing macros from other files (see [template_tests/macro.tpl](https://github.com/flosch/pongo2/blob/master/template_tests/macro.tpl))
- - [Template sandboxing](https://godoc.org/github.com/flosch/pongo2#TemplateSet) ([directory patterns](http://golang.org/pkg/path/filepath/#Match), banned tags/filters)
-
-## Caveats
-
-### Filters
-
-- **date** / **time**: The `date` and `time` filters currently use Go's time- and date-format (not Django's). [Take a look at the format here](http://golang.org/pkg/time/#Time.Format).
-- **stringformat**: `stringformat` does **not** take Python's string format syntax as a parameter, instead it takes Go's. Essentially `{{ 3.14|stringformat:"pi is %.2f" }}` is `fmt.Sprintf("pi is %.2f", 3.14)`.
-- **escape** / **force_escape**: Unlike Django's behaviour, the `escape`-filter is applied immediately. Therefore there is no need for a `force_escape`-filter yet.
-
-### Tags
-
-- **for**: All the `forloop` fields (like `forloop.counter`) are written with a capital letter at the beginning. For example, the `counter` can be accessed by `forloop.Counter` and the parentloop by `forloop.Parentloop`.
-- **now**: takes Go's time format (see **date** and **time**-filter).
-
-### Misc
-
-- **not in-operator**: You can check whether a map/struct/string contains a key/field/substring by using the in-operator (or the negation of it):
- `{% if key in map %}Key is in map{% else %}Key not in map{% endif %}` or `{% if !(key in map) %}Key is NOT in map{% else %}Key is in map{% endif %}`.
-
-## Add-ons, libraries and helpers
-
-### Official
-
-- [pongo2-addons](https://github.com/flosch/pongo2-addons) - Official additional filters/tags for pongo2 (for example a **markdown**-filter). They are in their own repository because they rely on 3rd-party libraries.
-
-### 3rd-party
-
-- [beego-pongo2](https://github.com/oal/beego-pongo2) - A tiny little helper for using Pongo2 with [Beego](https://github.com/astaxie/beego).
-- [beego-pongo2.v2](https://github.com/ipfans/beego-pongo2.v2) - Same as `beego-pongo2`, but for pongo2 v2.
-- [macaron-pongo2](https://github.com/macaron-contrib/pongo2) - pongo2 support for [Macaron](https://github.com/Unknwon/macaron), a modular web framework.
-- [ginpongo2](https://github.com/ngerakines/ginpongo2) - middleware for [gin](github.com/gin-gonic/gin) to use pongo2 templates
-- [Build'n support for Iris' template engine](https://github.com/kataras/iris)
-- [pongo2gin](https://gitlab.com/go-box/pongo2gin) - alternative renderer for [gin](github.com/gin-gonic/gin) to use pongo2 templates
-- [pongo2-trans](https://github.com/digitalcrab/pongo2trans) - `trans`-tag implementation for internationalization
-- [tpongo2](https://github.com/tango-contrib/tpongo2) - pongo2 support for [Tango](https://github.com/lunny/tango), a micro-kernel & pluggable web framework.
-- [p2cli](https://github.com/wrouesnel/p2cli) - command line templating utility based on pongo2
-
-Please add your project to this list and send me a pull request when you've developed something nice for pongo2.
-
-## Who's using pongo2
-
-[I'm compiling a list of pongo2 users](https://github.com/flosch/pongo2/issues/241). Add your project or company!
-
-## API-usage examples
-
-Please see the documentation for a full list of provided API methods.
-
-### A tiny example (template string)
-
-```go
-// Compile the template first (i.e. create the AST)
-tpl, err := pongo2.FromString("Hello {{ name|capfirst }}!")
-if err != nil {
- panic(err)
-}
-// Now you can render the template with the given
-// pongo2.Context how often you want to.
-out, err := tpl.Execute(pongo2.Context{"name": "florian"})
-if err != nil {
- panic(err)
-}
-fmt.Println(out) // Output: Hello Florian!
-```
-
-## Example server-usage (template file)
-
-```go
-package main
-
-import (
- "github.com/flosch/pongo2"
- "net/http"
-)
-
-// Pre-compiling the templates at application startup using the
-// little Must()-helper function (Must() will panic if FromFile()
-// or FromString() returns an error - that's it).
-// It's faster to pre-compile the template once at startup and only
-// execute it later.
-var tplExample = pongo2.Must(pongo2.FromFile("example.html"))
-
-func examplePage(w http.ResponseWriter, r *http.Request) {
- // Execute the template per HTTP request
- err := tplExample.ExecuteWriter(pongo2.Context{"query": r.FormValue("query")}, w)
- if err != nil {
- http.Error(w, err.Error(), http.StatusInternalServerError)
- }
-}
-
-func main() {
- http.HandleFunc("/", examplePage)
- http.ListenAndServe(":8080", nil)
-}
-```
diff --git a/vendor/github.com/flosch/pongo2/context.go b/vendor/github.com/flosch/pongo2/context.go
deleted file mode 100644
index dbc5e3e3..00000000
--- a/vendor/github.com/flosch/pongo2/context.go
+++ /dev/null
@@ -1,137 +0,0 @@
-package pongo2
-
-import (
- "fmt"
- "regexp"
-
- "errors"
-)
-
-var reIdentifiers = regexp.MustCompile("^[a-zA-Z0-9_]+$")
-
-var autoescape = true
-
-func SetAutoescape(newValue bool) {
- autoescape = newValue
-}
-
-// A Context type provides constants, variables, instances or functions to a template.
-//
-// pongo2 automatically provides meta-information or functions through the "pongo2"-key.
-// Currently, context["pongo2"] contains the following keys:
-// 1. version: returns the version string
-//
-// Template examples for accessing items from your context:
-// {{ myconstant }}
-// {{ myfunc("test", 42) }}
-// {{ user.name }}
-// {{ pongo2.version }}
-type Context map[string]interface{}
-
-func (c Context) checkForValidIdentifiers() *Error {
- for k, v := range c {
- if !reIdentifiers.MatchString(k) {
- return &Error{
- Sender: "checkForValidIdentifiers",
- OrigError: fmt.Errorf("context-key '%s' (value: '%+v') is not a valid identifier", k, v),
- }
- }
- }
- return nil
-}
-
-// Update updates this context with the key/value-pairs from another context.
-func (c Context) Update(other Context) Context {
- for k, v := range other {
- c[k] = v
- }
- return c
-}
-
-// ExecutionContext contains all data important for the current rendering state.
-//
-// If you're writing a custom tag, your tag's Execute()-function will
-// have access to the ExecutionContext. This struct stores everything
-// related to the current rendering process's Context, including
-// the Context provided by the user (field Public).
-// You can safely use the Private context to provide data to the user's
-// template (like 'forloop' information). The Shared context is used
-// to share data between tags. All ExecutionContexts share this context.
-//
-// Please be careful when accessing the Public data.
-// PLEASE DO NOT MODIFY THE PUBLIC CONTEXT (read-only).
-//
-// To create your own execution context within tags, use the
-// NewChildExecutionContext(parent) function.
-type ExecutionContext struct {
- template *Template
-
- Autoescape bool
- Public Context
- Private Context
- Shared Context
-}
-
-var pongo2MetaContext = Context{
- "version": Version,
-}
-
-func newExecutionContext(tpl *Template, ctx Context) *ExecutionContext {
- privateCtx := make(Context)
-
- // Make the pongo2-related funcs/vars available to the context
- privateCtx["pongo2"] = pongo2MetaContext
-
- return &ExecutionContext{
- template: tpl,
-
- Public: ctx,
- Private: privateCtx,
- Autoescape: autoescape,
- }
-}
-
-func NewChildExecutionContext(parent *ExecutionContext) *ExecutionContext {
- newctx := &ExecutionContext{
- template: parent.template,
-
- Public: parent.Public,
- Private: make(Context),
- Autoescape: parent.Autoescape,
- }
- newctx.Shared = parent.Shared
-
- // Copy all existing private items
- newctx.Private.Update(parent.Private)
-
- return newctx
-}
-
-func (ctx *ExecutionContext) Error(msg string, token *Token) *Error {
- return ctx.OrigError(errors.New(msg), token)
-}
-
-func (ctx *ExecutionContext) OrigError(err error, token *Token) *Error {
- filename := ctx.template.name
- var line, col int
- if token != nil {
-		// A token is available; use its location information.
- filename = token.Filename
- line = token.Line
- col = token.Col
- }
- return &Error{
- Template: ctx.template,
- Filename: filename,
- Line: line,
- Column: col,
- Token: token,
- Sender: "execution",
- OrigError: err,
- }
-}
-
-func (ctx *ExecutionContext) Logf(format string, args ...interface{}) {
- ctx.template.set.logf(format, args...)
-}
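
A minimal sketch of the Context type deleted above in action: Update merges contexts in place, and the auto-injected meta-context is reachable as `{{ pongo2.version }}` (FromString/Execute as shown in the project README):

```go
package main

import (
	"fmt"

	"github.com/flosch/pongo2"
)

func main() {
	base := pongo2.Context{"title": "Report"}
	// Update copies the key/value pairs of the other context into base.
	base.Update(pongo2.Context{"user": "florian", "karma": 42})

	tpl, err := pongo2.FromString("{{ title }} by {{ user|capfirst }} ({{ karma }}), pongo2 {{ pongo2.version }}")
	if err != nil {
		panic(err)
	}
	out, err := tpl.Execute(base)
	if err != nil {
		panic(err)
	}
	fmt.Println(out)
}
```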
diff --git a/vendor/github.com/flosch/pongo2/doc.go b/vendor/github.com/flosch/pongo2/doc.go
deleted file mode 100644
index 5a23e2b2..00000000
--- a/vendor/github.com/flosch/pongo2/doc.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// A Django-syntax-like template engine
-//
-// Blog posts about pongo2 (including introduction and migration):
-// https://www.florian-schlachter.de/?tag=pongo2
-//
-// Complete documentation on the template language:
-// https://docs.djangoproject.com/en/dev/topics/templates/
-//
-// Try out pongo2 live in the pongo2 playground:
-// https://www.florian-schlachter.de/pongo2/
-//
-// Make sure to read README.md in the repository as well.
-//
-// A tiny example with template strings:
-//
-// (Snippet on playground: https://www.florian-schlachter.de/pongo2/?id=1206546277)
-//
-//	// Compile the template first (i.e. create the AST)
-// tpl, err := pongo2.FromString("Hello {{ name|capfirst }}!")
-// if err != nil {
-// panic(err)
-// }
-// // Now you can render the template with the given
-// // pongo2.Context how often you want to.
-// out, err := tpl.Execute(pongo2.Context{"name": "fred"})
-// if err != nil {
-// panic(err)
-// }
-// fmt.Println(out) // Output: Hello Fred!
-//
-package pongo2
diff --git a/vendor/github.com/flosch/pongo2/error.go b/vendor/github.com/flosch/pongo2/error.go
deleted file mode 100644
index 8aec8c10..00000000
--- a/vendor/github.com/flosch/pongo2/error.go
+++ /dev/null
@@ -1,91 +0,0 @@
-package pongo2
-
-import (
- "bufio"
- "fmt"
- "os"
-)
-
-// The Error type is being used to address an error during lexing, parsing or
-// execution. If you want to return an error object (for example in your own
-// tag or filter) fill this object with as much information as you have.
-// Make sure "Sender" is always given (if you're returning an error within
-// a filter, make Sender equals 'filter:yourfilter'; same goes for tags: 'tag:mytag').
-// It's okay if you only fill in ErrorMsg if you don't have any other details at hand.
-type Error struct {
- Template *Template
- Filename string
- Line int
- Column int
- Token *Token
- Sender string
- OrigError error
-}
-
-func (e *Error) updateFromTokenIfNeeded(template *Template, t *Token) *Error {
- if e.Template == nil {
- e.Template = template
- }
-
- if e.Token == nil {
- e.Token = t
- if e.Line <= 0 {
- e.Line = t.Line
- e.Column = t.Col
- }
- }
-
- return e
-}
-
-// Returns a nice formatted error string.
-func (e *Error) Error() string {
- s := "[Error"
- if e.Sender != "" {
- s += " (where: " + e.Sender + ")"
- }
- if e.Filename != "" {
- s += " in " + e.Filename
- }
- if e.Line > 0 {
- s += fmt.Sprintf(" | Line %d Col %d", e.Line, e.Column)
- if e.Token != nil {
- s += fmt.Sprintf(" near '%s'", e.Token.Val)
- }
- }
- s += "] "
- s += e.OrigError.Error()
- return s
-}
-
-// RawLine returns the affected line from the original template, if available.
-func (e *Error) RawLine() (line string, available bool, outErr error) {
- if e.Line <= 0 || e.Filename == "" {
- return "", false, nil
- }
-
- filename := e.Filename
- if e.Template != nil {
- filename = e.Template.set.resolveFilename(e.Template, e.Filename)
- }
- file, err := os.Open(filename)
- if err != nil {
- return "", false, err
- }
- defer func() {
- err := file.Close()
- if err != nil && outErr == nil {
- outErr = err
- }
- }()
-
- scanner := bufio.NewScanner(file)
- l := 0
- for scanner.Scan() {
- l++
- if l == e.Line {
- return scanner.Text(), true, nil
- }
- }
- return "", false, nil
-}
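
A small usage sketch for the Error type above, assuming the public API this vendored copy exposes: compilation errors can be unwrapped to *pongo2.Error for position info, and RawLine only yields a line for file-backed templates.

```go
package main

import (
	"fmt"

	"github.com/flosch/pongo2"
)

func main() {
	// Deliberate error: the filter does not exist.
	_, err := pongo2.FromString("Hello {{ name|doesnotexist }}!")
	if pErr, ok := err.(*pongo2.Error); ok {
		fmt.Println(pErr) // [Error (where: parser) ...] Filter 'doesnotexist' does not exist.
		if line, ok, _ := pErr.RawLine(); ok {
			// Only succeeds for templates loaded from a file.
			fmt.Println("offending line:", line)
		}
	}
}
```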
diff --git a/vendor/github.com/flosch/pongo2/filters.go b/vendor/github.com/flosch/pongo2/filters.go
deleted file mode 100644
index 8d4c89e2..00000000
--- a/vendor/github.com/flosch/pongo2/filters.go
+++ /dev/null
@@ -1,141 +0,0 @@
-package pongo2
-
-import (
- "fmt"
-)
-
-// FilterFunction is the type filter functions must fulfil
-type FilterFunction func(in *Value, param *Value) (out *Value, err *Error)
-
-var filters map[string]FilterFunction
-
-func init() {
- filters = make(map[string]FilterFunction)
-}
-
-// FilterExists returns true if the given filter is already registered
-func FilterExists(name string) bool {
- _, existing := filters[name]
- return existing
-}
-
-// RegisterFilter registers a new filter. If a filter with the same
-// name is already registered, RegisterFilter returns an error. You usually
-// want to call this function in the filter's init() function:
-// http://golang.org/doc/effective_go.html#init
-//
-// See http://www.florian-schlachter.de/post/pongo2/ for more about
-// writing filters and tags.
-func RegisterFilter(name string, fn FilterFunction) error {
- if FilterExists(name) {
- return fmt.Errorf("filter with name '%s' is already registered", name)
- }
- filters[name] = fn
- return nil
-}
-
-// ReplaceFilter replaces an already registered filter with a new implementation. Use this
-// function with caution since it allows you to change existing filter behaviour.
-func ReplaceFilter(name string, fn FilterFunction) error {
- if !FilterExists(name) {
- return fmt.Errorf("filter with name '%s' does not exist (therefore cannot be overridden)", name)
- }
- filters[name] = fn
- return nil
-}
-
-// MustApplyFilter behaves like ApplyFilter, but panics on an error.
-func MustApplyFilter(name string, value *Value, param *Value) *Value {
- val, err := ApplyFilter(name, value, param)
- if err != nil {
- panic(err)
- }
- return val
-}
-
-// ApplyFilter applies a filter to a given value using the given parameters.
-// Returns a *pongo2.Value or an error.
-func ApplyFilter(name string, value *Value, param *Value) (*Value, *Error) {
- fn, existing := filters[name]
- if !existing {
- return nil, &Error{
- Sender: "applyfilter",
- OrigError: fmt.Errorf("Filter with name '%s' not found.", name),
- }
- }
-
- // Make sure param is a *Value
- if param == nil {
- param = AsValue(nil)
- }
-
- return fn(value, param)
-}
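
To make the register/apply flow above concrete, a sketch with a hypothetical "shout" filter (the name and function are illustrative, not part of pongo2):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/flosch/pongo2"
)

// filterShout is a hypothetical custom filter.
func filterShout(in *pongo2.Value, param *pongo2.Value) (*pongo2.Value, *pongo2.Error) {
	return pongo2.AsValue(strings.ToUpper(in.String()) + "!"), nil
}

func init() {
	// RegisterFilter errors out if the name is already taken.
	if err := pongo2.RegisterFilter("shout", filterShout); err != nil {
		panic(err)
	}
}

func main() {
	// Filters can also be applied programmatically, outside a template.
	out := pongo2.MustApplyFilter("shout", pongo2.AsValue("hello"), nil)
	fmt.Println(out.String()) // HELLO!
}
```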
-
-type filterCall struct {
- token *Token
-
- name string
- parameter IEvaluator
-
- filterFunc FilterFunction
-}
-
-func (fc *filterCall) Execute(v *Value, ctx *ExecutionContext) (*Value, *Error) {
- var param *Value
- var err *Error
-
- if fc.parameter != nil {
- param, err = fc.parameter.Evaluate(ctx)
- if err != nil {
- return nil, err
- }
- } else {
- param = AsValue(nil)
- }
-
- filteredValue, err := fc.filterFunc(v, param)
- if err != nil {
- return nil, err.updateFromTokenIfNeeded(ctx.template, fc.token)
- }
- return filteredValue, nil
-}
-
-// Filter = IDENT | IDENT ":" FilterArg | IDENT "|" Filter
-func (p *Parser) parseFilter() (*filterCall, *Error) {
- identToken := p.MatchType(TokenIdentifier)
-
- // Check filter ident
- if identToken == nil {
- return nil, p.Error("Filter name must be an identifier.", nil)
- }
-
- filter := &filterCall{
- token: identToken,
- name: identToken.Val,
- }
-
- // Get the appropriate filter function and bind it
- filterFn, exists := filters[identToken.Val]
- if !exists {
- return nil, p.Error(fmt.Sprintf("Filter '%s' does not exist.", identToken.Val), identToken)
- }
-
- filter.filterFunc = filterFn
-
- // Check for filter-argument (2 tokens needed: ':' ARG)
- if p.Match(TokenSymbol, ":") != nil {
- if p.Peek(TokenSymbol, "}}") != nil {
- return nil, p.Error("Filter parameter required after ':'.", nil)
- }
-
- // Get filter argument expression
- v, err := p.parseVariableOrLiteral()
- if err != nil {
- return nil, err
- }
- filter.parameter = v
- }
-
- return filter, nil
-}
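
The grammar above allows chaining; for illustration, how a chained filter with a ':' argument behaves end to end:

```go
package main

import (
	"fmt"

	"github.com/flosch/pongo2"
)

func main() {
	// "lower" runs first, then "truncatechars" with its ':'-argument.
	tpl, err := pongo2.FromString("{{ title|lower|truncatechars:10 }}")
	if err != nil {
		panic(err)
	}
	out, err := tpl.Execute(pongo2.Context{"title": "PONGO2 TEMPLATE ENGINE"})
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // "pongo2 ..." (10 characters, ellipsis included)
}
```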
diff --git a/vendor/github.com/flosch/pongo2/filters_builtin.go b/vendor/github.com/flosch/pongo2/filters_builtin.go
deleted file mode 100644
index c0ec6161..00000000
--- a/vendor/github.com/flosch/pongo2/filters_builtin.go
+++ /dev/null
@@ -1,927 +0,0 @@
-package pongo2
-
-/* Filters that are provided through github.com/flosch/pongo2-addons:
- ------------------------------------------------------------------
-
- filesizeformat
- slugify
- timesince
- timeuntil
-
- Filters that won't be added:
- ----------------------------
-
- get_static_prefix (reason: web-framework specific)
- pprint (reason: python-specific)
- static (reason: web-framework specific)
-
- Reconsideration (not implemented yet):
- --------------------------------------
-
- force_escape (reason: not yet needed since this is the behaviour of pongo2's escape filter)
- safeseq (reason: same reason as `force_escape`)
- unordered_list (python-specific; not sure whether needed or not)
- dictsort (python-specific; maybe one could add a filter to sort a list of structs by a specific field name)
- dictsortreversed (see dictsort)
-*/
-
-import (
- "bytes"
- "fmt"
- "math/rand"
- "net/url"
- "regexp"
- "strconv"
- "strings"
- "time"
- "unicode/utf8"
-
- "errors"
-)
-
-func init() {
- rand.Seed(time.Now().Unix())
-
- RegisterFilter("escape", filterEscape)
- RegisterFilter("safe", filterSafe)
- RegisterFilter("escapejs", filterEscapejs)
-
- RegisterFilter("add", filterAdd)
- RegisterFilter("addslashes", filterAddslashes)
- RegisterFilter("capfirst", filterCapfirst)
- RegisterFilter("center", filterCenter)
- RegisterFilter("cut", filterCut)
- RegisterFilter("date", filterDate)
- RegisterFilter("default", filterDefault)
- RegisterFilter("default_if_none", filterDefaultIfNone)
- RegisterFilter("divisibleby", filterDivisibleby)
- RegisterFilter("first", filterFirst)
- RegisterFilter("floatformat", filterFloatformat)
- RegisterFilter("get_digit", filterGetdigit)
- RegisterFilter("iriencode", filterIriencode)
- RegisterFilter("join", filterJoin)
- RegisterFilter("last", filterLast)
- RegisterFilter("length", filterLength)
- RegisterFilter("length_is", filterLengthis)
- RegisterFilter("linebreaks", filterLinebreaks)
- RegisterFilter("linebreaksbr", filterLinebreaksbr)
- RegisterFilter("linenumbers", filterLinenumbers)
- RegisterFilter("ljust", filterLjust)
- RegisterFilter("lower", filterLower)
- RegisterFilter("make_list", filterMakelist)
- RegisterFilter("phone2numeric", filterPhone2numeric)
- RegisterFilter("pluralize", filterPluralize)
- RegisterFilter("random", filterRandom)
- RegisterFilter("removetags", filterRemovetags)
- RegisterFilter("rjust", filterRjust)
- RegisterFilter("slice", filterSlice)
- RegisterFilter("split", filterSplit)
- RegisterFilter("stringformat", filterStringformat)
- RegisterFilter("striptags", filterStriptags)
- RegisterFilter("time", filterDate) // time uses filterDate (same golang-format)
- RegisterFilter("title", filterTitle)
- RegisterFilter("truncatechars", filterTruncatechars)
- RegisterFilter("truncatechars_html", filterTruncatecharsHTML)
- RegisterFilter("truncatewords", filterTruncatewords)
- RegisterFilter("truncatewords_html", filterTruncatewordsHTML)
- RegisterFilter("upper", filterUpper)
- RegisterFilter("urlencode", filterUrlencode)
- RegisterFilter("urlize", filterUrlize)
- RegisterFilter("urlizetrunc", filterUrlizetrunc)
- RegisterFilter("wordcount", filterWordcount)
- RegisterFilter("wordwrap", filterWordwrap)
- RegisterFilter("yesno", filterYesno)
-
- RegisterFilter("float", filterFloat) // pongo-specific
- RegisterFilter("integer", filterInteger) // pongo-specific
-}
-
-func filterTruncatecharsHelper(s string, newLen int) string {
- runes := []rune(s)
- if newLen < len(runes) {
- if newLen >= 3 {
- return fmt.Sprintf("%s...", string(runes[:newLen-3]))
- }
- // Not enough space for the ellipsis
- return string(runes[:newLen])
- }
- return string(runes)
-}
-
-func filterTruncateHTMLHelper(value string, newOutput *bytes.Buffer, cond func() bool, fn func(c rune, s int, idx int) int, finalize func()) {
- vLen := len(value)
- var tagStack []string
- idx := 0
-
- for idx < vLen && !cond() {
- c, s := utf8.DecodeRuneInString(value[idx:])
- if c == utf8.RuneError {
- idx += s
- continue
- }
-
- if c == '<' {
- newOutput.WriteRune(c)
- idx += s // consume "<"
-
- if idx+1 < vLen {
- if value[idx] == '/' {
- // Close tag
-
- newOutput.WriteString("/")
-
- tag := ""
- idx++ // consume "/"
-
- for idx < vLen {
- c2, size2 := utf8.DecodeRuneInString(value[idx:])
- if c2 == utf8.RuneError {
- idx += size2
- continue
- }
-
- // End of tag found
- if c2 == '>' {
- idx++ // consume ">"
- break
- }
- tag += string(c2)
- idx += size2
- }
-
- if len(tagStack) > 0 {
- // Ideally, the close tag is at the TOP of the tag stack.
- // In malformed HTML it may not be, so iterate through the stack and remove the tag
- for i := len(tagStack) - 1; i >= 0; i-- {
- if tagStack[i] == tag {
- // Found the tag
- tagStack[i] = tagStack[len(tagStack)-1]
- tagStack = tagStack[:len(tagStack)-1]
- break
- }
- }
- }
-
- newOutput.WriteString(tag)
- newOutput.WriteString(">")
- } else {
- // Open tag
-
- tag := ""
-
- params := false
- for idx < vLen {
- c2, size2 := utf8.DecodeRuneInString(value[idx:])
- if c2 == utf8.RuneError {
- idx += size2
- continue
- }
-
- newOutput.WriteRune(c2)
-
- // End of tag found
- if c2 == '>' {
- idx++ // consume ">"
- break
- }
-
- if !params {
- if c2 == ' ' {
- params = true
- } else {
- tag += string(c2)
- }
- }
-
- idx += size2
- }
-
- // Add tag to stack
- tagStack = append(tagStack, tag)
- }
- }
- } else {
- idx = fn(c, s, idx)
- }
- }
-
- finalize()
-
- for i := len(tagStack) - 1; i >= 0; i-- {
- tag := tagStack[i]
- // Close everything from the regular tag stack
- newOutput.WriteString(fmt.Sprintf("%s>", tag))
- }
-}
-
-func filterTruncatechars(in *Value, param *Value) (*Value, *Error) {
- s := in.String()
- newLen := param.Integer()
- return AsValue(filterTruncatecharsHelper(s, newLen)), nil
-}
-
-func filterTruncatecharsHTML(in *Value, param *Value) (*Value, *Error) {
- value := in.String()
- newLen := max(param.Integer()-3, 0)
-
- newOutput := bytes.NewBuffer(nil)
-
- textcounter := 0
-
- filterTruncateHTMLHelper(value, newOutput, func() bool {
- return textcounter >= newLen
- }, func(c rune, s int, idx int) int {
- textcounter++
- newOutput.WriteRune(c)
-
- return idx + s
- }, func() {
- if textcounter >= newLen && textcounter < len(value) {
- newOutput.WriteString("...")
- }
- })
-
- return AsSafeValue(newOutput.String()), nil
-}
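
A quick behavioral sketch of the tag-aware truncation above: text characters are counted, tags are passed through, and any still-open tags are closed at the end. The commented output is what the helper's logic should produce for this input.

```go
package main

import (
	"fmt"

	"github.com/flosch/pongo2"
)

func main() {
	out := pongo2.MustApplyFilter("truncatechars_html",
		pongo2.AsValue("<p>Hello <b>beautiful</b> world</p>"), pongo2.AsValue(10))
	fmt.Println(out.String()) // <p>Hello <b>b...</b></p>
}
```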
-
-func filterTruncatewords(in *Value, param *Value) (*Value, *Error) {
- words := strings.Fields(in.String())
- n := param.Integer()
- if n <= 0 {
- return AsValue(""), nil
- }
- nlen := min(len(words), n)
- out := make([]string, 0, nlen)
- for i := 0; i < nlen; i++ {
- out = append(out, words[i])
- }
-
- if n < len(words) {
- out = append(out, "...")
- }
-
- return AsValue(strings.Join(out, " ")), nil
-}
-
-func filterTruncatewordsHTML(in *Value, param *Value) (*Value, *Error) {
- value := in.String()
- newLen := max(param.Integer(), 0)
-
- newOutput := bytes.NewBuffer(nil)
-
- wordcounter := 0
-
- filterTruncateHTMLHelper(value, newOutput, func() bool {
- return wordcounter >= newLen
- }, func(_ rune, _ int, idx int) int {
- // Get next word
- wordFound := false
-
- for idx < len(value) {
- c2, size2 := utf8.DecodeRuneInString(value[idx:])
- if c2 == utf8.RuneError {
- idx += size2
- continue
- }
-
- if c2 == '<' {
- // HTML tag start, don't consume it
- return idx
- }
-
- newOutput.WriteRune(c2)
- idx += size2
-
- if c2 == ' ' || c2 == '.' || c2 == ',' || c2 == ';' {
- // Word ends here, stop capturing it now
- break
- } else {
- wordFound = true
- }
- }
-
- if wordFound {
- wordcounter++
- }
-
- return idx
- }, func() {
- if wordcounter >= newLen {
- newOutput.WriteString("...")
- }
- })
-
- return AsSafeValue(newOutput.String()), nil
-}
-
-func filterEscape(in *Value, param *Value) (*Value, *Error) {
- output := strings.Replace(in.String(), "&", "&", -1)
- output = strings.Replace(output, ">", ">", -1)
- output = strings.Replace(output, "<", "<", -1)
- output = strings.Replace(output, "\"", """, -1)
- output = strings.Replace(output, "'", "'", -1)
- return AsValue(output), nil
-}
-
-func filterSafe(in *Value, param *Value) (*Value, *Error) {
- return in, nil // nothing to do here, just to keep track of the safe application
-}
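
Given the replacement order above (the ampersand must be handled first so already-produced entities are not re-escaped), a sketch of the expected output:

```go
package main

import (
	"fmt"

	"github.com/flosch/pongo2"
)

func main() {
	in := pongo2.AsValue(`<a href="x">O'Neill & Sons</a>`)
	out := pongo2.MustApplyFilter("escape", in, nil)
	fmt.Println(out.String())
	// &lt;a href=&quot;x&quot;&gt;O&#39;Neill &amp; Sons&lt;/a&gt;
}
```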
-
-func filterEscapejs(in *Value, param *Value) (*Value, *Error) {
- sin := in.String()
-
- var b bytes.Buffer
-
- idx := 0
- for idx < len(sin) {
- c, size := utf8.DecodeRuneInString(sin[idx:])
- if c == utf8.RuneError {
- idx += size
- continue
- }
-
- if c == '\\' {
- // Escape seq?
- if idx+1 < len(sin) {
- switch sin[idx+1] {
- case 'r':
- b.WriteString(fmt.Sprintf(`\u%04X`, '\r'))
- idx += 2
- continue
- case 'n':
- b.WriteString(fmt.Sprintf(`\u%04X`, '\n'))
- idx += 2
- continue
- /*case '\'':
- b.WriteString(fmt.Sprintf(`\u%04X`, '\''))
- idx += 2
- continue
- case '"':
- b.WriteString(fmt.Sprintf(`\u%04X`, '"'))
- idx += 2
- continue*/
- }
- }
- }
-
- if (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || c == ' ' || c == '/' {
- b.WriteRune(c)
- } else {
- b.WriteString(fmt.Sprintf(`\u%04X`, c))
- }
-
- idx += size
- }
-
- return AsValue(b.String()), nil
-}
-
-func filterAdd(in *Value, param *Value) (*Value, *Error) {
- if in.IsNumber() && param.IsNumber() {
- if in.IsFloat() || param.IsFloat() {
- return AsValue(in.Float() + param.Float()), nil
- }
- return AsValue(in.Integer() + param.Integer()), nil
- }
- // If in/param is not a number, we fall back to the
- // Value's String() conversion and simply concatenate both strings
- return AsValue(in.String() + param.String()), nil
-}
-
-func filterAddslashes(in *Value, param *Value) (*Value, *Error) {
- output := strings.Replace(in.String(), "\\", "\\\\", -1)
- output = strings.Replace(output, "\"", "\\\"", -1)
- output = strings.Replace(output, "'", "\\'", -1)
- return AsValue(output), nil
-}
-
-func filterCut(in *Value, param *Value) (*Value, *Error) {
- return AsValue(strings.Replace(in.String(), param.String(), "", -1)), nil
-}
-
-func filterLength(in *Value, param *Value) (*Value, *Error) {
- return AsValue(in.Len()), nil
-}
-
-func filterLengthis(in *Value, param *Value) (*Value, *Error) {
- return AsValue(in.Len() == param.Integer()), nil
-}
-
-func filterDefault(in *Value, param *Value) (*Value, *Error) {
- if !in.IsTrue() {
- return param, nil
- }
- return in, nil
-}
-
-func filterDefaultIfNone(in *Value, param *Value) (*Value, *Error) {
- if in.IsNil() {
- return param, nil
- }
- return in, nil
-}
-
-func filterDivisibleby(in *Value, param *Value) (*Value, *Error) {
- if param.Integer() == 0 {
- return AsValue(false), nil
- }
- return AsValue(in.Integer()%param.Integer() == 0), nil
-}
-
-func filterFirst(in *Value, param *Value) (*Value, *Error) {
- if in.CanSlice() && in.Len() > 0 {
- return in.Index(0), nil
- }
- return AsValue(""), nil
-}
-
-func filterFloatformat(in *Value, param *Value) (*Value, *Error) {
- val := in.Float()
-
- decimals := -1
- if !param.IsNil() {
- // Any argument provided?
- decimals = param.Integer()
- }
-
- // if the argument is not a number (e.g. empty), the default
- // behaviour is to trim the result
- trim := !param.IsNumber()
-
- if decimals <= 0 {
- // argument is negative or zero, so we
- // want the output being trimmed
- decimals = -decimals
- trim = true
- }
-
- if trim {
- // Remove zeroes
- if float64(int(val)) == val {
- return AsValue(in.Integer()), nil
- }
- }
-
- return AsValue(strconv.FormatFloat(val, 'f', decimals, 64)), nil
-}
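
The decimals/trim interplay above mirrors Django's floatformat; a few illustrative cases:

```go
package main

import (
	"fmt"

	"github.com/flosch/pongo2"
)

func main() {
	ff := func(v, p interface{}) string {
		return pongo2.MustApplyFilter("floatformat", pongo2.AsValue(v), pongo2.AsValue(p)).String()
	}
	fmt.Println(ff(34.23234, 3)) // 34.232  (fixed number of decimals)
	fmt.Println(ff(34.0, nil))   // 34      (no argument: whole numbers are trimmed)
	fmt.Println(ff(34.26, -1))   // 34.3    (negative: that many decimals, trimmed if whole)
}
```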
-
-func filterGetdigit(in *Value, param *Value) (*Value, *Error) {
- i := param.Integer()
- l := len(in.String()) // do NOT use in.Len() here!
- if i <= 0 || i > l {
- return in, nil
- }
- return AsValue(in.String()[l-i] - 48), nil
-}
-
-const filterIRIChars = "/#%[]=:;$&()+,!?*@'~"
-
-func filterIriencode(in *Value, param *Value) (*Value, *Error) {
- var b bytes.Buffer
-
- sin := in.String()
- for _, r := range sin {
- if strings.IndexRune(filterIRIChars, r) >= 0 {
- b.WriteRune(r)
- } else {
- b.WriteString(url.QueryEscape(string(r)))
- }
- }
-
- return AsValue(b.String()), nil
-}
-
-func filterJoin(in *Value, param *Value) (*Value, *Error) {
- if !in.CanSlice() {
- return in, nil
- }
- sep := param.String()
- sl := make([]string, 0, in.Len())
- for i := 0; i < in.Len(); i++ {
- sl = append(sl, in.Index(i).String())
- }
- return AsValue(strings.Join(sl, sep)), nil
-}
-
-func filterLast(in *Value, param *Value) (*Value, *Error) {
- if in.CanSlice() && in.Len() > 0 {
- return in.Index(in.Len() - 1), nil
- }
- return AsValue(""), nil
-}
-
-func filterUpper(in *Value, param *Value) (*Value, *Error) {
- return AsValue(strings.ToUpper(in.String())), nil
-}
-
-func filterLower(in *Value, param *Value) (*Value, *Error) {
- return AsValue(strings.ToLower(in.String())), nil
-}
-
-func filterMakelist(in *Value, param *Value) (*Value, *Error) {
- s := in.String()
- result := make([]string, 0, len(s))
- for _, c := range s {
- result = append(result, string(c))
- }
- return AsValue(result), nil
-}
-
-func filterCapfirst(in *Value, param *Value) (*Value, *Error) {
- if in.Len() <= 0 {
- return AsValue(""), nil
- }
- t := in.String()
- r, size := utf8.DecodeRuneInString(t)
- return AsValue(strings.ToUpper(string(r)) + t[size:]), nil
-}
-
-func filterCenter(in *Value, param *Value) (*Value, *Error) {
- width := param.Integer()
- slen := in.Len()
- if width <= slen {
- return in, nil
- }
-
- spaces := width - slen
- left := spaces/2 + spaces%2
- right := spaces / 2
-
- return AsValue(fmt.Sprintf("%s%s%s", strings.Repeat(" ", left),
- in.String(), strings.Repeat(" ", right))), nil
-}
-
-func filterDate(in *Value, param *Value) (*Value, *Error) {
- t, isTime := in.Interface().(time.Time)
- if !isTime {
- return nil, &Error{
- Sender: "filter:date",
- OrigError: errors.New("filter input argument must be of type 'time.Time'"),
- }
- }
- return AsValue(t.Format(param.String())), nil
-}
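
Note that filterDate hands the parameter straight to time.Time.Format, so the argument is a Go reference-time layout rather than a Django date string:

```go
package main

import (
	"fmt"
	"time"

	"github.com/flosch/pongo2"
)

func main() {
	// "2006-01-02 15:04" is Go's reference-time layout.
	tpl, err := pongo2.FromString(`{{ t|date:"2006-01-02 15:04" }}`)
	if err != nil {
		panic(err)
	}
	out, err := tpl.Execute(pongo2.Context{"t": time.Date(2024, 5, 17, 9, 30, 0, 0, time.UTC)})
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // 2024-05-17 09:30
}
```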
-
-func filterFloat(in *Value, param *Value) (*Value, *Error) {
- return AsValue(in.Float()), nil
-}
-
-func filterInteger(in *Value, param *Value) (*Value, *Error) {
- return AsValue(in.Integer()), nil
-}
-
-func filterLinebreaks(in *Value, param *Value) (*Value, *Error) {
- if in.Len() == 0 {
- return in, nil
- }
-
- var b bytes.Buffer
-
- // Newline = <br />
- // Double newline = <p>...</p>
- lines := strings.Split(in.String(), "\n")
- lenlines := len(lines)
-
- opened := false
-
- for idx, line := range lines {
-
- if !opened {
- b.WriteString("")
- opened = true
- }
-
- b.WriteString(line)
-
- if idx < lenlines-1 && strings.TrimSpace(lines[idx]) != "" {
- // We've not reached the end
- if strings.TrimSpace(lines[idx+1]) == "" {
- // Next line is empty
- if opened {
- b.WriteString("
")
- opened = false
- }
- } else {
- b.WriteString(" ")
- }
- }
- }
-
- if opened {
- b.WriteString("
")
- }
-
- return AsValue(b.String()), nil
-}
-
-func filterSplit(in *Value, param *Value) (*Value, *Error) {
- chunks := strings.Split(in.String(), param.String())
-
- return AsValue(chunks), nil
-}
-
-func filterLinebreaksbr(in *Value, param *Value) (*Value, *Error) {
- return AsValue(strings.Replace(in.String(), "\n", " ", -1)), nil
-}
-
-func filterLinenumbers(in *Value, param *Value) (*Value, *Error) {
- lines := strings.Split(in.String(), "\n")
- output := make([]string, 0, len(lines))
- for idx, line := range lines {
- output = append(output, fmt.Sprintf("%d. %s", idx+1, line))
- }
- return AsValue(strings.Join(output, "\n")), nil
-}
-
-func filterLjust(in *Value, param *Value) (*Value, *Error) {
- times := param.Integer() - in.Len()
- if times < 0 {
- times = 0
- }
- return AsValue(fmt.Sprintf("%s%s", in.String(), strings.Repeat(" ", times))), nil
-}
-
-func filterUrlencode(in *Value, param *Value) (*Value, *Error) {
- return AsValue(url.QueryEscape(in.String())), nil
-}
-
-// TODO: This regexp could do some work
-var filterUrlizeURLRegexp = regexp.MustCompile(`((((http|https)://)|www\.|((^|[ ])[0-9A-Za-z_\-]+(\.com|\.net|\.org|\.info|\.biz|\.de))))(?U:.*)([ ]+|$)`)
-var filterUrlizeEmailRegexp = regexp.MustCompile(`(\w+@\w+\.\w{2,4})`)
-
-func filterUrlizeHelper(input string, autoescape bool, trunc int) (string, error) {
- var soutErr error
- sout := filterUrlizeURLRegexp.ReplaceAllStringFunc(input, func(raw_url string) string {
- var prefix string
- var suffix string
- if strings.HasPrefix(raw_url, " ") {
- prefix = " "
- }
- if strings.HasSuffix(raw_url, " ") {
- suffix = " "
- }
-
- raw_url = strings.TrimSpace(raw_url)
-
- t, err := ApplyFilter("iriencode", AsValue(raw_url), nil)
- if err != nil {
- soutErr = err
- return ""
- }
- url := t.String()
-
- if !strings.HasPrefix(url, "http") {
- url = fmt.Sprintf("http://%s", url)
- }
-
- title := raw_url
-
- if trunc > 3 && len(title) > trunc {
- title = fmt.Sprintf("%s...", title[:trunc-3])
- }
-
- if autoescape {
- t, err := ApplyFilter("escape", AsValue(title), nil)
- if err != nil {
- soutErr = err
- return ""
- }
- title = t.String()
- }
-
- return fmt.Sprintf(`%s<a href="%s" rel="nofollow">%s</a>%s`, prefix, url, title, suffix)
- })
- if soutErr != nil {
- return "", soutErr
- }
-
- sout = filterUrlizeEmailRegexp.ReplaceAllStringFunc(sout, func(mail string) string {
- title := mail
-
- if trunc > 3 && len(title) > trunc {
- title = fmt.Sprintf("%s...", title[:trunc-3])
- }
-
- return fmt.Sprintf(`<a href="mailto:%s">%s</a>`, mail, title)
- })
-
- return sout, nil
-}
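
Assuming the anchor markup reconstructed above, the helper turns bare domains and e-mail addresses into links, roughly like this (output wrapped here for readability):

```go
package main

import (
	"fmt"

	"github.com/flosch/pongo2"
)

func main() {
	in := pongo2.AsValue("visit www.example.com or mail me@example.org")
	out := pongo2.MustApplyFilter("urlize", in, nil)
	fmt.Println(out.String())
	// visit <a href="http://www.example.com" rel="nofollow">www.example.com</a>
	// or mail <a href="mailto:me@example.org">me@example.org</a>
}
```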
-
-func filterUrlize(in *Value, param *Value) (*Value, *Error) {
- autoescape := true
- if param.IsBool() {
- autoescape = param.Bool()
- }
-
- s, err := filterUrlizeHelper(in.String(), autoescape, -1)
- if err != nil {
- return nil, &Error{
- Sender: "filter:urlize",
- OrigError: err,
- }
- }
-
- return AsValue(s), nil
-}
-
-func filterUrlizetrunc(in *Value, param *Value) (*Value, *Error) {
- s, err := filterUrlizeHelper(in.String(), true, param.Integer())
- if err != nil {
- return nil, &Error{
- Sender: "filter:urlizetrunc",
- OrigError: errors.New("you cannot pass more than 2 arguments to filter 'pluralize'"),
- }
- }
- return AsValue(s), nil
-}
-
-func filterStringformat(in *Value, param *Value) (*Value, *Error) {
- return AsValue(fmt.Sprintf(param.String(), in.Interface())), nil
-}
-
-var reStriptags = regexp.MustCompile("<[^>]*?>")
-
-func filterStriptags(in *Value, param *Value) (*Value, *Error) {
- s := in.String()
-
- // Strip all tags
- s = reStriptags.ReplaceAllString(s, "")
-
- return AsValue(strings.TrimSpace(s)), nil
-}
-
-// https://en.wikipedia.org/wiki/Phoneword
-var filterPhone2numericMap = map[string]string{
- "a": "2", "b": "2", "c": "2", "d": "3", "e": "3", "f": "3", "g": "4", "h": "4", "i": "4", "j": "5", "k": "5",
- "l": "5", "m": "6", "n": "6", "o": "6", "p": "7", "q": "7", "r": "7", "s": "7", "t": "8", "u": "8", "v": "8",
- "w": "9", "x": "9", "y": "9", "z": "9",
-}
-
-func filterPhone2numeric(in *Value, param *Value) (*Value, *Error) {
- sin := in.String()
- for k, v := range filterPhone2numericMap {
- sin = strings.Replace(sin, k, v, -1)
- sin = strings.Replace(sin, strings.ToUpper(k), v, -1)
- }
- return AsValue(sin), nil
-}
-
-func filterPluralize(in *Value, param *Value) (*Value, *Error) {
- if in.IsNumber() {
- // Works only on numbers
- if param.Len() > 0 {
- endings := strings.Split(param.String(), ",")
- if len(endings) > 2 {
- return nil, &Error{
- Sender: "filter:pluralize",
- OrigError: errors.New("you cannot pass more than 2 arguments to filter 'pluralize'"),
- }
- }
- if len(endings) == 1 {
- // 1 argument
- if in.Integer() != 1 {
- return AsValue(endings[0]), nil
- }
- } else {
- if in.Integer() != 1 {
- // 2 arguments
- return AsValue(endings[1]), nil
- }
- return AsValue(endings[0]), nil
- }
- } else {
- if in.Integer() != 1 {
- // return default 's'
- return AsValue("s"), nil
- }
- }
-
- return AsValue(""), nil
- }
- return nil, &Error{
- Sender: "filter:pluralize",
- OrigError: errors.New("filter 'pluralize' does only work on numbers"),
- }
-}
-
-func filterRandom(in *Value, param *Value) (*Value, *Error) {
- if !in.CanSlice() || in.Len() <= 0 {
- return in, nil
- }
- i := rand.Intn(in.Len())
- return in.Index(i), nil
-}
-
-func filterRemovetags(in *Value, param *Value) (*Value, *Error) {
- s := in.String()
- tags := strings.Split(param.String(), ",")
-
- // Strip only specific tags
- for _, tag := range tags {
- re := regexp.MustCompile(fmt.Sprintf("</?%s/?>", tag))
- s = re.ReplaceAllString(s, "")
- }
-
- return AsValue(strings.TrimSpace(s)), nil
-}
-
-func filterRjust(in *Value, param *Value) (*Value, *Error) {
- return AsValue(fmt.Sprintf(fmt.Sprintf("%%%ds", param.Integer()), in.String())), nil
-}
-
-func filterSlice(in *Value, param *Value) (*Value, *Error) {
- comp := strings.Split(param.String(), ":")
- if len(comp) != 2 {
- return nil, &Error{
- Sender: "filter:slice",
- OrigError: errors.New("Slice string must have the format 'from:to' [from/to can be omitted, but the ':' is required]"),
- }
- }
-
- if !in.CanSlice() {
- return in, nil
- }
-
- from := AsValue(comp[0]).Integer()
- to := in.Len()
-
- if from > to {
- from = to
- }
-
- vto := AsValue(comp[1]).Integer()
- if vto >= from && vto <= in.Len() {
- to = vto
- }
-
- return in.Slice(from, to), nil
-}
-
-func filterTitle(in *Value, param *Value) (*Value, *Error) {
- if !in.IsString() {
- return AsValue(""), nil
- }
- return AsValue(strings.Title(strings.ToLower(in.String()))), nil
-}
-
-func filterWordcount(in *Value, param *Value) (*Value, *Error) {
- return AsValue(len(strings.Fields(in.String()))), nil
-}
-
-func filterWordwrap(in *Value, param *Value) (*Value, *Error) {
- words := strings.Fields(in.String())
- wordsLen := len(words)
- wrapAt := param.Integer()
- if wrapAt <= 0 {
- return in, nil
- }
-
- // Ceiling division: number of wrapped lines needed
- linecount := (wordsLen + wrapAt - 1) / wrapAt
- lines := make([]string, 0, linecount)
- for i := 0; i < linecount; i++ {
- lines = append(lines, strings.Join(words[wrapAt*i:min(wrapAt*(i+1), wordsLen)], " "))
- }
- return AsValue(strings.Join(lines, "\n")), nil
-}
-
-func filterYesno(in *Value, param *Value) (*Value, *Error) {
- choices := map[int]string{
- 0: "yes",
- 1: "no",
- 2: "maybe",
- }
- paramString := param.String()
- customChoices := strings.Split(paramString, ",")
- if len(paramString) > 0 {
- if len(customChoices) > 3 {
- return nil, &Error{
- Sender: "filter:yesno",
- OrigError: fmt.Errorf("You cannot pass more than 3 options to the 'yesno'-filter (got: '%s').", paramString),
- }
- }
- if len(customChoices) < 2 {
- return nil, &Error{
- Sender: "filter:yesno",
- OrigError: fmt.Errorf("You must pass either no or at least 2 arguments to the 'yesno'-filter (got: '%s').", paramString),
- }
- }
-
- // Map to the options now
- choices[0] = customChoices[0]
- choices[1] = customChoices[1]
- if len(customChoices) == 3 {
- choices[2] = customChoices[2]
- }
- }
-
- // maybe
- if in.IsNil() {
- return AsValue(choices[2]), nil
- }
-
- // yes
- if in.IsTrue() {
- return AsValue(choices[0]), nil
- }
-
- // no
- return AsValue(choices[1]), nil
-}
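
To round off the built-ins, the yesno mapping above in action:

```go
package main

import (
	"fmt"

	"github.com/flosch/pongo2"
)

func main() {
	yn := func(v interface{}) string {
		return pongo2.MustApplyFilter("yesno", pongo2.AsValue(v), pongo2.AsValue("on,off,unknown")).String()
	}
	fmt.Println(yn(true))  // on
	fmt.Println(yn(false)) // off
	fmt.Println(yn(nil))   // unknown
}
```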
diff --git a/vendor/github.com/flosch/pongo2/helpers.go b/vendor/github.com/flosch/pongo2/helpers.go
deleted file mode 100644
index 880dbc04..00000000
--- a/vendor/github.com/flosch/pongo2/helpers.go
+++ /dev/null
@@ -1,15 +0,0 @@
-package pongo2
-
-func max(a, b int) int {
- if a > b {
- return a
- }
- return b
-}
-
-func min(a, b int) int {
- if a < b {
- return a
- }
- return b
-}
diff --git a/vendor/github.com/flosch/pongo2/lexer.go b/vendor/github.com/flosch/pongo2/lexer.go
deleted file mode 100644
index f1897984..00000000
--- a/vendor/github.com/flosch/pongo2/lexer.go
+++ /dev/null
@@ -1,432 +0,0 @@
-package pongo2
-
-import (
- "fmt"
- "strings"
- "unicode/utf8"
-
- "errors"
-)
-
-const (
- TokenError = iota
- EOF
-
- TokenHTML
-
- TokenKeyword
- TokenIdentifier
- TokenString
- TokenNumber
- TokenSymbol
-)
-
-var (
- tokenSpaceChars = " \n\r\t"
- tokenIdentifierChars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_"
- tokenIdentifierCharsWithDigits = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_0123456789"
- tokenDigits = "0123456789"
-
- // Available symbols in pongo2 (within filters/tag)
- TokenSymbols = []string{
- // 3-Char symbols
- "{{-", "-}}", "{%-", "-%}",
-
- // 2-Char symbols
- "==", ">=", "<=", "&&", "||", "{{", "}}", "{%", "%}", "!=", "<>",
-
- // 1-Char symbol
- "(", ")", "+", "-", "*", "<", ">", "/", "^", ",", ".", "!", "|", ":", "=", "%",
- }
-
- // Available keywords in pongo2
- TokenKeywords = []string{"in", "and", "or", "not", "true", "false", "as", "export"}
-)
-
-type TokenType int
-type Token struct {
- Filename string
- Typ TokenType
- Val string
- Line int
- Col int
- TrimWhitespaces bool
-}
-
-type lexerStateFn func() lexerStateFn
-type lexer struct {
- name string
- input string
- start int // start pos of the item
- pos int // current pos
- width int // width of last rune
- tokens []*Token
- errored bool
- startline int
- startcol int
- line int
- col int
-
- inVerbatim bool
- verbatimName string
-}
-
-func (t *Token) String() string {
- val := t.Val
- if len(val) > 1000 {
- val = fmt.Sprintf("%s...%s", val[:10], val[len(val)-5:])
- }
-
- typ := ""
- switch t.Typ {
- case TokenHTML:
- typ = "HTML"
- case TokenError:
- typ = "Error"
- case TokenIdentifier:
- typ = "Identifier"
- case TokenKeyword:
- typ = "Keyword"
- case TokenNumber:
- typ = "Number"
- case TokenString:
- typ = "String"
- case TokenSymbol:
- typ = "Symbol"
- default:
- typ = "Unknown"
- }
-
- return fmt.Sprintf("",
- typ, t.Typ, val, t.Line, t.Col, t.TrimWhitespaces)
-}
-
-func lex(name string, input string) ([]*Token, *Error) {
- l := &lexer{
- name: name,
- input: input,
- tokens: make([]*Token, 0, 100),
- line: 1,
- col: 1,
- startline: 1,
- startcol: 1,
- }
- l.run()
- if l.errored {
- errtoken := l.tokens[len(l.tokens)-1]
- return nil, &Error{
- Filename: name,
- Line: errtoken.Line,
- Column: errtoken.Col,
- Sender: "lexer",
- OrigError: errors.New(errtoken.Val),
- }
- }
- return l.tokens, nil
-}
-
-func (l *lexer) value() string {
- return l.input[l.start:l.pos]
-}
-
-func (l *lexer) length() int {
- return l.pos - l.start
-}
-
-func (l *lexer) emit(t TokenType) {
- tok := &Token{
- Filename: l.name,
- Typ: t,
- Val: l.value(),
- Line: l.startline,
- Col: l.startcol,
- }
-
- if t == TokenString {
- // Escape sequence \" in strings
- tok.Val = strings.Replace(tok.Val, `\"`, `"`, -1)
- tok.Val = strings.Replace(tok.Val, `\\`, `\`, -1)
- }
-
- if t == TokenSymbol && len(tok.Val) == 3 && (strings.HasSuffix(tok.Val, "-") || strings.HasPrefix(tok.Val, "-")) {
- tok.TrimWhitespaces = true
- tok.Val = strings.Replace(tok.Val, "-", "", -1)
- }
-
- l.tokens = append(l.tokens, tok)
- l.start = l.pos
- l.startline = l.line
- l.startcol = l.col
-}
-
-func (l *lexer) next() rune {
- if l.pos >= len(l.input) {
- l.width = 0
- return EOF
- }
- r, w := utf8.DecodeRuneInString(l.input[l.pos:])
- l.width = w
- l.pos += l.width
- l.col += l.width
- return r
-}
-
-func (l *lexer) backup() {
- l.pos -= l.width
- l.col -= l.width
-}
-
-func (l *lexer) peek() rune {
- r := l.next()
- l.backup()
- return r
-}
-
-func (l *lexer) ignore() {
- l.start = l.pos
- l.startline = l.line
- l.startcol = l.col
-}
-
-func (l *lexer) accept(what string) bool {
- if strings.IndexRune(what, l.next()) >= 0 {
- return true
- }
- l.backup()
- return false
-}
-
-func (l *lexer) acceptRun(what string) {
- for strings.IndexRune(what, l.next()) >= 0 {
- }
- l.backup()
-}
-
-func (l *lexer) errorf(format string, args ...interface{}) lexerStateFn {
- t := &Token{
- Filename: l.name,
- Typ: TokenError,
- Val: fmt.Sprintf(format, args...),
- Line: l.startline,
- Col: l.startcol,
- }
- l.tokens = append(l.tokens, t)
- l.errored = true
- l.startline = l.line
- l.startcol = l.col
- return nil
-}
-
-func (l *lexer) eof() bool {
- return l.start >= len(l.input)-1
-}
-
-func (l *lexer) run() {
- for {
- // TODO: Support verbatim tag names
- // https://docs.djangoproject.com/en/dev/ref/templates/builtins/#verbatim
- if l.inVerbatim {
- name := l.verbatimName
- if name != "" {
- name += " "
- }
- if strings.HasPrefix(l.input[l.pos:], fmt.Sprintf("{%% endverbatim %s%%}", name)) { // end verbatim
- if l.pos > l.start {
- l.emit(TokenHTML)
- }
- w := len("{% endverbatim %}")
- l.pos += w
- l.col += w
- l.ignore()
- l.inVerbatim = false
- }
- } else if strings.HasPrefix(l.input[l.pos:], "{% verbatim %}") { // tag
- if l.pos > l.start {
- l.emit(TokenHTML)
- }
- l.inVerbatim = true
- w := len("{% verbatim %}")
- l.pos += w
- l.col += w
- l.ignore()
- }
-
- if !l.inVerbatim {
- // Ignore single-line comments {# ... #}
- if strings.HasPrefix(l.input[l.pos:], "{#") {
- if l.pos > l.start {
- l.emit(TokenHTML)
- }
-
- l.pos += 2 // pass '{#'
- l.col += 2
-
- for {
- switch l.peek() {
- case EOF:
- l.errorf("Single-line comment not closed.")
- return
- case '\n':
- l.errorf("Newline not permitted in a single-line comment.")
- return
- }
-
- if strings.HasPrefix(l.input[l.pos:], "#}") {
- l.pos += 2 // pass '#}'
- l.col += 2
- break
- }
-
- l.next()
- }
- l.ignore() // ignore whole comment
-
- // Comment skipped
- continue // next token
- }
-
- if strings.HasPrefix(l.input[l.pos:], "{{") || // variable
- strings.HasPrefix(l.input[l.pos:], "{%") { // tag
- if l.pos > l.start {
- l.emit(TokenHTML)
- }
- l.tokenize()
- if l.errored {
- return
- }
- continue
- }
- }
-
- switch l.peek() {
- case '\n':
- l.line++
- l.col = 0
- }
- if l.next() == EOF {
- break
- }
- }
-
- if l.pos > l.start {
- l.emit(TokenHTML)
- }
-
- if l.inVerbatim {
- l.errorf("verbatim-tag not closed, got EOF.")
- }
-}
-
-func (l *lexer) tokenize() {
- for state := l.stateCode; state != nil; {
- state = state()
- }
-}
-
-func (l *lexer) stateCode() lexerStateFn {
-outer_loop:
- for {
- switch {
- case l.accept(tokenSpaceChars):
- if l.value() == "\n" {
- return l.errorf("Newline not allowed within tag/variable.")
- }
- l.ignore()
- continue
- case l.accept(tokenIdentifierChars):
- return l.stateIdentifier
- case l.accept(tokenDigits):
- return l.stateNumber
- case l.accept(`"'`):
- return l.stateString
- }
-
- // Check for symbol
- for _, sym := range TokenSymbols {
- if strings.HasPrefix(l.input[l.start:], sym) {
- l.pos += len(sym)
- l.col += l.length()
- l.emit(TokenSymbol)
-
- if sym == "%}" || sym == "-%}" || sym == "}}" || sym == "-}}" {
- // Tag/variable end, return after emit
- return nil
- }
-
- continue outer_loop
- }
- }
-
- break
- }
-
- // Normal shut down
- return nil
-}
-
-func (l *lexer) stateIdentifier() lexerStateFn {
- l.acceptRun(tokenIdentifierChars)
- l.acceptRun(tokenIdentifierCharsWithDigits)
- for _, kw := range TokenKeywords {
- if kw == l.value() {
- l.emit(TokenKeyword)
- return l.stateCode
- }
- }
- l.emit(TokenIdentifier)
- return l.stateCode
-}
-
-func (l *lexer) stateNumber() lexerStateFn {
- l.acceptRun(tokenDigits)
- if l.accept(tokenIdentifierCharsWithDigits) {
- // This seems to be an identifier starting with a number.
- // See https://github.com/flosch/pongo2/issues/151
- return l.stateIdentifier()
- }
- /*
- Maybe context-sensitive number lexing?
- * comments.0.Text // first comment
- * usercomments.1.0 // second user, first comment
- * if (score >= 8.5) // 8.5 as a number
-
- if l.peek() == '.' {
- l.accept(".")
- if !l.accept(tokenDigits) {
- return l.errorf("Malformed number.")
- }
- l.acceptRun(tokenDigits)
- }
- */
- l.emit(TokenNumber)
- return l.stateCode
-}
-
-func (l *lexer) stateString() lexerStateFn {
- quotationMark := l.value()
- l.ignore()
- l.startcol-- // we're starting the position at the first "
- for !l.accept(quotationMark) {
- switch l.next() {
- case '\\':
- // escape sequence
- switch l.peek() {
- case '"', '\\':
- l.next()
- default:
- return l.errorf("Unknown escape sequence: \\%c", l.peek())
- }
- case EOF:
- return l.errorf("Unexpected EOF, string not closed.")
- case '\n':
- return l.errorf("Newline in string is not allowed.")
- }
- }
- l.backup()
- l.emit(TokenString)
-
- l.next()
- l.ignore()
-
- return l.stateCode
-}
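
Since lex() is unexported, a package-internal sketch (e.g. from a _test.go file) of what the lexer above emits for a small template:

```go
package pongo2

import "fmt"

// exampleLexDump is illustrative only; it dumps the token stream
// produced by the unexported lex() function.
func exampleLexDump() {
	tokens, err := lex("example", "Hi {{ name|upper }}!")
	if err != nil {
		panic(err)
	}
	for _, t := range tokens {
		// e.g. <Token Typ=Symbol (7) Val='{{' Line=1 Col=4, WT=false>
		fmt.Println(t)
	}
}
```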
diff --git a/vendor/github.com/flosch/pongo2/nodes.go b/vendor/github.com/flosch/pongo2/nodes.go
deleted file mode 100644
index 5b039cdf..00000000
--- a/vendor/github.com/flosch/pongo2/nodes.go
+++ /dev/null
@@ -1,16 +0,0 @@
-package pongo2
-
-// The root document
-type nodeDocument struct {
- Nodes []INode
-}
-
-func (doc *nodeDocument) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- for _, n := range doc.Nodes {
- err := n.Execute(ctx, writer)
- if err != nil {
- return err
- }
- }
- return nil
-}
diff --git a/vendor/github.com/flosch/pongo2/nodes_html.go b/vendor/github.com/flosch/pongo2/nodes_html.go
deleted file mode 100644
index b980a3a5..00000000
--- a/vendor/github.com/flosch/pongo2/nodes_html.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package pongo2
-
-import (
- "strings"
-)
-
-type nodeHTML struct {
- token *Token
- trimLeft bool
- trimRight bool
-}
-
-func (n *nodeHTML) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- res := n.token.Val
- if n.trimLeft {
- res = strings.TrimLeft(res, tokenSpaceChars)
- }
- if n.trimRight {
- res = strings.TrimRight(res, tokenSpaceChars)
- }
- writer.WriteString(res)
- return nil
-}
diff --git a/vendor/github.com/flosch/pongo2/nodes_wrapper.go b/vendor/github.com/flosch/pongo2/nodes_wrapper.go
deleted file mode 100644
index d1bcb8d8..00000000
--- a/vendor/github.com/flosch/pongo2/nodes_wrapper.go
+++ /dev/null
@@ -1,16 +0,0 @@
-package pongo2
-
-type NodeWrapper struct {
- Endtag string
- nodes []INode
-}
-
-func (wrapper *NodeWrapper) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- for _, n := range wrapper.nodes {
- err := n.Execute(ctx, writer)
- if err != nil {
- return err
- }
- }
- return nil
-}
diff --git a/vendor/github.com/flosch/pongo2/options.go b/vendor/github.com/flosch/pongo2/options.go
deleted file mode 100644
index 9c39e467..00000000
--- a/vendor/github.com/flosch/pongo2/options.go
+++ /dev/null
@@ -1,26 +0,0 @@
-package pongo2
-
-// Options allow you to change the behavior of template-engine.
-// You can change the options before calling the Execute method.
-type Options struct {
- // If this is set to true the first newline after a block is removed (block, not variable tag!). Defaults to false.
- TrimBlocks bool
-
- // If this is set to true leading spaces and tabs are stripped from the start of a line to a block. Defaults to false
- LStripBlocks bool
-}
-
-func newOptions() *Options {
- return &Options{
- TrimBlocks: false,
- LStripBlocks: false,
- }
-}
-
-// Update updates this options from another options.
-func (opt *Options) Update(other *Options) *Options {
- opt.TrimBlocks = other.TrimBlocks
- opt.LStripBlocks = other.LStripBlocks
-
- return opt
-}
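
A minimal sketch of how these Options are meant to be combined, using only what this file defines: start from the defaults, then overlay another Options value via Update.

```go
package pongo2

// exampleOptions is illustrative only.
func exampleOptions() *Options {
	opts := newOptions() // both flags default to false
	opts.Update(&Options{
		TrimBlocks:   true, // drop the first newline after a block tag
		LStripBlocks: true, // strip leading whitespace before a block tag
	})
	return opts
}
```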
diff --git a/vendor/github.com/flosch/pongo2/parser.go b/vendor/github.com/flosch/pongo2/parser.go
deleted file mode 100644
index 19553f17..00000000
--- a/vendor/github.com/flosch/pongo2/parser.go
+++ /dev/null
@@ -1,309 +0,0 @@
-package pongo2
-
-import (
- "fmt"
- "strings"
-
- "errors"
-)
-
-type INode interface {
- Execute(*ExecutionContext, TemplateWriter) *Error
-}
-
-type IEvaluator interface {
- INode
- GetPositionToken() *Token
- Evaluate(*ExecutionContext) (*Value, *Error)
- FilterApplied(name string) bool
-}
-
-// The parser provides a comprehensive and easy-to-use toolset to
-// work with the template document and with the arguments provided by
-// the user for your custom tag.
-//
-// The parser works on a token list which will be provided by pongo2.
-// A token is a unit you can work with. Tokens are either of type identifier,
-// string, number, keyword, HTML or symbol.
-//
-// (See Token's documentation for more about tokens)
-type Parser struct {
- name string
- idx int
- tokens []*Token
- lastToken *Token
-
- // if the parser parses a template document, here will be
- // a reference to it (needed to access the template through Tags)
- template *Template
-}
-
-// Creates a new parser to parse tokens.
-// Used inside pongo2 to parse documents and to provide an easy-to-use
-// parser for tag authors
-func newParser(name string, tokens []*Token, template *Template) *Parser {
- p := &Parser{
- name: name,
- tokens: tokens,
- template: template,
- }
- if len(tokens) > 0 {
- p.lastToken = tokens[len(tokens)-1]
- }
- return p
-}
-
-// Consume one token. It will be gone forever.
-func (p *Parser) Consume() {
- p.ConsumeN(1)
-}
-
-// Consume N tokens. They will be gone forever.
-func (p *Parser) ConsumeN(count int) {
- p.idx += count
-}
-
-// Returns the current token.
-func (p *Parser) Current() *Token {
- return p.Get(p.idx)
-}
-
-// Returns the CURRENT token if the given type matches.
-// Consumes this token on success.
-func (p *Parser) MatchType(typ TokenType) *Token {
- if t := p.PeekType(typ); t != nil {
- p.Consume()
- return t
- }
- return nil
-}
-
-// Returns the CURRENT token if the given type AND value matches.
-// Consumes this token on success.
-func (p *Parser) Match(typ TokenType, val string) *Token {
- if t := p.Peek(typ, val); t != nil {
- p.Consume()
- return t
- }
- return nil
-}
-
-// Returns the CURRENT token if the given type AND *one* of
-// the given values matches.
-// Consumes this token on success.
-func (p *Parser) MatchOne(typ TokenType, vals ...string) *Token {
- for _, val := range vals {
- if t := p.Peek(typ, val); t != nil {
- p.Consume()
- return t
- }
- }
- return nil
-}
-
-// Returns the CURRENT token if the given type matches.
-// It DOES NOT consume the token.
-func (p *Parser) PeekType(typ TokenType) *Token {
- return p.PeekTypeN(0, typ)
-}
-
-// Returns the CURRENT token if the given type AND value matches.
-// It DOES NOT consume the token.
-func (p *Parser) Peek(typ TokenType, val string) *Token {
- return p.PeekN(0, typ, val)
-}
-
-// Returns the CURRENT token if the given type AND *one* of
-// the given values matches.
-// It DOES NOT consume the token.
-func (p *Parser) PeekOne(typ TokenType, vals ...string) *Token {
- for _, v := range vals {
- t := p.PeekN(0, typ, v)
- if t != nil {
- return t
- }
- }
- return nil
-}
-
-// Returns the tokens[current position + shift] token if the
-// given type AND value matches for that token.
-// DOES NOT consume the token.
-func (p *Parser) PeekN(shift int, typ TokenType, val string) *Token {
- t := p.Get(p.idx + shift)
- if t != nil {
- if t.Typ == typ && t.Val == val {
- return t
- }
- }
- return nil
-}
-
-// Returns the tokens[current position + shift] token if the given type matches.
-// DOES NOT consume the token.
-func (p *Parser) PeekTypeN(shift int, typ TokenType) *Token {
- t := p.Get(p.idx + shift)
- if t != nil {
- if t.Typ == typ {
- return t
- }
- }
- return nil
-}
-
-// Returns the UNCONSUMED token count.
-func (p *Parser) Remaining() int {
- return len(p.tokens) - p.idx
-}
-
-// Returns the total token count.
-func (p *Parser) Count() int {
- return len(p.tokens)
-}
-
-// Returns tokens[i] or NIL (if i >= len(tokens))
-func (p *Parser) Get(i int) *Token {
- if i < len(p.tokens) && i >= 0 {
- return p.tokens[i]
- }
- return nil
-}
-
-// Returns tokens[current-position + shift] or NIL
-// (if (current-position + shift) >= len(tokens))
-func (p *Parser) GetR(shift int) *Token {
- i := p.idx + shift
- return p.Get(i)
-}
-
-// Error produces a nice error message and returns an error-object.
-// The 'token'-argument is optional. If provided, it will take
-// the token's position information. If not provided, it will
-// automatically use the CURRENT token's position information.
-func (p *Parser) Error(msg string, token *Token) *Error {
- if token == nil {
- // Set current token
- token = p.Current()
- if token == nil {
- // Set to last token
- if len(p.tokens) > 0 {
- token = p.tokens[len(p.tokens)-1]
- }
- }
- }
- var line, col int
- if token != nil {
- line = token.Line
- col = token.Col
- }
- return &Error{
- Template: p.template,
- Filename: p.name,
- Sender: "parser",
- Line: line,
- Column: col,
- Token: token,
- OrigError: errors.New(msg),
- }
-}
-
-// Wraps all nodes between starting tag and "{% endtag %}" and provides
-// one simple interface to execute the wrapped nodes.
-// It returns a parser to process provided arguments to the tag.
-func (p *Parser) WrapUntilTag(names ...string) (*NodeWrapper, *Parser, *Error) {
- wrapper := &NodeWrapper{}
-
- var tagArgs []*Token
-
- for p.Remaining() > 0 {
- // New tag, check whether we have to stop wrapping here
- if p.Peek(TokenSymbol, "{%") != nil {
- tagIdent := p.PeekTypeN(1, TokenIdentifier)
-
- if tagIdent != nil {
- // We've found a possible end-tag
-
- found := false
- for _, n := range names {
- if tagIdent.Val == n {
- found = true
- break
- }
- }
-
- // We only process the tag if we've found an end tag
- if found {
- // Okay, endtag found.
- p.ConsumeN(2) // '{%' tagname
-
- for {
- if p.Match(TokenSymbol, "%}") != nil {
- // Okay, end the wrapping here
- wrapper.Endtag = tagIdent.Val
- return wrapper, newParser(p.template.name, tagArgs, p.template), nil
- }
- t := p.Current()
- p.Consume()
- if t == nil {
- return nil, nil, p.Error("Unexpected EOF.", p.lastToken)
- }
- tagArgs = append(tagArgs, t)
- }
- }
- }
-
- }
-
- // Otherwise process next element to be wrapped
- node, err := p.parseDocElement()
- if err != nil {
- return nil, nil, err
- }
- wrapper.nodes = append(wrapper.nodes, node)
- }
-
- return nil, nil, p.Error(fmt.Sprintf("Unexpected EOF, expected tag %s.", strings.Join(names, " or ")),
- p.lastToken)
-}
-
-// Skips all nodes between starting tag and "{% endtag %}"
-func (p *Parser) SkipUntilTag(names ...string) *Error {
- for p.Remaining() > 0 {
- // New tag, check whether we have to stop wrapping here
- if p.Peek(TokenSymbol, "{%") != nil {
- tagIdent := p.PeekTypeN(1, TokenIdentifier)
-
- if tagIdent != nil {
- // We've found a possible end-tag
-
- found := false
- for _, n := range names {
- if tagIdent.Val == n {
- found = true
- break
- }
- }
-
- // We only process the tag if we've found an end tag
- if found {
- // Okay, endtag found.
- p.ConsumeN(2) // '{%' tagname
-
- for {
- if p.Match(TokenSymbol, "%}") != nil {
- // Done skipping, exit.
- return nil
- }
- // Consume the end-tag's argument tokens to avoid an endless loop
- t := p.Current()
- p.Consume()
- if t == nil {
- return p.Error("Unexpected EOF.", p.lastToken)
- }
- }
- }
- }
- }
- t := p.Current()
- p.Consume()
- if t == nil {
- return p.Error("Unexpected EOF.", p.lastToken)
- }
- }
-
- return p.Error(fmt.Sprintf("Unexpected EOF, expected tag %s.", strings.Join(names, " or ")), p.lastToken)
-}
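
A sketch of WrapUntilTag as used from a custom tag parser: a hypothetical "twice" tag that renders its body two times. The TagParser-style signature and the RegisterTag wiring live in tags.go (not shown in this hunk), so treat the function shape here as illustrative.

```go
package pongo2

// Hypothetical {% twice %} ... {% endtwice %} tag node.
type tagTwiceNode struct {
	wrapper *NodeWrapper
}

func (n *tagTwiceNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
	for i := 0; i < 2; i++ {
		if err := n.wrapper.Execute(ctx, writer); err != nil {
			return err
		}
	}
	return nil
}

func tagTwiceParser(doc *Parser, start *Token, arguments *Parser) (INode, *Error) {
	// Wrap everything up to {% endtwice %}; the second return value parses
	// any tokens between the end-tag name and "%}".
	wrapper, endArgs, err := doc.WrapUntilTag("endtwice")
	if err != nil {
		return nil, err
	}
	if endArgs.Count() > 0 {
		return nil, endArgs.Error("endtwice takes no arguments.", nil)
	}
	return &tagTwiceNode{wrapper: wrapper}, nil
}
```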
diff --git a/vendor/github.com/flosch/pongo2/parser_document.go b/vendor/github.com/flosch/pongo2/parser_document.go
deleted file mode 100644
index e3ac2c8e..00000000
--- a/vendor/github.com/flosch/pongo2/parser_document.go
+++ /dev/null
@@ -1,59 +0,0 @@
-package pongo2
-
-// Doc = { ( Filter | Tag | HTML ) }
-func (p *Parser) parseDocElement() (INode, *Error) {
- t := p.Current()
-
- switch t.Typ {
- case TokenHTML:
- n := &nodeHTML{token: t}
- left := p.PeekTypeN(-1, TokenSymbol)
- right := p.PeekTypeN(1, TokenSymbol)
- n.trimLeft = left != nil && left.TrimWhitespaces
- n.trimRight = right != nil && right.TrimWhitespaces
- p.Consume() // consume HTML element
- return n, nil
- case TokenSymbol:
- switch t.Val {
- case "{{":
- // parse variable
- variable, err := p.parseVariableElement()
- if err != nil {
- return nil, err
- }
- return variable, nil
- case "{%":
- // parse tag
- tag, err := p.parseTagElement()
- if err != nil {
- return nil, err
- }
- return tag, nil
- }
- }
- return nil, p.Error("Unexpected token (only HTML/tags/filters in templates allowed)", t)
-}
-
-func (tpl *Template) parse() *Error {
- tpl.parser = newParser(tpl.name, tpl.tokens, tpl)
- doc, err := tpl.parser.parseDocument()
- if err != nil {
- return err
- }
- tpl.root = doc
- return nil
-}
-
-func (p *Parser) parseDocument() (*nodeDocument, *Error) {
- doc := &nodeDocument{}
-
- for p.Remaining() > 0 {
- node, err := p.parseDocElement()
- if err != nil {
- return nil, err
- }
- doc.Nodes = append(doc.Nodes, node)
- }
-
- return doc, nil
-}
diff --git a/vendor/github.com/flosch/pongo2/parser_expression.go b/vendor/github.com/flosch/pongo2/parser_expression.go
deleted file mode 100644
index 215b0afb..00000000
--- a/vendor/github.com/flosch/pongo2/parser_expression.go
+++ /dev/null
@@ -1,517 +0,0 @@
-package pongo2
-
-import (
- "fmt"
- "math"
-)
-
-type Expression struct {
- // TODO: Add location token?
- expr1 IEvaluator
- expr2 IEvaluator
- opToken *Token
-}
-
-type relationalExpression struct {
- // TODO: Add location token?
- expr1 IEvaluator
- expr2 IEvaluator
- opToken *Token
-}
-
-type simpleExpression struct {
- negate bool
- negativeSign bool
- term1 IEvaluator
- term2 IEvaluator
- opToken *Token
-}
-
-type term struct {
- // TODO: Add location token?
- factor1 IEvaluator
- factor2 IEvaluator
- opToken *Token
-}
-
-type power struct {
- // TODO: Add location token?
- power1 IEvaluator
- power2 IEvaluator
-}
-
-func (expr *Expression) FilterApplied(name string) bool {
- return expr.expr1.FilterApplied(name) && (expr.expr2 == nil ||
- (expr.expr2 != nil && expr.expr2.FilterApplied(name)))
-}
-
-func (expr *relationalExpression) FilterApplied(name string) bool {
- return expr.expr1.FilterApplied(name) && (expr.expr2 == nil ||
- (expr.expr2 != nil && expr.expr2.FilterApplied(name)))
-}
-
-func (expr *simpleExpression) FilterApplied(name string) bool {
- return expr.term1.FilterApplied(name) && (expr.term2 == nil ||
- (expr.term2 != nil && expr.term2.FilterApplied(name)))
-}
-
-func (expr *term) FilterApplied(name string) bool {
- return expr.factor1.FilterApplied(name) && (expr.factor2 == nil ||
- (expr.factor2 != nil && expr.factor2.FilterApplied(name)))
-}
-
-func (expr *power) FilterApplied(name string) bool {
- return expr.power1.FilterApplied(name) && (expr.power2 == nil ||
- (expr.power2 != nil && expr.power2.FilterApplied(name)))
-}
-
-func (expr *Expression) GetPositionToken() *Token {
- return expr.expr1.GetPositionToken()
-}
-
-func (expr *relationalExpression) GetPositionToken() *Token {
- return expr.expr1.GetPositionToken()
-}
-
-func (expr *simpleExpression) GetPositionToken() *Token {
- return expr.term1.GetPositionToken()
-}
-
-func (expr *term) GetPositionToken() *Token {
- return expr.factor1.GetPositionToken()
-}
-
-func (expr *power) GetPositionToken() *Token {
- return expr.power1.GetPositionToken()
-}
-
-func (expr *Expression) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- value, err := expr.Evaluate(ctx)
- if err != nil {
- return err
- }
- writer.WriteString(value.String())
- return nil
-}
-
-func (expr *relationalExpression) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- value, err := expr.Evaluate(ctx)
- if err != nil {
- return err
- }
- writer.WriteString(value.String())
- return nil
-}
-
-func (expr *simpleExpression) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- value, err := expr.Evaluate(ctx)
- if err != nil {
- return err
- }
- writer.WriteString(value.String())
- return nil
-}
-
-func (expr *term) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- value, err := expr.Evaluate(ctx)
- if err != nil {
- return err
- }
- writer.WriteString(value.String())
- return nil
-}
-
-func (expr *power) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- value, err := expr.Evaluate(ctx)
- if err != nil {
- return err
- }
- writer.WriteString(value.String())
- return nil
-}
-
-func (expr *Expression) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
- v1, err := expr.expr1.Evaluate(ctx)
- if err != nil {
- return nil, err
- }
- if expr.expr2 != nil {
- switch expr.opToken.Val {
- case "and", "&&":
- if !v1.IsTrue() {
- return AsValue(false), nil
- } else {
- v2, err := expr.expr2.Evaluate(ctx)
- if err != nil {
- return nil, err
- }
- return AsValue(v2.IsTrue()), nil
- }
- case "or", "||":
- if v1.IsTrue() {
- return AsValue(true), nil
- } else {
- v2, err := expr.expr2.Evaluate(ctx)
- if err != nil {
- return nil, err
- }
- return AsValue(v2.IsTrue()), nil
- }
- default:
- return nil, ctx.Error(fmt.Sprintf("unimplemented: %s", expr.opToken.Val), expr.opToken)
- }
- } else {
- return v1, nil
- }
-}
-
-func (expr *relationalExpression) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
- v1, err := expr.expr1.Evaluate(ctx)
- if err != nil {
- return nil, err
- }
- if expr.expr2 != nil {
- v2, err := expr.expr2.Evaluate(ctx)
- if err != nil {
- return nil, err
- }
- switch expr.opToken.Val {
- case "<=":
- if v1.IsFloat() || v2.IsFloat() {
- return AsValue(v1.Float() <= v2.Float()), nil
- }
- if v1.IsTime() && v2.IsTime() {
- tm1, tm2 := v1.Time(), v2.Time()
- return AsValue(tm1.Before(tm2) || tm1.Equal(tm2)), nil
- }
- return AsValue(v1.Integer() <= v2.Integer()), nil
- case ">=":
- if v1.IsFloat() || v2.IsFloat() {
- return AsValue(v1.Float() >= v2.Float()), nil
- }
- if v1.IsTime() && v2.IsTime() {
- tm1, tm2 := v1.Time(), v2.Time()
- return AsValue(tm1.After(tm2) || tm1.Equal(tm2)), nil
- }
- return AsValue(v1.Integer() >= v2.Integer()), nil
- case "==":
- return AsValue(v1.EqualValueTo(v2)), nil
- case ">":
- if v1.IsFloat() || v2.IsFloat() {
- return AsValue(v1.Float() > v2.Float()), nil
- }
- if v1.IsTime() && v2.IsTime() {
- return AsValue(v1.Time().After(v2.Time())), nil
- }
- return AsValue(v1.Integer() > v2.Integer()), nil
- case "<":
- if v1.IsFloat() || v2.IsFloat() {
- return AsValue(v1.Float() < v2.Float()), nil
- }
- if v1.IsTime() && v2.IsTime() {
- return AsValue(v1.Time().Before(v2.Time())), nil
- }
- return AsValue(v1.Integer() < v2.Integer()), nil
- case "!=", "<>":
- return AsValue(!v1.EqualValueTo(v2)), nil
- case "in":
- return AsValue(v2.Contains(v1)), nil
- default:
- return nil, ctx.Error(fmt.Sprintf("unimplemented: %s", expr.opToken.Val), expr.opToken)
- }
- } else {
- return v1, nil
- }
-}
-
-func (expr *simpleExpression) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
- t1, err := expr.term1.Evaluate(ctx)
- if err != nil {
- return nil, err
- }
- result := t1
-
- if expr.negate {
- result = result.Negate()
- }
-
- if expr.negativeSign {
- if result.IsNumber() {
- switch {
- case result.IsFloat():
- result = AsValue(-1 * result.Float())
- case result.IsInteger():
- result = AsValue(-1 * result.Integer())
- default:
- return nil, ctx.Error("Operation between a number and a non-(float/integer) is not possible", nil)
- }
- } else {
- return nil, ctx.Error("Negative sign on a non-number expression", expr.GetPositionToken())
- }
- }
-
- if expr.term2 != nil {
- t2, err := expr.term2.Evaluate(ctx)
- if err != nil {
- return nil, err
- }
- switch expr.opToken.Val {
- case "+":
- if result.IsFloat() || t2.IsFloat() {
- // Result will be a float
- return AsValue(result.Float() + t2.Float()), nil
- }
- // Result will be an integer
- return AsValue(result.Integer() + t2.Integer()), nil
- case "-":
- if result.IsFloat() || t2.IsFloat() {
- // Result will be a float
- return AsValue(result.Float() - t2.Float()), nil
- }
- // Result will be an integer
- return AsValue(result.Integer() - t2.Integer()), nil
- default:
- return nil, ctx.Error("Unimplemented", expr.GetPositionToken())
- }
- }
-
- return result, nil
-}
-
-func (expr *term) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
- f1, err := expr.factor1.Evaluate(ctx)
- if err != nil {
- return nil, err
- }
- if expr.factor2 != nil {
- f2, err := expr.factor2.Evaluate(ctx)
- if err != nil {
- return nil, err
- }
- switch expr.opToken.Val {
- case "*":
- if f1.IsFloat() || f2.IsFloat() {
- // Result will be float
- return AsValue(f1.Float() * f2.Float()), nil
- }
- // Result will be int
- return AsValue(f1.Integer() * f2.Integer()), nil
- case "/":
- if f1.IsFloat() || f2.IsFloat() {
- // Result will be float
- return AsValue(f1.Float() / f2.Float()), nil
- }
- // Result will be int
- return AsValue(f1.Integer() / f2.Integer()), nil
- case "%":
- // Result will be int
- return AsValue(f1.Integer() % f2.Integer()), nil
- default:
- return nil, ctx.Error("unimplemented", expr.opToken)
- }
- } else {
- return f1, nil
- }
-}
-
-func (expr *power) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
- p1, err := expr.power1.Evaluate(ctx)
- if err != nil {
- return nil, err
- }
- if expr.power2 != nil {
- p2, err := expr.power2.Evaluate(ctx)
- if err != nil {
- return nil, err
- }
- return AsValue(math.Pow(p1.Float(), p2.Float())), nil
- }
- return p1, nil
-}
-
-func (p *Parser) parseFactor() (IEvaluator, *Error) {
- if p.Match(TokenSymbol, "(") != nil {
- expr, err := p.ParseExpression()
- if err != nil {
- return nil, err
- }
- if p.Match(TokenSymbol, ")") == nil {
- return nil, p.Error("Closing bracket expected after expression", nil)
- }
- return expr, nil
- }
-
- return p.parseVariableOrLiteralWithFilter()
-}
-
-func (p *Parser) parsePower() (IEvaluator, *Error) {
- pw := new(power)
-
- power1, err := p.parseFactor()
- if err != nil {
- return nil, err
- }
- pw.power1 = power1
-
- if p.Match(TokenSymbol, "^") != nil {
- power2, err := p.parsePower()
- if err != nil {
- return nil, err
- }
- pw.power2 = power2
- }
-
- if pw.power2 == nil {
- // Shortcut for faster evaluation
- return pw.power1, nil
- }
-
- return pw, nil
-}
-
-func (p *Parser) parseTerm() (IEvaluator, *Error) {
- returnTerm := new(term)
-
- factor1, err := p.parsePower()
- if err != nil {
- return nil, err
- }
- returnTerm.factor1 = factor1
-
- for p.PeekOne(TokenSymbol, "*", "/", "%") != nil {
- if returnTerm.opToken != nil {
- // Create new sub-term
- returnTerm = &term{
- factor1: returnTerm,
- }
- }
-
- op := p.Current()
- p.Consume()
-
- factor2, err := p.parsePower()
- if err != nil {
- return nil, err
- }
-
- returnTerm.opToken = op
- returnTerm.factor2 = factor2
- }
-
- if returnTerm.opToken == nil {
- // Shortcut for faster evaluation
- return returnTerm.factor1, nil
- }
-
- return returnTerm, nil
-}
-
-func (p *Parser) parseSimpleExpression() (IEvaluator, *Error) {
- expr := new(simpleExpression)
-
- if sign := p.MatchOne(TokenSymbol, "+", "-"); sign != nil {
- if sign.Val == "-" {
- expr.negativeSign = true
- }
- }
-
- if p.Match(TokenSymbol, "!") != nil || p.Match(TokenKeyword, "not") != nil {
- expr.negate = true
- }
-
- term1, err := p.parseTerm()
- if err != nil {
- return nil, err
- }
- expr.term1 = term1
-
- for p.PeekOne(TokenSymbol, "+", "-") != nil {
- if expr.opToken != nil {
- // New sub expr
- expr = &simpleExpression{
- term1: expr,
- }
- }
-
- op := p.Current()
- p.Consume()
-
- term2, err := p.parseTerm()
- if err != nil {
- return nil, err
- }
-
- expr.term2 = term2
- expr.opToken = op
- }
-
- if !expr.negate && !expr.negativeSign && expr.term2 == nil {
- // Shortcut for faster evaluation
- return expr.term1, nil
- }
-
- return expr, nil
-}
-
-func (p *Parser) parseRelationalExpression() (IEvaluator, *Error) {
- expr1, err := p.parseSimpleExpression()
- if err != nil {
- return nil, err
- }
-
- expr := &relationalExpression{
- expr1: expr1,
- }
-
- if t := p.MatchOne(TokenSymbol, "==", "<=", ">=", "!=", "<>", ">", "<"); t != nil {
- expr2, err := p.parseRelationalExpression()
- if err != nil {
- return nil, err
- }
- expr.opToken = t
- expr.expr2 = expr2
- } else if t := p.MatchOne(TokenKeyword, "in"); t != nil {
- expr2, err := p.parseSimpleExpression()
- if err != nil {
- return nil, err
- }
- expr.opToken = t
- expr.expr2 = expr2
- }
-
- if expr.expr2 == nil {
- // Shortcut for faster evaluation
- return expr.expr1, nil
- }
-
- return expr, nil
-}
-
-func (p *Parser) ParseExpression() (IEvaluator, *Error) {
- rexpr1, err := p.parseRelationalExpression()
- if err != nil {
- return nil, err
- }
-
- exp := &Expression{
- expr1: rexpr1,
- }
-
- if p.PeekOne(TokenSymbol, "&&", "||") != nil || p.PeekOne(TokenKeyword, "and", "or") != nil {
- op := p.Current()
- p.Consume()
- expr2, err := p.ParseExpression()
- if err != nil {
- return nil, err
- }
- exp.expr2 = expr2
- exp.opToken = op
- }
-
- if exp.expr2 == nil {
- // Shortcut for faster evaluation
- return exp.expr1, nil
- }
-
- return exp, nil
-}
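Note on the expression evaluator removed above: it is a textbook recursive-descent precedence chain. ParseExpression handles and/or, parseRelationalExpression the comparisons and 'in', parseSimpleExpression +/- and negation, parseTerm the *, / and % operators, parsePower the ^ operator, and parseFactor parentheses and literals. A minimal sketch of how that precedence surfaces through the library's public FromString/Execute helpers (standard pongo2 API, not shown in this diff):

```go
package main

import (
	"fmt"

	"github.com/flosch/pongo2"
)

func main() {
	// "*" binds tighter than "+" (parseTerm vs. parseSimpleExpression),
	// and parentheses force grouping via parseFactor.
	tpl := pongo2.Must(pongo2.FromString(
		`{{ 2 + 3 * 4 }} {{ (2 + 3) * 4 }} {% if 3 in items and 2 < 10 %}yes{% endif %}`))
	out, err := tpl.Execute(pongo2.Context{"items": []int{1, 2, 3}})
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // expected: "14 20 yes"
}
```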
diff --git a/vendor/github.com/flosch/pongo2/pongo2.go b/vendor/github.com/flosch/pongo2/pongo2.go
deleted file mode 100644
index eda3aa07..00000000
--- a/vendor/github.com/flosch/pongo2/pongo2.go
+++ /dev/null
@@ -1,14 +0,0 @@
-package pongo2
-
-// Version string
-const Version = "dev"
-
-// Must panics if a Template couldn't be parsed successfully. This is how you
-// would use it:
-// var baseTemplate = pongo2.Must(pongo2.FromFile("templates/base.html"))
-func Must(tpl *Template, err error) *Template {
- if err != nil {
- panic(err)
- }
- return tpl
-}
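Must is the compile-once-at-startup helper: a parse error panics immediately instead of surfacing on first use. A small usage sketch, assuming the upstream FromString/Execute entry points:

```go
package main

import (
	"fmt"

	"github.com/flosch/pongo2"
)

// Compiled once at package init; any syntax error panics at startup
// instead of on the first request that renders it.
var greetTpl = pongo2.Must(pongo2.FromString("Hello {{ name }}!"))

func main() {
	out, err := greetTpl.Execute(pongo2.Context{"name": "garm"})
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // expected: "Hello garm!"
}
```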
diff --git a/vendor/github.com/flosch/pongo2/tags.go b/vendor/github.com/flosch/pongo2/tags.go
deleted file mode 100644
index 710ee252..00000000
--- a/vendor/github.com/flosch/pongo2/tags.go
+++ /dev/null
@@ -1,133 +0,0 @@
-package pongo2
-
-/* Incomplete:
- -----------
-
- verbatim (only the "name" argument is missing for verbatim)
-
- Reconsideration:
- ----------------
-
- debug (reason: not sure what to output yet)
- regroup / Grouping on other properties (reason: maybe too python-specific; not sure how useful this would be in Go)
-
- The following built-in tags won't be added:
- --------------------------------------
-
- csrf_token (reason: web-framework specific)
- load (reason: python-specific)
- url (reason: web-framework specific)
-*/
-
-import (
- "fmt"
-)
-
-type INodeTag interface {
- INode
-}
-
-// This is the function signature of the tag's parser you will have
-// to implement in order to create a new tag.
-//
-// 'doc' provides access to the whole document while 'arguments'
-// provides access to the user's arguments to the tag:
-//
-// {% your_tag_name some "arguments" 123 %}
-//
-// 'start' will be the *Token with the tag's name in it (here: your_tag_name).
-//
-// Please see the Parser documentation on how to use the parser.
-// See RegisterTag()'s documentation for more information about
-// writing a tag as well.
-type TagParser func(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error)
-
-type tag struct {
- name string
- parser TagParser
-}
-
-var tags map[string]*tag
-
-func init() {
- tags = make(map[string]*tag)
-}
-
-// Registers a new tag. You usually want to call this
-// function in the tag's init() function:
-// http://golang.org/doc/effective_go.html#init
-//
-// See http://www.florian-schlachter.de/post/pongo2/ for more about
-// writing filters and tags.
-func RegisterTag(name string, parserFn TagParser) error {
- _, existing := tags[name]
- if existing {
- return fmt.Errorf("tag with name '%s' is already registered", name)
- }
- tags[name] = &tag{
- name: name,
- parser: parserFn,
- }
- return nil
-}
-
-// Replaces an already registered tag with a new implementation. Use this
-// function with caution since it allows you to change existing tag behaviour.
-func ReplaceTag(name string, parserFn TagParser) error {
- _, existing := tags[name]
- if !existing {
- return fmt.Errorf("tag with name '%s' does not exist (therefore cannot be overridden)", name)
- }
- tags[name] = &tag{
- name: name,
- parser: parserFn,
- }
- return nil
-}
-
-// Tag = "{%" IDENT ARGS "%}"
-func (p *Parser) parseTagElement() (INodeTag, *Error) {
- p.Consume() // consume "{%"
- tokenName := p.MatchType(TokenIdentifier)
-
- // Check for identifier
- if tokenName == nil {
- return nil, p.Error("Tag name must be an identifier.", nil)
- }
-
- // Check for the existing tag
- tag, exists := tags[tokenName.Val]
- if !exists {
- // Does not exist
- return nil, p.Error(fmt.Sprintf("Tag '%s' not found (or beginning tag not provided)", tokenName.Val), tokenName)
- }
-
- // Check sandbox tag restriction
- if _, isBanned := p.template.set.bannedTags[tokenName.Val]; isBanned {
- return nil, p.Error(fmt.Sprintf("Usage of tag '%s' is not allowed (sandbox restriction active).", tokenName.Val), tokenName)
- }
-
- var argsToken []*Token
- for p.Peek(TokenSymbol, "%}") == nil && p.Remaining() > 0 {
- // Add token to args
- argsToken = append(argsToken, p.Current())
- p.Consume() // next token
- }
-
- // EOF?
- if p.Remaining() == 0 {
- return nil, p.Error("Unexpectedly reached EOF, no tag end found.", p.lastToken)
- }
-
- p.Match(TokenSymbol, "%}")
-
- argParser := newParser(p.name, argsToken, p.template)
- if len(argsToken) == 0 {
- // This is done to have nice EOF error messages
- argParser.lastToken = tokenName
- }
-
- p.template.level++
- defer func() { p.template.level-- }()
- return tag.parser(p, tokenName, argParser)
-}
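RegisterTag together with the TagParser signature above is the entire extension surface: the parser consumes the tag's argument tokens and returns a node whose Execute writes output. A hedged sketch of a hypothetical {% shout %} tag; the tag name, the shoutNode type, and the upper-casing behavior are all invented for illustration:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/flosch/pongo2"
)

type shoutNode struct{ expr pongo2.IEvaluator }

// Execute evaluates the single argument expression and writes it upper-cased.
func (n *shoutNode) Execute(ctx *pongo2.ExecutionContext, w pongo2.TemplateWriter) *pongo2.Error {
	val, err := n.expr.Evaluate(ctx)
	if err != nil {
		return err
	}
	w.WriteString(strings.ToUpper(val.String()) + "!")
	return nil
}

func init() {
	pongo2.RegisterTag("shout", func(doc *pongo2.Parser, start *pongo2.Token, args *pongo2.Parser) (pongo2.INodeTag, *pongo2.Error) {
		expr, err := args.ParseExpression()
		if err != nil {
			return nil, err
		}
		if args.Remaining() > 0 {
			return nil, args.Error("shout takes exactly one expression.", nil)
		}
		return &shoutNode{expr: expr}, nil
	})
}

func main() {
	out, _ := pongo2.Must(pongo2.FromString(`{% shout user %}`)).Execute(pongo2.Context{"user": "garm"})
	fmt.Println(out) // expected: "GARM!"
}
```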
diff --git a/vendor/github.com/flosch/pongo2/tags_autoescape.go b/vendor/github.com/flosch/pongo2/tags_autoescape.go
deleted file mode 100644
index 590a1db3..00000000
--- a/vendor/github.com/flosch/pongo2/tags_autoescape.go
+++ /dev/null
@@ -1,52 +0,0 @@
-package pongo2
-
-type tagAutoescapeNode struct {
- wrapper *NodeWrapper
- autoescape bool
-}
-
-func (node *tagAutoescapeNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- old := ctx.Autoescape
- ctx.Autoescape = node.autoescape
-
- err := node.wrapper.Execute(ctx, writer)
- if err != nil {
- return err
- }
-
- ctx.Autoescape = old
-
- return nil
-}
-
-func tagAutoescapeParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- autoescapeNode := &tagAutoescapeNode{}
-
- wrapper, _, err := doc.WrapUntilTag("endautoescape")
- if err != nil {
- return nil, err
- }
- autoescapeNode.wrapper = wrapper
-
- modeToken := arguments.MatchType(TokenIdentifier)
- if modeToken == nil {
- return nil, arguments.Error("A mode is required for autoescape-tag.", nil)
- }
- if modeToken.Val == "on" {
- autoescapeNode.autoescape = true
- } else if modeToken.Val == "off" {
- autoescapeNode.autoescape = false
- } else {
- return nil, arguments.Error("Only 'on' or 'off' is valid as an autoescape-mode.", nil)
- }
-
- if arguments.Remaining() > 0 {
- return nil, arguments.Error("Malformed autoescape-tag arguments.", nil)
- }
-
- return autoescapeNode, nil
-}
-
-func init() {
- RegisterTag("autoescape", tagAutoescapeParser)
-}
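The node swaps ctx.Autoescape for the duration of the wrapped body, so escaping can be toggled per region. A sketch of the observable effect:

```go
package main

import (
	"fmt"

	"github.com/flosch/pongo2"
)

func main() {
	tpl := pongo2.Must(pongo2.FromString(
		`{% autoescape off %}{{ html }}{% endautoescape %} | {{ html }}`))
	out, _ := tpl.Execute(pongo2.Context{"html": "<b>hi</b>"})
	// expected: "<b>hi</b> | &lt;b&gt;hi&lt;/b&gt;"
	fmt.Println(out)
}
```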
diff --git a/vendor/github.com/flosch/pongo2/tags_block.go b/vendor/github.com/flosch/pongo2/tags_block.go
deleted file mode 100644
index 86145f32..00000000
--- a/vendor/github.com/flosch/pongo2/tags_block.go
+++ /dev/null
@@ -1,129 +0,0 @@
-package pongo2
-
-import (
- "bytes"
- "fmt"
-)
-
-type tagBlockNode struct {
- name string
-}
-
-func (node *tagBlockNode) getBlockWrappers(tpl *Template) []*NodeWrapper {
- nodeWrappers := make([]*NodeWrapper, 0)
- var t *NodeWrapper
-
- for tpl != nil {
- t = tpl.blocks[node.name]
- if t != nil {
- nodeWrappers = append(nodeWrappers, t)
- }
- tpl = tpl.child
- }
-
- return nodeWrappers
-}
-
-func (node *tagBlockNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- tpl := ctx.template
- if tpl == nil {
- panic("internal error: tpl == nil")
- }
-
- // Determine the block to execute
- blockWrappers := node.getBlockWrappers(tpl)
- lenBlockWrappers := len(blockWrappers)
-
- if lenBlockWrappers == 0 {
- return ctx.Error("internal error: len(block_wrappers) == 0 in tagBlockNode.Execute()", nil)
- }
-
- blockWrapper := blockWrappers[lenBlockWrappers-1]
- ctx.Private["block"] = tagBlockInformation{
- ctx: ctx,
- wrappers: blockWrappers[0 : lenBlockWrappers-1],
- }
- err := blockWrapper.Execute(ctx, writer)
- if err != nil {
- return err
- }
-
- return nil
-}
-
-type tagBlockInformation struct {
- ctx *ExecutionContext
- wrappers []*NodeWrapper
-}
-
-func (t tagBlockInformation) Super() string {
- lenWrappers := len(t.wrappers)
-
- if lenWrappers == 0 {
- return ""
- }
-
- superCtx := NewChildExecutionContext(t.ctx)
- superCtx.Private["block"] = tagBlockInformation{
- ctx: t.ctx,
- wrappers: t.wrappers[0 : lenWrappers-1],
- }
-
- blockWrapper := t.wrappers[lenWrappers-1]
- buf := bytes.NewBufferString("")
- err := blockWrapper.Execute(superCtx, &templateWriter{buf})
- if err != nil {
- return ""
- }
- return buf.String()
-}
-
-func tagBlockParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- if arguments.Count() == 0 {
- return nil, arguments.Error("Tag 'block' requires an identifier.", nil)
- }
-
- nameToken := arguments.MatchType(TokenIdentifier)
- if nameToken == nil {
- return nil, arguments.Error("First argument for tag 'block' must be an identifier.", nil)
- }
-
- if arguments.Remaining() != 0 {
- return nil, arguments.Error("Tag 'block' takes exactly 1 argument (an identifier).", nil)
- }
-
- wrapper, endtagargs, err := doc.WrapUntilTag("endblock")
- if err != nil {
- return nil, err
- }
- if endtagargs.Remaining() > 0 {
- endtagnameToken := endtagargs.MatchType(TokenIdentifier)
- if endtagnameToken != nil {
- if endtagnameToken.Val != nameToken.Val {
- return nil, endtagargs.Error(fmt.Sprintf("Name for 'endblock' must equal to 'block'-tag's name ('%s' != '%s').",
- nameToken.Val, endtagnameToken.Val), nil)
- }
- }
-
- if endtagnameToken == nil || endtagargs.Remaining() > 0 {
- return nil, endtagargs.Error("Either no or only one argument (identifier) allowed for 'endblock'.", nil)
- }
- }
-
- tpl := doc.template
- if tpl == nil {
- panic("internal error: tpl == nil")
- }
- _, hasBlock := tpl.blocks[nameToken.Val]
- if !hasBlock {
- tpl.blocks[nameToken.Val] = wrapper
- } else {
- return nil, arguments.Error(fmt.Sprintf("Block named '%s' already defined", nameToken.Val), nil)
- }
-
- return &tagBlockNode{name: nameToken.Val}, nil
-}
-
-func init() {
- RegisterTag("block", tagBlockParser)
-}
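getBlockWrappers walks from the root template down the child chain, so the most-derived override executes and {{ block.Super }} (backed by tagBlockInformation.Super) renders the next wrapper up. A sketch with throwaway files; the on-disk layout and relative-path resolution via the default loader are assumptions:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"

	"github.com/flosch/pongo2"
)

func main() {
	dir, _ := os.MkdirTemp("", "pongo2")
	defer os.RemoveAll(dir)
	os.WriteFile(filepath.Join(dir, "base.html"),
		[]byte(`<title>{% block title %}default{% endblock %}</title>`), 0o644)
	os.WriteFile(filepath.Join(dir, "child.html"),
		[]byte(`{% extends "base.html" %}{% block title %}custom ({{ block.Super }}){% endblock %}`), 0o644)

	tpl := pongo2.Must(pongo2.FromFile(filepath.Join(dir, "child.html")))
	out, _ := tpl.Execute(pongo2.Context{})
	fmt.Println(out) // expected: "<title>custom (default)</title>"
}
```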
diff --git a/vendor/github.com/flosch/pongo2/tags_comment.go b/vendor/github.com/flosch/pongo2/tags_comment.go
deleted file mode 100644
index 56a02ed9..00000000
--- a/vendor/github.com/flosch/pongo2/tags_comment.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package pongo2
-
-type tagCommentNode struct{}
-
-func (node *tagCommentNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- return nil
-}
-
-func tagCommentParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- commentNode := &tagCommentNode{}
-
- // TODO: Process the endtag's arguments (see django 'comment'-tag documentation)
- err := doc.SkipUntilTag("endcomment")
- if err != nil {
- return nil, err
- }
-
- if arguments.Count() != 0 {
- return nil, arguments.Error("Tag 'comment' does not take any argument.", nil)
- }
-
- return commentNode, nil
-}
-
-func init() {
- RegisterTag("comment", tagCommentParser)
-}
diff --git a/vendor/github.com/flosch/pongo2/tags_cycle.go b/vendor/github.com/flosch/pongo2/tags_cycle.go
deleted file mode 100644
index ffbd254e..00000000
--- a/vendor/github.com/flosch/pongo2/tags_cycle.go
+++ /dev/null
@@ -1,106 +0,0 @@
-package pongo2
-
-type tagCycleValue struct {
- node *tagCycleNode
- value *Value
-}
-
-type tagCycleNode struct {
- position *Token
- args []IEvaluator
- idx int
- asName string
- silent bool
-}
-
-func (cv *tagCycleValue) String() string {
- return cv.value.String()
-}
-
-func (node *tagCycleNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- item := node.args[node.idx%len(node.args)]
- node.idx++
-
- val, err := item.Evaluate(ctx)
- if err != nil {
- return err
- }
-
- if t, ok := val.Interface().(*tagCycleValue); ok {
- // {% cycle "test1" "test2"
- // {% cycle cycleitem %}
-
- // Update the cycle value with next value
- item := t.node.args[t.node.idx%len(t.node.args)]
- t.node.idx++
-
- val, err := item.Evaluate(ctx)
- if err != nil {
- return err
- }
-
- t.value = val
-
- if !t.node.silent {
- writer.WriteString(val.String())
- }
- } else {
- // Regular call
-
- cycleValue := &tagCycleValue{
- node: node,
- value: val,
- }
-
- if node.asName != "" {
- ctx.Private[node.asName] = cycleValue
- }
- if !node.silent {
- writer.WriteString(val.String())
- }
- }
-
- return nil
-}
-
-// HINT: We're not supporting the old comma-separated argument style (list of expressions)
-func tagCycleParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- cycleNode := &tagCycleNode{
- position: start,
- }
-
- for arguments.Remaining() > 0 {
- node, err := arguments.ParseExpression()
- if err != nil {
- return nil, err
- }
- cycleNode.args = append(cycleNode.args, node)
-
- if arguments.MatchOne(TokenKeyword, "as") != nil {
- // as
-
- nameToken := arguments.MatchType(TokenIdentifier)
- if nameToken == nil {
- return nil, arguments.Error("Name (identifier) expected after 'as'.", nil)
- }
- cycleNode.asName = nameToken.Val
-
- if arguments.MatchOne(TokenIdentifier, "silent") != nil {
- cycleNode.silent = true
- }
-
- // Now we're finished
- break
- }
- }
-
- if arguments.Remaining() > 0 {
- return nil, arguments.Error("Malformed cycle-tag.", nil)
- }
-
- return cycleNode, nil
-}
-
-func init() {
- RegisterTag("cycle", tagCycleParser)
-}
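The cycle node keeps its index across executions, so each evaluation emits the next argument and wraps around; the classic use is alternating row classes. A sketch:

```go
package main

import (
	"fmt"

	"github.com/flosch/pongo2"
)

func main() {
	tpl := pongo2.Must(pongo2.FromString(
		`{% for row in rows %}{% cycle "odd" "even" %} {% endfor %}`))
	out, _ := tpl.Execute(pongo2.Context{"rows": []int{1, 2, 3}})
	fmt.Println(out) // expected: "odd even odd "
}
```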
diff --git a/vendor/github.com/flosch/pongo2/tags_extends.go b/vendor/github.com/flosch/pongo2/tags_extends.go
deleted file mode 100644
index 5771020a..00000000
--- a/vendor/github.com/flosch/pongo2/tags_extends.go
+++ /dev/null
@@ -1,52 +0,0 @@
-package pongo2
-
-type tagExtendsNode struct {
- filename string
-}
-
-func (node *tagExtendsNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- return nil
-}
-
-func tagExtendsParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- extendsNode := &tagExtendsNode{}
-
- if doc.template.level > 1 {
- return nil, arguments.Error("The 'extends' tag can only defined on root level.", start)
- }
-
- if doc.template.parent != nil {
- // Already one parent
- return nil, arguments.Error("This template has already one parent.", start)
- }
-
- if filenameToken := arguments.MatchType(TokenString); filenameToken != nil {
- // prepared, static template
-
- // Get parent's filename
- parentFilename := doc.template.set.resolveFilename(doc.template, filenameToken.Val)
-
- // Parse the parent
- parentTemplate, err := doc.template.set.FromFile(parentFilename)
- if err != nil {
- return nil, err.(*Error)
- }
-
- // Keep track of things
- parentTemplate.child = doc.template
- doc.template.parent = parentTemplate
- extendsNode.filename = parentFilename
- } else {
- return nil, arguments.Error("Tag 'extends' requires a template filename as string.", nil)
- }
-
- if arguments.Remaining() > 0 {
- return nil, arguments.Error("Tag 'extends' does only take 1 argument.", nil)
- }
-
- return extendsNode, nil
-}
-
-func init() {
- RegisterTag("extends", tagExtendsParser)
-}
diff --git a/vendor/github.com/flosch/pongo2/tags_filter.go b/vendor/github.com/flosch/pongo2/tags_filter.go
deleted file mode 100644
index b38fd929..00000000
--- a/vendor/github.com/flosch/pongo2/tags_filter.go
+++ /dev/null
@@ -1,95 +0,0 @@
-package pongo2
-
-import (
- "bytes"
-)
-
-type nodeFilterCall struct {
- name string
- paramExpr IEvaluator
-}
-
-type tagFilterNode struct {
- position *Token
- bodyWrapper *NodeWrapper
- filterChain []*nodeFilterCall
-}
-
-func (node *tagFilterNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- temp := bytes.NewBuffer(make([]byte, 0, 1024)) // 1 KiB size
-
- err := node.bodyWrapper.Execute(ctx, temp)
- if err != nil {
- return err
- }
-
- value := AsValue(temp.String())
-
- for _, call := range node.filterChain {
- var param *Value
- if call.paramExpr != nil {
- param, err = call.paramExpr.Evaluate(ctx)
- if err != nil {
- return err
- }
- } else {
- param = AsValue(nil)
- }
- value, err = ApplyFilter(call.name, value, param)
- if err != nil {
- return ctx.Error(err.Error(), node.position)
- }
- }
-
- writer.WriteString(value.String())
-
- return nil
-}
-
-func tagFilterParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- filterNode := &tagFilterNode{
- position: start,
- }
-
- wrapper, _, err := doc.WrapUntilTag("endfilter")
- if err != nil {
- return nil, err
- }
- filterNode.bodyWrapper = wrapper
-
- for arguments.Remaining() > 0 {
- filterCall := &nodeFilterCall{}
-
- nameToken := arguments.MatchType(TokenIdentifier)
- if nameToken == nil {
- return nil, arguments.Error("Expected a filter name (identifier).", nil)
- }
- filterCall.name = nameToken.Val
-
- if arguments.MatchOne(TokenSymbol, ":") != nil {
- // Filter parameter
- // NOTICE: we can't use ParseExpression() here, because it would parse the next filter "|..." as well in the argument list
- expr, err := arguments.parseVariableOrLiteral()
- if err != nil {
- return nil, err
- }
- filterCall.paramExpr = expr
- }
-
- filterNode.filterChain = append(filterNode.filterChain, filterCall)
-
- if arguments.MatchOne(TokenSymbol, "|") == nil {
- break
- }
- }
-
- if arguments.Remaining() > 0 {
- return nil, arguments.Error("Malformed filter-tag arguments.", nil)
- }
-
- return filterNode, nil
-}
-
-func init() {
- RegisterTag("filter", tagFilterParser)
-}
diff --git a/vendor/github.com/flosch/pongo2/tags_firstof.go b/vendor/github.com/flosch/pongo2/tags_firstof.go
deleted file mode 100644
index 5b2888e2..00000000
--- a/vendor/github.com/flosch/pongo2/tags_firstof.go
+++ /dev/null
@@ -1,49 +0,0 @@
-package pongo2
-
-type tagFirstofNode struct {
- position *Token
- args []IEvaluator
-}
-
-func (node *tagFirstofNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- for _, arg := range node.args {
- val, err := arg.Evaluate(ctx)
- if err != nil {
- return err
- }
-
- if val.IsTrue() {
- if ctx.Autoescape && !arg.FilterApplied("safe") {
- val, err = ApplyFilter("escape", val, nil)
- if err != nil {
- return err
- }
- }
-
- writer.WriteString(val.String())
- return nil
- }
- }
-
- return nil
-}
-
-func tagFirstofParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- firstofNode := &tagFirstofNode{
- position: start,
- }
-
- for arguments.Remaining() > 0 {
- node, err := arguments.ParseExpression()
- if err != nil {
- return nil, err
- }
- firstofNode.args = append(firstofNode.args, node)
- }
-
- return firstofNode, nil
-}
-
-func init() {
- RegisterTag("firstof", tagFirstofParser)
-}
diff --git a/vendor/github.com/flosch/pongo2/tags_for.go b/vendor/github.com/flosch/pongo2/tags_for.go
deleted file mode 100644
index 5b0b5554..00000000
--- a/vendor/github.com/flosch/pongo2/tags_for.go
+++ /dev/null
@@ -1,159 +0,0 @@
-package pongo2
-
-type tagForNode struct {
- key string
- value string // only for maps: for key, value in map
- objectEvaluator IEvaluator
- reversed bool
- sorted bool
-
- bodyWrapper *NodeWrapper
- emptyWrapper *NodeWrapper
-}
-
-type tagForLoopInformation struct {
- Counter int
- Counter0 int
- Revcounter int
- Revcounter0 int
- First bool
- Last bool
- Parentloop *tagForLoopInformation
-}
-
-func (node *tagForNode) Execute(ctx *ExecutionContext, writer TemplateWriter) (forError *Error) {
- // Backup forloop (as parentloop in public context), key-name and value-name
- forCtx := NewChildExecutionContext(ctx)
- parentloop := forCtx.Private["forloop"]
-
- // Create loop struct
- loopInfo := &tagForLoopInformation{
- First: true,
- }
-
- // Is it a loop in a loop?
- if parentloop != nil {
- loopInfo.Parentloop = parentloop.(*tagForLoopInformation)
- }
-
- // Register loopInfo in public context
- forCtx.Private["forloop"] = loopInfo
-
- obj, err := node.objectEvaluator.Evaluate(forCtx)
- if err != nil {
- return err
- }
-
- obj.IterateOrder(func(idx, count int, key, value *Value) bool {
- // There's something to iterate over (correct type and at least 1 item)
-
- // Update loop infos and public context
- forCtx.Private[node.key] = key
- if value != nil {
- forCtx.Private[node.value] = value
- }
- loopInfo.Counter = idx + 1
- loopInfo.Counter0 = idx
- if idx == 1 {
- loopInfo.First = false
- }
- if idx+1 == count {
- loopInfo.Last = true
- }
- loopInfo.Revcounter = count - idx // TODO: Not sure about this, have to look it up
- loopInfo.Revcounter0 = count - (idx + 1) // TODO: Not sure about this, have to look it up
-
- // Render elements with updated context
- err := node.bodyWrapper.Execute(forCtx, writer)
- if err != nil {
- forError = err
- return false
- }
- return true
- }, func() {
- // Nothing to iterate over (maybe wrong type or no items)
- if node.emptyWrapper != nil {
- err := node.emptyWrapper.Execute(forCtx, writer)
- if err != nil {
- forError = err
- }
- }
- }, node.reversed, node.sorted)
-
- return forError
-}
-
-func tagForParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- forNode := &tagForNode{}
-
- // Arguments parsing
- var valueToken *Token
- keyToken := arguments.MatchType(TokenIdentifier)
- if keyToken == nil {
- return nil, arguments.Error("Expected an key identifier as first argument for 'for'-tag", nil)
- }
-
- if arguments.Match(TokenSymbol, ",") != nil {
- // Value name is provided
- valueToken = arguments.MatchType(TokenIdentifier)
- if valueToken == nil {
- return nil, arguments.Error("Value name must be an identifier.", nil)
- }
- }
-
- if arguments.Match(TokenKeyword, "in") == nil {
- return nil, arguments.Error("Expected keyword 'in'.", nil)
- }
-
- objectEvaluator, err := arguments.ParseExpression()
- if err != nil {
- return nil, err
- }
- forNode.objectEvaluator = objectEvaluator
- forNode.key = keyToken.Val
- if valueToken != nil {
- forNode.value = valueToken.Val
- }
-
- if arguments.MatchOne(TokenIdentifier, "reversed") != nil {
- forNode.reversed = true
- }
-
- if arguments.MatchOne(TokenIdentifier, "sorted") != nil {
- forNode.sorted = true
- }
-
- if arguments.Remaining() > 0 {
- return nil, arguments.Error("Malformed for-loop arguments.", nil)
- }
-
- // Body wrapping
- wrapper, endargs, err := doc.WrapUntilTag("empty", "endfor")
- if err != nil {
- return nil, err
- }
- forNode.bodyWrapper = wrapper
-
- if endargs.Count() > 0 {
- return nil, endargs.Error("Arguments not allowed here.", nil)
- }
-
- if wrapper.Endtag == "empty" {
- // if there's an 'empty' block in the for-loop, we need that block as well
- wrapper, endargs, err = doc.WrapUntilTag("endfor")
- if err != nil {
- return nil, err
- }
- forNode.emptyWrapper = wrapper
-
- if endargs.Count() > 0 {
- return nil, endargs.Error("Arguments not allowed here.", nil)
- }
- }
-
- return forNode, nil
-}
-
-func init() {
- RegisterTag("for", tagForParser)
-}
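Besides the key/value identifiers, the loop publishes a forloop struct (Counter, Counter0, Revcounter, Revcounter0, First, Last, Parentloop) into the child context. A sketch:

```go
package main

import (
	"fmt"

	"github.com/flosch/pongo2"
)

func main() {
	tpl := pongo2.Must(pongo2.FromString(
		`{% for item in items %}{{ forloop.Counter }}:{{ item }}{% if not forloop.Last %},{% endif %}{% endfor %}`))
	out, _ := tpl.Execute(pongo2.Context{"items": []string{"a", "b", "c"}})
	fmt.Println(out) // expected: "1:a,2:b,3:c"
}
```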
diff --git a/vendor/github.com/flosch/pongo2/tags_if.go b/vendor/github.com/flosch/pongo2/tags_if.go
deleted file mode 100644
index 3eeaf3b4..00000000
--- a/vendor/github.com/flosch/pongo2/tags_if.go
+++ /dev/null
@@ -1,76 +0,0 @@
-package pongo2
-
-type tagIfNode struct {
- conditions []IEvaluator
- wrappers []*NodeWrapper
-}
-
-func (node *tagIfNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- for i, condition := range node.conditions {
- result, err := condition.Evaluate(ctx)
- if err != nil {
- return err
- }
-
- if result.IsTrue() {
- return node.wrappers[i].Execute(ctx, writer)
- }
- // Last condition?
- if len(node.conditions) == i+1 && len(node.wrappers) > i+1 {
- return node.wrappers[i+1].Execute(ctx, writer)
- }
- }
- return nil
-}
-
-func tagIfParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- ifNode := &tagIfNode{}
-
- // Parse first and main IF condition
- condition, err := arguments.ParseExpression()
- if err != nil {
- return nil, err
- }
- ifNode.conditions = append(ifNode.conditions, condition)
-
- if arguments.Remaining() > 0 {
- return nil, arguments.Error("If-condition is malformed.", nil)
- }
-
- // Check the rest
- for {
- wrapper, tagArgs, err := doc.WrapUntilTag("elif", "else", "endif")
- if err != nil {
- return nil, err
- }
- ifNode.wrappers = append(ifNode.wrappers, wrapper)
-
- if wrapper.Endtag == "elif" {
- // elif can take a condition
- condition, err = tagArgs.ParseExpression()
- if err != nil {
- return nil, err
- }
- ifNode.conditions = append(ifNode.conditions, condition)
-
- if tagArgs.Remaining() > 0 {
- return nil, tagArgs.Error("Elif-condition is malformed.", nil)
- }
- } else {
- if tagArgs.Count() > 0 {
- // else/endif can't take any conditions
- return nil, tagArgs.Error("Arguments not allowed here.", nil)
- }
- }
-
- if wrapper.Endtag == "endif" {
- break
- }
- }
-
- return ifNode, nil
-}
-
-func init() {
- RegisterTag("if", tagIfParser)
-}
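conditions[i] pairs with wrappers[i], and a trailing else shows up as one extra wrapper, which is what the len(node.wrappers) > i+1 check in Execute serves. A sketch:

```go
package main

import (
	"fmt"

	"github.com/flosch/pongo2"
)

func main() {
	tpl := pongo2.Must(pongo2.FromString(
		`{% if n > 10 %}big{% elif n > 5 %}medium{% else %}small{% endif %}`))
	for _, n := range []int{42, 7, 1} {
		out, _ := tpl.Execute(pongo2.Context{"n": n})
		fmt.Println(out) // expected: "big", then "medium", then "small"
	}
}
```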
diff --git a/vendor/github.com/flosch/pongo2/tags_ifchanged.go b/vendor/github.com/flosch/pongo2/tags_ifchanged.go
deleted file mode 100644
index 45296a0a..00000000
--- a/vendor/github.com/flosch/pongo2/tags_ifchanged.go
+++ /dev/null
@@ -1,116 +0,0 @@
-package pongo2
-
-import (
- "bytes"
-)
-
-type tagIfchangedNode struct {
- watchedExpr []IEvaluator
- lastValues []*Value
- lastContent []byte
- thenWrapper *NodeWrapper
- elseWrapper *NodeWrapper
-}
-
-func (node *tagIfchangedNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- if len(node.watchedExpr) == 0 {
- // Check against own rendered body
-
- buf := bytes.NewBuffer(make([]byte, 0, 1024)) // 1 KiB
- err := node.thenWrapper.Execute(ctx, buf)
- if err != nil {
- return err
- }
-
- bufBytes := buf.Bytes()
- if !bytes.Equal(node.lastContent, bufBytes) {
- // Rendered content changed, output it
- writer.Write(bufBytes)
- node.lastContent = bufBytes
- }
- } else {
- nowValues := make([]*Value, 0, len(node.watchedExpr))
- for _, expr := range node.watchedExpr {
- val, err := expr.Evaluate(ctx)
- if err != nil {
- return err
- }
- nowValues = append(nowValues, val)
- }
-
- // Compare old to new values now
- changed := len(node.lastValues) == 0
-
- for idx, oldVal := range node.lastValues {
- if !oldVal.EqualValueTo(nowValues[idx]) {
- changed = true
- break // we can stop here because ONE value changed
- }
- }
-
- node.lastValues = nowValues
-
- if changed {
- // Render thenWrapper
- err := node.thenWrapper.Execute(ctx, writer)
- if err != nil {
- return err
- }
- } else {
- // Render elseWrapper
- err := node.elseWrapper.Execute(ctx, writer)
- if err != nil {
- return err
- }
- }
- }
-
- return nil
-}
-
-func tagIfchangedParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- ifchangedNode := &tagIfchangedNode{}
-
- for arguments.Remaining() > 0 {
- // Parse condition
- expr, err := arguments.ParseExpression()
- if err != nil {
- return nil, err
- }
- ifchangedNode.watchedExpr = append(ifchangedNode.watchedExpr, expr)
- }
-
- if arguments.Remaining() > 0 {
- return nil, arguments.Error("Ifchanged-arguments are malformed.", nil)
- }
-
- // Wrap then/else-blocks
- wrapper, endargs, err := doc.WrapUntilTag("else", "endifchanged")
- if err != nil {
- return nil, err
- }
- ifchangedNode.thenWrapper = wrapper
-
- if endargs.Count() > 0 {
- return nil, endargs.Error("Arguments not allowed here.", nil)
- }
-
- if wrapper.Endtag == "else" {
- // if there's an else in the if-statement, we need the else-Block as well
- wrapper, endargs, err = doc.WrapUntilTag("endifchanged")
- if err != nil {
- return nil, err
- }
- ifchangedNode.elseWrapper = wrapper
-
- if endargs.Count() > 0 {
- return nil, endargs.Error("Arguments not allowed here.", nil)
- }
- }
-
- return ifchangedNode, nil
-}
-
-func init() {
- RegisterTag("ifchanged", tagIfchangedParser)
-}
diff --git a/vendor/github.com/flosch/pongo2/tags_ifequal.go b/vendor/github.com/flosch/pongo2/tags_ifequal.go
deleted file mode 100644
index 103f1c7b..00000000
--- a/vendor/github.com/flosch/pongo2/tags_ifequal.go
+++ /dev/null
@@ -1,78 +0,0 @@
-package pongo2
-
-type tagIfEqualNode struct {
- var1, var2 IEvaluator
- thenWrapper *NodeWrapper
- elseWrapper *NodeWrapper
-}
-
-func (node *tagIfEqualNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- r1, err := node.var1.Evaluate(ctx)
- if err != nil {
- return err
- }
- r2, err := node.var2.Evaluate(ctx)
- if err != nil {
- return err
- }
-
- result := r1.EqualValueTo(r2)
-
- if result {
- return node.thenWrapper.Execute(ctx, writer)
- }
- if node.elseWrapper != nil {
- return node.elseWrapper.Execute(ctx, writer)
- }
- return nil
-}
-
-func tagIfEqualParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- ifequalNode := &tagIfEqualNode{}
-
- // Parse two expressions
- var1, err := arguments.ParseExpression()
- if err != nil {
- return nil, err
- }
- var2, err := arguments.ParseExpression()
- if err != nil {
- return nil, err
- }
- ifequalNode.var1 = var1
- ifequalNode.var2 = var2
-
- if arguments.Remaining() > 0 {
- return nil, arguments.Error("ifequal only takes 2 arguments.", nil)
- }
-
- // Wrap then/else-blocks
- wrapper, endargs, err := doc.WrapUntilTag("else", "endifequal")
- if err != nil {
- return nil, err
- }
- ifequalNode.thenWrapper = wrapper
-
- if endargs.Count() > 0 {
- return nil, endargs.Error("Arguments not allowed here.", nil)
- }
-
- if wrapper.Endtag == "else" {
- // if there's an else in the if-statement, we need the else-Block as well
- wrapper, endargs, err = doc.WrapUntilTag("endifequal")
- if err != nil {
- return nil, err
- }
- ifequalNode.elseWrapper = wrapper
-
- if endargs.Count() > 0 {
- return nil, endargs.Error("Arguments not allowed here.", nil)
- }
- }
-
- return ifequalNode, nil
-}
-
-func init() {
- RegisterTag("ifequal", tagIfEqualParser)
-}
diff --git a/vendor/github.com/flosch/pongo2/tags_ifnotequal.go b/vendor/github.com/flosch/pongo2/tags_ifnotequal.go
deleted file mode 100644
index 0d287d34..00000000
--- a/vendor/github.com/flosch/pongo2/tags_ifnotequal.go
+++ /dev/null
@@ -1,78 +0,0 @@
-package pongo2
-
-type tagIfNotEqualNode struct {
- var1, var2 IEvaluator
- thenWrapper *NodeWrapper
- elseWrapper *NodeWrapper
-}
-
-func (node *tagIfNotEqualNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- r1, err := node.var1.Evaluate(ctx)
- if err != nil {
- return err
- }
- r2, err := node.var2.Evaluate(ctx)
- if err != nil {
- return err
- }
-
- result := !r1.EqualValueTo(r2)
-
- if result {
- return node.thenWrapper.Execute(ctx, writer)
- }
- if node.elseWrapper != nil {
- return node.elseWrapper.Execute(ctx, writer)
- }
- return nil
-}
-
-func tagIfNotEqualParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- ifnotequalNode := &tagIfNotEqualNode{}
-
- // Parse two expressions
- var1, err := arguments.ParseExpression()
- if err != nil {
- return nil, err
- }
- var2, err := arguments.ParseExpression()
- if err != nil {
- return nil, err
- }
- ifnotequalNode.var1 = var1
- ifnotequalNode.var2 = var2
-
- if arguments.Remaining() > 0 {
- return nil, arguments.Error("ifequal only takes 2 arguments.", nil)
- }
-
- // Wrap then/else-blocks
- wrapper, endargs, err := doc.WrapUntilTag("else", "endifnotequal")
- if err != nil {
- return nil, err
- }
- ifnotequalNode.thenWrapper = wrapper
-
- if endargs.Count() > 0 {
- return nil, endargs.Error("Arguments not allowed here.", nil)
- }
-
- if wrapper.Endtag == "else" {
- // if there's an else in the if-statement, we need the else-Block as well
- wrapper, endargs, err = doc.WrapUntilTag("endifnotequal")
- if err != nil {
- return nil, err
- }
- ifnotequalNode.elseWrapper = wrapper
-
- if endargs.Count() > 0 {
- return nil, endargs.Error("Arguments not allowed here.", nil)
- }
- }
-
- return ifnotequalNode, nil
-}
-
-func init() {
- RegisterTag("ifnotequal", tagIfNotEqualParser)
-}
diff --git a/vendor/github.com/flosch/pongo2/tags_import.go b/vendor/github.com/flosch/pongo2/tags_import.go
deleted file mode 100644
index 7e0d6a01..00000000
--- a/vendor/github.com/flosch/pongo2/tags_import.go
+++ /dev/null
@@ -1,84 +0,0 @@
-package pongo2
-
-import (
- "fmt"
-)
-
-type tagImportNode struct {
- position *Token
- filename string
- macros map[string]*tagMacroNode // alias/name -> macro instance
-}
-
-func (node *tagImportNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- for name, macro := range node.macros {
- func(name string, macro *tagMacroNode) {
- ctx.Private[name] = func(args ...*Value) *Value {
- return macro.call(ctx, args...)
- }
- }(name, macro)
- }
- return nil
-}
-
-func tagImportParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- importNode := &tagImportNode{
- position: start,
- macros: make(map[string]*tagMacroNode),
- }
-
- filenameToken := arguments.MatchType(TokenString)
- if filenameToken == nil {
- return nil, arguments.Error("Import-tag needs a filename as string.", nil)
- }
-
- importNode.filename = doc.template.set.resolveFilename(doc.template, filenameToken.Val)
-
- if arguments.Remaining() == 0 {
- return nil, arguments.Error("You must at least specify one macro to import.", nil)
- }
-
- // Compile the given template
- tpl, err := doc.template.set.FromFile(importNode.filename)
- if err != nil {
- return nil, err.(*Error).updateFromTokenIfNeeded(doc.template, start)
- }
-
- for arguments.Remaining() > 0 {
- macroNameToken := arguments.MatchType(TokenIdentifier)
- if macroNameToken == nil {
- return nil, arguments.Error("Expected macro name (identifier).", nil)
- }
-
- asName := macroNameToken.Val
- if arguments.Match(TokenKeyword, "as") != nil {
- aliasToken := arguments.MatchType(TokenIdentifier)
- if aliasToken == nil {
- return nil, arguments.Error("Expected macro alias name (identifier).", nil)
- }
- asName = aliasToken.Val
- }
-
- macroInstance, has := tpl.exportedMacros[macroNameToken.Val]
- if !has {
- return nil, arguments.Error(fmt.Sprintf("Macro '%s' not found (or not exported) in '%s'.", macroNameToken.Val,
- importNode.filename), macroNameToken)
- }
-
- importNode.macros[asName] = macroInstance
-
- if arguments.Remaining() == 0 {
- break
- }
-
- if arguments.Match(TokenSymbol, ",") == nil {
- return nil, arguments.Error("Expected ','.", nil)
- }
- }
-
- return importNode, nil
-}
-
-func init() {
- RegisterTag("import", tagImportParser)
-}
diff --git a/vendor/github.com/flosch/pongo2/tags_include.go b/vendor/github.com/flosch/pongo2/tags_include.go
deleted file mode 100644
index 6d619fda..00000000
--- a/vendor/github.com/flosch/pongo2/tags_include.go
+++ /dev/null
@@ -1,146 +0,0 @@
-package pongo2
-
-type tagIncludeNode struct {
- tpl *Template
- filenameEvaluator IEvaluator
- lazy bool
- only bool
- filename string
- withPairs map[string]IEvaluator
- ifExists bool
-}
-
-func (node *tagIncludeNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- // Building the context for the template
- includeCtx := make(Context)
-
- // Fill the context with all data from the parent
- if !node.only {
- includeCtx.Update(ctx.Public)
- includeCtx.Update(ctx.Private)
- }
-
- // Put all custom with-pairs into the context
- for key, value := range node.withPairs {
- val, err := value.Evaluate(ctx)
- if err != nil {
- return err
- }
- includeCtx[key] = val
- }
-
- // Execute the template
- if node.lazy {
- // Evaluate the filename
- filename, err := node.filenameEvaluator.Evaluate(ctx)
- if err != nil {
- return err
- }
-
- if filename.String() == "" {
- return ctx.Error("Filename for 'include'-tag evaluated to an empty string.", nil)
- }
-
- // Get include-filename
- includedFilename := ctx.template.set.resolveFilename(ctx.template, filename.String())
-
- includedTpl, err2 := ctx.template.set.FromFile(includedFilename)
- if err2 != nil {
- // if this is ReadFile error, and "if_exists" flag is enabled
- if node.ifExists && err2.(*Error).Sender == "fromfile" {
- return nil
- }
- return err2.(*Error)
- }
- err2 = includedTpl.ExecuteWriter(includeCtx, writer)
- if err2 != nil {
- return err2.(*Error)
- }
- return nil
- }
- // Template is already parsed with static filename
- err := node.tpl.ExecuteWriter(includeCtx, writer)
- if err != nil {
- return err.(*Error)
- }
- return nil
-}
-
-type tagIncludeEmptyNode struct{}
-
-func (node *tagIncludeEmptyNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- return nil
-}
-
-func tagIncludeParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- includeNode := &tagIncludeNode{
- withPairs: make(map[string]IEvaluator),
- }
-
- if filenameToken := arguments.MatchType(TokenString); filenameToken != nil {
- // prepared, static template
-
- // "if_exists" flag
- ifExists := arguments.Match(TokenIdentifier, "if_exists") != nil
-
- // Get include-filename
- includedFilename := doc.template.set.resolveFilename(doc.template, filenameToken.Val)
-
- // Parse the parent
- includeNode.filename = includedFilename
- includedTpl, err := doc.template.set.FromFile(includedFilename)
- if err != nil {
- // if this is ReadFile error, and "if_exists" token presents we should create and empty node
- if err.(*Error).Sender == "fromfile" && ifExists {
- return &tagIncludeEmptyNode{}, nil
- }
- return nil, err.(*Error).updateFromTokenIfNeeded(doc.template, filenameToken)
- }
- includeNode.tpl = includedTpl
- } else {
- // Not a string, so the user wants lazy evaluation (slower, but possible)
- filenameEvaluator, err := arguments.ParseExpression()
- if err != nil {
- return nil, err.updateFromTokenIfNeeded(doc.template, filenameToken)
- }
- includeNode.filenameEvaluator = filenameEvaluator
- includeNode.lazy = true
- includeNode.ifExists = arguments.Match(TokenIdentifier, "if_exists") != nil // "if_exists" flag
- }
-
- // After having parsed the filename, parse the 'with' and 'only' options
- if arguments.Match(TokenIdentifier, "with") != nil {
- for arguments.Remaining() > 0 {
- // We have at least one key=expr pair (because of starting "with")
- keyToken := arguments.MatchType(TokenIdentifier)
- if keyToken == nil {
- return nil, arguments.Error("Expected an identifier", nil)
- }
- if arguments.Match(TokenSymbol, "=") == nil {
- return nil, arguments.Error("Expected '='.", nil)
- }
- valueExpr, err := arguments.ParseExpression()
- if err != nil {
- return nil, err.updateFromTokenIfNeeded(doc.template, keyToken)
- }
-
- includeNode.withPairs[keyToken.Val] = valueExpr
-
- // Only?
- if arguments.Match(TokenIdentifier, "only") != nil {
- includeNode.only = true
- break // stop parsing arguments because it's the last option
- }
- }
- }
-
- if arguments.Remaining() > 0 {
- return nil, arguments.Error("Malformed 'include'-tag arguments.", nil)
- }
-
- return includeNode, nil
-}
-
-func init() {
- RegisterTag("include", tagIncludeParser)
-}
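with adds explicit bindings to the included template's context, and only drops everything inherited from the parent. A sketch with a throwaway file pair; the file names and loader path resolution are assumptions:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"

	"github.com/flosch/pongo2"
)

func main() {
	dir, _ := os.MkdirTemp("", "pongo2")
	defer os.RemoveAll(dir)
	os.WriteFile(filepath.Join(dir, "row.html"), []byte(`Hi {{ user }}`), 0o644)
	os.WriteFile(filepath.Join(dir, "page.html"),
		[]byte(`{% include "row.html" with user="admin" only %}`), 0o644)

	tpl := pongo2.Must(pongo2.FromFile(filepath.Join(dir, "page.html")))
	// "only" means the parent's "user" binding is not inherited.
	out, _ := tpl.Execute(pongo2.Context{"user": "ignored"})
	fmt.Println(out) // expected: "Hi admin"
}
```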
diff --git a/vendor/github.com/flosch/pongo2/tags_lorem.go b/vendor/github.com/flosch/pongo2/tags_lorem.go
deleted file mode 100644
index 7794f6c1..00000000
--- a/vendor/github.com/flosch/pongo2/tags_lorem.go
+++ /dev/null
@@ -1,132 +0,0 @@
-package pongo2
-
-import (
- "fmt"
- "math/rand"
- "strings"
- "time"
-)
-
-var (
- tagLoremParagraphs = strings.Split(tagLoremText, "\n")
- tagLoremWords = strings.Fields(tagLoremText)
-)
-
-type tagLoremNode struct {
- position *Token
- count int // number of paragraphs
- method string // w = words, p = HTML paragraphs, b = plain-text (default is b)
- random bool // does not use the default paragraph "Lorem ipsum dolor sit amet, ..."
-}
-
-func (node *tagLoremNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- switch node.method {
- case "b":
- if node.random {
- for i := 0; i < node.count; i++ {
- if i > 0 {
- writer.WriteString("\n")
- }
- par := tagLoremParagraphs[rand.Intn(len(tagLoremParagraphs))]
- writer.WriteString(par)
- }
- } else {
- for i := 0; i < node.count; i++ {
- if i > 0 {
- writer.WriteString("\n")
- }
- par := tagLoremParagraphs[i%len(tagLoremParagraphs)]
- writer.WriteString(par)
- }
- }
- case "w":
- if node.random {
- for i := 0; i < node.count; i++ {
- if i > 0 {
- writer.WriteString(" ")
- }
- word := tagLoremWords[rand.Intn(len(tagLoremWords))]
- writer.WriteString(word)
- }
- } else {
- for i := 0; i < node.count; i++ {
- if i > 0 {
- writer.WriteString(" ")
- }
- word := tagLoremWords[i%len(tagLoremWords)]
- writer.WriteString(word)
- }
- }
- case "p":
- if node.random {
- for i := 0; i < node.count; i++ {
- if i > 0 {
- writer.WriteString("\n")
- }
- writer.WriteString("")
- par := tagLoremParagraphs[rand.Intn(len(tagLoremParagraphs))]
- writer.WriteString(par)
- writer.WriteString("
")
- }
- } else {
- for i := 0; i < node.count; i++ {
- if i > 0 {
- writer.WriteString("\n")
- }
- writer.WriteString("")
- par := tagLoremParagraphs[i%len(tagLoremParagraphs)]
- writer.WriteString(par)
- writer.WriteString("
")
-
- }
- }
- default:
- return ctx.OrigError(fmt.Errorf("unsupported method: %s", node.method), nil)
- }
-
- return nil
-}
-
-func tagLoremParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- loremNode := &tagLoremNode{
- position: start,
- count: 1,
- method: "b",
- }
-
- if countToken := arguments.MatchType(TokenNumber); countToken != nil {
- loremNode.count = AsValue(countToken.Val).Integer()
- }
-
- if methodToken := arguments.MatchType(TokenIdentifier); methodToken != nil {
- if methodToken.Val != "w" && methodToken.Val != "p" && methodToken.Val != "b" {
- return nil, arguments.Error("lorem-method must be either 'w', 'p' or 'b'.", nil)
- }
-
- loremNode.method = methodToken.Val
- }
-
- if arguments.MatchOne(TokenIdentifier, "random") != nil {
- loremNode.random = true
- }
-
- if arguments.Remaining() > 0 {
- return nil, arguments.Error("Malformed lorem-tag arguments.", nil)
- }
-
- return loremNode, nil
-}
-
-func init() {
- rand.Seed(time.Now().Unix())
-
- RegisterTag("lorem", tagLoremParser)
-}
-
-const tagLoremText = `Lorem ipsum dolor sit amet, consectetur adipisici elit, sed eiusmod tempor incidunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquid ex ea commodi consequat. Quis aute iure reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint obcaecat cupiditat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.
-Duis autem vel eum iriure dolor in hendrerit in vulputate velit esse molestie consequat, vel illum dolore eu feugiat nulla facilisis at vero eros et accumsan et iusto odio dignissim qui blandit praesent luptatum zzril delenit augue duis dolore te feugait nulla facilisi. Lorem ipsum dolor sit amet, consectetuer adipiscing elit, sed diam nonummy nibh euismod tincidunt ut laoreet dolore magna aliquam erat volutpat.
-Ut wisi enim ad minim veniam, quis nostrud exerci tation ullamcorper suscipit lobortis nisl ut aliquip ex ea commodo consequat. Duis autem vel eum iriure dolor in hendrerit in vulputate velit esse molestie consequat, vel illum dolore eu feugiat nulla facilisis at vero eros et accumsan et iusto odio dignissim qui blandit praesent luptatum zzril delenit augue duis dolore te feugait nulla facilisi.
-Nam liber tempor cum soluta nobis eleifend option congue nihil imperdiet doming id quod mazim placerat facer possim assum. Lorem ipsum dolor sit amet, consectetuer adipiscing elit, sed diam nonummy nibh euismod tincidunt ut laoreet dolore magna aliquam erat volutpat. Ut wisi enim ad minim veniam, quis nostrud exerci tation ullamcorper suscipit lobortis nisl ut aliquip ex ea commodo consequat.
-Duis autem vel eum iriure dolor in hendrerit in vulputate velit esse molestie consequat, vel illum dolore eu feugiat nulla facilisis.
-At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, At accusam aliquyam diam diam dolore dolores duo eirmod eos erat, et nonumy sed tempor et et invidunt justo labore Stet clita ea et gubergren, kasd magna no rebum. sanctus sea sed takimata ut vero voluptua. est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat.
-Consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet.`
diff --git a/vendor/github.com/flosch/pongo2/tags_macro.go b/vendor/github.com/flosch/pongo2/tags_macro.go
deleted file mode 100644
index dd3e0bf4..00000000
--- a/vendor/github.com/flosch/pongo2/tags_macro.go
+++ /dev/null
@@ -1,149 +0,0 @@
-package pongo2
-
-import (
- "bytes"
- "fmt"
-)
-
-type tagMacroNode struct {
- position *Token
- name string
- argsOrder []string
- args map[string]IEvaluator
- exported bool
-
- wrapper *NodeWrapper
-}
-
-func (node *tagMacroNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- ctx.Private[node.name] = func(args ...*Value) *Value {
- return node.call(ctx, args...)
- }
-
- return nil
-}
-
-func (node *tagMacroNode) call(ctx *ExecutionContext, args ...*Value) *Value {
- argsCtx := make(Context)
-
- for k, v := range node.args {
- if v == nil {
- // User did not provide a default value
- argsCtx[k] = nil
- } else {
- // Evaluate the default value
- valueExpr, err := v.Evaluate(ctx)
- if err != nil {
- ctx.Logf(err.Error())
- return AsSafeValue(err.Error())
- }
-
- argsCtx[k] = valueExpr
- }
- }
-
- if len(args) > len(node.argsOrder) {
- // Too many arguments; we log the error and return its message as the macro's output.
- err := ctx.Error(fmt.Sprintf("Macro '%s' called with too many arguments (%d instead of %d).",
- node.name, len(args), len(node.argsOrder)), nil).updateFromTokenIfNeeded(ctx.template, node.position)
-
- ctx.Logf(err.Error()) // TODO: This is a workaround, because the error is not returned yet to the Execution()-methods
- return AsSafeValue(err.Error())
- }
-
- // Make a context for the macro execution
- macroCtx := NewChildExecutionContext(ctx)
-
- // Register all arguments in the private context
- macroCtx.Private.Update(argsCtx)
-
- for idx, argValue := range args {
- macroCtx.Private[node.argsOrder[idx]] = argValue.Interface()
- }
-
- var b bytes.Buffer
- err := node.wrapper.Execute(macroCtx, &b)
- if err != nil {
- return AsSafeValue(err.updateFromTokenIfNeeded(ctx.template, node.position).Error())
- }
-
- return AsSafeValue(b.String())
-}
-
-func tagMacroParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- macroNode := &tagMacroNode{
- position: start,
- args: make(map[string]IEvaluator),
- }
-
- nameToken := arguments.MatchType(TokenIdentifier)
- if nameToken == nil {
- return nil, arguments.Error("Macro-tag needs at least an identifier as name.", nil)
- }
- macroNode.name = nameToken.Val
-
- if arguments.MatchOne(TokenSymbol, "(") == nil {
- return nil, arguments.Error("Expected '('.", nil)
- }
-
- for arguments.Match(TokenSymbol, ")") == nil {
- argNameToken := arguments.MatchType(TokenIdentifier)
- if argNameToken == nil {
- return nil, arguments.Error("Expected argument name as identifier.", nil)
- }
- macroNode.argsOrder = append(macroNode.argsOrder, argNameToken.Val)
-
- if arguments.Match(TokenSymbol, "=") != nil {
- // Default expression follows
- argDefaultExpr, err := arguments.ParseExpression()
- if err != nil {
- return nil, err
- }
- macroNode.args[argNameToken.Val] = argDefaultExpr
- } else {
- // No default expression
- macroNode.args[argNameToken.Val] = nil
- }
-
- if arguments.Match(TokenSymbol, ")") != nil {
- break
- }
- if arguments.Match(TokenSymbol, ",") == nil {
- return nil, arguments.Error("Expected ',' or ')'.", nil)
- }
- }
-
- if arguments.Match(TokenKeyword, "export") != nil {
- macroNode.exported = true
- }
-
- if arguments.Remaining() > 0 {
- return nil, arguments.Error("Malformed macro-tag.", nil)
- }
-
- // Body wrapping
- wrapper, endargs, err := doc.WrapUntilTag("endmacro")
- if err != nil {
- return nil, err
- }
- macroNode.wrapper = wrapper
-
- if endargs.Count() > 0 {
- return nil, endargs.Error("Arguments not allowed here.", nil)
- }
-
- if macroNode.exported {
- // Now register the macro if it wants to be exported
- _, has := doc.template.exportedMacros[macroNode.name]
- if has {
- return nil, doc.Error(fmt.Sprintf("another macro with name '%s' already exported", macroNode.name), start)
- }
- doc.template.exportedMacros[macroNode.name] = macroNode
- }
-
- return macroNode, nil
-}
-
-func init() {
- RegisterTag("macro", tagMacroParser)
-}
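A macro becomes a callable stored in the context; positional arguments override the defaults evaluated in call(), and 'export' additionally makes it importable from other templates. A sketch:

```go
package main

import (
	"fmt"

	"github.com/flosch/pongo2"
)

func main() {
	tpl := pongo2.Must(pongo2.FromString(
		`{% macro greet(name, greeting="Hello") %}{{ greeting }}, {{ name }}!{% endmacro %}{{ greet("GARM") }} {{ greet("GARM", "Hi") }}`))
	out, _ := tpl.Execute(pongo2.Context{})
	fmt.Println(out) // expected: "Hello, GARM! Hi, GARM!"
}
```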
diff --git a/vendor/github.com/flosch/pongo2/tags_now.go b/vendor/github.com/flosch/pongo2/tags_now.go
deleted file mode 100644
index d9fa4a37..00000000
--- a/vendor/github.com/flosch/pongo2/tags_now.go
+++ /dev/null
@@ -1,50 +0,0 @@
-package pongo2
-
-import (
- "time"
-)
-
-type tagNowNode struct {
- position *Token
- format string
- fake bool
-}
-
-func (node *tagNowNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- var t time.Time
- if node.fake {
- t = time.Date(2014, time.February, 05, 18, 31, 45, 00, time.UTC)
- } else {
- t = time.Now()
- }
-
- writer.WriteString(t.Format(node.format))
-
- return nil
-}
-
-func tagNowParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- nowNode := &tagNowNode{
- position: start,
- }
-
- formatToken := arguments.MatchType(TokenString)
- if formatToken == nil {
- return nil, arguments.Error("Expected a format string.", nil)
- }
- nowNode.format = formatToken.Val
-
- if arguments.MatchOne(TokenIdentifier, "fake") != nil {
- nowNode.fake = true
- }
-
- if arguments.Remaining() > 0 {
- return nil, arguments.Error("Malformed now-tag arguments.", nil)
- }
-
- return nowNode, nil
-}
-
-func init() {
- RegisterTag("now", tagNowParser)
-}
diff --git a/vendor/github.com/flosch/pongo2/tags_set.go b/vendor/github.com/flosch/pongo2/tags_set.go
deleted file mode 100644
index be121c12..00000000
--- a/vendor/github.com/flosch/pongo2/tags_set.go
+++ /dev/null
@@ -1,50 +0,0 @@
-package pongo2
-
-type tagSetNode struct {
- name string
- expression IEvaluator
-}
-
-func (node *tagSetNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- // Evaluate expression
- value, err := node.expression.Evaluate(ctx)
- if err != nil {
- return err
- }
-
- ctx.Private[node.name] = value
- return nil
-}
-
-func tagSetParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- node := &tagSetNode{}
-
- // Parse variable name
- typeToken := arguments.MatchType(TokenIdentifier)
- if typeToken == nil {
- return nil, arguments.Error("Expected an identifier.", nil)
- }
- node.name = typeToken.Val
-
- if arguments.Match(TokenSymbol, "=") == nil {
- return nil, arguments.Error("Expected '='.", nil)
- }
-
- // Variable expression
- keyExpression, err := arguments.ParseExpression()
- if err != nil {
- return nil, err
- }
- node.expression = keyExpression
-
- // Remaining arguments
- if arguments.Remaining() > 0 {
- return nil, arguments.Error("Malformed 'set'-tag arguments.", nil)
- }
-
- return node, nil
-}
-
-func init() {
- RegisterTag("set", tagSetParser)
-}
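set evaluates its right-hand expression once and stores the result in the private context for later lookups. A sketch:

```go
package main

import (
	"fmt"

	"github.com/flosch/pongo2"
)

func main() {
	tpl := pongo2.Must(pongo2.FromString(`{% set total = price * 2 %}{{ total }}`))
	out, _ := tpl.Execute(pongo2.Context{"price": 21})
	fmt.Println(out) // expected: "42"
}
```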
diff --git a/vendor/github.com/flosch/pongo2/tags_spaceless.go b/vendor/github.com/flosch/pongo2/tags_spaceless.go
deleted file mode 100644
index 4fa851ba..00000000
--- a/vendor/github.com/flosch/pongo2/tags_spaceless.go
+++ /dev/null
@@ -1,54 +0,0 @@
-package pongo2
-
-import (
- "bytes"
- "regexp"
-)
-
-type tagSpacelessNode struct {
- wrapper *NodeWrapper
-}
-
-var tagSpacelessRegexp = regexp.MustCompile(`(?U:(<.*>))([\t\n\v\f\r ]+)(?U:(<.*>))`)
-
-func (node *tagSpacelessNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- b := bytes.NewBuffer(make([]byte, 0, 1024)) // 1 KiB
-
- err := node.wrapper.Execute(ctx, b)
- if err != nil {
- return err
- }
-
- s := b.String()
- // Repeat this recursively
- changed := true
- for changed {
- s2 := tagSpacelessRegexp.ReplaceAllString(s, "$1$3")
- changed = s != s2
- s = s2
- }
-
- writer.WriteString(s)
-
- return nil
-}
-
-func tagSpacelessParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- spacelessNode := &tagSpacelessNode{}
-
- wrapper, _, err := doc.WrapUntilTag("endspaceless")
- if err != nil {
- return nil, err
- }
- spacelessNode.wrapper = wrapper
-
- if arguments.Remaining() > 0 {
- return nil, arguments.Error("Malformed spaceless-tag arguments.", nil)
- }
-
- return spacelessNode, nil
-}
-
-func init() {
- RegisterTag("spaceless", tagSpacelessParser)
-}
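
Note that the removed `spaceless` implementation strips whitespace between a closing `>` and the next `<`, reapplying the regexp until the output stops changing. Usage sketch:

```go
package main

import (
	"fmt"

	"github.com/flosch/pongo2"
)

func main() {
	tpl := pongo2.Must(pongo2.FromString(
		"{% spaceless %}<ul>\n  <li>one</li>\n  <li>two</li>\n</ul>{% endspaceless %}"))
	out, err := tpl.Execute(nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // <ul><li>one</li><li>two</li></ul>
}
```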
diff --git a/vendor/github.com/flosch/pongo2/tags_ssi.go b/vendor/github.com/flosch/pongo2/tags_ssi.go
deleted file mode 100644
index c33858d5..00000000
--- a/vendor/github.com/flosch/pongo2/tags_ssi.go
+++ /dev/null
@@ -1,68 +0,0 @@
-package pongo2
-
-import (
- "io/ioutil"
-)
-
-type tagSSINode struct {
- filename string
- content string
- template *Template
-}
-
-func (node *tagSSINode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- if node.template != nil {
- // Execute the template within the current context
- includeCtx := make(Context)
- includeCtx.Update(ctx.Public)
- includeCtx.Update(ctx.Private)
-
- err := node.template.execute(includeCtx, writer)
- if err != nil {
- return err.(*Error)
- }
- } else {
- // Just print out the content
- writer.WriteString(node.content)
- }
- return nil
-}
-
-func tagSSIParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- SSINode := &tagSSINode{}
-
- if fileToken := arguments.MatchType(TokenString); fileToken != nil {
- SSINode.filename = fileToken.Val
-
- if arguments.Match(TokenIdentifier, "parsed") != nil {
- // parsed
- temporaryTpl, err := doc.template.set.FromFile(doc.template.set.resolveFilename(doc.template, fileToken.Val))
- if err != nil {
- return nil, err.(*Error).updateFromTokenIfNeeded(doc.template, fileToken)
- }
- SSINode.template = temporaryTpl
- } else {
- // plaintext
- buf, err := ioutil.ReadFile(doc.template.set.resolveFilename(doc.template, fileToken.Val))
- if err != nil {
- return nil, (&Error{
- Sender: "tag:ssi",
- OrigError: err,
- }).updateFromTokenIfNeeded(doc.template, fileToken)
- }
- SSINode.content = string(buf)
- }
- } else {
- return nil, arguments.Error("First argument must be a string.", nil)
- }
-
- if arguments.Remaining() > 0 {
- return nil, arguments.Error("Malformed SSI-tag argument.", nil)
- }
-
- return SSINode, nil
-}
-
-func init() {
- RegisterTag("ssi", tagSSIParser)
-}
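
For reference, the removed `ssi` tag resolves its file argument at parse time: with the `parsed` flag the file is compiled as a template and executed in the caller's context, otherwise its bytes are inlined verbatim. A sketch, assuming a hypothetical templates/ directory containing header.html:

```go
package main

import (
	"fmt"

	"github.com/flosch/pongo2"
)

func main() {
	// "templates" and "header.html" are illustrative paths.
	set := pongo2.NewSet("web", pongo2.MustNewLocalFileSystemLoader("templates"))
	tpl := pongo2.Must(set.FromString(`{% ssi "header.html" parsed %}`))
	out, err := tpl.Execute(pongo2.Context{"title": "GARM"})
	if err != nil {
		panic(err)
	}
	fmt.Println(out)
}
```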
diff --git a/vendor/github.com/flosch/pongo2/tags_templatetag.go b/vendor/github.com/flosch/pongo2/tags_templatetag.go
deleted file mode 100644
index 164b4dc3..00000000
--- a/vendor/github.com/flosch/pongo2/tags_templatetag.go
+++ /dev/null
@@ -1,45 +0,0 @@
-package pongo2
-
-type tagTemplateTagNode struct {
- content string
-}
-
-var templateTagMapping = map[string]string{
- "openblock": "{%",
- "closeblock": "%}",
- "openvariable": "{{",
- "closevariable": "}}",
- "openbrace": "{",
- "closebrace": "}",
- "opencomment": "{#",
- "closecomment": "#}",
-}
-
-func (node *tagTemplateTagNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- writer.WriteString(node.content)
- return nil
-}
-
-func tagTemplateTagParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- ttNode := &tagTemplateTagNode{}
-
- if argToken := arguments.MatchType(TokenIdentifier); argToken != nil {
- output, found := templateTagMapping[argToken.Val]
- if !found {
- return nil, arguments.Error("Argument not found", argToken)
- }
- ttNode.content = output
- } else {
- return nil, arguments.Error("Identifier expected.", nil)
- }
-
- if arguments.Remaining() > 0 {
- return nil, arguments.Error("Malformed templatetag-tag argument.", nil)
- }
-
- return ttNode, nil
-}
-
-func init() {
- RegisterTag("templatetag", tagTemplateTagParser)
-}
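
For reference, the removed `templatetag` tag maps eight fixed identifiers to literal delimiter strings so templates can emit pongo2's own syntax without it being interpreted. Sketch:

```go
package main

import (
	"fmt"

	"github.com/flosch/pongo2"
)

func main() {
	tpl := pongo2.Must(pongo2.FromString(
		`{% templatetag openvariable %} name {% templatetag closevariable %}`))
	out, err := tpl.Execute(nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // {{ name }}
}
```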
diff --git a/vendor/github.com/flosch/pongo2/tags_widthratio.go b/vendor/github.com/flosch/pongo2/tags_widthratio.go
deleted file mode 100644
index 70c9c3e8..00000000
--- a/vendor/github.com/flosch/pongo2/tags_widthratio.go
+++ /dev/null
@@ -1,83 +0,0 @@
-package pongo2
-
-import (
- "fmt"
- "math"
-)
-
-type tagWidthratioNode struct {
- position *Token
- current, max IEvaluator
- width IEvaluator
- ctxName string
-}
-
-func (node *tagWidthratioNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- current, err := node.current.Evaluate(ctx)
- if err != nil {
- return err
- }
-
- max, err := node.max.Evaluate(ctx)
- if err != nil {
- return err
- }
-
- width, err := node.width.Evaluate(ctx)
- if err != nil {
- return err
- }
-
- value := int(math.Ceil(current.Float()/max.Float()*width.Float() + 0.5))
-
- if node.ctxName == "" {
- writer.WriteString(fmt.Sprintf("%d", value))
- } else {
- ctx.Private[node.ctxName] = value
- }
-
- return nil
-}
-
-func tagWidthratioParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- widthratioNode := &tagWidthratioNode{
- position: start,
- }
-
- current, err := arguments.ParseExpression()
- if err != nil {
- return nil, err
- }
- widthratioNode.current = current
-
- max, err := arguments.ParseExpression()
- if err != nil {
- return nil, err
- }
- widthratioNode.max = max
-
- width, err := arguments.ParseExpression()
- if err != nil {
- return nil, err
- }
- widthratioNode.width = width
-
- if arguments.MatchOne(TokenKeyword, "as") != nil {
- // Name follows
- nameToken := arguments.MatchType(TokenIdentifier)
- if nameToken == nil {
- return nil, arguments.Error("Expected name (identifier).", nil)
- }
- widthratioNode.ctxName = nameToken.Val
- }
-
- if arguments.Remaining() > 0 {
- return nil, arguments.Error("Malformed widthratio-tag arguments.", nil)
- }
-
- return widthratioNode, nil
-}
-
-func init() {
- RegisterTag("widthratio", tagWidthratioParser)
-}
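
For reference, the removed `widthratio` tag computes int(math.Ceil(current/max*width + 0.5)) and either writes the result out or, with `as name`, stores it in the private context. Worked sketch:

```go
package main

import (
	"fmt"

	"github.com/flosch/pongo2"
)

func main() {
	// 175/200 * 100 = 87.5; adding 0.5 and applying math.Ceil gives 88.
	tpl := pongo2.Must(pongo2.FromString(`{% widthratio current max 100 %}`))
	out, err := tpl.Execute(pongo2.Context{"current": 175, "max": 200})
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // 88
}
```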
diff --git a/vendor/github.com/flosch/pongo2/tags_with.go b/vendor/github.com/flosch/pongo2/tags_with.go
deleted file mode 100644
index 32b3c1c4..00000000
--- a/vendor/github.com/flosch/pongo2/tags_with.go
+++ /dev/null
@@ -1,88 +0,0 @@
-package pongo2
-
-type tagWithNode struct {
- withPairs map[string]IEvaluator
- wrapper *NodeWrapper
-}
-
-func (node *tagWithNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- //new context for block
- withctx := NewChildExecutionContext(ctx)
-
- // Put all custom with-pairs into the context
- for key, value := range node.withPairs {
- val, err := value.Evaluate(ctx)
- if err != nil {
- return err
- }
- withctx.Private[key] = val
- }
-
- return node.wrapper.Execute(withctx, writer)
-}
-
-func tagWithParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- withNode := &tagWithNode{
- withPairs: make(map[string]IEvaluator),
- }
-
- if arguments.Count() == 0 {
- return nil, arguments.Error("Tag 'with' requires at least one argument.", nil)
- }
-
- wrapper, endargs, err := doc.WrapUntilTag("endwith")
- if err != nil {
- return nil, err
- }
- withNode.wrapper = wrapper
-
- if endargs.Count() > 0 {
- return nil, endargs.Error("Arguments not allowed here.", nil)
- }
-
- // Scan through all arguments to see which style the user uses (old or new style).
- // If we find any "as" keyword we will enforce old style; otherwise we will use new style.
-	oldStyle := false // by default we're using the new style
- for i := 0; i < arguments.Count(); i++ {
- if arguments.PeekN(i, TokenKeyword, "as") != nil {
- oldStyle = true
- break
- }
- }
-
- for arguments.Remaining() > 0 {
- if oldStyle {
- valueExpr, err := arguments.ParseExpression()
- if err != nil {
- return nil, err
- }
- if arguments.Match(TokenKeyword, "as") == nil {
- return nil, arguments.Error("Expected 'as' keyword.", nil)
- }
- keyToken := arguments.MatchType(TokenIdentifier)
- if keyToken == nil {
- return nil, arguments.Error("Expected an identifier", nil)
- }
- withNode.withPairs[keyToken.Val] = valueExpr
- } else {
- keyToken := arguments.MatchType(TokenIdentifier)
- if keyToken == nil {
- return nil, arguments.Error("Expected an identifier", nil)
- }
- if arguments.Match(TokenSymbol, "=") == nil {
- return nil, arguments.Error("Expected '='.", nil)
- }
- valueExpr, err := arguments.ParseExpression()
- if err != nil {
- return nil, err
- }
- withNode.withPairs[keyToken.Val] = valueExpr
- }
- }
-
- return withNode, nil
-}
-
-func init() {
- RegisterTag("with", tagWithParser)
-}
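
For reference, the removed `with` tag accepts both the old Django style (`expr as name`) and the new style (`name=expr`), detected by scanning the arguments for an `as` keyword; the bound pairs exist only inside the block's child context. Sketch:

```go
package main

import (
	"fmt"

	"github.com/flosch/pongo2"
)

func main() {
	tpl := pongo2.Must(pongo2.FromString(
		`{% with greeting="Hello" who=name %}{{ greeting }}, {{ who }}!{% endwith %}`))
	out, err := tpl.Execute(pongo2.Context{"name": "GARM"})
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // Hello, GARM!
}
```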
diff --git a/vendor/github.com/flosch/pongo2/template.go b/vendor/github.com/flosch/pongo2/template.go
deleted file mode 100644
index 47666c94..00000000
--- a/vendor/github.com/flosch/pongo2/template.go
+++ /dev/null
@@ -1,276 +0,0 @@
-package pongo2
-
-import (
- "bytes"
- "fmt"
- "io"
- "strings"
-)
-
-type TemplateWriter interface {
- io.Writer
- WriteString(string) (int, error)
-}
-
-type templateWriter struct {
- w io.Writer
-}
-
-func (tw *templateWriter) WriteString(s string) (int, error) {
- return tw.w.Write([]byte(s))
-}
-
-func (tw *templateWriter) Write(b []byte) (int, error) {
- return tw.w.Write(b)
-}
-
-type Template struct {
- set *TemplateSet
-
- // Input
- isTplString bool
- name string
- tpl string
- size int
-
- // Calculation
- tokens []*Token
- parser *Parser
-
- // first come, first serve (it's important to not override existing entries in here)
- level int
- parent *Template
- child *Template
- blocks map[string]*NodeWrapper
- exportedMacros map[string]*tagMacroNode
-
- // Output
- root *nodeDocument
-
- // Options allow you to change the behavior of template-engine.
- // You can change the options before calling the Execute method.
- Options *Options
-}
-
-func newTemplateString(set *TemplateSet, tpl []byte) (*Template, error) {
- return newTemplate(set, "", true, tpl)
-}
-
-func newTemplate(set *TemplateSet, name string, isTplString bool, tpl []byte) (*Template, error) {
- strTpl := string(tpl)
-
- // Create the template
- t := &Template{
- set: set,
- isTplString: isTplString,
- name: name,
- tpl: strTpl,
- size: len(strTpl),
- blocks: make(map[string]*NodeWrapper),
- exportedMacros: make(map[string]*tagMacroNode),
- Options: newOptions(),
- }
- // Copy all settings from another Options.
- t.Options.Update(set.Options)
-
- // Tokenize it
- tokens, err := lex(name, strTpl)
- if err != nil {
- return nil, err
- }
- t.tokens = tokens
-
- // For debugging purposes, show all tokens:
- /*for i, t := range tokens {
- fmt.Printf("%3d. %s\n", i, t)
- }*/
-
- // Parse it
- err = t.parse()
- if err != nil {
- return nil, err
- }
-
- return t, nil
-}
-
-func (tpl *Template) newContextForExecution(context Context) (*Template, *ExecutionContext, error) {
- if tpl.Options.TrimBlocks || tpl.Options.LStripBlocks {
- // Issue #94 https://github.com/flosch/pongo2/issues/94
- // If an application configures pongo2 template to trim_blocks,
- // the first newline after a template tag is removed automatically (like in PHP).
- prev := &Token{
- Typ: TokenHTML,
- Val: "\n",
- }
-
- for _, t := range tpl.tokens {
- if tpl.Options.LStripBlocks {
- if prev.Typ == TokenHTML && t.Typ != TokenHTML && t.Val == "{%" {
- prev.Val = strings.TrimRight(prev.Val, "\t ")
- }
- }
-
- if tpl.Options.TrimBlocks {
- if prev.Typ != TokenHTML && t.Typ == TokenHTML && prev.Val == "%}" {
- if len(t.Val) > 0 && t.Val[0] == '\n' {
- t.Val = t.Val[1:len(t.Val)]
- }
- }
- }
-
- prev = t
- }
- }
-
- // Determine the parent to be executed (for template inheritance)
- parent := tpl
- for parent.parent != nil {
- parent = parent.parent
- }
-
- // Create context if none is given
- newContext := make(Context)
- newContext.Update(tpl.set.Globals)
-
- if context != nil {
- newContext.Update(context)
-
- if len(newContext) > 0 {
- // Check for context name syntax
- err := newContext.checkForValidIdentifiers()
- if err != nil {
- return parent, nil, err
- }
-
- // Check for clashes with macro names
- for k := range newContext {
- _, has := tpl.exportedMacros[k]
- if has {
- return parent, nil, &Error{
- Filename: tpl.name,
- Sender: "execution",
- OrigError: fmt.Errorf("context key name '%s' clashes with macro '%s'", k, k),
- }
- }
- }
- }
- }
-
- // Create operational context
- ctx := newExecutionContext(parent, newContext)
-
- return parent, ctx, nil
-}
-
-func (tpl *Template) execute(context Context, writer TemplateWriter) error {
- parent, ctx, err := tpl.newContextForExecution(context)
- if err != nil {
- return err
- }
-
- // Run the selected document
- if err := parent.root.Execute(ctx, writer); err != nil {
- return err
- }
-
- return nil
-}
-
-func (tpl *Template) newTemplateWriterAndExecute(context Context, writer io.Writer) error {
- return tpl.execute(context, &templateWriter{w: writer})
-}
-
-func (tpl *Template) newBufferAndExecute(context Context) (*bytes.Buffer, error) {
- // Create output buffer
- // We assume that the rendered template will be 30% larger
- buffer := bytes.NewBuffer(make([]byte, 0, int(float64(tpl.size)*1.3)))
- if err := tpl.execute(context, buffer); err != nil {
- return nil, err
- }
- return buffer, nil
-}
-
-// Executes the template with the given context and writes to writer (io.Writer)
-// on success. Context can be nil. Nothing is written on error; instead the error
-// is being returned.
-func (tpl *Template) ExecuteWriter(context Context, writer io.Writer) error {
- buf, err := tpl.newBufferAndExecute(context)
- if err != nil {
- return err
- }
- _, err = buf.WriteTo(writer)
- if err != nil {
- return err
- }
- return nil
-}
-
-// Same as ExecuteWriter. The only difference between both functions is that
-// this function might already have written parts of the generated template in the
-// case of an execution error because there's no intermediate buffer involved for
-// performance reasons. This is handy if you need high performance template
-// generation or if you want to manage your own pool of buffers.
-func (tpl *Template) ExecuteWriterUnbuffered(context Context, writer io.Writer) error {
- return tpl.newTemplateWriterAndExecute(context, writer)
-}
-
-// Executes the template and returns the rendered template as a []byte
-func (tpl *Template) ExecuteBytes(context Context) ([]byte, error) {
- // Execute template
- buffer, err := tpl.newBufferAndExecute(context)
- if err != nil {
- return nil, err
- }
- return buffer.Bytes(), nil
-}
-
-// Executes the template and returns the rendered template as a string
-func (tpl *Template) Execute(context Context) (string, error) {
- // Execute template
- buffer, err := tpl.newBufferAndExecute(context)
- if err != nil {
- return "", err
- }
-
- return buffer.String(), nil
-
-}
-
-func (tpl *Template) ExecuteBlocks(context Context, blocks []string) (map[string]string, error) {
- var parents []*Template
- result := make(map[string]string)
-
- parent := tpl
- for parent != nil {
- parents = append(parents, parent)
- parent = parent.parent
- }
-
- for _, t := range parents {
- buffer := bytes.NewBuffer(make([]byte, 0, int(float64(t.size)*1.3)))
- _, ctx, err := t.newContextForExecution(context)
- if err != nil {
- return nil, err
- }
- for _, blockName := range blocks {
- if _, ok := result[blockName]; ok {
- continue
- }
- if blockWrapper, ok := t.blocks[blockName]; ok {
- bErr := blockWrapper.Execute(ctx, buffer)
- if bErr != nil {
- return nil, bErr
- }
- result[blockName] = buffer.String()
- buffer.Reset()
- }
- }
- // We have found all blocks
- if len(blocks) == len(result) {
- break
- }
- }
-
- return result, nil
-}
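
For reference, all of the removed Execute variants funnel into the same execute() path and differ only in buffering: ExecuteWriter renders into an intermediate buffer so nothing reaches the destination on error, while ExecuteWriterUnbuffered trades that guarantee for fewer allocations. Sketch:

```go
package main

import (
	"bytes"
	"fmt"
	"os"

	"github.com/flosch/pongo2"
)

func main() {
	tpl := pongo2.Must(pongo2.FromString(`Hello, {{ name }}!`))
	ctx := pongo2.Context{"name": "GARM"}

	// Buffered: on error, nothing is written to buf.
	var buf bytes.Buffer
	if err := tpl.ExecuteWriter(ctx, &buf); err != nil {
		panic(err)
	}
	fmt.Println(buf.String())

	// Unbuffered: faster, but may emit partial output if execution fails.
	if err := tpl.ExecuteWriterUnbuffered(ctx, os.Stdout); err != nil {
		panic(err)
	}
}
```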
diff --git a/vendor/github.com/flosch/pongo2/template_loader.go b/vendor/github.com/flosch/pongo2/template_loader.go
deleted file mode 100644
index abd23409..00000000
--- a/vendor/github.com/flosch/pongo2/template_loader.go
+++ /dev/null
@@ -1,156 +0,0 @@
-package pongo2
-
-import (
- "bytes"
- "fmt"
- "io"
- "io/ioutil"
- "log"
- "os"
- "path/filepath"
-)
-
-// LocalFilesystemLoader represents a local filesystem loader with basic
-// BaseDirectory capabilities. The access to the local filesystem is unrestricted.
-type LocalFilesystemLoader struct {
- baseDir string
-}
-
-// MustNewLocalFileSystemLoader creates a new LocalFilesystemLoader instance
-// and panics if there's any error during instantiation. The parameters
-// are the same as for NewLocalFileSystemLoader.
-func MustNewLocalFileSystemLoader(baseDir string) *LocalFilesystemLoader {
- fs, err := NewLocalFileSystemLoader(baseDir)
- if err != nil {
- log.Panic(err)
- }
- return fs
-}
-
-// NewLocalFileSystemLoader creates a new LocalFilesystemLoader and allows
-// templates to be loaded from disk (unrestricted). If a base directory
-// is given (or set later using SetBaseDir), that base directory is used
-// for path calculation in template inclusions/imports. Otherwise the path
-// is calculated relative to the including template's path.
-func NewLocalFileSystemLoader(baseDir string) (*LocalFilesystemLoader, error) {
- fs := &LocalFilesystemLoader{}
- if baseDir != "" {
- if err := fs.SetBaseDir(baseDir); err != nil {
- return nil, err
- }
- }
- return fs, nil
-}
-
-// SetBaseDir sets the template's base directory. This directory will
-// be used for any relative path in filters, tags and From*-functions to determine
-// your template. See the comment for NewLocalFileSystemLoader as well.
-func (fs *LocalFilesystemLoader) SetBaseDir(path string) error {
- // Make the path absolute
- if !filepath.IsAbs(path) {
- abs, err := filepath.Abs(path)
- if err != nil {
- return err
- }
- path = abs
- }
-
- // Check for existence
- fi, err := os.Stat(path)
- if err != nil {
- return err
- }
- if !fi.IsDir() {
- return fmt.Errorf("The given path '%s' is not a directory.", path)
- }
-
- fs.baseDir = path
- return nil
-}
-
-// Get reads the path's content from your local filesystem.
-func (fs *LocalFilesystemLoader) Get(path string) (io.Reader, error) {
- buf, err := ioutil.ReadFile(path)
- if err != nil {
- return nil, err
- }
- return bytes.NewReader(buf), nil
-}
-
-// Abs resolves a filename relative to the base directory. Absolute paths are allowed.
-// When no base directory is set, the absolute path to the filename
-// is calculated from either the provided base directory (which
-// may be the path of a template that includes another template) or
-// the current working directory.
-func (fs *LocalFilesystemLoader) Abs(base, name string) string {
- if filepath.IsAbs(name) {
- return name
- }
-
- // Our own base dir has always priority; if there's none
- // we use the path provided in base.
- var err error
- if fs.baseDir == "" {
- if base == "" {
- base, err = os.Getwd()
- if err != nil {
- panic(err)
- }
- return filepath.Join(base, name)
- }
-
- return filepath.Join(filepath.Dir(base), name)
- }
-
- return filepath.Join(fs.baseDir, name)
-}
-
-// SandboxedFilesystemLoader is still WIP.
-type SandboxedFilesystemLoader struct {
- *LocalFilesystemLoader
-}
-
-// NewSandboxedFilesystemLoader creates a new sandboxed local file system instance.
-func NewSandboxedFilesystemLoader(baseDir string) (*SandboxedFilesystemLoader, error) {
- fs, err := NewLocalFileSystemLoader(baseDir)
- if err != nil {
- return nil, err
- }
- return &SandboxedFilesystemLoader{
- LocalFilesystemLoader: fs,
- }, nil
-}
-
-// Move sandbox to a virtual fs
-
-/*
-if len(set.SandboxDirectories) > 0 {
- defer func() {
- // Remove any ".." or other crap
- resolvedPath = filepath.Clean(resolvedPath)
-
- // Make the path absolute
- absPath, err := filepath.Abs(resolvedPath)
- if err != nil {
- panic(err)
- }
- resolvedPath = absPath
-
- // Check against the sandbox directories (once one pattern matches, we're done and can allow it)
- for _, pattern := range set.SandboxDirectories {
- matched, err := filepath.Match(pattern, resolvedPath)
- if err != nil {
- panic("Wrong sandbox directory match pattern (see http://golang.org/pkg/path/filepath/#Match).")
- }
- if matched {
- // OK!
- return
- }
- }
-
- // No pattern matched, we have to log+deny the request
- set.logf("Access attempt outside of the sandbox directories (blocked): '%s'", resolvedPath)
- resolvedPath = ""
- }()
-}
-*/
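
For reference, the removed loader resolves relative template names with a fixed precedence: the loader's own base directory wins; failing that, the directory of the including template; failing that, the current working directory. Sketch (the directory and file names are illustrative):

```go
package main

import (
	"github.com/flosch/pongo2"
)

func main() {
	// NewLocalFileSystemLoader validates that the path exists and is a
	// directory; "/srv/templates" is a hypothetical example.
	loader, err := pongo2.NewLocalFileSystemLoader("/srv/templates")
	if err != nil {
		panic(err)
	}
	set := pongo2.NewSet("web", loader)

	// Resolved to /srv/templates/index.html because the loader's base
	// directory takes precedence over the including template's directory.
	tpl, err := set.FromFile("index.html")
	if err != nil {
		panic(err)
	}
	_ = tpl
}
```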
diff --git a/vendor/github.com/flosch/pongo2/template_sets.go b/vendor/github.com/flosch/pongo2/template_sets.go
deleted file mode 100644
index 4b1e43da..00000000
--- a/vendor/github.com/flosch/pongo2/template_sets.go
+++ /dev/null
@@ -1,305 +0,0 @@
-package pongo2
-
-import (
- "fmt"
- "io"
- "io/ioutil"
- "log"
- "os"
- "sync"
-
- "errors"
-)
-
-// TemplateLoader allows implementing a virtual file system.
-type TemplateLoader interface {
- // Abs calculates the path to a given template. Whenever a path must be resolved
- // due to an import from another template, the base equals the parent template's path.
- Abs(base, name string) string
-
- // Get returns an io.Reader where the template's content can be read from.
- Get(path string) (io.Reader, error)
-}
-
-// TemplateSet allows you to create your own group of templates with their own
-// global context (which is shared among all members of the set) and their own
-// configuration.
-// It's useful for separating different kinds of templates
-// (e.g. web templates vs. mail templates).
-type TemplateSet struct {
- name string
- loaders []TemplateLoader
-
- // Globals will be provided to all templates created within this template set
- Globals Context
-
- // If debug is true (default false), ExecutionContext.Logf() will work and output
- // to STDOUT. Furthermore, FromCache() won't cache the templates.
- // Make sure to synchronize the access to it in case you're changing this
- // variable during program execution (and template compilation/execution).
- Debug bool
-
- // Options allow you to change the behavior of template-engine.
- // You can change the options before calling the Execute method.
- Options *Options
-
- // Sandbox features
- // - Disallow access to specific tags and/or filters (using BanTag() and BanFilter())
- //
- // For efficiency reasons you can ban tags/filters only *before* you have
- // added your first template to the set (restrictions are statically checked).
- // After you added one, it's not possible anymore (for your personal security).
- firstTemplateCreated bool
- bannedTags map[string]bool
- bannedFilters map[string]bool
-
- // Template cache (for FromCache())
- templateCache map[string]*Template
- templateCacheMutex sync.Mutex
-}
-
-// NewSet can be used to create sets with different kinds of templates
-// (e.g. web vs. mail templates), with different globals or
-// other configurations.
-func NewSet(name string, loaders ...TemplateLoader) *TemplateSet {
- if len(loaders) == 0 {
- panic(fmt.Errorf("at least one template loader must be specified"))
- }
-
- return &TemplateSet{
- name: name,
- loaders: loaders,
- Globals: make(Context),
- bannedTags: make(map[string]bool),
- bannedFilters: make(map[string]bool),
- templateCache: make(map[string]*Template),
- Options: newOptions(),
- }
-}
-
-func (set *TemplateSet) AddLoader(loaders ...TemplateLoader) {
- set.loaders = append(set.loaders, loaders...)
-}
-
-func (set *TemplateSet) resolveFilename(tpl *Template, path string) string {
- return set.resolveFilenameForLoader(set.loaders[0], tpl, path)
-}
-
-func (set *TemplateSet) resolveFilenameForLoader(loader TemplateLoader, tpl *Template, path string) string {
- name := ""
- if tpl != nil && tpl.isTplString {
- return path
- }
- if tpl != nil {
- name = tpl.name
- }
-
- return loader.Abs(name, path)
-}
-
-// BanTag bans a specific tag for this template set. See more in the documentation for TemplateSet.
-func (set *TemplateSet) BanTag(name string) error {
- _, has := tags[name]
- if !has {
- return fmt.Errorf("tag '%s' not found", name)
- }
- if set.firstTemplateCreated {
- return errors.New("you cannot ban any tags after you've added your first template to your template set")
- }
- _, has = set.bannedTags[name]
- if has {
- return fmt.Errorf("tag '%s' is already banned", name)
- }
- set.bannedTags[name] = true
-
- return nil
-}
-
-// BanFilter bans a specific filter for this template set. See more in the documentation for TemplateSet.
-func (set *TemplateSet) BanFilter(name string) error {
- _, has := filters[name]
- if !has {
- return fmt.Errorf("filter '%s' not found", name)
- }
- if set.firstTemplateCreated {
- return errors.New("you cannot ban any filters after you've added your first template to your template set")
- }
- _, has = set.bannedFilters[name]
- if has {
- return fmt.Errorf("filter '%s' is already banned", name)
- }
- set.bannedFilters[name] = true
-
- return nil
-}
-
-func (set *TemplateSet) resolveTemplate(tpl *Template, path string) (name string, loader TemplateLoader, fd io.Reader, err error) {
- // iterate over loaders until we appear to have a valid template
- for _, loader = range set.loaders {
- name = set.resolveFilenameForLoader(loader, tpl, path)
- fd, err = loader.Get(name)
- if err == nil {
- return
- }
- }
-
- return path, nil, nil, fmt.Errorf("unable to resolve template")
-}
-
-// CleanCache cleans the template cache. If filenames is not empty,
-// it will remove the cached templates for those filenames;
-// otherwise it will empty the whole template cache. It is thread-safe.
-func (set *TemplateSet) CleanCache(filenames ...string) {
- set.templateCacheMutex.Lock()
- defer set.templateCacheMutex.Unlock()
-
- if len(filenames) == 0 {
- set.templateCache = make(map[string]*Template, len(set.templateCache))
- }
-
- for _, filename := range filenames {
- delete(set.templateCache, set.resolveFilename(nil, filename))
- }
-}
-
-// FromCache is a convenient method to cache templates. It is thread-safe
-// and will only compile the template associated with a filename once.
-// If TemplateSet.Debug is true (for example during development phase),
-// FromCache() will not cache the template and instead recompile it on any
-// call (to make changes to a template live instantaneously).
-func (set *TemplateSet) FromCache(filename string) (*Template, error) {
- if set.Debug {
- // Recompile on any request
- return set.FromFile(filename)
- }
- // Cache the template
- cleanedFilename := set.resolveFilename(nil, filename)
-
- set.templateCacheMutex.Lock()
- defer set.templateCacheMutex.Unlock()
-
- tpl, has := set.templateCache[cleanedFilename]
-
- // Cache miss
- if !has {
- tpl, err := set.FromFile(cleanedFilename)
- if err != nil {
- return nil, err
- }
- set.templateCache[cleanedFilename] = tpl
- return tpl, nil
- }
-
- // Cache hit
- return tpl, nil
-}
-
-// FromString loads a template from string and returns a Template instance.
-func (set *TemplateSet) FromString(tpl string) (*Template, error) {
- set.firstTemplateCreated = true
-
- return newTemplateString(set, []byte(tpl))
-}
-
-// FromBytes loads a template from bytes and returns a Template instance.
-func (set *TemplateSet) FromBytes(tpl []byte) (*Template, error) {
- set.firstTemplateCreated = true
-
- return newTemplateString(set, tpl)
-}
-
-// FromFile loads a template from a filename and returns a Template instance.
-func (set *TemplateSet) FromFile(filename string) (*Template, error) {
- set.firstTemplateCreated = true
-
- _, _, fd, err := set.resolveTemplate(nil, filename)
- if err != nil {
- return nil, &Error{
- Filename: filename,
- Sender: "fromfile",
- OrigError: err,
- }
- }
- buf, err := ioutil.ReadAll(fd)
- if err != nil {
- return nil, &Error{
- Filename: filename,
- Sender: "fromfile",
- OrigError: err,
- }
- }
-
- return newTemplate(set, filename, false, buf)
-}
-
-// RenderTemplateString is a shortcut and renders a template string directly.
-func (set *TemplateSet) RenderTemplateString(s string, ctx Context) (string, error) {
- set.firstTemplateCreated = true
-
- tpl := Must(set.FromString(s))
- result, err := tpl.Execute(ctx)
- if err != nil {
- return "", err
- }
- return result, nil
-}
-
-// RenderTemplateBytes is a shortcut and renders template bytes directly.
-func (set *TemplateSet) RenderTemplateBytes(b []byte, ctx Context) (string, error) {
- set.firstTemplateCreated = true
-
- tpl := Must(set.FromBytes(b))
- result, err := tpl.Execute(ctx)
- if err != nil {
- return "", err
- }
- return result, nil
-}
-
-// RenderTemplateFile is a shortcut and renders a template file directly.
-func (set *TemplateSet) RenderTemplateFile(fn string, ctx Context) (string, error) {
- set.firstTemplateCreated = true
-
- tpl := Must(set.FromFile(fn))
- result, err := tpl.Execute(ctx)
- if err != nil {
- return "", err
- }
- return result, nil
-}
-
-func (set *TemplateSet) logf(format string, args ...interface{}) {
- if set.Debug {
- logger.Printf(fmt.Sprintf("[template set: %s] %s", set.name, format), args...)
- }
-}
-
-// Logging function (internally used)
-func logf(format string, items ...interface{}) {
- if debug {
- logger.Printf(format, items...)
- }
-}
-
-var (
- debug bool // internal debugging
- logger = log.New(os.Stdout, "[pongo2] ", log.LstdFlags|log.Lshortfile)
-
- // DefaultLoader allows the default un-sandboxed access to the local file
- // system and is being used by the DefaultSet.
- DefaultLoader = MustNewLocalFileSystemLoader("")
-
-	// DefaultSet is a set created for you for convenience reasons.
- DefaultSet = NewSet("default", DefaultLoader)
-
- // Methods on the default set
- FromString = DefaultSet.FromString
- FromBytes = DefaultSet.FromBytes
- FromFile = DefaultSet.FromFile
- FromCache = DefaultSet.FromCache
- RenderTemplateString = DefaultSet.RenderTemplateString
- RenderTemplateFile = DefaultSet.RenderTemplateFile
-
- // Globals for the default set
- Globals = DefaultSet.Globals
-)
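
For reference, the removed TemplateSet isolates globals, options and the template cache per set, and lets you ban tags/filters, but only before the first template has been compiled into the set (the restriction is checked statically). Sketch (the set name, directory and file are illustrative):

```go
package main

import (
	"fmt"

	"github.com/flosch/pongo2"
)

func main() {
	// The "mail" directory and "welcome.tpl" are hypothetical.
	set := pongo2.NewSet("mail", pongo2.MustNewLocalFileSystemLoader("mail"))
	set.Globals["product"] = "GARM"

	// Must happen before the first FromString/FromFile/FromCache call.
	if err := set.BanTag("ssi"); err != nil {
		panic(err)
	}

	// Compiled once, then served from the cache unless set.Debug is true.
	tpl, err := set.FromCache("welcome.tpl")
	if err != nil {
		panic(err)
	}
	out, err := tpl.Execute(nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(out)
}
```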
diff --git a/vendor/github.com/flosch/pongo2/value.go b/vendor/github.com/flosch/pongo2/value.go
deleted file mode 100644
index 8b49adb7..00000000
--- a/vendor/github.com/flosch/pongo2/value.go
+++ /dev/null
@@ -1,540 +0,0 @@
-package pongo2
-
-import (
- "fmt"
- "reflect"
- "sort"
- "strconv"
- "strings"
- "time"
-)
-
-type Value struct {
- val reflect.Value
- safe bool // used to indicate whether a Value needs explicit escaping in the template
-}
-
-// AsValue converts any given value to a pongo2.Value
-// Usually being used within own functions passed to a template
-// through a Context or within filter functions.
-//
-// Example:
-// AsValue("my string")
-func AsValue(i interface{}) *Value {
- return &Value{
- val: reflect.ValueOf(i),
- }
-}
-
-// AsSafeValue works like AsValue, but does not apply the 'escape' filter.
-func AsSafeValue(i interface{}) *Value {
- return &Value{
- val: reflect.ValueOf(i),
- safe: true,
- }
-}
-
-func (v *Value) getResolvedValue() reflect.Value {
- if v.val.IsValid() && v.val.Kind() == reflect.Ptr {
- return v.val.Elem()
- }
- return v.val
-}
-
-// IsString checks whether the underlying value is a string
-func (v *Value) IsString() bool {
- return v.getResolvedValue().Kind() == reflect.String
-}
-
-// IsBool checks whether the underlying value is a bool
-func (v *Value) IsBool() bool {
- return v.getResolvedValue().Kind() == reflect.Bool
-}
-
-// IsFloat checks whether the underlying value is a float
-func (v *Value) IsFloat() bool {
- return v.getResolvedValue().Kind() == reflect.Float32 ||
- v.getResolvedValue().Kind() == reflect.Float64
-}
-
-// IsInteger checks whether the underlying value is an integer
-func (v *Value) IsInteger() bool {
- return v.getResolvedValue().Kind() == reflect.Int ||
- v.getResolvedValue().Kind() == reflect.Int8 ||
- v.getResolvedValue().Kind() == reflect.Int16 ||
- v.getResolvedValue().Kind() == reflect.Int32 ||
- v.getResolvedValue().Kind() == reflect.Int64 ||
- v.getResolvedValue().Kind() == reflect.Uint ||
- v.getResolvedValue().Kind() == reflect.Uint8 ||
- v.getResolvedValue().Kind() == reflect.Uint16 ||
- v.getResolvedValue().Kind() == reflect.Uint32 ||
- v.getResolvedValue().Kind() == reflect.Uint64
-}
-
-// IsNumber checks whether the underlying value is either an integer
-// or a float.
-func (v *Value) IsNumber() bool {
- return v.IsInteger() || v.IsFloat()
-}
-
-// IsTime checks whether the underlying value is a time.Time.
-func (v *Value) IsTime() bool {
- _, ok := v.Interface().(time.Time)
- return ok
-}
-
-// IsNil checks whether the underlying value is NIL
-func (v *Value) IsNil() bool {
- //fmt.Printf("%+v\n", v.getResolvedValue().Type().String())
- return !v.getResolvedValue().IsValid()
-}
-
-// String returns a string for the underlying value. If this value is not
-// of type string, pongo2 tries to convert it. Currently the following
-// types for underlying values are supported:
-//
-// 1. string
-// 2. int/uint (any size)
-// 3. float (any precision)
-// 4. bool
-// 5. time.Time
-// 6. String() will be called on the underlying value if provided
-//
-// NIL values will lead to an empty string. Unsupported types lead
-// to their respective type name.
-func (v *Value) String() string {
- if v.IsNil() {
- return ""
- }
-
- switch v.getResolvedValue().Kind() {
- case reflect.String:
- return v.getResolvedValue().String()
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return strconv.FormatInt(v.getResolvedValue().Int(), 10)
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
- return strconv.FormatUint(v.getResolvedValue().Uint(), 10)
- case reflect.Float32, reflect.Float64:
- return fmt.Sprintf("%f", v.getResolvedValue().Float())
- case reflect.Bool:
- if v.Bool() {
- return "True"
- }
- return "False"
- case reflect.Struct:
- if t, ok := v.Interface().(fmt.Stringer); ok {
- return t.String()
- }
- }
-
- logf("Value.String() not implemented for type: %s\n", v.getResolvedValue().Kind().String())
- return v.getResolvedValue().String()
-}
-
-// Integer returns the underlying value as an integer (converts the underlying
-// value, if necessary). If it's not possible to convert the underlying value,
-// it will return 0.
-func (v *Value) Integer() int {
- switch v.getResolvedValue().Kind() {
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return int(v.getResolvedValue().Int())
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
- return int(v.getResolvedValue().Uint())
- case reflect.Float32, reflect.Float64:
- return int(v.getResolvedValue().Float())
- case reflect.String:
- // Try to convert from string to int (base 10)
- f, err := strconv.ParseFloat(v.getResolvedValue().String(), 64)
- if err != nil {
- return 0
- }
- return int(f)
- default:
- logf("Value.Integer() not available for type: %s\n", v.getResolvedValue().Kind().String())
- return 0
- }
-}
-
-// Float returns the underlying value as a float (converts the underlying
-// value, if necessary). If it's not possible to convert the underlying value,
-// it will return 0.0.
-func (v *Value) Float() float64 {
- switch v.getResolvedValue().Kind() {
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return float64(v.getResolvedValue().Int())
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
- return float64(v.getResolvedValue().Uint())
- case reflect.Float32, reflect.Float64:
- return v.getResolvedValue().Float()
- case reflect.String:
- // Try to convert from string to float64 (base 10)
- f, err := strconv.ParseFloat(v.getResolvedValue().String(), 64)
- if err != nil {
- return 0.0
- }
- return f
- default:
- logf("Value.Float() not available for type: %s\n", v.getResolvedValue().Kind().String())
- return 0.0
- }
-}
-
-// Bool returns the underlying value as bool. If the value is not bool, false
-// will always be returned. If you're looking for true/false-evaluation of the
-// underlying value, have a look on the IsTrue()-function.
-func (v *Value) Bool() bool {
- switch v.getResolvedValue().Kind() {
- case reflect.Bool:
- return v.getResolvedValue().Bool()
- default:
- logf("Value.Bool() not available for type: %s\n", v.getResolvedValue().Kind().String())
- return false
- }
-}
-
-// Time returns the underlying value as time.Time.
-// If the underlying value is not a time.Time, it returns the zero value of time.Time.
-func (v *Value) Time() time.Time {
- tm, ok := v.Interface().(time.Time)
- if ok {
- return tm
- }
- return time.Time{}
-}
-
-// IsTrue tries to evaluate the underlying value the Pythonic-way:
-//
-// Returns TRUE in one of the following cases:
-//
-// * int != 0
-// * uint != 0
-// * float != 0.0
-// * len(array/chan/map/slice/string) > 0
-// * bool == true
-// * underlying value is a struct
-//
-// Otherwise it always returns FALSE.
-func (v *Value) IsTrue() bool {
- switch v.getResolvedValue().Kind() {
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return v.getResolvedValue().Int() != 0
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
- return v.getResolvedValue().Uint() != 0
- case reflect.Float32, reflect.Float64:
- return v.getResolvedValue().Float() != 0
- case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String:
- return v.getResolvedValue().Len() > 0
- case reflect.Bool:
- return v.getResolvedValue().Bool()
- case reflect.Struct:
- return true // struct instance is always true
- default:
- logf("Value.IsTrue() not available for type: %s\n", v.getResolvedValue().Kind().String())
- return false
- }
-}
-
-// Negate tries to negate the underlying value. It's mainly used for
-// the NOT-operator and in conjunction with a call to
-// return_value.IsTrue() afterwards.
-//
-// Example:
-// AsValue(1).Negate().IsTrue() == false
-func (v *Value) Negate() *Value {
- switch v.getResolvedValue().Kind() {
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
- reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
- if v.Integer() != 0 {
- return AsValue(0)
- }
- return AsValue(1)
- case reflect.Float32, reflect.Float64:
- if v.Float() != 0.0 {
- return AsValue(float64(0.0))
- }
- return AsValue(float64(1.1))
- case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String:
- return AsValue(v.getResolvedValue().Len() == 0)
- case reflect.Bool:
- return AsValue(!v.getResolvedValue().Bool())
- case reflect.Struct:
- return AsValue(false)
- default:
- logf("Value.IsTrue() not available for type: %s\n", v.getResolvedValue().Kind().String())
- return AsValue(true)
- }
-}
-
-// Len returns the length for an array, chan, map, slice or string.
-// Otherwise it will return 0.
-func (v *Value) Len() int {
- switch v.getResolvedValue().Kind() {
- case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice:
- return v.getResolvedValue().Len()
- case reflect.String:
- runes := []rune(v.getResolvedValue().String())
- return len(runes)
- default:
- logf("Value.Len() not available for type: %s\n", v.getResolvedValue().Kind().String())
- return 0
- }
-}
-
-// Slice slices an array, slice or string. Otherwise it will
-// return an empty []int.
-func (v *Value) Slice(i, j int) *Value {
- switch v.getResolvedValue().Kind() {
- case reflect.Array, reflect.Slice:
- return AsValue(v.getResolvedValue().Slice(i, j).Interface())
- case reflect.String:
- runes := []rune(v.getResolvedValue().String())
- return AsValue(string(runes[i:j]))
- default:
- logf("Value.Slice() not available for type: %s\n", v.getResolvedValue().Kind().String())
- return AsValue([]int{})
- }
-}
-
-// Index gets the i-th item of an array, slice or string. Otherwise
-// it will return NIL.
-func (v *Value) Index(i int) *Value {
- switch v.getResolvedValue().Kind() {
- case reflect.Array, reflect.Slice:
- if i >= v.Len() {
- return AsValue(nil)
- }
- return AsValue(v.getResolvedValue().Index(i).Interface())
- case reflect.String:
- //return AsValue(v.getResolvedValue().Slice(i, i+1).Interface())
- s := v.getResolvedValue().String()
- runes := []rune(s)
- if i < len(runes) {
- return AsValue(string(runes[i]))
- }
- return AsValue("")
- default:
- logf("Value.Slice() not available for type: %s\n", v.getResolvedValue().Kind().String())
- return AsValue([]int{})
- }
-}
-
-// Contains checks whether the underlying value (which must be of type struct, map,
-// string, array or slice) contains another Value (e.g. used to check
-// whether a struct contains a specific field or a map contains a specific key).
-//
-// Example:
-// AsValue("Hello, World!").Contains(AsValue("World")) == true
-func (v *Value) Contains(other *Value) bool {
- switch v.getResolvedValue().Kind() {
- case reflect.Struct:
- fieldValue := v.getResolvedValue().FieldByName(other.String())
- return fieldValue.IsValid()
- case reflect.Map:
- var mapValue reflect.Value
- switch other.Interface().(type) {
- case int:
- mapValue = v.getResolvedValue().MapIndex(other.getResolvedValue())
- case string:
- mapValue = v.getResolvedValue().MapIndex(other.getResolvedValue())
- default:
- logf("Value.Contains() does not support lookup type '%s'\n", other.getResolvedValue().Kind().String())
- return false
- }
-
- return mapValue.IsValid()
- case reflect.String:
- return strings.Contains(v.getResolvedValue().String(), other.String())
-
- case reflect.Slice, reflect.Array:
- for i := 0; i < v.getResolvedValue().Len(); i++ {
- item := v.getResolvedValue().Index(i)
- if other.Interface() == item.Interface() {
- return true
- }
- }
- return false
-
- default:
- logf("Value.Contains() not available for type: %s\n", v.getResolvedValue().Kind().String())
- return false
- }
-}
-
-// CanSlice checks whether the underlying value is of type array, slice or string.
-// You normally would use CanSlice() before using the Slice() operation.
-func (v *Value) CanSlice() bool {
- switch v.getResolvedValue().Kind() {
- case reflect.Array, reflect.Slice, reflect.String:
- return true
- }
- return false
-}
-
-// Iterate iterates over a map, array, slice or a string. It calls the
-// function's first argument for every value with the following arguments:
-//
-// idx current 0-index
-// count total number of items
-// key *Value for the key or item
-// value *Value (only for maps, the respective value for a specific key)
-//
-// If the underlying value has no items or is not one of the types above,
-// the empty function (function's second argument) will be called.
-func (v *Value) Iterate(fn func(idx, count int, key, value *Value) bool, empty func()) {
- v.IterateOrder(fn, empty, false, false)
-}
-
-// IterateOrder behaves like Value.Iterate, but can iterate through an array/slice/string in reverse. Does
-// not affect the iteration through a map because maps don't have any particular order.
-// However, you can force an order using the `sorted` keyword (and even use `reversed sorted`).
-func (v *Value) IterateOrder(fn func(idx, count int, key, value *Value) bool, empty func(), reverse bool, sorted bool) {
- switch v.getResolvedValue().Kind() {
- case reflect.Map:
- keys := sortedKeys(v.getResolvedValue().MapKeys())
- if sorted {
- if reverse {
- sort.Sort(sort.Reverse(keys))
- } else {
- sort.Sort(keys)
- }
- }
- keyLen := len(keys)
- for idx, key := range keys {
- value := v.getResolvedValue().MapIndex(key)
- if !fn(idx, keyLen, &Value{val: key}, &Value{val: value}) {
- return
- }
- }
- if keyLen == 0 {
- empty()
- }
- return // done
- case reflect.Array, reflect.Slice:
- var items valuesList
-
- itemCount := v.getResolvedValue().Len()
- for i := 0; i < itemCount; i++ {
- items = append(items, &Value{val: v.getResolvedValue().Index(i)})
- }
-
- if sorted {
- if reverse {
- sort.Sort(sort.Reverse(items))
- } else {
- sort.Sort(items)
- }
- } else {
- if reverse {
- for i := 0; i < itemCount/2; i++ {
- items[i], items[itemCount-1-i] = items[itemCount-1-i], items[i]
- }
- }
- }
-
- if len(items) > 0 {
- for idx, item := range items {
- if !fn(idx, itemCount, item, nil) {
- return
- }
- }
- } else {
- empty()
- }
- return // done
- case reflect.String:
- if sorted {
- // TODO(flosch): Handle sorted
- panic("TODO: handle sort for type string")
- }
-
- // TODO(flosch): Not utf8-compatible (utf8-decoding necessary)
- charCount := v.getResolvedValue().Len()
- if charCount > 0 {
- if reverse {
- for i := charCount - 1; i >= 0; i-- {
- if !fn(i, charCount, &Value{val: v.getResolvedValue().Slice(i, i+1)}, nil) {
- return
- }
- }
- } else {
- for i := 0; i < charCount; i++ {
- if !fn(i, charCount, &Value{val: v.getResolvedValue().Slice(i, i+1)}, nil) {
- return
- }
- }
- }
- } else {
- empty()
- }
- return // done
- default:
- logf("Value.Iterate() not available for type: %s\n", v.getResolvedValue().Kind().String())
- }
- empty()
-}
-
-// Interface gives you access to the underlying value.
-func (v *Value) Interface() interface{} {
- if v.val.IsValid() {
- return v.val.Interface()
- }
- return nil
-}
-
-// EqualValueTo checks whether two values are containing the same value or object.
-func (v *Value) EqualValueTo(other *Value) bool {
- // comparison of uint with int fails using .Interface()-comparison (see issue #64)
- if v.IsInteger() && other.IsInteger() {
- return v.Integer() == other.Integer()
- }
- if v.IsTime() && other.IsTime() {
- return v.Time().Equal(other.Time())
- }
- return v.Interface() == other.Interface()
-}
-
-type sortedKeys []reflect.Value
-
-func (sk sortedKeys) Len() int {
- return len(sk)
-}
-
-func (sk sortedKeys) Less(i, j int) bool {
- vi := &Value{val: sk[i]}
- vj := &Value{val: sk[j]}
- switch {
- case vi.IsInteger() && vj.IsInteger():
- return vi.Integer() < vj.Integer()
- case vi.IsFloat() && vj.IsFloat():
- return vi.Float() < vj.Float()
- default:
- return vi.String() < vj.String()
- }
-}
-
-func (sk sortedKeys) Swap(i, j int) {
- sk[i], sk[j] = sk[j], sk[i]
-}
-
-type valuesList []*Value
-
-func (vl valuesList) Len() int {
- return len(vl)
-}
-
-func (vl valuesList) Less(i, j int) bool {
- vi := vl[i]
- vj := vl[j]
- switch {
- case vi.IsInteger() && vj.IsInteger():
- return vi.Integer() < vj.Integer()
- case vi.IsFloat() && vj.IsFloat():
- return vi.Float() < vj.Float()
- default:
- return vi.String() < vj.String()
- }
-}
-
-func (vl valuesList) Swap(i, j int) {
- vl[i], vl[j] = vl[j], vl[i]
-}
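
For reference, the removed Value type wraps reflect.Value with Python-style truthiness, rune-aware string indexing, and an EqualValueTo that special-cases integers (see the issue #64 note above) and time.Time. Sketch:

```go
package main

import (
	"fmt"

	"github.com/flosch/pongo2"
)

func main() {
	v := pongo2.AsValue([]string{"a", "b"})
	fmt.Println(v.IsTrue(), v.Len()) // true 2 — non-empty slices are truthy

	// EqualValueTo compares integers numerically instead of via interface{}
	// equality, so differently-typed integers still match.
	fmt.Println(pongo2.AsValue(42).EqualValueTo(pongo2.AsValue(uint8(42)))) // true
}
```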
diff --git a/vendor/github.com/flosch/pongo2/variable.go b/vendor/github.com/flosch/pongo2/variable.go
deleted file mode 100644
index 25e2af40..00000000
--- a/vendor/github.com/flosch/pongo2/variable.go
+++ /dev/null
@@ -1,693 +0,0 @@
-package pongo2
-
-import (
- "fmt"
- "reflect"
- "strconv"
- "strings"
-)
-
-const (
- varTypeInt = iota
- varTypeIdent
-)
-
-var (
- typeOfValuePtr = reflect.TypeOf(new(Value))
- typeOfExecCtxPtr = reflect.TypeOf(new(ExecutionContext))
-)
-
-type variablePart struct {
- typ int
- s string
- i int
-
- isFunctionCall bool
- callingArgs []functionCallArgument // needed for a function call, represents all argument nodes (INode supports nested function calls)
-}
-
-type functionCallArgument interface {
- Evaluate(*ExecutionContext) (*Value, *Error)
-}
-
-// TODO: Add location tokens
-type stringResolver struct {
- locationToken *Token
- val string
-}
-
-type intResolver struct {
- locationToken *Token
- val int
-}
-
-type floatResolver struct {
- locationToken *Token
- val float64
-}
-
-type boolResolver struct {
- locationToken *Token
- val bool
-}
-
-type variableResolver struct {
- locationToken *Token
-
- parts []*variablePart
-}
-
-type nodeFilteredVariable struct {
- locationToken *Token
-
- resolver IEvaluator
- filterChain []*filterCall
-}
-
-type nodeVariable struct {
- locationToken *Token
- expr IEvaluator
-}
-
-type executionCtxEval struct{}
-
-func (v *nodeFilteredVariable) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- value, err := v.Evaluate(ctx)
- if err != nil {
- return err
- }
- writer.WriteString(value.String())
- return nil
-}
-
-func (vr *variableResolver) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- value, err := vr.Evaluate(ctx)
- if err != nil {
- return err
- }
- writer.WriteString(value.String())
- return nil
-}
-
-func (s *stringResolver) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- value, err := s.Evaluate(ctx)
- if err != nil {
- return err
- }
- writer.WriteString(value.String())
- return nil
-}
-
-func (i *intResolver) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- value, err := i.Evaluate(ctx)
- if err != nil {
- return err
- }
- writer.WriteString(value.String())
- return nil
-}
-
-func (f *floatResolver) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- value, err := f.Evaluate(ctx)
- if err != nil {
- return err
- }
- writer.WriteString(value.String())
- return nil
-}
-
-func (b *boolResolver) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- value, err := b.Evaluate(ctx)
- if err != nil {
- return err
- }
- writer.WriteString(value.String())
- return nil
-}
-
-func (v *nodeFilteredVariable) GetPositionToken() *Token {
- return v.locationToken
-}
-
-func (vr *variableResolver) GetPositionToken() *Token {
- return vr.locationToken
-}
-
-func (s *stringResolver) GetPositionToken() *Token {
- return s.locationToken
-}
-
-func (i *intResolver) GetPositionToken() *Token {
- return i.locationToken
-}
-
-func (f *floatResolver) GetPositionToken() *Token {
- return f.locationToken
-}
-
-func (b *boolResolver) GetPositionToken() *Token {
- return b.locationToken
-}
-
-func (s *stringResolver) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
- return AsValue(s.val), nil
-}
-
-func (i *intResolver) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
- return AsValue(i.val), nil
-}
-
-func (f *floatResolver) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
- return AsValue(f.val), nil
-}
-
-func (b *boolResolver) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
- return AsValue(b.val), nil
-}
-
-func (s *stringResolver) FilterApplied(name string) bool {
- return false
-}
-
-func (i *intResolver) FilterApplied(name string) bool {
- return false
-}
-
-func (f *floatResolver) FilterApplied(name string) bool {
- return false
-}
-
-func (b *boolResolver) FilterApplied(name string) bool {
- return false
-}
-
-func (nv *nodeVariable) FilterApplied(name string) bool {
- return nv.expr.FilterApplied(name)
-}
-
-func (nv *nodeVariable) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- value, err := nv.expr.Evaluate(ctx)
- if err != nil {
- return err
- }
-
- if !nv.expr.FilterApplied("safe") && !value.safe && value.IsString() && ctx.Autoescape {
- // apply escape filter
- value, err = filters["escape"](value, nil)
- if err != nil {
- return err
- }
- }
-
- writer.WriteString(value.String())
- return nil
-}
-
-func (executionCtxEval) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
- return AsValue(ctx), nil
-}
-
-func (vr *variableResolver) FilterApplied(name string) bool {
- return false
-}
-
-func (vr *variableResolver) String() string {
- parts := make([]string, 0, len(vr.parts))
- for _, p := range vr.parts {
- switch p.typ {
- case varTypeInt:
- parts = append(parts, strconv.Itoa(p.i))
- case varTypeIdent:
- parts = append(parts, p.s)
- default:
- panic("unimplemented")
- }
- }
- return strings.Join(parts, ".")
-}
-
-func (vr *variableResolver) resolve(ctx *ExecutionContext) (*Value, error) {
- var current reflect.Value
- var isSafe bool
-
- for idx, part := range vr.parts {
- if idx == 0 {
- // We're looking up the first part of the variable.
- // First we're having a look in our private
- // context (e. g. information provided by tags, like the forloop)
- val, inPrivate := ctx.Private[vr.parts[0].s]
- if !inPrivate {
- // Nothing found? Then have a final lookup in the public context
- val = ctx.Public[vr.parts[0].s]
- }
- current = reflect.ValueOf(val) // Get the initial value
- } else {
- // Next parts, resolve it from current
-
- // Before resolving the pointer, let's see if we have a method to call
- // Problem with resolving the pointer is we're changing the receiver
- isFunc := false
- if part.typ == varTypeIdent {
- funcValue := current.MethodByName(part.s)
- if funcValue.IsValid() {
- current = funcValue
- isFunc = true
- }
- }
-
- if !isFunc {
- // If current a pointer, resolve it
- if current.Kind() == reflect.Ptr {
- current = current.Elem()
- if !current.IsValid() {
- // Value is not valid (anymore)
- return AsValue(nil), nil
- }
- }
-
- // Look up which part must be called now
- switch part.typ {
- case varTypeInt:
- // Calling an index is only possible for:
- // * slices/arrays/strings
- switch current.Kind() {
- case reflect.String, reflect.Array, reflect.Slice:
- if part.i >= 0 && current.Len() > part.i {
- current = current.Index(part.i)
- } else {
- // In Django, exceeding the length of a list is just empty.
- return AsValue(nil), nil
- }
- default:
- return nil, fmt.Errorf("Can't access an index on type %s (variable %s)",
- current.Kind().String(), vr.String())
- }
- case varTypeIdent:
- // debugging:
- // fmt.Printf("now = %s (kind: %s)\n", part.s, current.Kind().String())
-
- // Calling a field or key
- switch current.Kind() {
- case reflect.Struct:
- current = current.FieldByName(part.s)
- case reflect.Map:
- current = current.MapIndex(reflect.ValueOf(part.s))
- default:
- return nil, fmt.Errorf("Can't access a field by name on type %s (variable %s)",
- current.Kind().String(), vr.String())
- }
- default:
- panic("unimplemented")
- }
- }
- }
-
- if !current.IsValid() {
- // Value is not valid (anymore)
- return AsValue(nil), nil
- }
-
- // If current is a reflect.ValueOf(pongo2.Value), then unpack it
- // Happens in function calls (as a return value) or by injecting
- // into the execution context (e.g. in a for-loop)
- if current.Type() == typeOfValuePtr {
- tmpValue := current.Interface().(*Value)
- current = tmpValue.val
- isSafe = tmpValue.safe
- }
-
- // Check whether this is an interface and resolve it where required
- if current.Kind() == reflect.Interface {
- current = reflect.ValueOf(current.Interface())
- }
-
- // Check if the part is a function call
- if part.isFunctionCall || current.Kind() == reflect.Func {
- // Check for callable
- if current.Kind() != reflect.Func {
- return nil, fmt.Errorf("'%s' is not a function (it is %s)", vr.String(), current.Kind().String())
- }
-
- // Check for correct function syntax and types
- // func(*Value, ...) *Value
- t := current.Type()
- currArgs := part.callingArgs
-
- // If an implicit ExecCtx is needed
- if t.NumIn() > 0 && t.In(0) == typeOfExecCtxPtr {
- currArgs = append([]functionCallArgument{executionCtxEval{}}, currArgs...)
- }
-
- // Input arguments
- if len(currArgs) != t.NumIn() && !(len(currArgs) >= t.NumIn()-1 && t.IsVariadic()) {
- return nil,
- fmt.Errorf("Function input argument count (%d) of '%s' must be equal to the calling argument count (%d).",
- t.NumIn(), vr.String(), len(currArgs))
- }
-
- // Output arguments
- if t.NumOut() != 1 && t.NumOut() != 2 {
- return nil, fmt.Errorf("'%s' must have exactly 1 or 2 output arguments, the second argument must be of type error", vr.String())
- }
-
- // Evaluate all parameters
- var parameters []reflect.Value
-
- numArgs := t.NumIn()
- isVariadic := t.IsVariadic()
- var fnArg reflect.Type
-
- for idx, arg := range currArgs {
- pv, err := arg.Evaluate(ctx)
- if err != nil {
- return nil, err
- }
-
- if isVariadic {
- if idx >= t.NumIn()-1 {
- fnArg = t.In(numArgs - 1).Elem()
- } else {
- fnArg = t.In(idx)
- }
- } else {
- fnArg = t.In(idx)
- }
-
- if fnArg != typeOfValuePtr {
-			// If the function's argument is not a *pongo2.Value, we have to check whether the input argument has the same type as the function's argument
- if !isVariadic {
- if fnArg != reflect.TypeOf(pv.Interface()) && fnArg.Kind() != reflect.Interface {
- return nil, fmt.Errorf("Function input argument %d of '%s' must be of type %s or *pongo2.Value (not %T).",
- idx, vr.String(), fnArg.String(), pv.Interface())
- }
- // Function's argument has another type, using the interface-value
- parameters = append(parameters, reflect.ValueOf(pv.Interface()))
- } else {
- if fnArg != reflect.TypeOf(pv.Interface()) && fnArg.Kind() != reflect.Interface {
- return nil, fmt.Errorf("Function variadic input argument of '%s' must be of type %s or *pongo2.Value (not %T).",
- vr.String(), fnArg.String(), pv.Interface())
- }
- // Function's argument has another type, using the interface-value
- parameters = append(parameters, reflect.ValueOf(pv.Interface()))
- }
- } else {
- // Function's argument is a *pongo2.Value
- parameters = append(parameters, reflect.ValueOf(pv))
- }
- }
-
- // Check if any of the values are invalid
- for _, p := range parameters {
- if p.Kind() == reflect.Invalid {
- return nil, fmt.Errorf("Calling a function using an invalid parameter")
- }
- }
-
- // Call it and get first return parameter back
- values := current.Call(parameters)
- rv := values[0]
- if t.NumOut() == 2 {
- e := values[1].Interface()
- if e != nil {
- err, ok := e.(error)
- if !ok {
- return nil, fmt.Errorf("The second return value is not an error")
- }
- if err != nil {
- return nil, err
- }
- }
- }
-
- if rv.Type() != typeOfValuePtr {
- current = reflect.ValueOf(rv.Interface())
- } else {
- // Return the function call value
- current = rv.Interface().(*Value).val
- isSafe = rv.Interface().(*Value).safe
- }
- }
-
- if !current.IsValid() {
- // Value is not valid (e. g. NIL value)
- return AsValue(nil), nil
- }
- }
-
- return &Value{val: current, safe: isSafe}, nil
-}
-
-func (vr *variableResolver) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
- value, err := vr.resolve(ctx)
- if err != nil {
- return AsValue(nil), ctx.Error(err.Error(), vr.locationToken)
- }
- return value, nil
-}
-
-func (v *nodeFilteredVariable) FilterApplied(name string) bool {
- for _, filter := range v.filterChain {
- if filter.name == name {
- return true
- }
- }
- return false
-}
-
-func (v *nodeFilteredVariable) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
- value, err := v.resolver.Evaluate(ctx)
- if err != nil {
- return nil, err
- }
-
- for _, filter := range v.filterChain {
- value, err = filter.Execute(value, ctx)
- if err != nil {
- return nil, err
- }
- }
-
- return value, nil
-}
-
-// IDENT | IDENT.(IDENT|NUMBER)...
-func (p *Parser) parseVariableOrLiteral() (IEvaluator, *Error) {
- t := p.Current()
-
- if t == nil {
- return nil, p.Error("Unexpected EOF, expected a number, string, keyword or identifier.", p.lastToken)
- }
-
- // Is first part a number or a string, there's nothing to resolve (because there's only to return the value then)
- switch t.Typ {
- case TokenNumber:
- p.Consume()
-
- // One exception to the rule that we don't have float64 literals is at the beginning
- // of an expression (or a variable name). Since we know we started with an integer
- // which can't obviously be a variable name, we can check whether the first number
- // is followed by dot (and then a number again). If so we're converting it to a float64.
-
- if p.Match(TokenSymbol, ".") != nil {
- // float64
- t2 := p.MatchType(TokenNumber)
- if t2 == nil {
- return nil, p.Error("Expected a number after the '.'.", nil)
- }
- f, err := strconv.ParseFloat(fmt.Sprintf("%s.%s", t.Val, t2.Val), 64)
- if err != nil {
- return nil, p.Error(err.Error(), t)
- }
- fr := &floatResolver{
- locationToken: t,
- val: f,
- }
- return fr, nil
- }
- i, err := strconv.Atoi(t.Val)
- if err != nil {
- return nil, p.Error(err.Error(), t)
- }
- nr := &intResolver{
- locationToken: t,
- val: i,
- }
- return nr, nil
-
- case TokenString:
- p.Consume()
- sr := &stringResolver{
- locationToken: t,
- val: t.Val,
- }
- return sr, nil
- case TokenKeyword:
- p.Consume()
- switch t.Val {
- case "true":
- br := &boolResolver{
- locationToken: t,
- val: true,
- }
- return br, nil
- case "false":
- br := &boolResolver{
- locationToken: t,
- val: false,
- }
- return br, nil
- default:
- return nil, p.Error("This keyword is not allowed here.", nil)
- }
- }
-
- resolver := &variableResolver{
- locationToken: t,
- }
-
- // First part of a variable MUST be an identifier
- if t.Typ != TokenIdentifier {
- return nil, p.Error("Expected either a number, string, keyword or identifier.", t)
- }
-
- resolver.parts = append(resolver.parts, &variablePart{
- typ: varTypeIdent,
- s: t.Val,
- })
-
- p.Consume() // we consumed the first identifier of the variable name
-
-variableLoop:
- for p.Remaining() > 0 {
- t = p.Current()
-
- if p.Match(TokenSymbol, ".") != nil {
- // Next variable part (can be either NUMBER or IDENT)
- t2 := p.Current()
- if t2 != nil {
- switch t2.Typ {
- case TokenIdentifier:
- resolver.parts = append(resolver.parts, &variablePart{
- typ: varTypeIdent,
- s: t2.Val,
- })
- p.Consume() // consume: IDENT
- continue variableLoop
- case TokenNumber:
- i, err := strconv.Atoi(t2.Val)
- if err != nil {
- return nil, p.Error(err.Error(), t2)
- }
- resolver.parts = append(resolver.parts, &variablePart{
- typ: varTypeInt,
- i: i,
- })
- p.Consume() // consume: NUMBER
- continue variableLoop
- default:
- return nil, p.Error("This token is not allowed within a variable name.", t2)
- }
- } else {
- // EOF
- return nil, p.Error("Unexpected EOF, expected either IDENTIFIER or NUMBER after DOT.",
- p.lastToken)
- }
- } else if p.Match(TokenSymbol, "(") != nil {
- // Function call
- // FunctionName '(' Comma-separated list of expressions ')'
- part := resolver.parts[len(resolver.parts)-1]
- part.isFunctionCall = true
- argumentLoop:
- for {
- if p.Remaining() == 0 {
- return nil, p.Error("Unexpected EOF, expected function call argument list.", p.lastToken)
- }
-
- if p.Peek(TokenSymbol, ")") == nil {
- // No closing bracket, so we're parsing an expression
- exprArg, err := p.ParseExpression()
- if err != nil {
- return nil, err
- }
- part.callingArgs = append(part.callingArgs, exprArg)
-
- if p.Match(TokenSymbol, ")") != nil {
- // If there's a closing bracket after an expression, we will stop parsing the arguments
- break argumentLoop
- } else {
- // If there's NO closing bracket, there MUST be an comma
- if p.Match(TokenSymbol, ",") == nil {
- return nil, p.Error("Missing comma or closing bracket after argument.", nil)
- }
- }
- } else {
- // We got a closing bracket, so stop parsing arguments
- p.Consume()
- break argumentLoop
- }
-
- }
- // We're done parsing the function call, next variable part
- continue variableLoop
- }
-
- // No dot or function call? Then we're done with the variable parsing
- break
- }
-
- return resolver, nil
-}
-
-func (p *Parser) parseVariableOrLiteralWithFilter() (*nodeFilteredVariable, *Error) {
- v := &nodeFilteredVariable{
- locationToken: p.Current(),
- }
-
- // Parse the variable name
- resolver, err := p.parseVariableOrLiteral()
- if err != nil {
- return nil, err
- }
- v.resolver = resolver
-
- // Parse all the filters
-filterLoop:
- for p.Match(TokenSymbol, "|") != nil {
- // Parse one single filter
- filter, err := p.parseFilter()
- if err != nil {
- return nil, err
- }
-
- // Check sandbox filter restriction
- if _, isBanned := p.template.set.bannedFilters[filter.name]; isBanned {
- return nil, p.Error(fmt.Sprintf("Usage of filter '%s' is not allowed (sandbox restriction active).", filter.name), nil)
- }
-
- v.filterChain = append(v.filterChain, filter)
-
- continue filterLoop
- }
-
- return v, nil
-}
-
-func (p *Parser) parseVariableElement() (INode, *Error) {
- node := &nodeVariable{
- locationToken: p.Current(),
- }
-
- p.Consume() // consume '{{'
-
- expr, err := p.ParseExpression()
- if err != nil {
- return nil, err
- }
- node.expr = expr
-
- if p.Match(TokenSymbol, "}}") == nil {
- return nil, p.Error("'}}' expected", nil)
- }
-
- return node, nil
-}
diff --git a/vendor/github.com/go-logr/logr/.golangci.yaml b/vendor/github.com/go-logr/logr/.golangci.yaml
index 94ff801d..0ed62c1a 100644
--- a/vendor/github.com/go-logr/logr/.golangci.yaml
+++ b/vendor/github.com/go-logr/logr/.golangci.yaml
@@ -1,29 +1,28 @@
+version: "2"
+
run:
timeout: 1m
tests: true
linters:
- disable-all: true
- enable:
+ default: none
+ enable: # please keep this alphabetized
+ - asasalint
- asciicheck
- - deadcode
+ - copyloopvar
+ - dupl
- errcheck
- forcetypeassert
+ - goconst
- gocritic
- - gofmt
- - goimports
- - gosimple
- govet
- ineffassign
- misspell
+ - musttag
- revive
- staticcheck
- - structcheck
- - typecheck
- unused
- - varcheck
issues:
- exclude-use-default: false
max-issues-per-linter: 0
max-same-issues: 10
diff --git a/vendor/github.com/go-logr/logr/README.md b/vendor/github.com/go-logr/logr/README.md
index ab593118..7c7f0c69 100644
--- a/vendor/github.com/go-logr/logr/README.md
+++ b/vendor/github.com/go-logr/logr/README.md
@@ -1,6 +1,8 @@
# A minimal logging API for Go
[![Go Reference](https://pkg.go.dev/badge/github.com/go-logr/logr.svg)](https://pkg.go.dev/github.com/go-logr/logr)
+[![Go Report Card](https://goreportcard.com/badge/github.com/go-logr/logr)](https://goreportcard.com/report/github.com/go-logr/logr)
+[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/go-logr/logr/badge)](https://securityscorecards.dev/viewer/?platform=github.com&org=go-logr&repo=logr)
logr offers an(other) opinion on how Go programs and libraries can do logging
without becoming coupled to a particular logging implementation. This is not
@@ -73,6 +75,30 @@ received:
If the Go standard library had defined an interface for logging, this project
probably would not be needed. Alas, here we are.
+When the Go developers started developing such an interface with
+[slog](https://github.com/golang/go/issues/56345), they adopted some of the
+logr design but also left out some parts and changed others:
+
+| Feature | logr | slog |
+|---------|------|------|
+| High-level API | `Logger` (passed by value) | `Logger` (passed by [pointer](https://github.com/golang/go/issues/59126)) |
+| Low-level API | `LogSink` | `Handler` |
+| Stack unwinding | done by `LogSink` | done by `Logger` |
+| Skipping helper functions | `WithCallDepth`, `WithCallStackHelper` | [not supported by Logger](https://github.com/golang/go/issues/59145) |
+| Generating a value for logging on demand | `Marshaler` | `LogValuer` |
+| Log levels | >= 0, higher meaning "less important" | positive and negative, with 0 for "info" and higher meaning "more important" |
+| Error log entries | always logged, don't have a verbosity level | normal log entries with level >= `LevelError` |
+| Passing logger via context | `NewContext`, `FromContext` | no API |
+| Adding a name to a logger | `WithName` | no API |
+| Modify verbosity of log entries in a call chain | `V` | no API |
+| Grouping of key/value pairs | not supported | `WithGroup`, `GroupValue` |
+| Pass context for extracting additional values | no API | API variants like `InfoCtx` |
+
+The high-level slog API is explicitly meant to be one of many different APIs
+that can be layered on top of a shared `slog.Handler`. logr is one such
+alternative API, with [interoperability](#slog-interoperability) provided by
+some conversion functions.
+
### Inspiration
Before you consider this package, please read [this blog post by the
@@ -118,6 +144,103 @@ There are implementations for the following logging libraries:
- **github.com/go-kit/log**: [gokitlogr](https://github.com/tonglil/gokitlogr) (also compatible with github.com/go-kit/kit/log since v0.12.0)
- **bytes.Buffer** (writing to a buffer): [bufrlogr](https://github.com/tonglil/buflogr) (useful for ensuring values were logged, like during testing)
+## slog interoperability
+
+Interoperability goes both ways, using the `logr.Logger` API with a `slog.Handler`
+and using the `slog.Logger` API with a `logr.LogSink`. `FromSlogHandler` and
+`ToSlogHandler` convert between a `logr.Logger` and a `slog.Handler`.
+As usual, `slog.New` can be used to wrap such a `slog.Handler` in the high-level
+slog API.
+
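+For a sense of how little glue is needed, here is a minimal sketch of both
+conversions (requires Go >= 1.21; the variable names are illustrative):
+
+```go
+package main
+
+import (
+	"log/slog"
+	"os"
+
+	"github.com/go-logr/logr"
+)
+
+func main() {
+	// slog -> logr: wrap an existing slog.Handler in the logr API.
+	handler := slog.NewJSONHandler(os.Stdout, nil)
+	logrLogger := logr.FromSlogHandler(handler)
+	logrLogger.Info("hello", "answer", 42)
+
+	// logr -> slog: wrap that logr.Logger back into the slog API.
+	slogLogger := slog.New(logr.ToSlogHandler(logrLogger))
+	slogLogger.Info("world", "answer", 42)
+}
+```
+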
+### Using a `logr.LogSink` as backend for slog
+
+Ideally, a logr sink implementation should support both logr and slog by
+implementing both the normal logr interface(s) and `SlogSink`. Because
+of a conflict in the parameters of the common `Enabled` method, it is [not
+possible to implement both slog.Handler and logr.Sink in the same
+type](https://github.com/golang/go/issues/59110).
+
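+A sink can make that dual support explicit with compile-time assertions, much
+as funcr does internally for its own sink (`mySink` here is a placeholder for
+your type):
+
+```go
+var (
+	_ logr.LogSink  = &mySink{}
+	_ logr.SlogSink = &mySink{}
+)
+```
+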
+If both are supported, log calls can go from the high-level APIs to the backend
+without the need to convert parameters. `FromSlogHandler` and `ToSlogHandler` can
+convert back and forth without adding additional wrappers, with one exception:
+when `Logger.V` was used to adjust the verbosity for a `slog.Handler`, then
+`ToSlogHandler` has to use a wrapper which adjusts the verbosity for future
+log calls.
+
+Such an implementation should also support values that implement specific
+interfaces from both packages for logging (`logr.Marshaler`, `slog.LogValuer`,
+`slog.GroupValue`). logr does not convert those.
+
+Not supporting slog has several drawbacks:
+- Recording source code locations works correctly if the handler gets called
+ through `slog.Logger`, but may be wrong in other cases. That's because a
+ `logr.Sink` does its own stack unwinding instead of using the program counter
+ provided by the high-level API.
+- slog levels <= 0 can be mapped to logr levels by negating the level without a
+ loss of information. But all slog levels > 0 (e.g. `slog.LevelWarning` as
+ used by `slog.Logger.Warn`) must be mapped to 0 before calling the sink
+ because logr does not support "more important than info" levels.
+- The slog group concept is supported by prefixing each key in a key/value
+ pair with the group names, separated by a dot. For structured output like
+ JSON it would be better to group the key/value pairs inside an object.
+- Special slog values and interfaces don't work as expected.
+- The overhead is likely to be higher.
+
+These drawbacks are severe enough that applications using a mixture of slog and
+logr should switch to a different backend.
+
+### Using a `slog.Handler` as backend for logr
+
+Using a plain `slog.Handler` without support for logr works better than the
+other direction:
+- All logr verbosity levels can be mapped 1:1 to their corresponding slog level
+ by negating them.
+- Stack unwinding is done by the `SlogSink` and the resulting program
+ counter is passed to the `slog.Handler`.
+- Names added via `Logger.WithName` are gathered and recorded in an additional
+ attribute with `logger` as key and the names separated by slash as value.
+- `Logger.Error` is turned into a log record with `slog.LevelError` as level
+ and an additional attribute with `err` as key, if an error was provided.
+
+The main drawback is that `logr.Marshaler` will not be supported. Types should
+ideally support both `logr.Marshaler` and `slog.LogValuer`. If compatibility
+with logr implementations without slog support is not important, then
+`slog.LogValuer` is sufficient.
+
+### Context support for slog
+
+Storing a logger in a `context.Context` is not supported by
+slog. `NewContextWithSlogLogger` and `FromContextAsSlogLogger` can be
+used to fill this gap. They store and retrieve a `slog.Logger` pointer
+under the same context key that is also used by `NewContext` and
+`FromContext` for a `logr.Logger` value.
+
+When `NewContextWithSlogLogger` is followed by `FromContext`, the latter will
+automatically convert the `slog.Logger` to a
+`logr.Logger`. `FromContextAsSlogLogger` does the same for the other direction.
+
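+A minimal sketch of that automatic conversion (`context` and `log/slog`
+imports assumed; variable names are illustrative):
+
+```go
+ctx := logr.NewContextWithSlogLogger(context.Background(), slog.Default())
+
+// Retrieval converts in either direction as needed.
+logrLogger, _ := logr.FromContext(ctx)          // logr.Logger
+slogLogger := logr.FromContextAsSlogLogger(ctx) // *slog.Logger
+```
+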
+With this approach, binaries which use either slog or logr are as efficient as
+possible with no unnecessary allocations. This is also why the API stores a
+`slog.Logger` pointer: when storing a `slog.Handler`, creating a `slog.Logger`
+on retrieval would need to allocate one.
+
+The downside is that switching back and forth needs more allocations. Because
+logr is the API that is already in use by different packages, in particular
+Kubernetes, the recommendation is to use the `logr.Logger` API in code which
+uses contextual logging.
+
+An alternative to adding values to a logger and storing that logger in the
+context is to store the values in the context and to configure a logging
+backend to extract those values when emitting log entries. This only works when
+log calls are passed the context, which is not supported by the logr API.
+
+With the slog API, it is possible, but not
+required. https://github.com/veqryn/slog-context is a package for slog which
+provides additional support code for this approach. It also contains wrappers
+for the context functions in logr, so developers who prefer to not use the logr
+APIs directly can use those instead and the resulting code will still be
+interoperable with logr.
+
## FAQ
### Conceptual
@@ -241,7 +364,9 @@ Otherwise, you can start out with `0` as "you always want to see this",
Then gradually choose levels in between as you need them, working your way
down from 10 (for debug and trace style logs) and up from 1 (for chattier
-info-type logs.)
+info-type logs). For reference, slog pre-defines -4 for debug logs
+(corresponds to 4 in logr), which matches what is
+[recommended for Kubernetes](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md#what-method-to-use).
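+
+For example, with a logr `logger` backed by a slog handler (a sketch; the
+`logger` name is illustrative):
+
+```go
+logger.V(4).Info("debugging details") // roughly slog.LevelDebug (-4)
+logger.V(0).Info("normal operation")  // roughly slog.LevelInfo (0)
+```
+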
#### How do I choose my keys?
diff --git a/vendor/github.com/go-logr/logr/SECURITY.md b/vendor/github.com/go-logr/logr/SECURITY.md
new file mode 100644
index 00000000..1ca756fc
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/SECURITY.md
@@ -0,0 +1,18 @@
+# Security Policy
+
+If you have discovered a security vulnerability in this project, please report it
+privately. **Do not disclose it as a public issue.** This gives us time to work with you
+to fix the issue before public exposure, reducing the chance that the exploit will be
+used before a patch is released.
+
+You may submit the report in the following ways:
+
+- send an email to go-logr-security@googlegroups.com
+- send us a [private vulnerability report](https://github.com/go-logr/logr/security/advisories/new)
+
+Please provide the following information in your report:
+
+- A description of the vulnerability and its impact
+- How to reproduce the issue
+
+We ask that you give us 90 days to work on a fix before public exposure.
diff --git a/vendor/gopkg.in/yaml.v2/NOTICE b/vendor/github.com/go-logr/logr/context.go
similarity index 51%
rename from vendor/gopkg.in/yaml.v2/NOTICE
rename to vendor/github.com/go-logr/logr/context.go
index 866d74a7..de8bcc3a 100644
--- a/vendor/gopkg.in/yaml.v2/NOTICE
+++ b/vendor/github.com/go-logr/logr/context.go
@@ -1,4 +1,5 @@
-Copyright 2011-2016 Canonical Ltd.
+/*
+Copyright 2023 The logr Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -11,3 +12,22 @@ distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
+*/
+
+package logr
+
+// contextKey is how we find Loggers in a context.Context. With Go < 1.21,
+// the value is always a Logger value. With Go >= 1.21, the value can be a
+// Logger value or a slog.Logger pointer.
+type contextKey struct{}
+
+// notFoundError exists to carry an IsNotFound method.
+type notFoundError struct{}
+
+func (notFoundError) Error() string {
+ return "no logr.Logger was present"
+}
+
+func (notFoundError) IsNotFound() bool {
+ return true
+}
diff --git a/vendor/github.com/go-logr/logr/context_noslog.go b/vendor/github.com/go-logr/logr/context_noslog.go
new file mode 100644
index 00000000..f012f9a1
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/context_noslog.go
@@ -0,0 +1,49 @@
+//go:build !go1.21
+// +build !go1.21
+
+/*
+Copyright 2019 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logr
+
+import (
+ "context"
+)
+
+// FromContext returns a Logger from ctx or an error if no Logger is found.
+func FromContext(ctx context.Context) (Logger, error) {
+ if v, ok := ctx.Value(contextKey{}).(Logger); ok {
+ return v, nil
+ }
+
+ return Logger{}, notFoundError{}
+}
+
+// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this
+// returns a Logger that discards all log messages.
+func FromContextOrDiscard(ctx context.Context) Logger {
+ if v, ok := ctx.Value(contextKey{}).(Logger); ok {
+ return v
+ }
+
+ return Discard()
+}
+
+// NewContext returns a new Context, derived from ctx, which carries the
+// provided Logger.
+func NewContext(ctx context.Context, logger Logger) context.Context {
+ return context.WithValue(ctx, contextKey{}, logger)
+}
diff --git a/vendor/github.com/go-logr/logr/context_slog.go b/vendor/github.com/go-logr/logr/context_slog.go
new file mode 100644
index 00000000..065ef0b8
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/context_slog.go
@@ -0,0 +1,83 @@
+//go:build go1.21
+// +build go1.21
+
+/*
+Copyright 2019 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logr
+
+import (
+ "context"
+ "fmt"
+ "log/slog"
+)
+
+// FromContext returns a Logger from ctx or an error if no Logger is found.
+func FromContext(ctx context.Context) (Logger, error) {
+ v := ctx.Value(contextKey{})
+ if v == nil {
+ return Logger{}, notFoundError{}
+ }
+
+ switch v := v.(type) {
+ case Logger:
+ return v, nil
+ case *slog.Logger:
+ return FromSlogHandler(v.Handler()), nil
+ default:
+ // Not reached.
+ panic(fmt.Sprintf("unexpected value type for logr context key: %T", v))
+ }
+}
+
+// FromContextAsSlogLogger returns a slog.Logger from ctx or nil if no such Logger is found.
+func FromContextAsSlogLogger(ctx context.Context) *slog.Logger {
+ v := ctx.Value(contextKey{})
+ if v == nil {
+ return nil
+ }
+
+ switch v := v.(type) {
+ case Logger:
+ return slog.New(ToSlogHandler(v))
+ case *slog.Logger:
+ return v
+ default:
+ // Not reached.
+ panic(fmt.Sprintf("unexpected value type for logr context key: %T", v))
+ }
+}
+
+// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this
+// returns a Logger that discards all log messages.
+func FromContextOrDiscard(ctx context.Context) Logger {
+ if logger, err := FromContext(ctx); err == nil {
+ return logger
+ }
+ return Discard()
+}
+
+// NewContext returns a new Context, derived from ctx, which carries the
+// provided Logger.
+func NewContext(ctx context.Context, logger Logger) context.Context {
+ return context.WithValue(ctx, contextKey{}, logger)
+}
+
+// NewContextWithSlogLogger returns a new Context, derived from ctx, which carries the
+// provided slog.Logger.
+func NewContextWithSlogLogger(ctx context.Context, logger *slog.Logger) context.Context {
+ return context.WithValue(ctx, contextKey{}, logger)
+}
diff --git a/vendor/github.com/go-logr/logr/discard.go b/vendor/github.com/go-logr/logr/discard.go
index 9d92a38f..99fe8be9 100644
--- a/vendor/github.com/go-logr/logr/discard.go
+++ b/vendor/github.com/go-logr/logr/discard.go
@@ -20,35 +20,5 @@ package logr
// used whenever the caller is not interested in the logs. Logger instances
// produced by this function always compare as equal.
func Discard() Logger {
- return Logger{
- level: 0,
- sink: discardLogSink{},
- }
-}
-
-// discardLogSink is a LogSink that discards all messages.
-type discardLogSink struct{}
-
-// Verify that it actually implements the interface
-var _ LogSink = discardLogSink{}
-
-func (l discardLogSink) Init(RuntimeInfo) {
-}
-
-func (l discardLogSink) Enabled(int) bool {
- return false
-}
-
-func (l discardLogSink) Info(int, string, ...interface{}) {
-}
-
-func (l discardLogSink) Error(error, string, ...interface{}) {
-}
-
-func (l discardLogSink) WithValues(...interface{}) LogSink {
- return l
-}
-
-func (l discardLogSink) WithName(string) LogSink {
- return l
+ return New(nil)
}
diff --git a/vendor/github.com/go-logr/logr/funcr/funcr.go b/vendor/github.com/go-logr/logr/funcr/funcr.go
index 7accdb0c..b22c57d7 100644
--- a/vendor/github.com/go-logr/logr/funcr/funcr.go
+++ b/vendor/github.com/go-logr/logr/funcr/funcr.go
@@ -21,13 +21,13 @@ limitations under the License.
// github.com/go-logr/logr.LogSink with output through an arbitrary
// "write" function. See New and NewJSON for details.
//
-// Custom LogSinks
+// # Custom LogSinks
//
// For users who need more control, a funcr.Formatter can be embedded inside
// your own custom LogSink implementation. This is useful when the LogSink
// needs to implement additional methods, for example.
//
-// Formatting
+// # Formatting
//
// This will respect logr.Marshaler, fmt.Stringer, and error interfaces for
// values which are being logged. When rendering a struct, funcr will use Go's
@@ -37,6 +37,7 @@ package funcr
import (
"bytes"
"encoding"
+ "encoding/json"
"fmt"
"path/filepath"
"reflect"
@@ -76,7 +77,7 @@ func newSink(fn func(prefix, args string), formatter Formatter) logr.LogSink {
write: fn,
}
// For skipping fnlogger.Info and fnlogger.Error.
- l.Formatter.AddCallDepth(1)
+ l.AddCallDepth(1) // via Formatter
return l
}
@@ -99,6 +100,11 @@ type Options struct {
// details, see docs for Go's time.Layout.
TimestampFormat string
+ // LogInfoLevel tells funcr what key to use to log the info level.
+ // If not specified, the info level will be logged as "level".
+ // If this is set to "", the info level will not be logged at all.
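+ // A sketch of disabling it entirely (variable names illustrative):
+ //
+ //	noLevel := ""
+ //	opts := funcr.Options{LogInfoLevel: &noLevel}
+ //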
+ LogInfoLevel *string
+
// Verbosity tells funcr which V logs to produce. Higher values enable
// more logs. Info logs at or below this level will be written, while logs
// above this level will be discarded.
@@ -115,17 +121,17 @@ type Options struct {
// Equivalent hooks are offered for key-value pairs saved via
// logr.Logger.WithValues or Formatter.AddValues (see RenderValuesHook) and
// for user-provided pairs (see RenderArgsHook).
- RenderBuiltinsHook func(kvList []interface{}) []interface{}
+ RenderBuiltinsHook func(kvList []any) []any
// RenderValuesHook is the same as RenderBuiltinsHook, except that it is
// only called for key-value pairs saved via logr.Logger.WithValues. See
// RenderBuiltinsHook for more details.
- RenderValuesHook func(kvList []interface{}) []interface{}
+ RenderValuesHook func(kvList []any) []any
// RenderArgsHook is the same as RenderBuiltinsHook, except that it is only
// called for key-value pairs passed directly to Info and Error. See
// RenderBuiltinsHook for more details.
- RenderArgsHook func(kvList []interface{}) []interface{}
+ RenderArgsHook func(kvList []any) []any
// MaxLogDepth tells funcr how many levels of nested fields (e.g. a struct
// that contains a struct, etc.) it may log. Every time it finds a struct,
@@ -158,26 +164,26 @@ type fnlogger struct {
}
func (l fnlogger) WithName(name string) logr.LogSink {
- l.Formatter.AddName(name)
+ l.AddName(name) // via Formatter
return &l
}
-func (l fnlogger) WithValues(kvList ...interface{}) logr.LogSink {
- l.Formatter.AddValues(kvList)
+func (l fnlogger) WithValues(kvList ...any) logr.LogSink {
+ l.AddValues(kvList) // via Formatter
return &l
}
func (l fnlogger) WithCallDepth(depth int) logr.LogSink {
- l.Formatter.AddCallDepth(depth)
+ l.AddCallDepth(depth) // via Formatter
return &l
}
-func (l fnlogger) Info(level int, msg string, kvList ...interface{}) {
+func (l fnlogger) Info(level int, msg string, kvList ...any) {
prefix, args := l.FormatInfo(level, msg, kvList)
l.write(prefix, args)
}
-func (l fnlogger) Error(err error, msg string, kvList ...interface{}) {
+func (l fnlogger) Error(err error, msg string, kvList ...any) {
prefix, args := l.FormatError(err, msg, kvList)
l.write(prefix, args)
}
@@ -212,12 +218,16 @@ func newFormatter(opts Options, outfmt outputFormat) Formatter {
if opts.MaxLogDepth == 0 {
opts.MaxLogDepth = defaultMaxLogDepth
}
+ if opts.LogInfoLevel == nil {
+ opts.LogInfoLevel = new(string)
+ *opts.LogInfoLevel = "level"
+ }
f := Formatter{
outputFormat: outfmt,
prefix: "",
values: nil,
depth: 0,
- opts: opts,
+ opts: &opts,
}
return f
}
@@ -228,10 +238,12 @@ func newFormatter(opts Options, outfmt outputFormat) Formatter {
type Formatter struct {
outputFormat outputFormat
prefix string
- values []interface{}
+ values []any
valuesStr string
depth int
- opts Options
+ opts *Options
+ groupName string // for slog groups
+ groups []groupDef
}
// outputFormat indicates which outputFormat to use.
@@ -244,70 +256,139 @@ const (
outputJSON
)
+// groupDef represents a saved group. The values may be empty, but we don't
+// know if we need to render the group until the final record is rendered.
+type groupDef struct {
+ name string
+ values string
+}
+
// PseudoStruct is a list of key-value pairs that gets logged as a struct.
-type PseudoStruct []interface{}
+type PseudoStruct []any
// render produces a log line, ready to use.
-func (f Formatter) render(builtins, args []interface{}) string {
+func (f Formatter) render(builtins, args []any) string {
// Empirically bytes.Buffer is faster than strings.Builder for this.
buf := bytes.NewBuffer(make([]byte, 0, 1024))
+
if f.outputFormat == outputJSON {
- buf.WriteByte('{')
+ buf.WriteByte('{') // for the whole record
}
+
+ // Render builtins
vals := builtins
if hook := f.opts.RenderBuiltinsHook; hook != nil {
vals = hook(f.sanitize(vals))
}
- f.flatten(buf, vals, false, false) // keys are ours, no need to escape
+ f.flatten(buf, vals, false) // keys are ours, no need to escape
continuing := len(builtins) > 0
- if len(f.valuesStr) > 0 {
- if continuing {
- if f.outputFormat == outputJSON {
- buf.WriteByte(',')
- } else {
- buf.WriteByte(' ')
- }
+
+ // Turn the inner-most group into a string
+ argsStr := func() string {
+ buf := bytes.NewBuffer(make([]byte, 0, 1024))
+
+ vals = args
+ if hook := f.opts.RenderArgsHook; hook != nil {
+ vals = hook(f.sanitize(vals))
}
- continuing = true
- buf.WriteString(f.valuesStr)
+ f.flatten(buf, vals, true) // escape user-provided keys
+
+ return buf.String()
+ }()
+
+ // Render the stack of groups from the inside out.
+ bodyStr := f.renderGroup(f.groupName, f.valuesStr, argsStr)
+ for i := len(f.groups) - 1; i >= 0; i-- {
+ grp := &f.groups[i]
+ if grp.values == "" && bodyStr == "" {
+ // no contents, so we must elide the whole group
+ continue
+ }
+ bodyStr = f.renderGroup(grp.name, grp.values, bodyStr)
}
- vals = args
- if hook := f.opts.RenderArgsHook; hook != nil {
- vals = hook(f.sanitize(vals))
+
+ if bodyStr != "" {
+ if continuing {
+ buf.WriteByte(f.comma())
+ }
+ buf.WriteString(bodyStr)
}
- f.flatten(buf, vals, continuing, true) // escape user-provided keys
+
if f.outputFormat == outputJSON {
- buf.WriteByte('}')
+ buf.WriteByte('}') // for the whole record
}
+
return buf.String()
}
-// flatten renders a list of key-value pairs into a buffer. If continuing is
-// true, it assumes that the buffer has previous values and will emit a
-// separator (which depends on the output format) before the first pair it
-// writes. If escapeKeys is true, the keys are assumed to have
-// non-JSON-compatible characters in them and must be evaluated for escapes.
+// renderGroup returns a string representation of the named group with rendered
+// values and args. If the name is empty, this will return the values and args,
+// joined. If the name is not empty, this will return a single key-value pair,
+// where the value is a grouping of the values and args. If the values and
+// args are both empty, this will return an empty string, even if the name was
+// specified.
+func (f Formatter) renderGroup(name string, values string, args string) string {
+ buf := bytes.NewBuffer(make([]byte, 0, 1024))
+
+ needClosingBrace := false
+ if name != "" && (values != "" || args != "") {
+ buf.WriteString(f.quoted(name, true)) // escape user-provided keys
+ buf.WriteByte(f.colon())
+ buf.WriteByte('{')
+ needClosingBrace = true
+ }
+
+ continuing := false
+ if values != "" {
+ buf.WriteString(values)
+ continuing = true
+ }
+
+ if args != "" {
+ if continuing {
+ buf.WriteByte(f.comma())
+ }
+ buf.WriteString(args)
+ }
+
+ if needClosingBrace {
+ buf.WriteByte('}')
+ }
+
+ return buf.String()
+}
+
+// flatten renders a list of key-value pairs into a buffer. If escapeKeys is
+// true, the keys are assumed to have non-JSON-compatible characters in them
+// and must be evaluated for escapes.
//
// This function returns a potentially modified version of kvList, which
// ensures that there is a value for every key (adding a value if needed) and
// that each key is a string (substituting a key if needed).
-func (f Formatter) flatten(buf *bytes.Buffer, kvList []interface{}, continuing bool, escapeKeys bool) []interface{} {
+func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, escapeKeys bool) []any {
// This logic overlaps with sanitize() but saves one type-cast per key,
// which can be measurable.
if len(kvList)%2 != 0 {
kvList = append(kvList, noValue)
}
+ copied := false
for i := 0; i < len(kvList); i += 2 {
k, ok := kvList[i].(string)
if !ok {
+ if !copied {
+ newList := make([]any, len(kvList))
+ copy(newList, kvList)
+ kvList = newList
+ copied = true
+ }
k = f.nonStringKey(kvList[i])
kvList[i] = k
}
v := kvList[i+1]
- if i > 0 || continuing {
+ if i > 0 {
if f.outputFormat == outputJSON {
- buf.WriteByte(',')
+ buf.WriteByte(f.comma())
} else {
// In theory the format could be something we don't understand. In
// practice, we control it, so it won't be.
@@ -315,25 +396,36 @@ func (f Formatter) flatten(buf *bytes.Buffer, kvList []interface{}, continuing b
}
}
- if escapeKeys {
- buf.WriteString(prettyString(k))
- } else {
- // this is faster
- buf.WriteByte('"')
- buf.WriteString(k)
- buf.WriteByte('"')
- }
- if f.outputFormat == outputJSON {
- buf.WriteByte(':')
- } else {
- buf.WriteByte('=')
- }
+ buf.WriteString(f.quoted(k, escapeKeys))
+ buf.WriteByte(f.colon())
buf.WriteString(f.pretty(v))
}
return kvList
}
-func (f Formatter) pretty(value interface{}) string {
+func (f Formatter) quoted(str string, escape bool) string {
+ if escape {
+ return prettyString(str)
+ }
+ // this is faster
+ return `"` + str + `"`
+}
+
+func (f Formatter) comma() byte {
+ if f.outputFormat == outputJSON {
+ return ','
+ }
+ return ' '
+}
+
+func (f Formatter) colon() byte {
+ if f.outputFormat == outputJSON {
+ return ':'
+ }
+ return '='
+}
+
+func (f Formatter) pretty(value any) string {
return f.prettyWithFlags(value, 0, 0)
}
@@ -342,7 +434,7 @@ const (
)
// TODO: This is not fast. Most of the overhead goes here.
-func (f Formatter) prettyWithFlags(value interface{}, flags uint32, depth int) string {
+func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string {
if depth > f.opts.MaxLogDepth {
return `"<max-log-depth-exceeded>"`
}
@@ -406,12 +498,12 @@ func (f Formatter) prettyWithFlags(value interface{}, flags uint32, depth int) s
}
for i := 0; i < len(v); i += 2 {
if i > 0 {
- buf.WriteByte(',')
+ buf.WriteByte(f.comma())
}
k, _ := v[i].(string) // sanitize() above means no need to check success
// arbitrary keys might need escaping
buf.WriteString(prettyString(k))
- buf.WriteByte(':')
+ buf.WriteByte(f.colon())
buf.WriteString(f.prettyWithFlags(v[i+1], 0, depth+1))
}
if flags&flagRawStruct == 0 {
@@ -447,6 +539,7 @@ func (f Formatter) prettyWithFlags(value interface{}, flags uint32, depth int) s
if flags&flagRawStruct == 0 {
buf.WriteByte('{')
}
+ printComma := false // testing i>0 is not enough because of JSON omitted fields
for i := 0; i < t.NumField(); i++ {
fld := t.Field(i)
if fld.PkgPath != "" {
@@ -478,9 +571,10 @@ func (f Formatter) prettyWithFlags(value interface{}, flags uint32, depth int) s
if omitempty && isEmpty(v.Field(i)) {
continue
}
- if i > 0 {
- buf.WriteByte(',')
+ if printComma {
+ buf.WriteByte(f.comma())
}
+ printComma = true // if we got here, we are rendering a field
if fld.Anonymous && fld.Type.Kind() == reflect.Struct && name == "" {
buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), flags|flagRawStruct, depth+1))
continue
@@ -489,10 +583,8 @@ func (f Formatter) prettyWithFlags(value interface{}, flags uint32, depth int) s
name = fld.Name
}
// field names can't contain characters which need escaping
- buf.WriteByte('"')
- buf.WriteString(name)
- buf.WriteByte('"')
- buf.WriteByte(':')
+ buf.WriteString(f.quoted(name, false))
+ buf.WriteByte(f.colon())
buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), 0, depth+1))
}
if flags&flagRawStruct == 0 {
@@ -500,10 +592,24 @@ func (f Formatter) prettyWithFlags(value interface{}, flags uint32, depth int) s
}
return buf.String()
case reflect.Slice, reflect.Array:
+ // If this is outputting as JSON, make sure this isn't really a json.RawMessage.
+ // If so, just emit it "as-is" and don't pretty-print it, as that would just print
+ // it as [X,Y,Z,...], which isn't terribly useful vs the string form you really want.
+ if f.outputFormat == outputJSON {
+ if rm, ok := value.(json.RawMessage); ok {
+ // If it's empty make sure we emit an empty value as the array style would below.
+ if len(rm) > 0 {
+ buf.Write(rm)
+ } else {
+ buf.WriteString("null")
+ }
+ return buf.String()
+ }
+ }
buf.WriteByte('[')
for i := 0; i < v.Len(); i++ {
if i > 0 {
- buf.WriteByte(',')
+ buf.WriteByte(f.comma())
}
e := v.Index(i)
buf.WriteString(f.prettyWithFlags(e.Interface(), 0, depth+1))
@@ -517,7 +623,7 @@ func (f Formatter) prettyWithFlags(value interface{}, flags uint32, depth int) s
i := 0
for it.Next() {
if i > 0 {
- buf.WriteByte(',')
+ buf.WriteByte(f.comma())
}
// If a map key supports TextMarshaler, use it.
keystr := ""
@@ -539,7 +645,7 @@ func (f Formatter) prettyWithFlags(value interface{}, flags uint32, depth int) s
}
}
buf.WriteString(keystr)
- buf.WriteByte(':')
+ buf.WriteByte(f.colon())
buf.WriteString(f.prettyWithFlags(it.Value().Interface(), 0, depth+1))
i++
}
@@ -597,7 +703,7 @@ func isEmpty(v reflect.Value) bool {
return false
}
-func invokeMarshaler(m logr.Marshaler) (ret interface{}) {
+func invokeMarshaler(m logr.Marshaler) (ret any) {
defer func() {
if r := recover(); r != nil {
ret = fmt.Sprintf("<panic: %s>", r)
@@ -658,12 +764,12 @@ func (f Formatter) caller() Caller {
const noValue = "<no-value>"
-func (f Formatter) nonStringKey(v interface{}) string {
+func (f Formatter) nonStringKey(v any) string {
return fmt.Sprintf("<non-string-key: %s>", f.snippet(v))
}
// snippet produces a short snippet string of an arbitrary value.
-func (f Formatter) snippet(v interface{}) string {
+func (f Formatter) snippet(v any) string {
const snipLen = 16
snip := f.pretty(v)
@@ -676,7 +782,7 @@ func (f Formatter) snippet(v interface{}) string {
// sanitize ensures that a list of key-value pairs has a value for every key
// (adding a value if needed) and that each key is a string (substituting a key
// if needed).
-func (f Formatter) sanitize(kvList []interface{}) []interface{} {
+func (f Formatter) sanitize(kvList []any) []any {
if len(kvList)%2 != 0 {
kvList = append(kvList, noValue)
}
@@ -689,6 +795,24 @@ func (f Formatter) sanitize(kvList []interface{}) []interface{} {
return kvList
}
+// startGroup opens a new group scope (basically a sub-struct), which locks all
+// the current saved values and starts them anew. This is needed to satisfy
+// slog.
+func (f *Formatter) startGroup(name string) {
+ // Unnamed groups are just inlined.
+ if name == "" {
+ return
+ }
+
+ n := len(f.groups)
+ f.groups = append(f.groups[:n:n], groupDef{f.groupName, f.valuesStr})
+
+ // Start collecting new values.
+ f.groupName = name
+ f.valuesStr = ""
+ f.values = nil
+}
+
// Init configures this Formatter from runtime info, such as the call depth
// imposed by logr itself.
// Note that this receiver is a pointer, so depth can be saved.
@@ -710,8 +834,8 @@ func (f Formatter) GetDepth() int {
// FormatInfo renders an Info log message into strings. The prefix will be
// empty when no names were set (via AddNames), or when the output is
// configured for JSON.
-func (f Formatter) FormatInfo(level int, msg string, kvList []interface{}) (prefix, argsStr string) {
- args := make([]interface{}, 0, 64) // using a constant here impacts perf
+func (f Formatter) FormatInfo(level int, msg string, kvList []any) (prefix, argsStr string) {
+ args := make([]any, 0, 64) // using a constant here impacts perf
prefix = f.prefix
if f.outputFormat == outputJSON {
args = append(args, "logger", prefix)
@@ -723,15 +847,18 @@ func (f Formatter) FormatInfo(level int, msg string, kvList []interface{}) (pref
if policy := f.opts.LogCaller; policy == All || policy == Info {
args = append(args, "caller", f.caller())
}
- args = append(args, "level", level, "msg", msg)
+ if key := *f.opts.LogInfoLevel; key != "" {
+ args = append(args, key, level)
+ }
+ args = append(args, "msg", msg)
return prefix, f.render(args, kvList)
}
// FormatError renders an Error log message into strings. The prefix will be
-// empty when no names were set (via AddNames), or when the output is
+// empty when no names were set (via AddNames), or when the output is
// configured for JSON.
-func (f Formatter) FormatError(err error, msg string, kvList []interface{}) (prefix, argsStr string) {
- args := make([]interface{}, 0, 64) // using a constant here impacts perf
+func (f Formatter) FormatError(err error, msg string, kvList []any) (prefix, argsStr string) {
+ args := make([]any, 0, 64) // using a constant here impacts perf
prefix = f.prefix
if f.outputFormat == outputJSON {
args = append(args, "logger", prefix)
@@ -744,12 +871,12 @@ func (f Formatter) FormatError(err error, msg string, kvList []interface{}) (pre
args = append(args, "caller", f.caller())
}
args = append(args, "msg", msg)
- var loggableErr interface{}
+ var loggableErr any
if err != nil {
loggableErr = err.Error()
}
args = append(args, "error", loggableErr)
- return f.prefix, f.render(args, kvList)
+ return prefix, f.render(args, kvList)
}
// AddName appends the specified name. funcr uses '/' characters to separate
@@ -764,7 +891,7 @@ func (f *Formatter) AddName(name string) {
// AddValues adds key-value pairs to the set of saved values to be logged with
// each log line.
-func (f *Formatter) AddValues(kvList []interface{}) {
+func (f *Formatter) AddValues(kvList []any) {
// Three slice args forces a copy.
n := len(f.values)
f.values = append(f.values[:n:n], kvList...)
@@ -776,7 +903,7 @@ func (f *Formatter) AddValues(kvList []interface{}) {
// Pre-render values, so we don't have to do it on each Info/Error call.
buf := bytes.NewBuffer(make([]byte, 0, 1024))
- f.flatten(buf, vals, false, true) // escape user-provided keys
+ f.flatten(buf, vals, true) // escape user-provided keys
f.valuesStr = buf.String()
}
diff --git a/vendor/github.com/go-logr/logr/funcr/slogsink.go b/vendor/github.com/go-logr/logr/funcr/slogsink.go
new file mode 100644
index 00000000..7bd84761
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/funcr/slogsink.go
@@ -0,0 +1,105 @@
+//go:build go1.21
+// +build go1.21
+
+/*
+Copyright 2023 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package funcr
+
+import (
+ "context"
+ "log/slog"
+
+ "github.com/go-logr/logr"
+)
+
+var _ logr.SlogSink = &fnlogger{}
+
+const extraSlogSinkDepth = 3 // 2 for slog, 1 for SlogSink
+
+func (l fnlogger) Handle(_ context.Context, record slog.Record) error {
+ kvList := make([]any, 0, 2*record.NumAttrs())
+ record.Attrs(func(attr slog.Attr) bool {
+ kvList = attrToKVs(attr, kvList)
+ return true
+ })
+
+ if record.Level >= slog.LevelError {
+ l.WithCallDepth(extraSlogSinkDepth).Error(nil, record.Message, kvList...)
+ } else {
+ level := l.levelFromSlog(record.Level)
+ l.WithCallDepth(extraSlogSinkDepth).Info(level, record.Message, kvList...)
+ }
+ return nil
+}
+
+func (l fnlogger) WithAttrs(attrs []slog.Attr) logr.SlogSink {
+ kvList := make([]any, 0, 2*len(attrs))
+ for _, attr := range attrs {
+ kvList = attrToKVs(attr, kvList)
+ }
+ l.AddValues(kvList)
+ return &l
+}
+
+func (l fnlogger) WithGroup(name string) logr.SlogSink {
+ l.startGroup(name)
+ return &l
+}
+
+// attrToKVs appends a slog.Attr to a logr-style kvList. It handles slog Groups
+// and other details of slog.
+func attrToKVs(attr slog.Attr, kvList []any) []any {
+ attrVal := attr.Value.Resolve()
+ if attrVal.Kind() == slog.KindGroup {
+ groupVal := attrVal.Group()
+ grpKVs := make([]any, 0, 2*len(groupVal))
+ for _, attr := range groupVal {
+ grpKVs = attrToKVs(attr, grpKVs)
+ }
+ if attr.Key == "" {
+ // slog says we have to inline these
+ kvList = append(kvList, grpKVs...)
+ } else {
+ kvList = append(kvList, attr.Key, PseudoStruct(grpKVs))
+ }
+ } else if attr.Key != "" {
+ kvList = append(kvList, attr.Key, attrVal.Any())
+ }
+
+ return kvList
+}
+
+// levelFromSlog adjusts the level by the logger's verbosity and negates it.
+// It ensures that the result is >= 0. This is necessary because the result is
+// passed to a LogSink and that API did not historically document whether
+// levels could be negative or what that meant.
+//
+// Some example usage:
+//
+// logrV0 := getMyLogger()
+// logrV2 := logrV0.V(2)
+// slogV2 := slog.New(logr.ToSlogHandler(logrV2))
+// slogV2.Debug("msg") // =~ logrV2.V(4) =~ logrV0.V(6)
+// slogV2.Info("msg") // =~ logrV2.V(0) =~ logrV0.V(2)
+// slogV2.Warn("msg") // =~ logrV2.V(-4) =~ logrV0.V(0)
+func (l fnlogger) levelFromSlog(level slog.Level) int {
+ result := -level
+ if result < 0 {
+ result = 0 // because LogSink doesn't expect negative V levels
+ }
+ return int(result)
+}
diff --git a/vendor/github.com/go-logr/logr/logr.go b/vendor/github.com/go-logr/logr/logr.go
index c3b56b3d..b4428e10 100644
--- a/vendor/github.com/go-logr/logr/logr.go
+++ b/vendor/github.com/go-logr/logr/logr.go
@@ -21,7 +21,7 @@ limitations under the License.
// to back that API. Packages in the Go ecosystem can depend on this package,
// while callers can implement logging with whatever backend is appropriate.
//
-// Usage
+// # Usage
//
// Logging is done using a Logger instance. Logger is a concrete type with
// methods, which defers the actual logging to a LogSink interface. The main
@@ -30,16 +30,20 @@ limitations under the License.
// "structured logging".
//
// With Go's standard log package, we might write:
-// log.Printf("setting target value %s", targetValue)
+//
+// log.Printf("setting target value %s", targetValue)
//
// With logr's structured logging, we'd write:
-// logger.Info("setting target", "value", targetValue)
+//
+// logger.Info("setting target", "value", targetValue)
//
// Errors are much the same. Instead of:
-// log.Printf("failed to open the pod bay door for user %s: %v", user, err)
+//
+// log.Printf("failed to open the pod bay door for user %s: %v", user, err)
//
// We'd write:
-// logger.Error(err, "failed to open the pod bay door", "user", user)
+//
+// logger.Error(err, "failed to open the pod bay door", "user", user)
//
// Info() and Error() are very similar, but they are separate methods so that
// LogSink implementations can choose to do things like attach additional
@@ -47,7 +51,7 @@ limitations under the License.
// always logged, regardless of the current verbosity. If there is no error
// instance available, passing nil is valid.
//
-// Verbosity
+// # Verbosity
//
// Often we want to log information only when the application is in "verbose
// mode". To write log lines that are more verbose, Logger has a V() method.
@@ -58,20 +62,22 @@ limitations under the License.
// Error messages do not have a verbosity level and are always logged.
//
// Where we might have written:
-// if flVerbose >= 2 {
-// log.Printf("an unusual thing happened")
-// }
+//
+// if flVerbose >= 2 {
+// log.Printf("an unusual thing happened")
+// }
//
// We can write:
-// logger.V(2).Info("an unusual thing happened")
//
-// Logger Names
+// logger.V(2).Info("an unusual thing happened")
+//
+// # Logger Names
//
// Logger instances can have name strings so that all messages logged through
// that instance have additional context. For example, you might want to add
// a subsystem name:
//
-// logger.WithName("compactor").Info("started", "time", time.Now())
+// logger.WithName("compactor").Info("started", "time", time.Now())
//
// The WithName() method returns a new Logger, which can be passed to
// constructors or other functions for further use. Repeated use of WithName()
@@ -82,25 +88,27 @@ limitations under the License.
// joining operation (e.g. whitespace, commas, periods, slashes, brackets,
// quotes, etc).
//
-// Saved Values
+// # Saved Values
//
// Logger instances can store any number of key/value pairs, which will be
// logged alongside all messages logged through that instance. For example,
// you might want to create a Logger instance per managed object:
//
// With the standard log package, we might write:
-// log.Printf("decided to set field foo to value %q for object %s/%s",
-// targetValue, object.Namespace, object.Name)
+//
+// log.Printf("decided to set field foo to value %q for object %s/%s",
+// targetValue, object.Namespace, object.Name)
//
// With logr we'd write:
-// // Elsewhere: set up the logger to log the object name.
-// obj.logger = mainLogger.WithValues(
-// "name", obj.name, "namespace", obj.namespace)
//
-// // later on...
-// obj.logger.Info("setting foo", "value", targetValue)
+// // Elsewhere: set up the logger to log the object name.
+// obj.logger = mainLogger.WithValues(
+// "name", obj.name, "namespace", obj.namespace)
//
-// Best Practices
+// // later on...
+// obj.logger.Info("setting foo", "value", targetValue)
+//
+// # Best Practices
//
// Logger has very few hard rules, with the goal that LogSink implementations
// might have a lot of freedom to differentiate. There are, however, some
@@ -119,20 +127,20 @@ limitations under the License.
// such a value can call its methods without having to check whether the
// instance is ready for use.
//
-// Calling methods with the null logger (Logger{}) as instance will crash
-// because it has no LogSink. Therefore this null logger should never be passed
-// around. For cases where passing a logger is optional, a pointer to Logger
+// The zero logger (= Logger{}) is identical to Discard() and discards all log
+// entries. Code that receives a Logger by value can simply call its methods;
+// they will never crash. For cases where passing a logger is optional, a pointer to Logger
// should be used.
//
-// Key Naming Conventions
+// # Key Naming Conventions
//
// Keys are not strictly required to conform to any specification or regex, but
// it is recommended that they:
-// * be human-readable and meaningful (not auto-generated or simple ordinals)
-// * be constant (not dependent on input data)
-// * contain only printable characters
-// * not contain whitespace or punctuation
-// * use lower case for simple keys and lowerCamelCase for more complex ones
+// - be human-readable and meaningful (not auto-generated or simple ordinals)
+// - be constant (not dependent on input data)
+// - contain only printable characters
+// - not contain whitespace or punctuation
+// - use lower case for simple keys and lowerCamelCase for more complex ones
//
// These guidelines help ensure that log data is processed properly regardless
// of the log implementation. For example, log implementations will try to
@@ -141,51 +149,54 @@ limitations under the License.
// While users are generally free to use key names of their choice, it's
// generally best to avoid using the following keys, as they're frequently used
// by implementations:
-// * "caller": the calling information (file/line) of a particular log line
-// * "error": the underlying error value in the `Error` method
-// * "level": the log level
-// * "logger": the name of the associated logger
-// * "msg": the log message
-// * "stacktrace": the stack trace associated with a particular log line or
-// error (often from the `Error` message)
-// * "ts": the timestamp for a log line
+// - "caller": the calling information (file/line) of a particular log line
+// - "error": the underlying error value in the `Error` method
+// - "level": the log level
+// - "logger": the name of the associated logger
+// - "msg": the log message
+// - "stacktrace": the stack trace associated with a particular log line or
+// error (often from the `Error` message)
+// - "ts": the timestamp for a log line
//
// Implementations are encouraged to make use of these keys to represent the
// above concepts, when necessary (for example, in a pure-JSON output form, it
// would be necessary to represent at least message and timestamp as ordinary
// named values).
//
-// Break Glass
+// # Break Glass
//
// Implementations may choose to give callers access to the underlying
// logging implementation. The recommended pattern for this is:
-// // Underlier exposes access to the underlying logging implementation.
-// // Since callers only have a logr.Logger, they have to know which
-// // implementation is in use, so this interface is less of an abstraction
-// // and more of way to test type conversion.
-// type Underlier interface {
-// GetUnderlying()
-// }
+//
+// // Underlier exposes access to the underlying logging implementation.
+// // Since callers only have a logr.Logger, they have to know which
+// // implementation is in use, so this interface is less of an abstraction
+// // and more of way to test type conversion.
+// type Underlier interface {
+// GetUnderlying()
+// }
//
// Logger grants access to the sink to enable type assertions like this:
-// func DoSomethingWithImpl(log logr.Logger) {
-// if underlier, ok := log.GetSink()(impl.Underlier) {
-// implLogger := underlier.GetUnderlying()
-// ...
-// }
-// }
+//
+// func DoSomethingWithImpl(log logr.Logger) {
+// if underlier, ok := log.GetSink().(impl.Underlier); ok {
+// implLogger := underlier.GetUnderlying()
+// ...
+// }
+// }
//
// Custom `With*` functions can be implemented by copying the complete
// Logger struct and replacing the sink in the copy:
-// // WithFooBar changes the foobar parameter in the log sink and returns a
-// // new logger with that modified sink. It does nothing for loggers where
-// // the sink doesn't support that parameter.
-// func WithFoobar(log logr.Logger, foobar int) logr.Logger {
-// if foobarLogSink, ok := log.GetSink()(FoobarSink); ok {
-// log = log.WithSink(foobarLogSink.WithFooBar(foobar))
-// }
-// return log
-// }
+//
+// // WithFooBar changes the foobar parameter in the log sink and returns a
+// // new logger with that modified sink. It does nothing for loggers where
+// // the sink doesn't support that parameter.
+// func WithFoobar(log logr.Logger, foobar int) logr.Logger {
+// if foobarLogSink, ok := log.GetSink().(FoobarSink); ok {
+// log = log.WithSink(foobarLogSink.WithFooBar(foobar))
+// }
+// return log
+// }
//
// Don't use New to construct a new Logger with a LogSink retrieved from an
// existing Logger. Source code attribution might not work correctly and
@@ -196,16 +207,15 @@ limitations under the License.
// those.
package logr
-import (
- "context"
-)
-
// New returns a new Logger instance. This is primarily used by libraries
-// implementing LogSink, rather than end users.
+// implementing LogSink, rather than end users. Passing a nil sink will create
+// a Logger which discards all log lines.
func New(sink LogSink) Logger {
logger := Logger{}
logger.setSink(sink)
- sink.Init(runtimeInfo)
+ if sink != nil {
+ sink.Init(runtimeInfo)
+ }
return logger
}
@@ -244,7 +254,13 @@ type Logger struct {
// Enabled tests whether this Logger is enabled. For example, commandline
// flags might be used to set the logging verbosity and disable some info logs.
func (l Logger) Enabled() bool {
- return l.sink.Enabled(l.level)
+ // Some implementations of LogSink look at the caller in Enabled (e.g.
+ // different verbosity levels per package or file), but we only pass one
+ // CallDepth in (via Init). This means that all calls from Logger to the
+ // LogSink's Enabled, Info, and Error methods must have the same number of
+ // frames. In other words, Logger methods can't call other Logger methods
+ // which call these LogSink methods unless we do it the same in all paths.
+ return l.sink != nil && l.sink.Enabled(l.level)
}
// Info logs a non-error message with the given key/value pairs as context.
@@ -253,8 +269,11 @@ func (l Logger) Enabled() bool {
// line. The key/value pairs can then be used to add additional variable
// information. The key/value pairs must alternate string keys and arbitrary
// values.
-func (l Logger) Info(msg string, keysAndValues ...interface{}) {
- if l.Enabled() {
+func (l Logger) Info(msg string, keysAndValues ...any) {
+ if l.sink == nil {
+ return
+ }
+ if l.sink.Enabled(l.level) { // see comment in Enabled
if withHelper, ok := l.sink.(CallStackHelperLogSink); ok {
withHelper.GetCallStackHelper()()
}
@@ -272,7 +291,10 @@ func (l Logger) Info(msg string, keysAndValues ...interface{}) {
// while the err argument should be used to attach the actual error that
// triggered this log line, if present. The err parameter is optional
// and nil may be passed instead of an error instance.
-func (l Logger) Error(err error, msg string, keysAndValues ...interface{}) {
+func (l Logger) Error(err error, msg string, keysAndValues ...any) {
+ if l.sink == nil {
+ return
+ }
if withHelper, ok := l.sink.(CallStackHelperLogSink); ok {
withHelper.GetCallStackHelper()()
}
@@ -284,6 +306,9 @@ func (l Logger) Error(err error, msg string, keysAndValues ...interface{}) {
// level means a log message is less important. Negative V-levels are treated
// as 0.
func (l Logger) V(level int) Logger {
+ if l.sink == nil {
+ return l
+ }
if level < 0 {
level = 0
}
@@ -291,9 +316,19 @@ func (l Logger) V(level int) Logger {
return l
}
+// GetV returns the verbosity level of the logger. If the logger's LogSink is
+// nil as in the Discard logger, this will always return 0.
+func (l Logger) GetV() int {
+	// 0 if l.sink is nil because of the check in V above.
+ return l.level
+}
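
A short sketch of GetV against the cumulative behavior of V, again leaning on funcr as an assumed backend:

package main

import (
	"fmt"

	"github.com/go-logr/logr"
	"github.com/go-logr/logr/funcr"
)

func main() {
	log := funcr.New(func(prefix, args string) {}, funcr.Options{})

	// V accumulates across calls; GetV reports the total.
	fmt.Println(log.V(1).V(2).GetV()) // 3

	// Discard has a nil sink, so V is a no-op and GetV stays 0.
	fmt.Println(logr.Discard().V(5).GetV()) // 0
}
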
+
// WithValues returns a new Logger instance with additional key/value pairs.
// See Info for documentation on how key/value pairs work.
-func (l Logger) WithValues(keysAndValues ...interface{}) Logger {
+func (l Logger) WithValues(keysAndValues ...any) Logger {
+ if l.sink == nil {
+ return l
+ }
l.setSink(l.sink.WithValues(keysAndValues...))
return l
}
@@ -304,6 +339,9 @@ func (l Logger) WithValues(keysAndValues ...interface{}) Logger {
// contain only letters, digits, and hyphens (see the package documentation for
// more information).
func (l Logger) WithName(name string) Logger {
+ if l.sink == nil {
+ return l
+ }
l.setSink(l.sink.WithName(name))
return l
}
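
A sketch of chaining WithName and WithValues as documented above; the names, key, and port are hypothetical:

package main

import (
	"fmt"

	"github.com/go-logr/logr/funcr"
)

func main() {
	base := funcr.New(func(prefix, args string) {
		fmt.Println(prefix, args)
	}, funcr.Options{})

	// Each WithName call appends one name segment; WithValues pins
	// key/value context onto every line logged through the result.
	reqLog := base.WithName("garm").WithName("api").WithValues("reqID", 42)
	reqLog.Info("listener started", "port", 9997)
}
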
@@ -324,6 +362,9 @@ func (l Logger) WithName(name string) Logger {
// WithCallDepth(1) because it works with implementations that support the
// CallDepthLogSink and/or CallStackHelperLogSink interfaces.
func (l Logger) WithCallDepth(depth int) Logger {
+ if l.sink == nil {
+ return l
+ }
if withCallDepth, ok := l.sink.(CallDepthLogSink); ok {
l.setSink(withCallDepth.WithCallDepth(depth))
}
@@ -345,6 +386,9 @@ func (l Logger) WithCallDepth(depth int) Logger {
// implementation does not support either of these, the original Logger will be
// returned.
func (l Logger) WithCallStackHelper() (func(), Logger) {
+ if l.sink == nil {
+ return func() {}, l
+ }
var helper func()
if withCallDepth, ok := l.sink.(CallDepthLogSink); ok {
l.setSink(withCallDepth.WithCallDepth(1))
@@ -357,43 +401,9 @@ func (l Logger) WithCallStackHelper() (func(), Logger) {
return helper, l
}
-// contextKey is how we find Loggers in a context.Context.
-type contextKey struct{}
-
-// FromContext returns a Logger from ctx or an error if no Logger is found.
-func FromContext(ctx context.Context) (Logger, error) {
- if v, ok := ctx.Value(contextKey{}).(Logger); ok {
- return v, nil
- }
-
- return Logger{}, notFoundError{}
-}
-
-// notFoundError exists to carry an IsNotFound method.
-type notFoundError struct{}
-
-func (notFoundError) Error() string {
- return "no logr.Logger was present"
-}
-
-func (notFoundError) IsNotFound() bool {
- return true
-}
-
-// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this
-// returns a Logger that discards all log messages.
-func FromContextOrDiscard(ctx context.Context) Logger {
- if v, ok := ctx.Value(contextKey{}).(Logger); ok {
- return v
- }
-
- return Discard()
-}
-
-// NewContext returns a new Context, derived from ctx, which carries the
-// provided Logger.
-func NewContext(ctx context.Context, logger Logger) context.Context {
- return context.WithValue(ctx, contextKey{}, logger)
+// IsZero returns true if this logger is an uninitialized zero value.
+func (l Logger) IsZero() bool {
+ return l.sink == nil
}
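
A sketch of the intended IsZero use, distinguishing an unset Logger from a constructed one (funcr assumed as backend):

package main

import (
	"fmt"

	"github.com/go-logr/logr"
	"github.com/go-logr/logr/funcr"
)

func main() {
	var unset logr.Logger       // zero value: sink is nil
	fmt.Println(unset.IsZero()) // true

	log := funcr.New(func(prefix, args string) {}, funcr.Options{})
	fmt.Println(log.IsZero()) // false
}
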
// RuntimeInfo holds information that the logr "core" library knows which
@@ -427,22 +437,22 @@ type LogSink interface {
// The level argument is provided for optional logging. This method will
// only be called when Enabled(level) is true. See Logger.Info for more
// details.
- Info(level int, msg string, keysAndValues ...interface{})
+ Info(level int, msg string, keysAndValues ...any)
// Error logs an error, with the given message and key/value pairs as
// context. See Logger.Error for more details.
- Error(err error, msg string, keysAndValues ...interface{})
+ Error(err error, msg string, keysAndValues ...any)
// WithValues returns a new LogSink with additional key/value pairs. See
// Logger.WithValues for more details.
- WithValues(keysAndValues ...interface{}) LogSink
+ WithValues(keysAndValues ...any) LogSink
// WithName returns a new LogSink with the specified name appended. See
// Logger.WithName for more details.
WithName(name string) LogSink
}
-// CallDepthLogSink represents a Logger that knows how to climb the call stack
+// CallDepthLogSink represents a LogSink that knows how to climb the call stack
// to identify the original call site and can offset the depth by a specified
// number of frames. This is useful for users who have helper functions
// between the "real" call site and the actual calls to Logger methods.
@@ -467,7 +477,7 @@ type CallDepthLogSink interface {
WithCallDepth(depth int) LogSink
}
-// CallStackHelperLogSink represents a Logger that knows how to climb
+// CallStackHelperLogSink represents a LogSink that knows how to climb
// the call stack to identify the original call site and can skip
// intermediate helper functions if they mark themselves as
// helper. Go's testing package uses that approach.
@@ -506,5 +516,5 @@ type Marshaler interface {
// with exported fields
//
// It may return any value of any type.
- MarshalLog() interface{}
+ MarshalLog() any
}
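
One common use of Marshaler is redaction. A hedged sketch, with a hypothetical account type and funcr as the assumed backend:

package main

import (
	"fmt"

	"github.com/go-logr/logr/funcr"
)

// account carries a secret that must never reach the logs.
type account struct {
	Name  string
	Token string
}

// MarshalLog satisfies logr.Marshaler: sinks log the returned value
// instead of the raw struct, so Token is redacted.
func (a account) MarshalLog() any {
	return struct{ Name string }{a.Name}
}

func main() {
	log := funcr.New(func(prefix, args string) {
		fmt.Println(prefix, args)
	}, funcr.Options{})
	log.Info("login", "account", account{Name: "ada", Token: "s3cret"})
}
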
diff --git a/vendor/github.com/go-logr/logr/sloghandler.go b/vendor/github.com/go-logr/logr/sloghandler.go
new file mode 100644
index 00000000..82d1ba49
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/sloghandler.go
@@ -0,0 +1,192 @@
+//go:build go1.21
+// +build go1.21
+
+/*
+Copyright 2023 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logr
+
+import (
+ "context"
+ "log/slog"
+)
+
+type slogHandler struct {
+ // May be nil, in which case all logs get discarded.
+ sink LogSink
+ // Non-nil if sink is non-nil and implements SlogSink.
+ slogSink SlogSink
+
+ // groupPrefix collects values from WithGroup calls. It gets added as
+ // prefix to value keys when handling a log record.
+ groupPrefix string
+
+ // levelBias can be set when constructing the handler to influence the
+ // slog.Level of log records. A positive levelBias reduces the
+ // slog.Level value. slog has no API to influence this value after the
+ // handler got created, so it can only be set indirectly through
+ // Logger.V.
+ levelBias slog.Level
+}
+
+var _ slog.Handler = &slogHandler{}
+
+// groupSeparator is used to concatenate WithGroup names and attribute keys.
+const groupSeparator = "."
+
+// GetLevel is used for black box unit testing.
+func (l *slogHandler) GetLevel() slog.Level {
+ return l.levelBias
+}
+
+func (l *slogHandler) Enabled(_ context.Context, level slog.Level) bool {
+ return l.sink != nil && (level >= slog.LevelError || l.sink.Enabled(l.levelFromSlog(level)))
+}
+
+func (l *slogHandler) Handle(ctx context.Context, record slog.Record) error {
+ if l.slogSink != nil {
+ // Only adjust verbosity level of log entries < slog.LevelError.
+ if record.Level < slog.LevelError {
+ record.Level -= l.levelBias
+ }
+ return l.slogSink.Handle(ctx, record)
+ }
+
+ // No need to check for nil sink here because Handle will only be called
+ // when Enabled returned true.
+
+ kvList := make([]any, 0, 2*record.NumAttrs())
+ record.Attrs(func(attr slog.Attr) bool {
+ kvList = attrToKVs(attr, l.groupPrefix, kvList)
+ return true
+ })
+ if record.Level >= slog.LevelError {
+ l.sinkWithCallDepth().Error(nil, record.Message, kvList...)
+ } else {
+ level := l.levelFromSlog(record.Level)
+ l.sinkWithCallDepth().Info(level, record.Message, kvList...)
+ }
+ return nil
+}
+
+// sinkWithCallDepth adjusts the stack unwinding so that when Error or Info
+// are called by Handle, code in slog gets skipped.
+//
+// This offset currently (Go 1.21.0) works for calls through
+// slog.New(ToSlogHandler(...)). There's no guarantee that the call
+// chain won't change. Wrapping the handler will also break unwinding. It's
+// still better than not adjusting at all.
+//
+// This cannot be done when constructing the handler because FromSlogHandler needs
+// access to the original sink without this adjustment. A second copy would
+// work, but then WithAttrs would have to be called for both of them.
+func (l *slogHandler) sinkWithCallDepth() LogSink {
+ if sink, ok := l.sink.(CallDepthLogSink); ok {
+ return sink.WithCallDepth(2)
+ }
+ return l.sink
+}
+
+func (l *slogHandler) WithAttrs(attrs []slog.Attr) slog.Handler {
+ if l.sink == nil || len(attrs) == 0 {
+ return l
+ }
+
+ clone := *l
+ if l.slogSink != nil {
+ clone.slogSink = l.slogSink.WithAttrs(attrs)
+ clone.sink = clone.slogSink
+ } else {
+ kvList := make([]any, 0, 2*len(attrs))
+ for _, attr := range attrs {
+ kvList = attrToKVs(attr, l.groupPrefix, kvList)
+ }
+ clone.sink = l.sink.WithValues(kvList...)
+ }
+ return &clone
+}
+
+func (l *slogHandler) WithGroup(name string) slog.Handler {
+ if l.sink == nil {
+ return l
+ }
+ if name == "" {
+ // slog says to inline empty groups
+ return l
+ }
+ clone := *l
+ if l.slogSink != nil {
+ clone.slogSink = l.slogSink.WithGroup(name)
+ clone.sink = clone.slogSink
+ } else {
+ clone.groupPrefix = addPrefix(clone.groupPrefix, name)
+ }
+ return &clone
+}
+
+// attrToKVs appends a slog.Attr to a logr-style kvList. It handles slog Groups
+// and other details of slog.
+func attrToKVs(attr slog.Attr, groupPrefix string, kvList []any) []any {
+ attrVal := attr.Value.Resolve()
+ if attrVal.Kind() == slog.KindGroup {
+ groupVal := attrVal.Group()
+ grpKVs := make([]any, 0, 2*len(groupVal))
+ prefix := groupPrefix
+ if attr.Key != "" {
+ prefix = addPrefix(groupPrefix, attr.Key)
+ }
+ for _, attr := range groupVal {
+ grpKVs = attrToKVs(attr, prefix, grpKVs)
+ }
+ kvList = append(kvList, grpKVs...)
+ } else if attr.Key != "" {
+ kvList = append(kvList, addPrefix(groupPrefix, attr.Key), attrVal.Any())
+ }
+
+ return kvList
+}
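
A sketch of the observable effect of this flattening for sinks that do not implement SlogSink: group members come out as dot-prefixed keys. funcr is assumed as the backend:

package main

import (
	"fmt"
	"log/slog"

	"github.com/go-logr/logr"
	"github.com/go-logr/logr/funcr"
)

func main() {
	logrLogger := funcr.New(func(prefix, args string) {
		fmt.Println(args)
	}, funcr.Options{})

	// The group "req" is flattened into keys like "req.method" and "req.path".
	slogLogger := slog.New(logr.ToSlogHandler(logrLogger))
	slogLogger.Info("handled",
		slog.Group("req",
			slog.String("method", "GET"),
			slog.String("path", "/"),
		),
	)
}
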
+
+func addPrefix(prefix, name string) string {
+ if prefix == "" {
+ return name
+ }
+ if name == "" {
+ return prefix
+ }
+ return prefix + groupSeparator + name
+}
+
+// levelFromSlog adjusts the level by the logger's verbosity and negates it.
+// It ensures that the result is >= 0. This is necessary because the result is
+// passed to a LogSink and that API did not historically document whether
+// levels could be negative or what that meant.
+//
+// Some example usage:
+//
+// logrV0 := getMyLogger()
+// logrV2 := logrV0.V(2)
+// slogV2 := slog.New(logr.ToSlogHandler(logrV2))
+// slogV2.Debug("msg") // =~ logrV2.V(4) =~ logrV0.V(6)
+// slogV2.Info("msg") // =~ logrV2.V(0) =~ logrV0.V(2)
+//	slogV2.Warn("msg") // =~ logrV2.V(-4) =~ logrV0.V(0)
+func (l *slogHandler) levelFromSlog(level slog.Level) int {
+ result := -level
+ result += l.levelBias // in case the original Logger had a V level
+ if result < 0 {
+ result = 0 // because LogSink doesn't expect negative V levels
+ }
+ return int(result)
+}
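
The mapping above, restated as standalone arithmetic so the worked examples in the comment can be checked by hand; the constants mirror slog's level values:

package main

import "fmt"

// logrLevel negates the slog level, adds the handler's bias (the Logger's
// V level), and clamps the result at 0, as levelFromSlog does.
func logrLevel(slogLevel, bias int) int {
	v := -slogLevel + bias
	if v < 0 {
		v = 0
	}
	return v
}

func main() {
	const debug, warn = -4, 4 // slog.LevelDebug, slog.LevelWarn
	fmt.Println(logrLevel(debug, 0)) // 4
	fmt.Println(logrLevel(debug, 2)) // 6, matches the logrV0.V(6) example
	fmt.Println(logrLevel(warn, 0))  // 0, clamped
}
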
diff --git a/vendor/github.com/go-logr/logr/slogr.go b/vendor/github.com/go-logr/logr/slogr.go
new file mode 100644
index 00000000..28a83d02
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/slogr.go
@@ -0,0 +1,100 @@
+//go:build go1.21
+// +build go1.21
+
+/*
+Copyright 2023 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logr
+
+import (
+ "context"
+ "log/slog"
+)
+
+// FromSlogHandler returns a Logger which writes to the slog.Handler.
+//
+// The logr verbosity level is mapped to slog levels such that V(0) becomes
+// slog.LevelInfo and V(4) becomes slog.LevelDebug.
+func FromSlogHandler(handler slog.Handler) Logger {
+ if handler, ok := handler.(*slogHandler); ok {
+ if handler.sink == nil {
+ return Discard()
+ }
+ return New(handler.sink).V(int(handler.levelBias))
+ }
+ return New(&slogSink{handler: handler})
+}
+
+// ToSlogHandler returns a slog.Handler which writes to the same sink as the Logger.
+//
+// The returned handler writes all records with level >= slog.LevelError as
+// error log entries with LogSink.Error, regardless of the verbosity level of
+// the Logger:
+//
+//	logger := <some Logger with 0 as verbosity level>
+// slog.New(ToSlogHandler(logger.V(10))).Error(...) -> logSink.Error(...)
+//
+// The level of all other records gets reduced by the verbosity
+// level of the Logger and the result is negated. If it happens
+// to be negative, then it gets replaced by zero because a LogSink
+// is not expected to handle negative levels:
+//
+// slog.New(ToSlogHandler(logger)).Debug(...) -> logger.GetSink().Info(level=4, ...)
+// slog.New(ToSlogHandler(logger)).Warning(...) -> logger.GetSink().Info(level=0, ...)
+// slog.New(ToSlogHandler(logger)).Info(...) -> logger.GetSink().Info(level=0, ...)
+// slog.New(ToSlogHandler(logger.V(4))).Info(...) -> logger.GetSink().Info(level=4, ...)
+func ToSlogHandler(logger Logger) slog.Handler {
+ if sink, ok := logger.GetSink().(*slogSink); ok && logger.GetV() == 0 {
+ return sink.handler
+ }
+
+ handler := &slogHandler{sink: logger.GetSink(), levelBias: slog.Level(logger.GetV())}
+ if slogSink, ok := handler.sink.(SlogSink); ok {
+ handler.slogSink = slogSink
+ }
+ return handler
+}
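
A round-trip sketch of the two conversion functions, assuming only the standard library's JSON handler:

package main

import (
	"fmt"
	"log/slog"
	"os"

	"github.com/go-logr/logr"
)

func main() {
	handler := slog.NewJSONHandler(os.Stdout, nil)

	// Wrap an slog handler as a logr.Logger.
	log := logr.FromSlogHandler(handler)
	log.Info("hello")        // emitted at slog.LevelInfo
	log.V(4).Info("details") // slog.LevelDebug: dropped at the default threshold

	// Converting back recovers the original handler, because the sink is
	// the internal slogSink and no V level was added in between.
	back := logr.ToSlogHandler(logr.FromSlogHandler(handler))
	fmt.Println(back == handler) // true
}
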
+
+// SlogSink is an optional interface that a LogSink can implement to support
+// logging through the slog.Logger or slog.Handler APIs better. It then should
+// also support special slog values like slog.Group. When used as a
+// slog.Handler, the advantages are:
+//
+// - stack unwinding gets avoided in favor of logging the pre-recorded PC,
+// as intended by slog
+// - proper grouping of key/value pairs via WithGroup
+// - verbosity levels > slog.LevelInfo can be recorded
+// - less overhead
+//
+// Both APIs (Logger and slog.Logger/Handler) then are supported equally
+// well. Developers can pick whatever API suits them better and/or mix
+// packages which use either API in the same binary with a common logging
+// implementation.
+//
+// This interface is necessary because the type implementing the LogSink
+// interface cannot also implement the slog.Handler interface due to the
+// different prototype of the common Enabled method.
+//
+// An implementation could support both interfaces in two different types, but then
+// additional interfaces would be needed to convert between those types in FromSlogHandler
+// and ToSlogHandler.
+type SlogSink interface {
+ LogSink
+
+ Handle(ctx context.Context, record slog.Record) error
+ WithAttrs(attrs []slog.Attr) SlogSink
+ WithGroup(name string) SlogSink
+}
diff --git a/vendor/github.com/go-logr/logr/slogsink.go b/vendor/github.com/go-logr/logr/slogsink.go
new file mode 100644
index 00000000..4060fcbc
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/slogsink.go
@@ -0,0 +1,120 @@
+//go:build go1.21
+// +build go1.21
+
+/*
+Copyright 2023 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logr
+
+import (
+ "context"
+ "log/slog"
+ "runtime"
+ "time"
+)
+
+var (
+ _ LogSink = &slogSink{}
+ _ CallDepthLogSink = &slogSink{}
+ _ Underlier = &slogSink{}
+)
+
+// Underlier is implemented by the LogSink returned by FromSlogHandler.
+type Underlier interface {
+ // GetUnderlying returns the Handler used by the LogSink.
+ GetUnderlying() slog.Handler
+}
+
+const (
+ // nameKey is used to log the `WithName` values as an additional attribute.
+ nameKey = "logger"
+
+ // errKey is used to log the error parameter of Error as an additional attribute.
+ errKey = "err"
+)
+
+type slogSink struct {
+ callDepth int
+ name string
+ handler slog.Handler
+}
+
+func (l *slogSink) Init(info RuntimeInfo) {
+ l.callDepth = info.CallDepth
+}
+
+func (l *slogSink) GetUnderlying() slog.Handler {
+ return l.handler
+}
+
+func (l *slogSink) WithCallDepth(depth int) LogSink {
+ newLogger := *l
+ newLogger.callDepth += depth
+ return &newLogger
+}
+
+func (l *slogSink) Enabled(level int) bool {
+ return l.handler.Enabled(context.Background(), slog.Level(-level))
+}
+
+func (l *slogSink) Info(level int, msg string, kvList ...interface{}) {
+ l.log(nil, msg, slog.Level(-level), kvList...)
+}
+
+func (l *slogSink) Error(err error, msg string, kvList ...interface{}) {
+ l.log(err, msg, slog.LevelError, kvList...)
+}
+
+func (l *slogSink) log(err error, msg string, level slog.Level, kvList ...interface{}) {
+ var pcs [1]uintptr
+ // skip runtime.Callers, this function, Info/Error, and all helper functions above that.
+ runtime.Callers(3+l.callDepth, pcs[:])
+
+ record := slog.NewRecord(time.Now(), level, msg, pcs[0])
+ if l.name != "" {
+ record.AddAttrs(slog.String(nameKey, l.name))
+ }
+ if err != nil {
+ record.AddAttrs(slog.Any(errKey, err))
+ }
+ record.Add(kvList...)
+ _ = l.handler.Handle(context.Background(), record)
+}
+
+func (l slogSink) WithName(name string) LogSink {
+ if l.name != "" {
+ l.name += "/"
+ }
+ l.name += name
+ return &l
+}
+
+func (l slogSink) WithValues(kvList ...interface{}) LogSink {
+ l.handler = l.handler.WithAttrs(kvListToAttrs(kvList...))
+ return &l
+}
+
+func kvListToAttrs(kvList ...interface{}) []slog.Attr {
+ // We don't need the record itself, only its Add method.
+ record := slog.NewRecord(time.Time{}, 0, "", 0)
+ record.Add(kvList...)
+ attrs := make([]slog.Attr, 0, record.NumAttrs())
+ record.Attrs(func(attr slog.Attr) bool {
+ attrs = append(attrs, attr)
+ return true
+ })
+ return attrs
+}
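
The same Record-based conversion, extracted as a standalone sketch to show what an alternating kv list becomes; kvToAttrs is a hypothetical stand-in for the unexported helper:

package main

import (
	"fmt"
	"log/slog"
	"time"
)

// kvToAttrs builds a throwaway Record purely to reuse its Add parsing,
// then collects the resulting attributes.
func kvToAttrs(kv ...any) []slog.Attr {
	rec := slog.NewRecord(time.Time{}, 0, "", 0)
	rec.Add(kv...)
	attrs := make([]slog.Attr, 0, rec.NumAttrs())
	rec.Attrs(func(a slog.Attr) bool {
		attrs = append(attrs, a)
		return true
	})
	return attrs
}

func main() {
	for _, a := range kvToAttrs("user", "ada", "attempt", 3) {
		fmt.Printf("%s = %v\n", a.Key, a.Value)
	}
}
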
diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/LICENSE b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/LICENSE
deleted file mode 100644
index 67c4fb56..00000000
--- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/LICENSE
+++ /dev/null
@@ -1,187 +0,0 @@
-Copyright © 2014, Roger Peppe, Canonical Inc.
-
-This software is licensed under the LGPLv3, included below.
-
-As a special exception to the GNU Lesser General Public License version 3
-("LGPL3"), the copyright holders of this Library give you permission to
-convey to a third party a Combined Work that links statically or dynamically
-to this Library without providing any Minimal Corresponding Source or
-Minimal Application Code as set out in 4d or providing the installation
-information set out in section 4e, provided that you comply with the other
-provisions of LGPL3 and provided that you meet, for the Application the
-terms and conditions of the license(s) which apply to the Application.
-
-Except as stated in this special exception, the provisions of LGPL3 will
-continue to comply in full to this Library. If you modify this Library, you
-may apply this exception to your version of this Library, but you are not
-obliged to do so. If you do not wish to do so, delete this exception
-statement from your version. This exception does not (and cannot) modify any
-license terms which apply to the Application, with which you must still
-comply.
-
-
- GNU LESSER GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc.
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
-
- This version of the GNU Lesser General Public License incorporates
-the terms and conditions of version 3 of the GNU General Public
-License, supplemented by the additional permissions listed below.
-
- 0. Additional Definitions.
-
- As used herein, "this License" refers to version 3 of the GNU Lesser
-General Public License, and the "GNU GPL" refers to version 3 of the GNU
-General Public License.
-
- "The Library" refers to a covered work governed by this License,
-other than an Application or a Combined Work as defined below.
-
- An "Application" is any work that makes use of an interface provided
-by the Library, but which is not otherwise based on the Library.
-Defining a subclass of a class defined by the Library is deemed a mode
-of using an interface provided by the Library.
-
- A "Combined Work" is a work produced by combining or linking an
-Application with the Library. The particular version of the Library
-with which the Combined Work was made is also called the "Linked
-Version".
-
- The "Minimal Corresponding Source" for a Combined Work means the
-Corresponding Source for the Combined Work, excluding any source code
-for portions of the Combined Work that, considered in isolation, are
-based on the Application, and not on the Linked Version.
-
- The "Corresponding Application Code" for a Combined Work means the
-object code and/or source code for the Application, including any data
-and utility programs needed for reproducing the Combined Work from the
-Application, but excluding the System Libraries of the Combined Work.
-
- 1. Exception to Section 3 of the GNU GPL.
-
- You may convey a covered work under sections 3 and 4 of this License
-without being bound by section 3 of the GNU GPL.
-
- 2. Conveying Modified Versions.
-
- If you modify a copy of the Library, and, in your modifications, a
-facility refers to a function or data to be supplied by an Application
-that uses the facility (other than as an argument passed when the
-facility is invoked), then you may convey a copy of the modified
-version:
-
- a) under this License, provided that you make a good faith effort to
- ensure that, in the event an Application does not supply the
- function or data, the facility still operates, and performs
- whatever part of its purpose remains meaningful, or
-
- b) under the GNU GPL, with none of the additional permissions of
- this License applicable to that copy.
-
- 3. Object Code Incorporating Material from Library Header Files.
-
- The object code form of an Application may incorporate material from
-a header file that is part of the Library. You may convey such object
-code under terms of your choice, provided that, if the incorporated
-material is not limited to numerical parameters, data structure
-layouts and accessors, or small macros, inline functions and templates
-(ten or fewer lines in length), you do both of the following:
-
- a) Give prominent notice with each copy of the object code that the
- Library is used in it and that the Library and its use are
- covered by this License.
-
- b) Accompany the object code with a copy of the GNU GPL and this license
- document.
-
- 4. Combined Works.
-
- You may convey a Combined Work under terms of your choice that,
-taken together, effectively do not restrict modification of the
-portions of the Library contained in the Combined Work and reverse
-engineering for debugging such modifications, if you also do each of
-the following:
-
- a) Give prominent notice with each copy of the Combined Work that
- the Library is used in it and that the Library and its use are
- covered by this License.
-
- b) Accompany the Combined Work with a copy of the GNU GPL and this license
- document.
-
- c) For a Combined Work that displays copyright notices during
- execution, include the copyright notice for the Library among
- these notices, as well as a reference directing the user to the
- copies of the GNU GPL and this license document.
-
- d) Do one of the following:
-
- 0) Convey the Minimal Corresponding Source under the terms of this
- License, and the Corresponding Application Code in a form
- suitable for, and under terms that permit, the user to
- recombine or relink the Application with a modified version of
- the Linked Version to produce a modified Combined Work, in the
- manner specified by section 6 of the GNU GPL for conveying
- Corresponding Source.
-
- 1) Use a suitable shared library mechanism for linking with the
- Library. A suitable mechanism is one that (a) uses at run time
- a copy of the Library already present on the user's computer
- system, and (b) will operate properly with a modified version
- of the Library that is interface-compatible with the Linked
- Version.
-
- e) Provide Installation Information, but only if you would otherwise
- be required to provide such information under section 6 of the
- GNU GPL, and only to the extent that such information is
- necessary to install and execute a modified version of the
- Combined Work produced by recombining or relinking the
- Application with a modified version of the Linked Version. (If
- you use option 4d0, the Installation Information must accompany
- the Minimal Corresponding Source and Corresponding Application
- Code. If you use option 4d1, you must provide the Installation
- Information in the manner specified by section 6 of the GNU GPL
- for conveying Corresponding Source.)
-
- 5. Combined Libraries.
-
- You may place library facilities that are a work based on the
-Library side by side in a single library together with other library
-facilities that are not Applications and are not covered by this
-License, and convey such a combined library under terms of your
-choice, if you do both of the following:
-
- a) Accompany the combined library with a copy of the same work based
- on the Library, uncombined with any other library facilities,
- conveyed under the terms of this License.
-
- b) Give prominent notice with the combined library that part of it
- is a work based on the Library, and explaining where to find the
- accompanying uncombined form of the same work.
-
- 6. Revised Versions of the GNU Lesser General Public License.
-
- The Free Software Foundation may publish revised and/or new versions
-of the GNU Lesser General Public License from time to time. Such new
-versions will be similar in spirit to the present version, but may
-differ in detail to address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Library as you received it specifies that a certain numbered version
-of the GNU Lesser General Public License "or any later version"
-applies to it, you have the option of following the terms and
-conditions either of that published version or of any later version
-published by the Free Software Foundation. If the Library as you
-received it does not specify a version number of the GNU Lesser
-General Public License, you may choose any version of the GNU Lesser
-General Public License ever published by the Free Software Foundation.
-
- If the Library as you received it specifies that a proxy can decide
-whether future versions of the GNU Lesser General Public License shall
-apply, that proxy's public statement of acceptance of any version is
-permanent authorization for you to choose that version for the
-Library.
diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/bakery.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/bakery.go
deleted file mode 100644
index 32e94721..00000000
--- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/bakery.go
+++ /dev/null
@@ -1,97 +0,0 @@
-package bakery
-
-import (
- "github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checkers"
-)
-
-// Bakery is a convenience type that contains both an Oven
-// and a Checker.
-type Bakery struct {
- Oven *Oven
- Checker *Checker
-}
-
-// BakeryParams holds a selection of parameters for the Oven
-// and the Checker created by New.
-//
-// For more fine-grained control of parameters, create the
-// Oven or Checker directly.
-//
-// The zero value is OK to use, but won't allow any authentication
-// or third party caveats to be added.
-type BakeryParams struct {
- // Logger is used to send log messages. If it is nil,
- // nothing will be logged.
- Logger Logger
-
- // Checker holds the checker used to check first party caveats.
- // If this is nil, New will use checkers.New(nil).
- Checker FirstPartyCaveatChecker
-
- // RootKeyStore holds the root key store to use. If you need to
- // use a different root key store for different operations,
- // you'll need to pass a RootKeyStoreForOps value to NewOven
- // directly.
- //
- // If this is nil, New will use NewMemRootKeyStore().
-	// Note that that is almost certainly insufficient for production services
- // that are spread across multiple instances or that need
- // to persist keys across restarts.
- RootKeyStore RootKeyStore
-
- // Locator is used to find out information on third parties when
- // adding third party caveats. If this is nil, no non-local third
- // party caveats can be added.
- Locator ThirdPartyLocator
-
- // Key holds the private key of the oven. If this is nil,
- // no third party caveats may be added.
- Key *KeyPair
-
- // OpsAuthorizer is used to check whether operations are authorized
- // by some other already-authorized operation. If it is nil,
- // NewChecker will assume no operation is authorized by any
- // operation except itself.
- OpsAuthorizer OpsAuthorizer
-
- // Location holds the location to use when creating new macaroons.
- Location string
-
- // LegacyMacaroonOp holds the operation to associate with old
- // macaroons that don't have associated operations.
- // If this is empty, legacy macaroons will not be associated
- // with any operations.
- LegacyMacaroonOp Op
-}
-
-// New returns a new Bakery instance which combines an Oven with a
-// Checker for the convenience of callers that wish to use both
-// together.
-func New(p BakeryParams) *Bakery {
- if p.Checker == nil {
- p.Checker = checkers.New(nil)
- }
- ovenParams := OvenParams{
- Key: p.Key,
- Namespace: p.Checker.Namespace(),
- Location: p.Location,
- Locator: p.Locator,
- LegacyMacaroonOp: p.LegacyMacaroonOp,
- }
- if p.RootKeyStore != nil {
- ovenParams.RootKeyStoreForOps = func(ops []Op) RootKeyStore {
- return p.RootKeyStore
- }
- }
- oven := NewOven(ovenParams)
-
- checker := NewChecker(CheckerParams{
- Checker: p.Checker,
- MacaroonVerifier: oven,
- OpsAuthorizer: p.OpsAuthorizer,
- })
- return &Bakery{
- Oven: oven,
- Checker: checker,
- }
-}
diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checker.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checker.go
deleted file mode 100644
index b864e2b1..00000000
--- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checker.go
+++ /dev/null
@@ -1,503 +0,0 @@
-package bakery
-
-import (
- "context"
- "sort"
- "sync"
- "time"
-
- "gopkg.in/errgo.v1"
- "gopkg.in/macaroon.v2"
-
- "github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checkers"
-)
-
-// Op holds an entity and action to be authorized on that entity.
-type Op struct {
- // Entity holds the name of the entity to be authorized.
- // Entity names should not contain spaces and should
- // not start with the prefix "login" or "multi-" (conventionally,
- // entity names will be prefixed with the entity type followed
-	// by a hyphen).
- Entity string
-
- // Action holds the action to perform on the entity, such as "read"
- // or "delete". It is up to the service using a checker to define
- // a set of operations and keep them consistent over time.
- Action string
-}
-
-// NoOp holds the empty operation, signifying no authorized
-// operation. This is always considered to be authorized.
-// See OpsAuthorizer for one place that it's used.
-var NoOp = Op{}
-
-// CheckerParams holds parameters for NewChecker.
-type CheckerParams struct {
- // Checker is used to check first party caveats when authorizing.
- // If this is nil NewChecker will use checkers.New(nil).
- Checker FirstPartyCaveatChecker
-
- // OpsAuthorizer is used to check whether operations are authorized
- // by some other already-authorized operation. If it is nil,
- // NewChecker will assume no operation is authorized by any
- // operation except itself.
- OpsAuthorizer OpsAuthorizer
-
- // MacaroonVerifier is used to verify macaroons.
- MacaroonVerifier MacaroonVerifier
-
- // Logger is used to log checker operations. If it is nil,
- // DefaultLogger("bakery") will be used.
- Logger Logger
-}
-
-// OpsAuthorizer is used to check whether an operation authorizes some other
-// operation. For example, a macaroon with an operation allowing general access to a service
-// might also grant access to a more specific operation.
-type OpsAuthorizer interface {
- // AuthorizeOp reports which elements of queryOps are authorized by
- // authorizedOp. On return, each element of the slice should represent
- // whether the respective element in queryOps has been authorized.
- // An empty returned slice indicates that no operations are authorized.
- // AuthorizeOps may also return third party caveats that apply to
- // the authorized operations. Access will only be authorized when
- // those caveats are discharged by the client.
- //
- // When not all operations can be authorized with the macaroons
- // supplied to Checker.Auth, the checker will call AuthorizeOps
- // with NoOp, because some operations might be authorized
- // regardless of authority. NoOp will always be the last
- // operation queried within any given Allow call.
- //
- // AuthorizeOps should only return an error if authorization cannot be checked
- // (for example because of a database access failure), not because
- // authorization was denied.
- AuthorizeOps(ctx context.Context, authorizedOp Op, queryOps []Op) ([]bool, []checkers.Caveat, error)
-}
-
-// AuthInfo information about an authorization decision.
-type AuthInfo struct {
- // Macaroons holds all the macaroons that were
- // passed to Auth.
- Macaroons []macaroon.Slice
-
- // Used records which macaroons were used in the
- // authorization decision. It holds one element for
- // each element of Macaroons. Macaroons that
- // were invalid or unnecessary will have a false entry.
- Used []bool
-
- // OpIndexes holds the index of each macaroon
- // that was used to authorize an operation.
- OpIndexes map[Op]int
-}
-
-// Conditions returns the first party caveat conditions that apply to
-// the given AuthInfo. This can be used to apply appropriate caveats
-// to capability macaroons granted via a Checker.Allow call.
-func (a *AuthInfo) Conditions() []string {
- var squasher caveatSquasher
- for i, ms := range a.Macaroons {
- if !a.Used[i] {
- continue
- }
- for _, m := range ms {
- for _, cav := range m.Caveats() {
- if len(cav.VerificationId) > 0 {
- continue
- }
- squasher.add(string(cav.Id))
- }
- }
- }
- return squasher.final()
-}
-
-// Checker wraps a FirstPartyCaveatChecker and adds authentication and authorization checks.
-//
-// It uses macaroons as authorization tokens but it is not itself responsible for
-// creating the macaroons - see the Oven type (TODO) for one way of doing that.
-type Checker struct {
- FirstPartyCaveatChecker
- p CheckerParams
-}
-
-// NewChecker returns a new Checker using the given parameters.
-func NewChecker(p CheckerParams) *Checker {
- if p.Checker == nil {
- p.Checker = checkers.New(nil)
- }
- if p.Logger == nil {
- p.Logger = DefaultLogger("bakery")
- }
- return &Checker{
- FirstPartyCaveatChecker: p.Checker,
- p: p,
- }
-}
-
-// Auth makes a new AuthChecker instance using the
-// given macaroons to inform authorization decisions.
-func (c *Checker) Auth(mss ...macaroon.Slice) *AuthChecker {
- return &AuthChecker{
- Checker: c,
- macaroons: mss,
- }
-}
-
-// AuthChecker authorizes operations with respect to a user's request.
-type AuthChecker struct {
- // Checker is used to check first party caveats.
- *Checker
- macaroons []macaroon.Slice
- // conditions holds the first party caveat conditions
- // that apply to each of the above macaroons.
- conditions [][]string
- initOnce sync.Once
- initError error
- initErrors []error
- // authIndexes holds for each potentially authorized operation
- // the indexes of the macaroons that authorize it.
- authIndexes map[Op][]int
-}
-
-func (a *AuthChecker) init(ctx context.Context) error {
- a.initOnce.Do(func() {
- a.initError = a.initOnceFunc(ctx)
- })
- return a.initError
-}
-
-func (a *AuthChecker) initOnceFunc(ctx context.Context) error {
- a.authIndexes = make(map[Op][]int)
- a.conditions = make([][]string, len(a.macaroons))
- for i, ms := range a.macaroons {
- ops, conditions, err := a.p.MacaroonVerifier.VerifyMacaroon(ctx, ms)
- if err != nil {
- if !isVerificationError(err) {
- return errgo.Notef(err, "cannot retrieve macaroon")
- }
- a.initErrors = append(a.initErrors, errgo.Mask(err))
- continue
- }
- a.p.Logger.Debugf(ctx, "macaroon %d has valid sig; ops %q, conditions %q", i, ops, conditions)
- // It's a valid macaroon (in principle - we haven't checked first party caveats).
- a.conditions[i] = conditions
- for _, op := range ops {
- a.authIndexes[op] = append(a.authIndexes[op], i)
- }
- }
- return nil
-}
-
-// Allowed returns an AuthInfo that provides information on all
-// operations directly authorized by the macaroons provided
-// to Checker.Auth. Note that this does not include operations that would be indirectly
-// allowed via the OpAuthorizer.
-//
-// Allowed returns an error only when there is an underlying storage failure,
-// not when operations are not authorized.
-func (a *AuthChecker) Allowed(ctx context.Context) (*AuthInfo, error) {
- actx, err := a.newAllowContext(ctx, nil)
- if err != nil {
- return nil, errgo.Mask(err)
- }
- for op, mindexes := range a.authIndexes {
- for _, mindex := range mindexes {
- if actx.status[mindex]&statusOK != 0 {
- actx.status[mindex] |= statusUsed
- actx.opIndexes[op] = mindex
- break
- }
- }
- }
- return actx.newAuthInfo(), nil
-}
-
-func (a *allowContext) newAuthInfo() *AuthInfo {
- info := &AuthInfo{
- Macaroons: a.checker.macaroons,
- Used: make([]bool, len(a.checker.macaroons)),
- OpIndexes: a.opIndexes,
- }
- for i, status := range a.status {
- if status&statusUsed != 0 {
- info.Used[i] = true
- }
- }
- return info
-}
-
-// allowContext holds temporary state used by AuthChecker.allowAny.
-type allowContext struct {
- checker *AuthChecker
-
- // status holds used and authorized status of all the
- // request macaroons.
- status []macaroonStatus
-
- // opIndex holds an entry for each authorized operation
- // that refers to the macaroon that authorized that operation.
- opIndexes map[Op]int
-
- // authed holds which of the requested operations have
- // been authorized so far.
- authed []bool
-
- // need holds all of the requested operations that
- // are remaining to be authorized. needIndex holds the
- // index of each of these operations in the original operations slice
- need []Op
- needIndex []int
-
- // errors holds any errors encountered during authorization.
- errors []error
-}
-
-type macaroonStatus uint8
-
-const (
- statusOK = 1 << iota
- statusUsed
-)
-
-func (a *AuthChecker) newAllowContext(ctx context.Context, ops []Op) (*allowContext, error) {
- actx := &allowContext{
- checker: a,
- status: make([]macaroonStatus, len(a.macaroons)),
- authed: make([]bool, len(ops)),
- need: append([]Op(nil), ops...),
- needIndex: make([]int, len(ops)),
- opIndexes: make(map[Op]int),
- }
- for i := range actx.needIndex {
- actx.needIndex[i] = i
- }
- if err := a.init(ctx); err != nil {
- return actx, errgo.Mask(err)
- }
- // Check all the macaroons with respect to the current context.
- // Technically this is more than we need to do, because some
- // of the macaroons might not authorize the specific operations
- // we're interested in, but that's an optimisation that could happen
- // later if performance becomes an issue with respect to that.
-outer:
- for i, ms := range a.macaroons {
- ctx := checkers.ContextWithMacaroons(ctx, a.Namespace(), ms)
- for _, cond := range a.conditions[i] {
- if err := a.CheckFirstPartyCaveat(ctx, cond); err != nil {
- actx.addError(err)
- continue outer
- }
- }
- actx.status[i] = statusOK
- }
- return actx, nil
-}
-
-// Macaroons returns the macaroons that were passed
-// to Checker.Auth when creating the AuthChecker.
-func (a *AuthChecker) Macaroons() []macaroon.Slice {
- return a.macaroons
-}
-
-// Allow checks that the authorizer's request is authorized to
-// perform all the given operations.
-//
-// If all the operations are allowed, an AuthInfo is returned holding
-// details of the decision.
-//
-// If an operation was not allowed, an error will be returned which may
-// be *DischargeRequiredError holding the operations that remain to
-// be authorized in order to allow authorization to
-// proceed.
-func (a *AuthChecker) Allow(ctx context.Context, ops ...Op) (*AuthInfo, error) {
- actx, err := a.newAllowContext(ctx, ops)
- if err != nil {
- return nil, errgo.Mask(err)
- }
- actx.checkDirect(ctx)
- if len(actx.need) == 0 {
- return actx.newAuthInfo(), nil
- }
- caveats, err := actx.checkIndirect(ctx)
- if err != nil {
- return nil, errgo.Mask(err)
- }
- if len(actx.need) == 0 && len(caveats) == 0 {
- // No more ops need to be authenticated and no caveats to be discharged.
- return actx.newAuthInfo(), nil
- }
- a.p.Logger.Debugf(ctx, "operations still needed after auth check: %#v", actx.need)
- if len(caveats) == 0 || len(actx.need) > 0 {
- allErrors := make([]error, 0, len(a.initErrors)+len(actx.errors))
- allErrors = append(allErrors, a.initErrors...)
- allErrors = append(allErrors, actx.errors...)
- var err error
- if len(allErrors) > 0 {
- // TODO return all errors?
- a.p.Logger.Infof(ctx, "all auth errors: %q", allErrors)
- err = allErrors[0]
- }
- return nil, errgo.WithCausef(err, ErrPermissionDenied, "")
- }
- return nil, &DischargeRequiredError{
- Message: "some operations have extra caveats",
- Ops: ops,
- Caveats: caveats,
- }
-}
-
-// checkDirect checks which operations are directly authorized by
-// the macaroon operations.
-func (a *allowContext) checkDirect(ctx context.Context) {
- defer a.updateNeed()
- for i, op := range a.need {
- if op == NoOp {
- // NoOp is always authorized.
- a.authed[a.needIndex[i]] = true
- continue
- }
- for _, mindex := range a.checker.authIndexes[op] {
- if a.status[mindex]&statusOK != 0 {
- a.authed[a.needIndex[i]] = true
- a.status[mindex] |= statusUsed
- a.opIndexes[op] = mindex
- break
- }
- }
- }
-}
-
-// checkIndirect checks to see if any of the remaining operations are authorized
-// indirectly with the already-authorized operations.
-func (a *allowContext) checkIndirect(ctx context.Context) ([]checkers.Caveat, error) {
- if a.checker.p.OpsAuthorizer == nil {
- return nil, nil
- }
- var allCaveats []checkers.Caveat
- for op, mindexes := range a.checker.authIndexes {
- if len(a.need) == 0 {
- break
- }
- for _, mindex := range mindexes {
- if a.status[mindex]&statusOK == 0 {
- continue
- }
- ctx := checkers.ContextWithMacaroons(ctx, a.checker.Namespace(), a.checker.macaroons[mindex])
- authedOK, caveats, err := a.checker.p.OpsAuthorizer.AuthorizeOps(ctx, op, a.need)
- if err != nil {
- return nil, errgo.Mask(err)
- }
- // TODO we could perhaps combine identical third party caveats here.
- allCaveats = append(allCaveats, caveats...)
- for i, ok := range authedOK {
- if !ok {
- continue
- }
- // Operation is authorized. Mark the appropriate macaroon as used,
- // and remove the operation from the needed list so that we don't
- // bother AuthorizeOps with it again.
- a.status[mindex] |= statusUsed
- a.authed[a.needIndex[i]] = true
- a.opIndexes[a.need[i]] = mindex
- }
- }
- a.updateNeed()
- }
- if len(a.need) == 0 {
- return allCaveats, nil
- }
- // We've still got at least one operation unauthorized.
- // Try to see if it can be authorized with no operation at all.
- authedOK, caveats, err := a.checker.p.OpsAuthorizer.AuthorizeOps(ctx, NoOp, a.need)
- if err != nil {
- return nil, errgo.Mask(err)
- }
- allCaveats = append(allCaveats, caveats...)
- for i, ok := range authedOK {
- if ok {
- a.authed[a.needIndex[i]] = true
- }
- }
- a.updateNeed()
- return allCaveats, nil
-}
-
-// updateNeed removes all authorized operations from a.need
-// and updates a.needIndex appropriately too.
-func (a *allowContext) updateNeed() {
- j := 0
- for i, opIndex := range a.needIndex {
- if a.authed[opIndex] {
- continue
- }
- if i != j {
- a.need[j], a.needIndex[j] = a.need[i], a.needIndex[i]
- }
- j++
- }
- a.need, a.needIndex = a.need[0:j], a.needIndex[0:j]
-}
-
-func (a *allowContext) addError(err error) {
- a.errors = append(a.errors, err)
-}
-
-// caveatSquasher rationalizes first party caveats created for a capability
-// by:
-// - including only the earliest time-before caveat.
-// - removing duplicates.
-type caveatSquasher struct {
- expiry time.Time
- conds []string
-}
-
-func (c *caveatSquasher) add(cond string) {
- if c.add0(cond) {
- c.conds = append(c.conds, cond)
- }
-}
-
-func (c *caveatSquasher) add0(cond string) bool {
- cond, args, err := checkers.ParseCaveat(cond)
- if err != nil {
- // Be safe - if we can't parse the caveat, just leave it there.
- return true
- }
- if cond != checkers.CondTimeBefore {
- return true
- }
- et, err := time.Parse(time.RFC3339Nano, args)
- if err != nil || et.IsZero() {
- // Again, if it doesn't seem valid, leave it alone.
- return true
- }
- if c.expiry.IsZero() || et.Before(c.expiry) {
- c.expiry = et
- }
- return false
-}
-
-func (c *caveatSquasher) final() []string {
- if !c.expiry.IsZero() {
- c.conds = append(c.conds, checkers.TimeBeforeCaveat(c.expiry).Condition)
- }
- if len(c.conds) == 0 {
- return nil
- }
- // Make deterministic and eliminate duplicates.
- sort.Strings(c.conds)
- prev := c.conds[0]
- j := 1
- for _, cond := range c.conds[1:] {
- if cond != prev {
- c.conds[j] = cond
- prev = cond
- j++
- }
- }
- c.conds = c.conds[:j]
- return c.conds
-}
diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checkers/checkers.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checkers/checkers.go
deleted file mode 100644
index 153b31d2..00000000
--- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checkers/checkers.go
+++ /dev/null
@@ -1,246 +0,0 @@
-// The checkers package provides some standard first-party
-// caveat checkers and some primitives for combining them.
-package checkers
-
-import (
- "context"
- "fmt"
- "sort"
- "strings"
-
- "gopkg.in/errgo.v1"
-)
-
-// StdNamespace holds the URI of the standard checkers schema.
-const StdNamespace = "std"
-
-// Constants for all the standard caveat conditions.
-// First and third party caveat conditions are both defined here,
-// even though notionally they exist in separate name spaces.
-const (
- CondDeclared = "declared"
- CondTimeBefore = "time-before"
- CondError = "error"
-)
-
-const (
- CondNeedDeclared = "need-declared"
-)
-
-// Func is the type of a function used by Checker to check a caveat. The
-// cond parameter will hold the caveat condition including any namespace
-// prefix; the arg parameter will hold any additional caveat argument
-// text.
-type Func func(ctx context.Context, cond, arg string) error
-
-// CheckerInfo holds information on a registered checker.
-type CheckerInfo struct {
- // Check holds the actual checker function.
- Check Func
- // Prefix holds the prefix for the checker condition.
- Prefix string
- // Name holds the name of the checker condition.
- Name string
- // Namespace holds the namespace URI for the checker's
- // schema.
- Namespace string
-}
-
-var allCheckers = map[string]Func{
- CondTimeBefore: checkTimeBefore,
- CondDeclared: checkDeclared,
- CondError: checkError,
-}
-
-// NewEmpty returns a checker using the given namespace
-// that has no registered checkers.
-// If ns is nil, a new one will be created.
-func NewEmpty(ns *Namespace) *Checker {
- if ns == nil {
- ns = NewNamespace(nil)
- }
- return &Checker{
- namespace: ns,
- checkers: make(map[string]CheckerInfo),
- }
-}
-
-// RegisterStd registers all the standard checkers in the given checker.
-// If not present already, the standard checkers schema (StdNamespace) is
-// added to the checker's namespace with an empty prefix.
-func RegisterStd(c *Checker) {
- c.namespace.Register(StdNamespace, "")
- for cond, check := range allCheckers {
- c.Register(cond, StdNamespace, check)
- }
-}
-
-// New returns a checker with all the standard caveats checkers registered.
-// If ns is nil, a new one will be created.
-// The standard namespace is also added to ns if not present.
-func New(ns *Namespace) *Checker {
- c := NewEmpty(ns)
- RegisterStd(c)
- return c
-}
-
-// Checker holds a set of checkers for first party caveats.
-// It implements bakery.CheckFirstParty caveat.
-type Checker struct {
- namespace *Namespace
- checkers map[string]CheckerInfo
-}
-
-// Register registers the given condition in the given namespace URI
-// to be checked with the given check function.
-// It will panic if the namespace is not registered or
-// if the condition has already been registered.
-func (c *Checker) Register(cond, uri string, check Func) {
- if check == nil {
- panic(fmt.Errorf("nil check function registered for namespace %q when registering condition %q", uri, cond))
- }
- prefix, ok := c.namespace.Resolve(uri)
- if !ok {
- panic(fmt.Errorf("no prefix registered for namespace %q when registering condition %q", uri, cond))
- }
- if prefix == "" && strings.Contains(cond, ":") {
- panic(fmt.Errorf("caveat condition %q in namespace %q contains a colon but its prefix is empty", cond, uri))
- }
- fullCond := ConditionWithPrefix(prefix, cond)
- if info, ok := c.checkers[fullCond]; ok {
- panic(fmt.Errorf("checker for %q (namespace %q) already registered in namespace %q", fullCond, uri, info.Namespace))
- }
- c.checkers[fullCond] = CheckerInfo{
- Check: check,
- Namespace: uri,
- Name: cond,
- Prefix: prefix,
- }
-}
-
-// Info returns information on all the registered checkers, sorted by namespace
-// and then name.
-func (c *Checker) Info() []CheckerInfo {
- checkers := make([]CheckerInfo, 0, len(c.checkers))
- for _, c := range c.checkers {
- checkers = append(checkers, c)
- }
- sort.Sort(checkerInfoByName(checkers))
- return checkers
-}
-
-// Namespace returns the namespace associated with the
-// checker. It implements bakery.FirstPartyCaveatChecker.Namespace.
-func (c *Checker) Namespace() *Namespace {
- return c.namespace
-}
-
-// CheckFirstPartyCaveat implements bakery.FirstPartyCaveatChecker
-// by checking the caveat against all registered caveats conditions.
-func (c *Checker) CheckFirstPartyCaveat(ctx context.Context, cav string) error {
- cond, arg, err := ParseCaveat(cav)
- if err != nil {
- // If we can't parse it, perhaps it's in some other format,
- // return a not-recognised error.
- return errgo.WithCausef(err, ErrCaveatNotRecognized, "cannot parse caveat %q", cav)
- }
- cf, ok := c.checkers[cond]
- if !ok {
- return errgo.NoteMask(ErrCaveatNotRecognized, fmt.Sprintf("caveat %q not satisfied", cav), errgo.Any)
- }
- if err := cf.Check(ctx, cond, arg); err != nil {
- return errgo.NoteMask(err, fmt.Sprintf("caveat %q not satisfied", cav), errgo.Any)
- }
- return nil
-}
-
-var errBadCaveat = errgo.New("bad caveat")
-
-func checkError(ctx context.Context, _, arg string) error {
- return errBadCaveat
-}
-
-// ErrCaveatNotRecognized is the cause of errors returned
-// from caveat checkers when the caveat was not
-// recognized.
-var ErrCaveatNotRecognized = errgo.New("caveat not recognized")
-
-// Caveat represents a condition that must be true for a check to
-// complete successfully. If Location is non-empty, the caveat must be
-// discharged by a third party at the given location.
-// The Namespace field holds the namespace URI of the
-// condition - if it is non-empty, it will be converted to
-// a namespace prefix before adding to the macaroon.
-type Caveat struct {
- Condition string
- Namespace string
- Location string
-}
-
-// Condition builds a caveat condition from the given name and argument.
-func Condition(name, arg string) string {
- if arg == "" {
- return name
- }
- return name + " " + arg
-}
-
-func firstParty(name, arg string) Caveat {
- return Caveat{
- Condition: Condition(name, arg),
- Namespace: StdNamespace,
- }
-}
-
-// ParseCaveat parses a caveat into an identifier, identifying the
-// checker that should be used, and the argument to the checker (the
-// rest of the string).
-//
-// The identifier is taken from all the characters before the first
-// space character.
-func ParseCaveat(cav string) (cond, arg string, err error) {
- if cav == "" {
- return "", "", fmt.Errorf("empty caveat")
- }
- i := strings.IndexByte(cav, ' ')
- if i < 0 {
- return cav, "", nil
- }
- if i == 0 {
- return "", "", fmt.Errorf("caveat starts with space character")
- }
- return cav[0:i], cav[i+1:], nil
-}
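
A self-contained sketch of the parsing contract documented above; parseCaveat is a hypothetical stand-in mirroring the removed function:

package main

import (
	"fmt"
	"strings"
)

// parseCaveat splits a caveat into the identifier (everything before the
// first space) and the argument (the rest of the string).
func parseCaveat(cav string) (cond, arg string, err error) {
	if cav == "" {
		return "", "", fmt.Errorf("empty caveat")
	}
	i := strings.IndexByte(cav, ' ')
	if i < 0 {
		return cav, "", nil
	}
	if i == 0 {
		return "", "", fmt.Errorf("caveat starts with space character")
	}
	return cav[:i], cav[i+1:], nil
}

func main() {
	cond, arg, _ := parseCaveat("time-before 2024-01-01T00:00:00Z")
	fmt.Println(cond, "|", arg) // time-before | 2024-01-01T00:00:00Z
}
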
-
-// ErrorCaveatf returns a caveat that will never be satisfied, holding
-// the given fmt.Sprintf formatted text as the text of the caveat.
-//
-// This should only be used for highly unusual conditions that are never
-// expected to happen in practice, such as a malformed key that is
-// conventionally passed as a constant. It's not a panic but you should
-// only use it in cases where a panic might possibly be appropriate.
-//
-// This mechanism means that caveats can be created without error
-// checking and a later systematic check at a higher level (in the
-// bakery package) can produce an error instead.
-func ErrorCaveatf(f string, a ...interface{}) Caveat {
- return firstParty(CondError, fmt.Sprintf(f, a...))
-}
-
-type checkerInfoByName []CheckerInfo
-
-func (c checkerInfoByName) Less(i, j int) bool {
- info0, info1 := &c[i], &c[j]
- if info0.Namespace != info1.Namespace {
- return info0.Namespace < info1.Namespace
- }
- return info0.Name < info1.Name
-}
-
-func (c checkerInfoByName) Swap(i, j int) {
- c[i], c[j] = c[j], c[i]
-}
-
-func (c checkerInfoByName) Len() int {
- return len(c)
-}
diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checkers/declared.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checkers/declared.go
deleted file mode 100644
index f41d6c98..00000000
--- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checkers/declared.go
+++ /dev/null
@@ -1,137 +0,0 @@
-package checkers
-
-import (
- "context"
- "strings"
-
- "gopkg.in/errgo.v1"
- "gopkg.in/macaroon.v2"
-)
-
-type macaroonsKey struct{}
-
-type macaroonsValue struct {
- ns *Namespace
- ms macaroon.Slice
-}
-
-// ContextWithMacaroons returns the given context associated with a
-// macaroon slice and the name space to use to interpret caveats in
-// the macaroons.
-func ContextWithMacaroons(ctx context.Context, ns *Namespace, ms macaroon.Slice) context.Context {
- return context.WithValue(ctx, macaroonsKey{}, macaroonsValue{
- ns: ns,
- ms: ms,
- })
-}
-
-// MacaroonsFromContext returns the namespace and macaroons associated
-// with the context by ContextWithMacaroons. This can be used to
-// implement "structural" first-party caveats that are predicated on
-// the macaroons being validated.
-func MacaroonsFromContext(ctx context.Context) (*Namespace, macaroon.Slice) {
- v, _ := ctx.Value(macaroonsKey{}).(macaroonsValue)
- return v.ns, v.ms
-}
-
-// DeclaredCaveat returns a "declared" caveat asserting that the given key is
-// set to the given value. If a macaroon has exactly one first party
-// caveat asserting the value of a particular key, then InferDeclared
-// will be able to infer the value, and then DeclaredChecker will allow
-// the declared value if it has the value specified here.
-//
-// If the key is empty or contains a space, DeclaredCaveat
-// will return an error caveat.
-func DeclaredCaveat(key string, value string) Caveat {
- if strings.Contains(key, " ") || key == "" {
- return ErrorCaveatf("invalid caveat 'declared' key %q", key)
- }
- return firstParty(CondDeclared, key+" "+value)
-}
-
-// NeedDeclaredCaveat returns a third party caveat that
-// wraps the provided third party caveat and requires
-// that the third party must add "declared" caveats for
-// all the named keys.
-// TODO(rog) namespaces in third party caveats?
-func NeedDeclaredCaveat(cav Caveat, keys ...string) Caveat {
- if cav.Location == "" {
- return ErrorCaveatf("need-declared caveat is not third-party")
- }
- return Caveat{
- Location: cav.Location,
- Condition: CondNeedDeclared + " " + strings.Join(keys, ",") + " " + cav.Condition,
- }
-}
-
-func checkDeclared(ctx context.Context, _, arg string) error {
- parts := strings.SplitN(arg, " ", 2)
- if len(parts) != 2 {
- return errgo.Newf("declared caveat has no value")
- }
- ns, ms := MacaroonsFromContext(ctx)
- attrs := InferDeclared(ns, ms)
- val, ok := attrs[parts[0]]
- if !ok {
- return errgo.Newf("got %s=null, expected %q", parts[0], parts[1])
- }
- if val != parts[1] {
- return errgo.Newf("got %s=%q, expected %q", parts[0], val, parts[1])
- }
- return nil
-}
-
-// InferDeclared retrieves any declared information from
-// the given macaroons and returns it as a key-value map.
-//
-// Information is declared with a first party caveat as created
-// by DeclaredCaveat.
-//
-// If there are two caveats that declare the same key with
-// different values, the information is omitted from the map.
-// When the caveats are later checked, this will cause the
-// check to fail.
-func InferDeclared(ns *Namespace, ms macaroon.Slice) map[string]string {
- var conditions []string
- for _, m := range ms {
- for _, cav := range m.Caveats() {
- if cav.Location == "" {
- conditions = append(conditions, string(cav.Id))
- }
- }
- }
- return InferDeclaredFromConditions(ns, conditions)
-}
-
-// InferDeclaredFromConditions is like InferDeclared except that
-// it is passed a set of first party caveat conditions rather than a set of macaroons.
-func InferDeclaredFromConditions(ns *Namespace, conds []string) map[string]string {
- var conflicts []string
-	// If we can't resolve the standard namespace, then we'll look
-	// for bare "declared" caveats, which will work for legacy
-	// macaroons with no namespace.
- prefix, _ := ns.Resolve(StdNamespace)
- declaredCond := prefix + CondDeclared
-
- info := make(map[string]string)
- for _, cond := range conds {
- name, rest, _ := ParseCaveat(cond)
- if name != declaredCond {
- continue
- }
- parts := strings.SplitN(rest, " ", 2)
- if len(parts) != 2 {
- continue
- }
- key, val := parts[0], parts[1]
- if oldVal, ok := info[key]; ok && oldVal != val {
- conflicts = append(conflicts, key)
- continue
- }
- info[key] = val
- }
- for _, key := range conflicts {
- delete(info, key)
- }
- return info
-}
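
For reference, the declared-attribute round trip described above can be exercised directly with a plain macaroon.v2 macaroon (made-up root key and id; a nil namespace resolves to the bare "declared" condition, as with legacy macaroons):

package main

import (
	"fmt"

	"github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checkers"
	"gopkg.in/macaroon.v2"
)

func main() {
	m, err := macaroon.New([]byte("root key"), []byte("id"), "", macaroon.V2)
	if err != nil {
		panic(err)
	}
	// Declare username=bob as a first-party caveat.
	cav := checkers.DeclaredCaveat("username", "bob")
	if err := m.AddFirstPartyCaveat([]byte(cav.Condition)); err != nil {
		panic(err)
	}
	// InferDeclared recovers the attribute from the caveat conditions.
	attrs := checkers.InferDeclared(nil, macaroon.Slice{m})
	fmt.Println(attrs["username"]) // bob
}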
diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checkers/namespace.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checkers/namespace.go
deleted file mode 100644
index 8fbc8f87..00000000
--- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checkers/namespace.go
+++ /dev/null
@@ -1,214 +0,0 @@
-package checkers
-
-import (
- "sort"
- "strings"
- "unicode"
- "unicode/utf8"
-
- "gopkg.in/errgo.v1"
-)
-
-// Namespace holds maps from schema URIs to the
-// prefixes that are used to encode them in first party
-// caveats. Several different URIs may map to the same
-// prefix - this is usual when several different backwardly
-// compatible schema versions are registered.
-type Namespace struct {
- uriToPrefix map[string]string
-}
-
-// Equal reports whether ns2 encodes the same namespace
-// as the receiver.
-func (ns1 *Namespace) Equal(ns2 *Namespace) bool {
- if ns1 == ns2 || ns1 == nil || ns2 == nil {
- return ns1 == ns2
- }
- if len(ns1.uriToPrefix) != len(ns2.uriToPrefix) {
- return false
- }
- for k, v := range ns1.uriToPrefix {
- if ns2.uriToPrefix[k] != v {
- return false
- }
- }
- return true
-}
-
-// NewNamespace returns a new namespace with the
-// given initial contents. It will panic if any of the
-// URI keys or their associated prefix are invalid
-// (see IsValidSchemaURI and IsValidPrefix).
-func NewNamespace(uriToPrefix map[string]string) *Namespace {
- ns := &Namespace{
- uriToPrefix: make(map[string]string),
- }
- for uri, prefix := range uriToPrefix {
- ns.Register(uri, prefix)
- }
- return ns
-}
-
-// String returns the namespace representation as returned by
-// ns.MarshalText.
-func (ns *Namespace) String() string {
- data, _ := ns.MarshalText()
- return string(data)
-}
-
-// MarshalText implements encoding.TextMarshaler by
-// returning all the elements in the namespace sorted by
-// URI, joined to the associated prefix with a colon and
-// separated with spaces.
-func (ns *Namespace) MarshalText() ([]byte, error) {
- if ns == nil || len(ns.uriToPrefix) == 0 {
- return nil, nil
- }
- uris := make([]string, 0, len(ns.uriToPrefix))
- dataLen := 0
- for uri, prefix := range ns.uriToPrefix {
- uris = append(uris, uri)
- dataLen += len(uri) + 1 + len(prefix) + 1
- }
- sort.Strings(uris)
- data := make([]byte, 0, dataLen)
- for i, uri := range uris {
- if i > 0 {
- data = append(data, ' ')
- }
- data = append(data, uri...)
- data = append(data, ':')
- data = append(data, ns.uriToPrefix[uri]...)
- }
- return data, nil
-}
-
-func (ns *Namespace) UnmarshalText(data []byte) error {
- uriToPrefix := make(map[string]string)
- elems := strings.Fields(string(data))
- for _, elem := range elems {
- i := strings.LastIndex(elem, ":")
- if i == -1 {
- return errgo.Newf("no colon in namespace field %q", elem)
- }
- uri, prefix := elem[0:i], elem[i+1:]
- if !IsValidSchemaURI(uri) {
- // Currently this can't happen because the only invalid URIs
- // are those which contain a space
- return errgo.Newf("invalid URI %q in namespace field %q", uri, elem)
- }
- if !IsValidPrefix(prefix) {
- return errgo.Newf("invalid prefix %q in namespace field %q", prefix, elem)
- }
- if _, ok := uriToPrefix[uri]; ok {
- return errgo.Newf("duplicate URI %q in namespace %q", uri, data)
- }
- uriToPrefix[uri] = prefix
- }
- ns.uriToPrefix = uriToPrefix
- return nil
-}
-
-// EnsureResolved tries to resolve the given schema URI to a prefix and
-// returns the prefix and whether the resolution was successful. If the
-// URI hasn't been registered but a compatible version has, the
-// given URI is registered with the same prefix.
-func (ns *Namespace) EnsureResolved(uri string) (string, bool) {
- // TODO(rog) compatibility
- return ns.Resolve(uri)
-}
-
-// Resolve resolves the given schema URI to its registered prefix and
-// returns the prefix and whether the resolution was successful.
-//
-// If ns is nil, it is treated as if it were empty.
-//
-// Resolve does not mutate ns and may be called concurrently
-// with other non-mutating Namespace methods.
-func (ns *Namespace) Resolve(uri string) (string, bool) {
- if ns == nil {
- return "", false
- }
- prefix, ok := ns.uriToPrefix[uri]
- return prefix, ok
-}
-
-// ResolveCaveat resolves the given caveat by using
-// Resolve to map from its schema namespace to the appropriate prefix.
-// If there is no registered prefix for the namespace,
-// it returns an error caveat.
-//
-// If cav.Namespace is empty or cav.Location is non-empty, it returns cav unchanged.
-//
-// If ns is nil, it is treated as if it were empty.
-//
-// ResolveCaveat does not mutate ns and may be called concurrently
-// with other non-mutating Namespace methods.
-func (ns *Namespace) ResolveCaveat(cav Caveat) Caveat {
- // TODO(rog) If a namespace isn't registered, try to resolve it by
- // resolving it to the latest compatible version that is
- // registered.
- if cav.Namespace == "" || cav.Location != "" {
- return cav
- }
- prefix, ok := ns.Resolve(cav.Namespace)
- if !ok {
- errCav := ErrorCaveatf("caveat %q in unregistered namespace %q", cav.Condition, cav.Namespace)
- if errCav.Namespace != cav.Namespace {
- prefix, _ = ns.Resolve(errCav.Namespace)
- }
- cav = errCav
- }
- if prefix != "" {
- cav.Condition = ConditionWithPrefix(prefix, cav.Condition)
- }
- cav.Namespace = ""
- return cav
-}
-
-// ConditionWithPrefix returns the given string prefixed by the
-// given prefix. If the prefix is non-empty, a colon
-// is used to separate them.
-func ConditionWithPrefix(prefix, condition string) string {
- if prefix == "" {
- return condition
- }
- return prefix + ":" + condition
-}
-
-// Register registers the given URI and associates it
-// with the given prefix. If the URI has already been registered,
-// this is a no-op.
-func (ns *Namespace) Register(uri, prefix string) {
- if !IsValidSchemaURI(uri) {
- panic(errgo.Newf("cannot register invalid URI %q (prefix %q)", uri, prefix))
- }
- if !IsValidPrefix(prefix) {
- panic(errgo.Newf("cannot register invalid prefix %q for URI %q", prefix, uri))
- }
- if _, ok := ns.uriToPrefix[uri]; !ok {
- ns.uriToPrefix[uri] = prefix
- }
-}
-
-func invalidSchemaRune(r rune) bool {
- return unicode.IsSpace(r)
-}
-
-// IsValidSchemaURI reports whether the given argument is suitable for
-// use as a namespace schema URI. It must be non-empty, a valid UTF-8
-// string and it must not contain white space.
-func IsValidSchemaURI(uri string) bool {
- // TODO more stringent requirements?
- return len(uri) > 0 &&
- utf8.ValidString(uri) &&
- strings.IndexFunc(uri, invalidSchemaRune) == -1
-}
-
-func invalidPrefixRune(r rune) bool {
- return r == ' ' || r == ':' || unicode.IsSpace(r)
-}
-
-// IsValidPrefix reports whether the given argument is suitable for use
-// as a namespace prefix. It must be a valid UTF-8 string containing no
-// white space or colon.
-func IsValidPrefix(prefix string) bool {
- return utf8.ValidString(prefix) && strings.IndexFunc(prefix, invalidPrefixRune) == -1
-}
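
A short sketch of registration, resolution and the text form (the schema URI and prefix are invented):

package main

import (
	"fmt"

	"github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checkers"
)

func main() {
	// Register the standard namespace with an empty prefix and a
	// custom schema with the prefix "ex".
	ns := checkers.NewNamespace(map[string]string{
		checkers.StdNamespace: "",
	})
	ns.Register("http://example.com/cavs/v1", "ex")

	prefix, ok := ns.Resolve("http://example.com/cavs/v1")
	fmt.Println(prefix, ok) // ex true

	// MarshalText emits space-separated "uri:prefix" pairs sorted by URI.
	fmt.Println(ns.String())
}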
diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checkers/time.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checkers/time.go
deleted file mode 100644
index bd71cbbc..00000000
--- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checkers/time.go
+++ /dev/null
@@ -1,97 +0,0 @@
-package checkers
-
-import (
- "context"
- "fmt"
- "time"
-
- "gopkg.in/errgo.v1"
- "gopkg.in/macaroon.v2"
-)
-
-// Clock represents a clock that can be faked for testing purposes.
-type Clock interface {
- Now() time.Time
-}
-
-type timeKey struct{}
-
-func ContextWithClock(ctx context.Context, clock Clock) context.Context {
- if clock == nil {
- return ctx
- }
- return context.WithValue(ctx, timeKey{}, clock)
-}
-
-func clockFromContext(ctx context.Context) Clock {
- c, _ := ctx.Value(timeKey{}).(Clock)
- return c
-}
-
-func checkTimeBefore(ctx context.Context, _, arg string) error {
- var now time.Time
- if clock := clockFromContext(ctx); clock != nil {
- now = clock.Now()
- } else {
- now = time.Now()
- }
- t, err := time.Parse(time.RFC3339Nano, arg)
- if err != nil {
- return errgo.Mask(err)
- }
- if !now.Before(t) {
- return fmt.Errorf("macaroon has expired")
- }
- return nil
-}
-
-// TimeBeforeCaveat returns a caveat that specifies that
-// the time that it is checked should be before t.
-func TimeBeforeCaveat(t time.Time) Caveat {
- return firstParty(CondTimeBefore, t.UTC().Format(time.RFC3339Nano))
-}
-
-// ExpiryTime returns the minimum time of any time-before caveats found
-// in the given slice and whether there were any such caveats found.
-//
-// The ns parameter is used to determine the standard namespace prefix - if
-// the standard namespace is not found, the empty prefix is assumed.
-func ExpiryTime(ns *Namespace, cavs []macaroon.Caveat) (time.Time, bool) {
- prefix, _ := ns.Resolve(StdNamespace)
- timeBeforeCond := ConditionWithPrefix(prefix, CondTimeBefore)
- var t time.Time
- var expires bool
- for _, cav := range cavs {
- cav := string(cav.Id)
- name, rest, _ := ParseCaveat(cav)
- if name != timeBeforeCond {
- continue
- }
- et, err := time.Parse(time.RFC3339Nano, rest)
- if err != nil {
- continue
- }
- if !expires || et.Before(t) {
- t = et
- expires = true
- }
- }
- return t, expires
-}
-
-// MacaroonsExpiryTime returns the minimum time of any time-before
-// caveats found in the given macaroons and whether there were
-// any such caveats found.
-func MacaroonsExpiryTime(ns *Namespace, ms macaroon.Slice) (time.Time, bool) {
- var t time.Time
- var expires bool
- for _, m := range ms {
- if et, ex := ExpiryTime(ns, m.Caveats()); ex {
- if !expires || et.Before(t) {
- t = et
- expires = true
- }
- }
- }
- return t, expires
-}
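
To illustrate ExpiryTime's minimum-expiry behaviour (plain macaroon.v2 construction; nil namespace so the bare condition name matches):

package main

import (
	"fmt"
	"time"

	"github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checkers"
	"gopkg.in/macaroon.v2"
)

func main() {
	m, err := macaroon.New([]byte("root key"), []byte("id"), "", macaroon.V2)
	if err != nil {
		panic(err)
	}
	// Add two time-before caveats; the effective expiry is the earliest.
	for _, d := range []time.Duration{time.Hour, time.Minute} {
		cav := checkers.TimeBeforeCaveat(time.Now().Add(d))
		if err := m.AddFirstPartyCaveat([]byte(cav.Condition)); err != nil {
			panic(err)
		}
	}
	t, ok := checkers.ExpiryTime(nil, m.Caveats())
	fmt.Println(t, ok) // roughly one minute from now, true
}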
diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/codec.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/codec.go
deleted file mode 100644
index fb76ba55..00000000
--- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/codec.go
+++ /dev/null
@@ -1,381 +0,0 @@
-package bakery
-
-import (
- "bytes"
- "crypto/rand"
- "encoding/base64"
- "encoding/binary"
- "encoding/json"
-
- "golang.org/x/crypto/nacl/box"
- "gopkg.in/errgo.v1"
-
- "github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checkers"
-)
-
-type caveatRecord struct {
- RootKey []byte
- Condition string
-}
-
-// caveatJSON defines the format of a V1 JSON-encoded third party caveat id.
-type caveatJSON struct {
- ThirdPartyPublicKey *PublicKey
- FirstPartyPublicKey *PublicKey
- Nonce []byte
- Id string
-}
-
-// encodeCaveat encrypts a third-party caveat with the given condition
-// and root key. The thirdPartyInfo key holds information about the
-// third party we're encrypting the caveat for; the key is the
-// public/private key pair of the party that's adding the caveat.
-//
-// The caveat will be encoded according to the version information
-// found in thirdPartyInfo.
-func encodeCaveat(
- condition string,
- rootKey []byte,
- thirdPartyInfo ThirdPartyInfo,
- key *KeyPair,
- ns *checkers.Namespace,
-) ([]byte, error) {
- switch thirdPartyInfo.Version {
- case Version0, Version1:
- return encodeCaveatV1(condition, rootKey, &thirdPartyInfo.PublicKey, key)
- case Version2:
- return encodeCaveatV2(condition, rootKey, &thirdPartyInfo.PublicKey, key)
- default:
- // Version 3 or later - use V3.
- return encodeCaveatV3(condition, rootKey, &thirdPartyInfo.PublicKey, key, ns)
- }
-}
-
-// encodeCaveatV1 creates a JSON-encoded third-party caveat
-// with the given condition and root key. The thirdPartyPubKey key
-// represents the public key of the third party we're encrypting
-// the caveat for; the key is the public/private key pair of the party
-// that's adding the caveat.
-func encodeCaveatV1(
- condition string,
- rootKey []byte,
- thirdPartyPubKey *PublicKey,
- key *KeyPair,
-) ([]byte, error) {
- var nonce [NonceLen]byte
- if _, err := rand.Read(nonce[:]); err != nil {
- return nil, errgo.Notef(err, "cannot generate random number for nonce")
- }
- plain := caveatRecord{
- RootKey: rootKey,
- Condition: condition,
- }
- plainData, err := json.Marshal(&plain)
- if err != nil {
- return nil, errgo.Notef(err, "cannot marshal %#v", &plain)
- }
- sealed := box.Seal(nil, plainData, &nonce, thirdPartyPubKey.boxKey(), key.Private.boxKey())
- id := caveatJSON{
- ThirdPartyPublicKey: thirdPartyPubKey,
- FirstPartyPublicKey: &key.Public,
- Nonce: nonce[:],
- Id: base64.StdEncoding.EncodeToString(sealed),
- }
- data, err := json.Marshal(id)
- if err != nil {
- return nil, errgo.Notef(err, "cannot marshal %#v", id)
- }
- buf := make([]byte, base64.StdEncoding.EncodedLen(len(data)))
- base64.StdEncoding.Encode(buf, data)
- return buf, nil
-}
-
-// encodeCaveatV2 creates a version 2 third-party caveat.
-func encodeCaveatV2(
- condition string,
- rootKey []byte,
- thirdPartyPubKey *PublicKey,
- key *KeyPair,
-) ([]byte, error) {
- return encodeCaveatV2V3(Version2, condition, rootKey, thirdPartyPubKey, key, nil)
-}
-
-// encodeCaveatV3 creates a version 3 third-party caveat.
-func encodeCaveatV3(
- condition string,
- rootKey []byte,
- thirdPartyPubKey *PublicKey,
- key *KeyPair,
- ns *checkers.Namespace,
-) ([]byte, error) {
- return encodeCaveatV2V3(Version3, condition, rootKey, thirdPartyPubKey, key, ns)
-}
-
-const publicKeyPrefixLen = 4
-
-// version3CaveatMinLen holds an underestimate of the
-// minimum length of a version 3 caveat.
-const version3CaveatMinLen = 1 + 4 + 32 + 24 + box.Overhead + 1
-
-// encodeCaveatV2V3 creates a version 2 or version 3 third-party caveat.
-//
-// The format has the following packed binary fields (note
-// that all fields up to and including the nonce are the same
-// as the v2 format):
-//
-// version 2 or 3 [1 byte]
-// first 4 bytes of third-party Curve25519 public key [4 bytes]
-// first-party Curve25519 public key [32 bytes]
-// nonce [24 bytes]
-// encrypted secret part [rest of message]
-//
-// The encrypted part encrypts the following fields
-// with box.Seal:
-//
-// version 2 or 3 [1 byte]
-// length of root key [n: uvarint]
-// root key [n bytes]
-// length of encoded namespace [n: uvarint] (Version 3 only)
-// encoded namespace [n bytes] (Version 3 only)
-// condition [rest of encrypted part]
-func encodeCaveatV2V3(
- version Version,
- condition string,
- rootKey []byte,
- thirdPartyPubKey *PublicKey,
- key *KeyPair,
- ns *checkers.Namespace,
-) ([]byte, error) {
-
- var nsData []byte
- if version >= Version3 {
- data, err := ns.MarshalText()
- if err != nil {
- return nil, errgo.Mask(err)
- }
- nsData = data
- }
- // dataLen is our estimate of how long the data will be.
- // As we always use append, this doesn't have to be strictly
- // accurate but it's nice to avoid allocations.
- dataLen := 0 +
- 1 + // version
- publicKeyPrefixLen +
- KeyLen +
- NonceLen +
- box.Overhead +
- 1 + // version
- uvarintLen(uint64(len(rootKey))) +
- len(rootKey) +
- uvarintLen(uint64(len(nsData))) +
- len(nsData) +
- len(condition)
-
- var nonce [NonceLen]byte = uuidGen.Next()
-
- data := make([]byte, 0, dataLen)
- data = append(data, byte(version))
- data = append(data, thirdPartyPubKey.Key[:publicKeyPrefixLen]...)
- data = append(data, key.Public.Key[:]...)
- data = append(data, nonce[:]...)
- secret := encodeSecretPartV2V3(version, condition, rootKey, nsData)
- return box.Seal(data, secret, &nonce, thirdPartyPubKey.boxKey(), key.Private.boxKey()), nil
-}
-
-// encodeSecretPartV2V3 creates a version 2 or version 3 secret part of the third party
-// caveat. The returned data is not encrypted.
-//
-// The format has the following packed binary fields:
-// version 2 or 3 [1 byte]
-// root key length [n: uvarint]
-// root key [n bytes]
-// namespace length [n: uvarint] (v3 only)
-// namespace [n bytes] (v3 only)
-// predicate [rest of message]
-func encodeSecretPartV2V3(version Version, condition string, rootKey, nsData []byte) []byte {
- data := make([]byte, 0, 1+binary.MaxVarintLen64+len(rootKey)+len(condition))
- data = append(data, byte(version)) // version
- data = appendUvarint(data, uint64(len(rootKey)))
- data = append(data, rootKey...)
- if version >= Version3 {
- data = appendUvarint(data, uint64(len(nsData)))
- data = append(data, nsData...)
- }
- data = append(data, condition...)
- return data
-}
-
-// decodeCaveat attempts to decode caveat by decrypting the encrypted part
-// using key.
-func decodeCaveat(key *KeyPair, caveat []byte) (*ThirdPartyCaveatInfo, error) {
- if len(caveat) == 0 {
- return nil, errgo.New("empty third party caveat")
- }
- switch caveat[0] {
- case byte(Version2):
- return decodeCaveatV2V3(Version2, key, caveat)
- case byte(Version3):
- if len(caveat) < version3CaveatMinLen {
- // If it has the version 3 caveat tag and it's too short, it's
- // almost certainly an id, not an encrypted payload.
- return nil, errgo.Newf("caveat id payload not provided for caveat id %q", caveat)
- }
- return decodeCaveatV2V3(Version3, key, caveat)
- case 'e':
-		// 'e' will be the first byte if the caveat id is a base64-encoded JSON object.
- return decodeCaveatV1(key, caveat)
- default:
- return nil, errgo.Newf("caveat has unsupported version %d", caveat[0])
- }
-}
-
-// decodeCaveatV1 attempts to decode a base64 encoded JSON id. This
-// encoding is nominally version -1.
-func decodeCaveatV1(key *KeyPair, caveat []byte) (*ThirdPartyCaveatInfo, error) {
- data := make([]byte, (3*len(caveat)+3)/4)
- n, err := base64.StdEncoding.Decode(data, caveat)
- if err != nil {
- return nil, errgo.Notef(err, "cannot base64-decode caveat")
- }
- data = data[:n]
- var wrapper caveatJSON
- if err := json.Unmarshal(data, &wrapper); err != nil {
- return nil, errgo.Notef(err, "cannot unmarshal caveat %q", data)
- }
- if !bytes.Equal(key.Public.Key[:], wrapper.ThirdPartyPublicKey.Key[:]) {
- return nil, errgo.New("public key mismatch")
- }
- if wrapper.FirstPartyPublicKey == nil {
- return nil, errgo.New("target service public key not specified")
- }
- // The encrypted string is base64 encoded in the JSON representation.
- secret, err := base64.StdEncoding.DecodeString(wrapper.Id)
- if err != nil {
- return nil, errgo.Notef(err, "cannot base64-decode encrypted data")
- }
- var nonce [NonceLen]byte
- if copy(nonce[:], wrapper.Nonce) < NonceLen {
- return nil, errgo.Newf("nonce too short %x", wrapper.Nonce)
- }
- c, ok := box.Open(nil, secret, &nonce, wrapper.FirstPartyPublicKey.boxKey(), key.Private.boxKey())
- if !ok {
- return nil, errgo.Newf("cannot decrypt caveat %#v", wrapper)
- }
- var record caveatRecord
- if err := json.Unmarshal(c, &record); err != nil {
- return nil, errgo.Notef(err, "cannot decode third party caveat record")
- }
- return &ThirdPartyCaveatInfo{
- Condition: []byte(record.Condition),
- FirstPartyPublicKey: *wrapper.FirstPartyPublicKey,
- ThirdPartyKeyPair: *key,
- RootKey: record.RootKey,
- Caveat: caveat,
- Version: Version1,
- Namespace: legacyNamespace(),
- }, nil
-}
-
-// decodeCaveatV2V3 decodes a version 2 or version 3 caveat.
-func decodeCaveatV2V3(version Version, key *KeyPair, caveat []byte) (*ThirdPartyCaveatInfo, error) {
- origCaveat := caveat
- if len(caveat) < 1+publicKeyPrefixLen+KeyLen+NonceLen+box.Overhead {
- return nil, errgo.New("caveat id too short")
- }
- caveat = caveat[1:] // skip version (already checked)
-
- publicKeyPrefix, caveat := caveat[:publicKeyPrefixLen], caveat[publicKeyPrefixLen:]
- if !bytes.Equal(key.Public.Key[:publicKeyPrefixLen], publicKeyPrefix) {
- return nil, errgo.New("public key mismatch")
- }
-
- var firstPartyPub PublicKey
- copy(firstPartyPub.Key[:], caveat[:KeyLen])
- caveat = caveat[KeyLen:]
-
- var nonce [NonceLen]byte
- copy(nonce[:], caveat[:NonceLen])
- caveat = caveat[NonceLen:]
-
- data, ok := box.Open(nil, caveat, &nonce, firstPartyPub.boxKey(), key.Private.boxKey())
- if !ok {
- return nil, errgo.Newf("cannot decrypt caveat id")
- }
- rootKey, ns, condition, err := decodeSecretPartV2V3(version, data)
- if err != nil {
- return nil, errgo.Notef(err, "invalid secret part")
- }
- return &ThirdPartyCaveatInfo{
- Condition: condition,
- FirstPartyPublicKey: firstPartyPub,
- ThirdPartyKeyPair: *key,
- RootKey: rootKey,
- Caveat: origCaveat,
- Version: version,
- Namespace: ns,
- }, nil
-}
-
-func decodeSecretPartV2V3(version Version, data []byte) (rootKey []byte, ns *checkers.Namespace, condition []byte, err error) {
- fail := func(err error) ([]byte, *checkers.Namespace, []byte, error) {
- return nil, nil, nil, err
- }
- if len(data) < 1 {
- return fail(errgo.New("secret part too short"))
- }
- gotVersion, data := data[0], data[1:]
- if version != Version(gotVersion) {
- return fail(errgo.Newf("unexpected secret part version, got %d want %d", gotVersion, version))
- }
-
- l, n := binary.Uvarint(data)
- if n <= 0 || uint64(n)+l > uint64(len(data)) {
- return fail(errgo.Newf("invalid root key length"))
- }
- data = data[n:]
- rootKey, data = data[:l], data[l:]
-
- if version >= Version3 {
- var nsData []byte
- var ns1 checkers.Namespace
-
- l, n = binary.Uvarint(data)
- if n <= 0 || uint64(n)+l > uint64(len(data)) {
- return fail(errgo.Newf("invalid namespace length"))
- }
- data = data[n:]
- nsData, data = data[:l], data[l:]
- if err := ns1.UnmarshalText(nsData); err != nil {
- return fail(errgo.Notef(err, "cannot unmarshal namespace"))
- }
- ns = &ns1
- } else {
- ns = legacyNamespace()
- }
- return rootKey, ns, data, nil
-}
-
-// appendUvarint appends n to data encoded as a variable-length
-// unsigned integer.
-func appendUvarint(data []byte, n uint64) []byte {
- // Ensure the capacity is sufficient. If our space calculations when
- // allocating data were correct, this should never happen,
- // but be defensive just in case.
- for need := uvarintLen(n); cap(data)-len(data) < need; {
- data1 := append(data[0:cap(data)], 0)
- data = data1[0:len(data)]
- }
- nlen := binary.PutUvarint(data[len(data):cap(data)], n)
- return data[0 : len(data)+nlen]
-}
-
-// uvarintLen returns the number of bytes that n will require
-// when encoded with binary.PutUvarint.
-func uvarintLen(n uint64) int {
- len := 1
- n >>= 7
- for ; n > 0; n >>= 7 {
- len++
- }
- return len
-}
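
The same uvarint length-prefixed framing can be reproduced with the standard library alone; a sketch using Go 1.19's binary.AppendUvarint (the field values are placeholders):

package main

import (
	"encoding/binary"
	"fmt"
)

// appendLenPrefixed appends a uvarint length followed by the payload,
// mirroring how the secret part frames the root key and namespace.
func appendLenPrefixed(data, payload []byte) []byte {
	data = binary.AppendUvarint(data, uint64(len(payload)))
	return append(data, payload...)
}

func main() {
	data := []byte{3} // version byte, as in a v3 secret part
	data = appendLenPrefixed(data, []byte("root-key"))
	data = appendLenPrefixed(data, []byte("std:"))
	data = append(data, "allow read"...) // the condition takes the rest

	// Decoding walks the same fields in order.
	rest := data[1:]
	l, n := binary.Uvarint(rest)
	fmt.Printf("root key: %s\n", rest[n:n+int(l)])
}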
diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/discharge.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/discharge.go
deleted file mode 100644
index 4c7b0ae6..00000000
--- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/discharge.go
+++ /dev/null
@@ -1,282 +0,0 @@
-package bakery
-
-import (
- "context"
- "crypto/rand"
- "fmt"
- "strconv"
- "strings"
-
- "gopkg.in/errgo.v1"
-
- "github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checkers"
-)
-
-// LocalThirdPartyCaveat returns a third-party caveat that, when added
-// to a macaroon with AddCaveat, results in a caveat
-// with the location "local", encrypted with the given public key.
-// This can be automatically discharged by DischargeAllWithKey.
-func LocalThirdPartyCaveat(key *PublicKey, version Version) checkers.Caveat {
- var loc string
- if version < Version2 {
- loc = "local " + key.String()
- } else {
- loc = fmt.Sprintf("local %d %s", version, key)
- }
- return checkers.Caveat{
- Location: loc,
- }
-}
-
-// parseLocalLocation parses a local caveat location as generated by
-// LocalThirdPartyCaveat. This is of the form:
-//
-//	local <version> <pubkey>
-//
-// where <version> is the bakery version of the client that we're
-// adding the local caveat for.
-//
-// It returns false if the location does not represent a local
-// caveat location.
-func parseLocalLocation(loc string) (ThirdPartyInfo, bool) {
- if !strings.HasPrefix(loc, "local ") {
- return ThirdPartyInfo{}, false
- }
- version := Version1
- fields := strings.Fields(loc)
- fields = fields[1:] // Skip "local"
- switch len(fields) {
- case 2:
- v, err := strconv.Atoi(fields[0])
- if err != nil {
- return ThirdPartyInfo{}, false
- }
- version = Version(v)
- fields = fields[1:]
- fallthrough
- case 1:
- var key PublicKey
- if err := key.UnmarshalText([]byte(fields[0])); err != nil {
- return ThirdPartyInfo{}, false
- }
- return ThirdPartyInfo{
- PublicKey: key,
- Version: version,
- }, true
- default:
- return ThirdPartyInfo{}, false
- }
-}
-
-// DischargeParams holds parameters for a Discharge call.
-type DischargeParams struct {
- // Id holds the id to give to the discharge macaroon.
- // If Caveat is empty, then the id also holds the
- // encrypted third party caveat.
- Id []byte
-
- // Caveat holds the encrypted third party caveat. If this
- // is nil, Id will be used.
- Caveat []byte
-
- // Key holds the key to use to decrypt the third party
- // caveat information and to encrypt any additional
- // third party caveats returned by the caveat checker.
- Key *KeyPair
-
- // Checker is used to check the third party caveat,
- // and may also return further caveats to be added to
- // the discharge macaroon.
- Checker ThirdPartyCaveatChecker
-
-	// Locator is used to find information on third parties
- // referred to by third party caveats returned by the Checker.
- Locator ThirdPartyLocator
-}
-
-// Discharge creates a macaroon that discharges a third party caveat.
-// The given parameters specify the caveat and how it should be checked.
-//
-// The condition implicit in the caveat is checked for validity using p.Checker. If
-// it is valid, a new macaroon is returned which discharges the caveat.
-//
-// The macaroon is created with a version derived from the version
-// that was used to encode the id.
-func Discharge(ctx context.Context, p DischargeParams) (*Macaroon, error) {
- var caveatIdPrefix []byte
- if p.Caveat == nil {
- // The caveat information is encoded in the id itself.
- p.Caveat = p.Id
- } else {
- // We've been given an explicit id, so when extra third party
- // caveats are added, use that id as the prefix
- // for any more ids.
- caveatIdPrefix = p.Id
- }
- cavInfo, err := decodeCaveat(p.Key, p.Caveat)
- if err != nil {
- return nil, errgo.Notef(err, "discharger cannot decode caveat id")
- }
- cavInfo.Id = p.Id
- // Note that we don't check the error - we allow the
- // third party checker to see even caveats that we can't
- // understand.
- cond, arg, _ := checkers.ParseCaveat(string(cavInfo.Condition))
-
- var caveats []checkers.Caveat
- if cond == checkers.CondNeedDeclared {
- cavInfo.Condition = []byte(arg)
- caveats, err = checkNeedDeclared(ctx, cavInfo, p.Checker)
- } else {
- caveats, err = p.Checker.CheckThirdPartyCaveat(ctx, cavInfo)
- }
- if err != nil {
- return nil, errgo.Mask(err, errgo.Any)
- }
- // Note that the discharge macaroon does not need to
- // be stored persistently. Indeed, it would be a problem if
- // we did, because then the macaroon could potentially be used
- // for normal authorization with the third party.
- m, err := NewMacaroon(cavInfo.RootKey, p.Id, "", cavInfo.Version, cavInfo.Namespace)
- if err != nil {
- return nil, errgo.Mask(err)
- }
- m.caveatIdPrefix = caveatIdPrefix
- for _, cav := range caveats {
- if err := m.AddCaveat(ctx, cav, p.Key, p.Locator); err != nil {
- return nil, errgo.Notef(err, "could not add caveat")
- }
- }
- return m, nil
-}
-
-func checkNeedDeclared(ctx context.Context, cavInfo *ThirdPartyCaveatInfo, checker ThirdPartyCaveatChecker) ([]checkers.Caveat, error) {
- arg := string(cavInfo.Condition)
- i := strings.Index(arg, " ")
- if i <= 0 {
- return nil, errgo.Newf("need-declared caveat requires an argument, got %q", arg)
- }
- needDeclared := strings.Split(arg[0:i], ",")
- for _, d := range needDeclared {
- if d == "" {
- return nil, errgo.New("need-declared caveat with empty required attribute")
- }
- }
- if len(needDeclared) == 0 {
- return nil, fmt.Errorf("need-declared caveat with no required attributes")
- }
- cavInfo.Condition = []byte(arg[i+1:])
- caveats, err := checker.CheckThirdPartyCaveat(ctx, cavInfo)
- if err != nil {
- return nil, errgo.Mask(err, errgo.Any)
- }
- declared := make(map[string]bool)
- for _, cav := range caveats {
- if cav.Location != "" {
- continue
- }
- // Note that we ignore the error. We allow the service to
- // generate caveats that we don't understand here.
- cond, arg, _ := checkers.ParseCaveat(cav.Condition)
- if cond != checkers.CondDeclared {
- continue
- }
- parts := strings.SplitN(arg, " ", 2)
- if len(parts) != 2 {
- return nil, errgo.Newf("declared caveat has no value")
- }
- declared[parts[0]] = true
- }
- // Add empty declarations for everything mentioned in need-declared
- // that was not actually declared.
- for _, d := range needDeclared {
- if !declared[d] {
- caveats = append(caveats, checkers.DeclaredCaveat(d, ""))
- }
- }
- return caveats, nil
-}
-
-func randomBytes(n int) ([]byte, error) {
- b := make([]byte, n)
- _, err := rand.Read(b)
- if err != nil {
- return nil, fmt.Errorf("cannot generate %d random bytes: %v", n, err)
- }
- return b, nil
-}
-
-// ThirdPartyCaveatInfo holds the information decoded from
-// a third party caveat id.
-type ThirdPartyCaveatInfo struct {
- // Condition holds the third party condition to be discharged.
- // This is the only field that most third party dischargers will
- // need to consider.
- Condition []byte
-
- // FirstPartyPublicKey holds the public key of the party
- // that created the third party caveat.
- FirstPartyPublicKey PublicKey
-
- // ThirdPartyKeyPair holds the key pair used to decrypt
- // the caveat - the key pair of the discharging service.
- ThirdPartyKeyPair KeyPair
-
- // RootKey holds the secret root key encoded by the caveat.
- RootKey []byte
-
-	// Caveat holds the full encoded caveat id from which all
- // the other fields are derived.
- Caveat []byte
-
- // Version holds the version that was used to encode
- // the caveat id.
- Version Version
-
- // Id holds the id of the third party caveat (the id that
- // the discharge macaroon should be given). This
- // will differ from Caveat when the caveat information
- // is encoded separately.
- Id []byte
-
- // Namespace holds the namespace of the first party
- // that created the macaroon, as encoded by the party
- // that added the third party caveat.
- Namespace *checkers.Namespace
-}
-
-// ThirdPartyCaveatChecker holds a function that checks third party caveats
-// for validity. If the caveat is valid, it returns a nil error and
-// optionally a slice of extra caveats that will be added to the
-// discharge macaroon. The caveatId parameter holds the still-encoded id
-// of the caveat.
-//
-// If the caveat kind was not recognised, the checker should return an
-// error with an ErrCaveatNotRecognized cause.
-type ThirdPartyCaveatChecker interface {
- CheckThirdPartyCaveat(ctx context.Context, info *ThirdPartyCaveatInfo) ([]checkers.Caveat, error)
-}
-
-// ThirdPartyCaveatCheckerFunc implements ThirdPartyCaveatChecker by calling a function.
-type ThirdPartyCaveatCheckerFunc func(context.Context, *ThirdPartyCaveatInfo) ([]checkers.Caveat, error)
-
-// CheckThirdPartyCaveat implements ThirdPartyCaveatChecker.CheckThirdPartyCaveat by calling
-// the receiver with the given arguments.
-func (c ThirdPartyCaveatCheckerFunc) CheckThirdPartyCaveat(ctx context.Context, info *ThirdPartyCaveatInfo) ([]checkers.Caveat, error) {
- return c(ctx, info)
-}
-
-// FirstPartyCaveatChecker is used to check first party caveats
-// for validity with respect to information in the provided context.
-//
-// If the caveat kind was not recognised, the checker should return
-// ErrCaveatNotRecognized.
-type FirstPartyCaveatChecker interface {
- // CheckFirstPartyCaveat checks that the given caveat condition
- // is valid with respect to the given context information.
- CheckFirstPartyCaveat(ctx context.Context, caveat string) error
-
- // Namespace returns the namespace associated with the
- // caveat checker.
- Namespace() *checkers.Namespace
-}
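
A minimal ThirdPartyCaveatCheckerFunc; the "user-is" condition is invented purely for illustration:

package main

import (
	"context"
	"fmt"

	"github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery"
	"github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checkers"
)

// checker approves a single hypothetical condition and asks the
// discharge macaroon to carry a declared username.
var checker = bakery.ThirdPartyCaveatCheckerFunc(
	func(ctx context.Context, info *bakery.ThirdPartyCaveatInfo) ([]checkers.Caveat, error) {
		if string(info.Condition) != "user-is bob" {
			return nil, fmt.Errorf("condition %q not satisfied", info.Condition)
		}
		return []checkers.Caveat{checkers.DeclaredCaveat("username", "bob")}, nil
	})

func main() {
	// In production this checker would be set as DischargeParams.Checker.
	cavs, err := checker.CheckThirdPartyCaveat(context.Background(),
		&bakery.ThirdPartyCaveatInfo{Condition: []byte("user-is bob")})
	fmt.Println(cavs, err)
}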
diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/dischargeall.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/dischargeall.go
deleted file mode 100644
index 9c117ba8..00000000
--- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/dischargeall.go
+++ /dev/null
@@ -1,56 +0,0 @@
-package bakery
-
-import (
- "context"
-
- "gopkg.in/errgo.v1"
- "gopkg.in/macaroon.v2"
-
- "github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checkers"
-)
-
-// DischargeAll gathers discharge macaroons for all the third party
-// caveats in m (and any subsequent caveats required by those) using
-// getDischarge to acquire each discharge macaroon. It returns a slice
-// with m as the first element, followed by all the discharge macaroons.
-// All the discharge macaroons will be bound to the primary macaroon.
-//
-// The getDischarge function is passed the caveat to be discharged;
-// encryptedCaveat will be passed the external caveat payload found
-// in m, if any.
-func DischargeAll(
- ctx context.Context,
- m *Macaroon,
- getDischarge func(ctx context.Context, cav macaroon.Caveat, encryptedCaveat []byte) (*Macaroon, error),
-) (macaroon.Slice, error) {
- return DischargeAllWithKey(ctx, m, getDischarge, nil)
-}
-
-// DischargeAllWithKey is like DischargeAll except that the localKey
-// parameter may optionally hold the key of the client, in which case it
-// will be used to discharge any third party caveats with the special
-// location "local". In this case, the caveat itself must be "true". This
-// can be used by a server to ask a client to prove ownership of the
-// private key.
-//
-// When localKey is nil, DischargeAllWithKey is exactly the same as
-// DischargeAll.
-func DischargeAllWithKey(
- ctx context.Context,
- m *Macaroon,
- getDischarge func(ctx context.Context, cav macaroon.Caveat, encodedCaveat []byte) (*Macaroon, error),
- localKey *KeyPair,
-) (macaroon.Slice, error) {
- discharges, err := Slice{m}.DischargeAll(ctx, getDischarge, localKey)
- if err != nil {
- return nil, errgo.Mask(err, errgo.Any)
- }
- return discharges.Bind(), nil
-}
-
-var localDischargeChecker = ThirdPartyCaveatCheckerFunc(func(_ context.Context, info *ThirdPartyCaveatInfo) ([]checkers.Caveat, error) {
- if string(info.Condition) != "true" {
- return nil, checkers.ErrCaveatNotRecognized
- }
- return nil, nil
-})
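
Putting the pieces together, an in-process sketch of DischargeAll in which the getDischarge callback discharges locally rather than over the network (the location, condition and keys are stand-ins):

package main

import (
	"context"
	"fmt"

	"github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery"
	"github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checkers"
	"gopkg.in/macaroon.v2"
)

func main() {
	ctx := context.Background()

	// The discharging third party, known to the first party via a locator.
	thirdPartyKey := bakery.MustGenerateKey()
	locator := bakery.NewThirdPartyStore()
	locator.AddInfo("3p-loc", bakery.ThirdPartyInfo{
		PublicKey: thirdPartyKey.Public,
		Version:   bakery.LatestVersion,
	})

	// The first party mints a macaroon carrying one third-party caveat.
	m, err := bakery.NewMacaroon([]byte("root key"), []byte("id"), "", bakery.LatestVersion, nil)
	if err != nil {
		panic(err)
	}
	err = m.AddCaveat(ctx, checkers.Caveat{
		Location:  "3p-loc",
		Condition: "user-is bob", // hypothetical third-party condition
	}, bakery.MustGenerateKey(), locator)
	if err != nil {
		panic(err)
	}

	// This discharger approves everything; a real one would verify.
	checker := bakery.ThirdPartyCaveatCheckerFunc(
		func(ctx context.Context, info *bakery.ThirdPartyCaveatInfo) ([]checkers.Caveat, error) {
			return nil, nil
		})

	// In-process equivalent of posting the caveat to cav.Location.
	getDischarge := func(ctx context.Context, cav macaroon.Caveat, encryptedCaveat []byte) (*bakery.Macaroon, error) {
		return bakery.Discharge(ctx, bakery.DischargeParams{
			Id:      cav.Id,
			Caveat:  encryptedCaveat,
			Key:     thirdPartyKey,
			Checker: checker,
		})
	}
	ms, err := bakery.DischargeAll(ctx, m, getDischarge)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(ms)) // primary macaroon plus one discharge: 2
}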
diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/doc.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/doc.go
deleted file mode 100644
index f58f699d..00000000
--- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/doc.go
+++ /dev/null
@@ -1,88 +0,0 @@
-// The bakery package layers on top of the macaroon package, providing
-// a transport and store-agnostic way of using macaroons to assert
-// client capabilities.
-//
-// Summary
-//
-// The Bakery type is probably where you want to start.
-// It encapsulates a Checker type, which performs checking
-// of operations, and an Oven type, which encapsulates
-// the actual details of the macaroon encoding conventions.
-//
-// Most other types and functions are designed either to plug
-// into one of the above types (the various Authorizer
-// implementations, for example), or to expose some independent
-// functionality that's potentially useful (Discharge, for example).
-//
-// The rest of this introduction introduces some of the concepts
-// used by the bakery package.
-//
-// Identity and entities
-//
-// An Identity represents some authenticated user (or agent), usually
-// the client in a network protocol. An identity can be authenticated by
-// an external identity server (with a third party macaroon caveat) or
-// by locally provided information such as a username and password.
-//
-// The Checker type is not responsible for determining identity - that
-// functionality is represented by the IdentityClient interface.
-//
-// The Checker uses identities to decide whether something should be
-// allowed or not - the Authorizer interface is used to ask whether a
-// given identity should be allowed to perform some set of operations.
-//
-// Operations
-//
-// An operation defines some requested action on an entity. For example,
-// if a file system server defines an entity for every file in the server,
-// an operation to read a file might look like:
-//
-//	Op{
-//		Entity: "/foo",
-//		Action: "read",
-//	}
-//
-// The exact set of entities and actions is up to the caller, but should
-// be kept stable over time because authorization tokens will contain
-// these names.
-//
-// To authorize some request on behalf of a remote user, first find out
-// what operations that request needs to perform. For example, if the
-// user tries to delete a file, the entity might be the path to the
-// file's directory and the action might be "write". It may often be
-// possible to determine the operations required by a request without
-// reference to anything external, when the request itself contains all
-// the necessary information.
-//
-// The LoginOp operation is special - any macaroon associated with this
-// operation is treated as a bearer of identity information. If two
-// valid LoginOp macaroons are presented, only the first one will be
-// used for identity.
-//
-// Authorization
-//
-// The Authorizer interface is responsible for determining whether a
-// given authenticated identity is authorized to perform a set of
-// operations. This is used when the macaroons provided to Auth are not
-// sufficient to authorize the operations themselves.
-//
-// Capabilities
-//
-// A "capability" is represented by a macaroon that's associated with
-// one or more operations, and grants the capability to perform all
-// those operations. The AllowCapability method reports whether a
-// capability is allowed. It takes into account any authenticated
-// identity and any other capabilities provided.
-//
-// Third party caveats
-//
-// Sometimes authorization will only be granted if a third party caveat
-// is discharged. This will happen when an IdentityClient or Authorizer
-// returns a third party caveat.
-//
-// When this happens, a DischargeRequiredError will be returned
-// containing the caveats and the operations required. The caller is
-// responsible for creating a macaroon with those caveats associated
-// with those operations and for passing that macaroon to the client to
-// discharge.
-package bakery
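
To make the operation model concrete, one plausible mapping from requests to operations, following the file/directory convention sketched above (the mapping rules are illustrative, not prescribed by the package):

package main

import (
	"fmt"
	"path"

	"github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery"
)

// opsForRequest maps an HTTP-style request to the operations it
// requires: reads target the file, mutations target its directory.
func opsForRequest(method, filePath string) []bakery.Op {
	switch method {
	case "GET":
		return []bakery.Op{{Entity: filePath, Action: "read"}}
	default:
		return []bakery.Op{{Entity: path.Dir(filePath), Action: "write"}}
	}
}

func main() {
	fmt.Println(opsForRequest("GET", "/foo/bar"))    // [{/foo/bar read}]
	fmt.Println(opsForRequest("DELETE", "/foo/bar")) // [{/foo write}]
}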
diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/error.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/error.go
deleted file mode 100644
index 1a059d59..00000000
--- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/error.go
+++ /dev/null
@@ -1,77 +0,0 @@
-package bakery
-
-import (
- "fmt"
-
- "gopkg.in/errgo.v1"
-
- "github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checkers"
-)
-
-var (
- // ErrNotFound is returned by Store.Get implementations
- // to signal that an id has not been found.
- ErrNotFound = errgo.New("not found")
-
- // ErrPermissionDenied is returned from AuthChecker when
- // permission has been denied.
- ErrPermissionDenied = errgo.New("permission denied")
-)
-
-// DischargeRequiredError is returned when authorization has failed and a
-// discharged macaroon might fix it.
-//
-// A caller should grant the user the ability to authorize by minting a
-// macaroon associated with Ops (see MacaroonStore.MacaroonIdInfo for
-// how the associated operations are retrieved) and adding Caveats. If
-// the user succeeds in discharging the caveats, the authorization will
-// be granted.
-type DischargeRequiredError struct {
- // Message holds some reason why the authorization was denied.
- // TODO this is insufficient (and maybe unnecessary) because we
- // can have multiple errors.
- Message string
-
- // Ops holds all the operations that were not authorized.
- // If Ops contains a single LoginOp member, the macaroon
-	// should be treated as a login token. Login tokens (also
- // known as authentication macaroons) usually have a longer
- // life span than other macaroons.
- Ops []Op
-
- // Caveats holds the caveats that must be added
- // to macaroons that authorize the above operations.
- Caveats []checkers.Caveat
-
- // ForAuthentication holds whether the macaroon holding
- // the discharges will be used for authentication, and hence
- // should have wider scope and longer lifetime.
- // The bakery package never sets this field, but bakery/identchecker
- // uses it.
- ForAuthentication bool
-}
-
-func (e *DischargeRequiredError) Error() string {
- return "macaroon discharge required: " + e.Message
-}
-
-func IsDischargeRequiredError(err error) bool {
- _, ok := err.(*DischargeRequiredError)
- return ok
-}
-
-// VerificationError is used to signify that an error is because
-// of a verification failure rather than because verification
-// could not be done.
-type VerificationError struct {
- Reason error
-}
-
-func (e *VerificationError) Error() string {
- return fmt.Sprintf("verification failed: %v", e.Reason)
-}
-
-func isVerificationError(err error) bool {
- _, ok := errgo.Cause(err).(*VerificationError)
- return ok
-}
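
Callers usually detect this case by type and mint the requested macaroon; a sketch of that branch (the identity-service location and condition are placeholders):

package main

import (
	"fmt"

	"github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery"
	"github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checkers"
)

// respond inspects an authorization failure: discharge-required errors
// carry the ops and caveats needed to mint a fix-up macaroon.
func respond(err error) {
	if derr, ok := err.(*bakery.DischargeRequiredError); ok {
		fmt.Println("mint macaroon for ops:", derr.Ops)
		fmt.Println("with caveats:", derr.Caveats)
		return
	}
	fmt.Println("permission denied:", err)
}

func main() {
	respond(&bakery.DischargeRequiredError{
		Message: "identity required",
		Caveats: []checkers.Caveat{{
			Location:  "https://identity.example.com",
			Condition: "is-authenticated-user",
		}},
	})
	respond(bakery.ErrPermissionDenied)
}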
diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/keys.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/keys.go
deleted file mode 100644
index 7cffa9f3..00000000
--- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/keys.go
+++ /dev/null
@@ -1,219 +0,0 @@
-package bakery
-
-import (
- "context"
- "crypto/rand"
- "encoding/base64"
- "encoding/json"
- "strings"
- "sync"
-
- "golang.org/x/crypto/curve25519"
- "golang.org/x/crypto/nacl/box"
- "gopkg.in/errgo.v1"
- "gopkg.in/macaroon.v2"
-)
-
-// KeyLen is the byte length of the Curve25519 public and private keys used for
-// caveat id encryption.
-const KeyLen = 32
-
-// NonceLen is the byte length of the nonce values used for caveat id
-// encryption.
-const NonceLen = 24
-
-// PublicKey is a 256-bit Curve25519 public key.
-type PublicKey struct {
- Key
-}
-
-// PrivateKey is a 256-bit Curve25519 private key.
-type PrivateKey struct {
- Key
-}
-
-// Public derives the public key from a private key.
-func (k PrivateKey) Public() PublicKey {
- var pub PublicKey
- curve25519.ScalarBaseMult((*[32]byte)(&pub.Key), (*[32]byte)(&k.Key))
- return pub
-}
-
-// Key is a 256-bit Curve25519 key.
-type Key [KeyLen]byte
-
-// String returns the base64 representation of the key.
-func (k Key) String() string {
- return base64.StdEncoding.EncodeToString(k[:])
-}
-
-// MarshalBinary implements encoding.BinaryMarshaler.MarshalBinary.
-func (k Key) MarshalBinary() ([]byte, error) {
- return k[:], nil
-}
-
-// isZero reports whether the key consists entirely of zeros.
-func (k Key) isZero() bool {
- return k == Key{}
-}
-
-// UnmarshalBinary implements encoding.BinaryUnmarshaler.UnmarshalBinary.
-func (k *Key) UnmarshalBinary(data []byte) error {
- if len(data) != len(k) {
- return errgo.Newf("wrong length for key, got %d want %d", len(data), len(k))
- }
- copy(k[:], data)
- return nil
-}
-
-// MarshalText implements encoding.TextMarshaler.MarshalText.
-func (k Key) MarshalText() ([]byte, error) {
- data := make([]byte, base64.StdEncoding.EncodedLen(len(k)))
- base64.StdEncoding.Encode(data, k[:])
- return data, nil
-}
-
-// boxKey returns the box package's type for a key.
-func (k Key) boxKey() *[KeyLen]byte {
- return (*[KeyLen]byte)(&k)
-}
-
-// UnmarshalText implements encoding.TextUnmarshaler.UnmarshalText.
-func (k *Key) UnmarshalText(text []byte) error {
- data, err := macaroon.Base64Decode(text)
- if err != nil {
- return errgo.Notef(err, "cannot decode base64 key")
- }
- if len(data) != len(k) {
- return errgo.Newf("wrong length for key, got %d want %d", len(data), len(k))
- }
- copy(k[:], data)
- return nil
-}
-
-// ThirdPartyInfo holds information on a given third party
-// discharge service.
-type ThirdPartyInfo struct {
- // PublicKey holds the public key of the third party.
- PublicKey PublicKey
-
-	// Version holds the latest bakery protocol version supported
- // by the discharger.
- Version Version
-}
-
-// ThirdPartyLocator is used to find information on third
-// party discharge services.
-type ThirdPartyLocator interface {
- // ThirdPartyInfo returns information on the third
- // party at the given location. It returns ErrNotFound if no match is found.
- // This method must be safe to call concurrently.
- ThirdPartyInfo(ctx context.Context, loc string) (ThirdPartyInfo, error)
-}
-
-// ThirdPartyStore implements a simple ThirdPartyLocator.
-// A trailing slash on locations is ignored.
-type ThirdPartyStore struct {
- mu sync.RWMutex
- m map[string]ThirdPartyInfo
-}
-
-// NewThirdPartyStore returns a new instance of ThirdPartyStore
-// that stores locations in memory.
-func NewThirdPartyStore() *ThirdPartyStore {
- return &ThirdPartyStore{
- m: make(map[string]ThirdPartyInfo),
- }
-}
-
-// AddInfo associates the given information with the
-// given location, ignoring any trailing slash.
-// This method is OK to call concurrently with ThirdPartyInfo.
-func (s *ThirdPartyStore) AddInfo(loc string, info ThirdPartyInfo) {
- s.mu.Lock()
- defer s.mu.Unlock()
- s.m[canonicalLocation(loc)] = info
-}
-
-func canonicalLocation(loc string) string {
- return strings.TrimSuffix(loc, "/")
-}
-
-// ThirdPartyInfo implements the ThirdPartyLocator interface.
-func (s *ThirdPartyStore) ThirdPartyInfo(ctx context.Context, loc string) (ThirdPartyInfo, error) {
- s.mu.RLock()
- defer s.mu.RUnlock()
- if info, ok := s.m[canonicalLocation(loc)]; ok {
- return info, nil
- }
- return ThirdPartyInfo{}, ErrNotFound
-}
-
-// KeyPair holds a public/private pair of keys.
-type KeyPair struct {
- Public PublicKey `json:"public"`
- Private PrivateKey `json:"private"`
-}
-
-// UnmarshalJSON implements json.Unmarshaler.
-func (k *KeyPair) UnmarshalJSON(data []byte) error {
- type keyPair KeyPair
- if err := json.Unmarshal(data, (*keyPair)(k)); err != nil {
- return err
- }
- return k.validate()
-}
-
-// UnmarshalYAML implements yaml.Unmarshaler.
-func (k *KeyPair) UnmarshalYAML(unmarshal func(interface{}) error) error {
- type keyPair KeyPair
- if err := unmarshal((*keyPair)(k)); err != nil {
- return err
- }
- return k.validate()
-}
-
-func (k *KeyPair) validate() error {
- if k.Public.isZero() {
- return errgo.Newf("missing public key")
- }
- if k.Private.isZero() {
- return errgo.Newf("missing private key")
- }
- return nil
-}
-
-// GenerateKey generates a new key pair.
-func GenerateKey() (*KeyPair, error) {
- var key KeyPair
- pub, priv, err := box.GenerateKey(rand.Reader)
- if err != nil {
- return nil, err
- }
- key.Public = PublicKey{*pub}
- key.Private = PrivateKey{*priv}
- return &key, nil
-}
-
-// MustGenerateKey is like GenerateKey but panics if GenerateKey returns
-// an error - useful in tests.
-func MustGenerateKey() *KeyPair {
- key, err := GenerateKey()
- if err != nil {
- panic(errgo.Notef(err, "cannot generate key"))
- }
- return key
-}
-
-// String implements the fmt.Stringer interface
-// by returning the base64 representation of the
-// public key part of key.
-func (key *KeyPair) String() string {
- return key.Public.String()
-}
-
-type emptyLocator struct{}
-
-func (emptyLocator) ThirdPartyInfo(context.Context, string) (ThirdPartyInfo, error) {
- return ThirdPartyInfo{}, ErrNotFound
-}
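
A sketch of key generation, the JSON round trip and locator registration (the discharger URL is a placeholder):

package main

import (
	"context"
	"encoding/json"
	"fmt"

	"github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery"
)

func main() {
	key := bakery.MustGenerateKey()

	// KeyPair round-trips through JSON with base64-encoded keys.
	data, err := json.Marshal(key)
	if err != nil {
		panic(err)
	}
	var key2 bakery.KeyPair
	if err := json.Unmarshal(data, &key2); err != nil {
		panic(err)
	}
	fmt.Println(key2.Public == key.Public) // true

	// Publish the discharger's public key under its location.
	store := bakery.NewThirdPartyStore()
	store.AddInfo("https://discharger.example.com", bakery.ThirdPartyInfo{
		PublicKey: key.Public,
		Version:   bakery.LatestVersion,
	})
	info, err := store.ThirdPartyInfo(context.Background(), "https://discharger.example.com/")
	fmt.Println(info.Version, err) // the trailing slash is ignored
}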
diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/logger.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/logger.go
deleted file mode 100644
index acb5a1f5..00000000
--- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/logger.go
+++ /dev/null
@@ -1,28 +0,0 @@
-package bakery
-
-import (
- "context"
-)
-
-// Logger is used by the bakery to log informational messages
-// about bakery operations.
-type Logger interface {
- Infof(ctx context.Context, f string, args ...interface{})
- Debugf(ctx context.Context, f string, args ...interface{})
-}
-
-// DefaultLogger returns a Logger instance that does nothing.
-//
-// Deprecated: DefaultLogger exists for historical compatibility
-// only. Previously it logged using github.com/juju/loggo.
-func DefaultLogger(name string) Logger {
- return nopLogger{}
-}
-
-type nopLogger struct{}
-
-// Debugf implements Logger.Debugf.
-func (nopLogger) Debugf(context.Context, string, ...interface{}) {}
-
-// Infof implements Logger.Infof.
-func (nopLogger) Infof(context.Context, string, ...interface{}) {}
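
Any two-method implementation can be substituted; for example, a sketch adapting the standard library's log package:

package main

import (
	"context"
	"log"

	"github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery"
)

// stdLogger adapts the standard library logger to bakery.Logger.
type stdLogger struct{}

func (stdLogger) Infof(_ context.Context, f string, args ...interface{}) {
	log.Printf("INFO: "+f, args...)
}

func (stdLogger) Debugf(_ context.Context, f string, args ...interface{}) {
	log.Printf("DEBUG: "+f, args...)
}

var _ bakery.Logger = stdLogger{}

func main() {
	var l bakery.Logger = stdLogger{}
	l.Infof(context.Background(), "discharged %d caveats", 2)
}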
diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/macaroon.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/macaroon.go
deleted file mode 100644
index d5ad3b64..00000000
--- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/macaroon.go
+++ /dev/null
@@ -1,356 +0,0 @@
-package bakery
-
-import (
- "bytes"
- "context"
- "encoding/base64"
- "encoding/binary"
- "encoding/json"
-
- "gopkg.in/errgo.v1"
- "gopkg.in/macaroon.v2"
-
- "github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checkers"
-)
-
-// legacyNamespace holds the standard namespace as used by
-// pre-version3 macaroons.
-func legacyNamespace() *checkers.Namespace {
- ns := checkers.NewNamespace(nil)
- ns.Register(checkers.StdNamespace, "")
- return ns
-}
-
-// Macaroon represents an undischarged macaroon along with its first
-// party caveat namespace and associated third party caveat information
-// which should be passed to the third party when discharging a caveat.
-type Macaroon struct {
- // m holds the underlying macaroon.
- m *macaroon.Macaroon
-
- // version holds the version of the macaroon.
- version Version
-
- // caveatData maps from a third party caveat id to its
- // associated information, usually public-key encrypted with the
- // third party's public key.
- //
- // If version is less than Version3, this will always be nil,
- // because clients prior to that version do not support
- // macaroon-external caveat ids.
- caveatData map[string][]byte
-
- // namespace holds the first-party caveat namespace of the macaroon.
- namespace *checkers.Namespace
-
- // caveatIdPrefix holds the prefix to use for the ids of any third
- // party caveats created. This can be set when Discharge creates a
- // discharge macaroon.
- caveatIdPrefix []byte
-}
-
-// NewLegacyMacaroon returns a new macaroon holding m.
-// This should only be used when there's no alternative
-// (for example when m has been unmarshaled
-// from some alternative format).
-func NewLegacyMacaroon(m *macaroon.Macaroon) (*Macaroon, error) {
- v, err := bakeryVersion(m.Version())
- if err != nil {
- return nil, errgo.Mask(err)
- }
- return &Macaroon{
- m: m,
- version: v,
- namespace: legacyNamespace(),
- }, nil
-}
-
-type macaroonJSON struct {
- Macaroon *macaroon.Macaroon `json:"m"`
- Version Version `json:"v"`
-	// Note: CaveatData is encoded with URL-base64-encoded keys
-	// because JSON cannot deal with arbitrary byte sequences
-	// in its strings, and with URL-base64-encoded values to
-	// match the standard macaroon encoding.
- CaveatData map[string]string `json:"cdata,omitempty"`
- Namespace *checkers.Namespace `json:"ns"`
-}
-
-// Clone returns a copy of the macaroon. Note that the new
-// macaroon's namespace still points to the same underlying Namespace -
-// copying the macaroon does not make a copy of the namespace.
-func (m *Macaroon) Clone() *Macaroon {
- m1 := *m
- m1.m = m1.m.Clone()
- m1.caveatData = make(map[string][]byte)
- for id, data := range m.caveatData {
- m1.caveatData[id] = data
- }
- return &m1
-}
-
-// MarshalJSON implements json.Marshaler by marshaling
-// the macaroon into the original macaroon format if the
-// version is earlier than Version3.
-func (m *Macaroon) MarshalJSON() ([]byte, error) {
- if m.version < Version3 {
- if len(m.caveatData) > 0 {
- return nil, errgo.Newf("cannot marshal pre-version3 macaroon with external caveat data")
- }
- return m.m.MarshalJSON()
- }
- caveatData := make(map[string]string)
- for id, data := range m.caveatData {
- caveatData[base64.RawURLEncoding.EncodeToString([]byte(id))] = base64.RawURLEncoding.EncodeToString(data)
- }
- return json.Marshal(macaroonJSON{
- Macaroon: m.m,
- Version: m.version,
- CaveatData: caveatData,
- Namespace: m.namespace,
- })
-}
-
-// UnmarshalJSON implements json.Unmarshaler by unmarshaling in a
-// backwardly compatible way - if provided with a previous macaroon
-// version, it will unmarshal that too.
-func (m *Macaroon) UnmarshalJSON(data []byte) error {
- // First try with new data format.
- var m1 macaroonJSON
- if err := json.Unmarshal(data, &m1); err != nil {
- // If we get an unmarshal error, we won't be able
- // to unmarshal into the old format either, as extra fields
- // are ignored.
- return errgo.Mask(err)
- }
- if m1.Macaroon == nil {
- return m.unmarshalJSONOldFormat(data)
- }
- // We've got macaroon field - it's the new format.
- if m1.Version < Version3 || m1.Version > LatestVersion {
- return errgo.Newf("unexpected bakery macaroon version; got %d want %d", m1.Version, Version3)
- }
- if got, want := m1.Macaroon.Version(), MacaroonVersion(m1.Version); got != want {
- return errgo.Newf("underlying macaroon has inconsistent version; got %d want %d", got, want)
- }
- caveatData := make(map[string][]byte)
- for id64, data64 := range m1.CaveatData {
- id, err := macaroon.Base64Decode([]byte(id64))
- if err != nil {
- return errgo.Notef(err, "cannot decode caveat id")
- }
- data, err := macaroon.Base64Decode([]byte(data64))
- if err != nil {
- return errgo.Notef(err, "cannot decode caveat")
- }
- caveatData[string(id)] = data
- }
- m.caveatData = caveatData
- m.m = m1.Macaroon
- m.namespace = m1.Namespace
- // TODO should we allow version > LatestVersion here?
- m.version = m1.Version
- return nil
-}
-
-// unmarshalJSONOldFormat unmarshals the data from an old format
-// macaroon (without any external caveats or namespace).
-func (m *Macaroon) unmarshalJSONOldFormat(data []byte) error {
- // Try to unmarshal from the original format.
- var m1 *macaroon.Macaroon
- if err := json.Unmarshal(data, &m1); err != nil {
- return errgo.Mask(err)
- }
- m2, err := NewLegacyMacaroon(m1)
- if err != nil {
- return errgo.Mask(err)
- }
- *m = *m2
- return nil
-}
-
-// bakeryVersion returns a bakery version that corresponds to
-// the macaroon version v. It is necessarily approximate because
-// several bakery versions can correspond to a single macaroon
-// version, so it's only of use when decoding legacy formats
-// (in Macaroon.UnmarshalJSON).
-//
-// It will return an error if it doesn't recognize the version.
-func bakeryVersion(v macaroon.Version) (Version, error) {
- switch v {
- case macaroon.V1:
- // Use version 1 because we don't know of any existing
- // version 0 clients.
- return Version1, nil
- case macaroon.V2:
- // Note that this could also correspond to Version3, but
- // this logic is explicitly for legacy versions.
- return Version2, nil
- default:
- return 0, errgo.Newf("unknown macaroon version when legacy-unmarshaling bakery macaroon; got %d", v)
- }
-}
-
-// NewMacaroon creates and returns a new macaroon with the given root
-// key, id and location. If the version is more than the latest known
-// version, the latest known version will be used. The namespace is that
-// of the service creating it.
-func NewMacaroon(rootKey, id []byte, location string, version Version, ns *checkers.Namespace) (*Macaroon, error) {
- if version > LatestVersion {
- version = LatestVersion
- }
- m, err := macaroon.New(rootKey, id, location, MacaroonVersion(version))
- if err != nil {
- return nil, errgo.Notef(err, "cannot create macaroon")
- }
- return &Macaroon{
- m: m,
- version: version,
- namespace: ns,
- }, nil
-}
-
-// M returns the underlying macaroon held within m.
-func (m *Macaroon) M() *macaroon.Macaroon {
- return m.m
-}
-
-// Version returns the bakery version of the first party
-// that created the macaroon.
-func (m *Macaroon) Version() Version {
- return m.version
-}
-
-// Namespace returns the first party caveat namespace of the macaroon.
-func (m *Macaroon) Namespace() *checkers.Namespace {
- return m.namespace
-}
-
-// AddCaveats is a convenience method that calls m.AddCaveat for each
-// caveat in cavs.
-func (m *Macaroon) AddCaveats(ctx context.Context, cavs []checkers.Caveat, key *KeyPair, loc ThirdPartyLocator) error {
- for _, cav := range cavs {
- if err := m.AddCaveat(ctx, cav, key, loc); err != nil {
- return errgo.Notef(err, "cannot add caveat %#v", cav)
- }
- }
- return nil
-}
-
-// AddCaveat adds a caveat to the given macaroon.
-//
-// If it's a third-party caveat, it encrypts it using the given key pair
-// and by looking up the location using the given locator. If it's a
-// first-party caveat, key and loc are unused.
-//
-// As a special case, if the caveat's Location field has the prefix
-// "local " the caveat is added as a client self-discharge caveat using
-// the public key base64-encoded in the rest of the location. In this
-// case, the Condition field must be empty. The resulting third-party
-// caveat will encode the condition "true" encrypted with that public
-// key. See LocalThirdPartyCaveat for a way of creating such caveats.
-func (m *Macaroon) AddCaveat(ctx context.Context, cav checkers.Caveat, key *KeyPair, loc ThirdPartyLocator) error {
- if cav.Location == "" {
- if err := m.m.AddFirstPartyCaveat([]byte(m.namespace.ResolveCaveat(cav).Condition)); err != nil {
- return errgo.Mask(err)
- }
- return nil
- }
- if key == nil {
- return errgo.Newf("no private key to encrypt third party caveat")
- }
- var info ThirdPartyInfo
- if localInfo, ok := parseLocalLocation(cav.Location); ok {
- info = localInfo
- cav.Location = "local"
- if cav.Condition != "" {
- return errgo.New("cannot specify caveat condition in local third-party caveat")
- }
- cav.Condition = "true"
- } else {
- if loc == nil {
- return errgo.Newf("no locator when adding third party caveat")
- }
- var err error
- info, err = loc.ThirdPartyInfo(ctx, cav.Location)
- if err != nil {
- return errgo.Notef(err, "cannot find public key for location %q", cav.Location)
- }
- }
- rootKey, err := randomBytes(24)
- if err != nil {
- return errgo.Notef(err, "cannot generate third party secret")
- }
- // Use the least supported version to encode the caveat.
- if m.version < info.Version {
- info.Version = m.version
- }
- caveatInfo, err := encodeCaveat(cav.Condition, rootKey, info, key, m.namespace)
- if err != nil {
- return errgo.Notef(err, "cannot create third party caveat at %q", cav.Location)
- }
- var id []byte
- if info.Version < Version3 {
- // We're encoding for an earlier client or third party which does
- // not understand bundled caveat info, so use the encoded
- // caveat information as the caveat id.
- id = caveatInfo
- } else {
- id = m.newCaveatId(m.caveatIdPrefix)
- if m.caveatData == nil {
- m.caveatData = make(map[string][]byte)
- }
- m.caveatData[string(id)] = caveatInfo
- }
- if err := m.m.AddThirdPartyCaveat(rootKey, id, cav.Location); err != nil {
- return errgo.Notef(err, "cannot add third party caveat")
- }
- return nil
-}
-
-// newCaveatId returns a third party caveat id that
-// does not duplicate any third party caveat ids already inside m.
-//
-// If base is non-empty, it is used as the id prefix.
-func (m *Macaroon) newCaveatId(base []byte) []byte {
- var id []byte
- if len(base) > 0 {
- id = make([]byte, len(base), len(base)+binary.MaxVarintLen64)
- copy(id, base)
- } else {
- id = make([]byte, 0, 1+binary.MaxVarintLen32)
- // Add a version byte to the caveat id. Technically
- // this is unnecessary as the caveat-decoding logic
- // that looks at versions should never see this id,
- // but if the caveat payload isn't provided along with
- // the macaroon, having this version gives a strong indication
- // that the payload has been omitted so we can produce
- // a better error for the user.
- id = append(id, byte(Version3))
- }
-
- // Iterate through integers looking for one that isn't already used,
- // starting from the number of existing caveat-data entries so that if
- // everyone is using this same algorithm, we'll only perform one
- // iteration.
- //
- // Note that although this looks like an infinite loop,
- // there's no way that it can run for more iterations
- // than the total number of existing third party caveats,
- // whatever their ids.
- caveats := m.m.Caveats()
-again:
- for i := len(m.caveatData); ; i++ {
- // We append a varint to the end of the id and assume that
- // any client that's created the id that we're using as a base
- // is using similar conventions - in the worst case they might
- // end up with a duplicate third party caveat id and thus create
- // a macaroon that cannot be discharged.
- id1 := appendUvarint(id, uint64(i))
- for _, cav := range caveats {
- if cav.VerificationId != nil && bytes.Equal(cav.Id, id1) {
- continue again
- }
- }
- return id1
- }
-}
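// Editorial sketch, not part of the vendored sources: minting a macaroon
// directly with the NewMacaroon function above and attaching a first-party
// caveat via AddCaveat. The root key, id and location values are
// placeholders, and checkers.TimeBeforeCaveat is assumed to be available
// from the checkers package.
//
//	ns := checkers.New(nil).Namespace()
//	m, err := bakery.NewMacaroon(
//		[]byte("secret-root-key"), []byte("id-1"),
//		"https://example.com", bakery.LatestVersion, ns,
//	)
//	if err != nil {
//		log.Fatal(err)
//	}
//	// An empty Location marks the caveat as first party, so the
//	// key and locator arguments are unused and may be nil.
//	cav := checkers.TimeBeforeCaveat(time.Now().Add(time.Hour))
//	if err := m.AddCaveat(context.Background(), cav, nil, nil); err != nil {
//		log.Fatal(err)
//	}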
diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/oven.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/oven.go
deleted file mode 100644
index 83ce8908..00000000
--- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/oven.go
+++ /dev/null
@@ -1,359 +0,0 @@
-package bakery
-
-import (
- "bytes"
- "context"
- "encoding/base64"
- "sort"
-
- "github.com/go-macaroon-bakery/macaroonpb"
- "github.com/rogpeppe/fastuuid"
- "gopkg.in/errgo.v1"
- "gopkg.in/macaroon.v2"
-
- "github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checkers"
-)
-
-// MacaroonVerifier verifies macaroons and returns the operations and
-// caveats they're associated with.
-type MacaroonVerifier interface {
- // VerifyMacaroon verifies the signature of the given macaroon and returns
- // information on its associated operations, and all the first party
- // caveat conditions that need to be checked.
- //
- // This method should not check first party caveats itself.
- //
- // It should return a *VerificationError if the error occurred
- // because the macaroon signature failed or the root key
- // was not found - any other error will be treated as fatal
- // by Checker and cause authorization to terminate.
- VerifyMacaroon(ctx context.Context, ms macaroon.Slice) ([]Op, []string, error)
-}
-
-var uuidGen = fastuuid.MustNewGenerator()
-
-// Oven bakes macaroons. They emerge sweet and delicious
-// and ready for use in a Checker.
-//
-// All macaroons are associated with one or more operations (see
-// the Op type) which define the capabilities of the macaroon.
-//
-// There is one special operation, "login" (defined by LoginOp)
-// which grants the capability to speak for a particular user.
-// The login capability will never be mixed with other capabilities.
-//
-// It is up to the caller to decide on semantics for other operations.
-type Oven struct {
- p OvenParams
-}
-
-type OvenParams struct {
- // Namespace holds the namespace to use when adding first party caveats.
- // If this is nil, checkers.New(nil).Namespace will be used.
- Namespace *checkers.Namespace
-
- // RootKeyStoreForOps returns the macaroon storage to be
- // used for root keys associated with macaroons created
- // with NewMacaroon.
- //
- // If this is nil, NewMemRootKeyStore will be used to create
- // a new store to be used for all entities.
- RootKeyStoreForOps func(ops []Op) RootKeyStore
-
- // Key holds the private key pair used to encrypt third party caveats.
- // If it is nil, no third party caveats can be created.
- Key *KeyPair
-
- // Location holds the location that will be associated with new macaroons
- // (as returned by Macaroon.Location).
- Location string
-
- // Locator is used to find out information on third parties when
- // adding third party caveats. If this is nil, no non-local third
- // party caveats can be added.
- Locator ThirdPartyLocator
-
- // LegacyMacaroonOp holds the operation to associate with old
- // macaroons that don't have associated operations.
- // If this is empty, legacy macaroons will not be associated
- // with any operations.
- LegacyMacaroonOp Op
-
- // TODO max macaroon or macaroon id size?
-}
-
-// NewOven returns a new oven using the given parameters.
-func NewOven(p OvenParams) *Oven {
- if p.Locator == nil {
- p.Locator = emptyLocator{}
- }
- if p.RootKeyStoreForOps == nil {
- store := NewMemRootKeyStore()
- p.RootKeyStoreForOps = func(ops []Op) RootKeyStore {
- return store
- }
- }
- if p.Namespace == nil {
- p.Namespace = checkers.New(nil).Namespace()
- }
- return &Oven{
- p: p,
- }
-}
-
-// VerifyMacaroon implements MacaroonVerifier.VerifyMacaroon, making Oven
-// an instance of MacaroonVerifier.
-//
-// For macaroons minted with previous bakery versions, it always
-// returns a single LoginOp operation.
-func (o *Oven) VerifyMacaroon(ctx context.Context, ms macaroon.Slice) (ops []Op, conditions []string, err error) {
- if len(ms) == 0 {
- return nil, nil, errgo.Newf("no macaroons in slice")
- }
- storageId, ops, err := o.decodeMacaroonId(ms[0].Id())
- if err != nil {
- return nil, nil, errgo.Mask(err)
- }
- rootKey, err := o.p.RootKeyStoreForOps(ops).Get(ctx, storageId)
- if err != nil {
- if errgo.Cause(err) != ErrNotFound {
- return nil, nil, errgo.Notef(err, "cannot get macaroon")
- }
- // If the macaroon was not found, it is probably
- // because it's been removed after time-expiry,
- // so return a verification error.
- return nil, nil, &VerificationError{
- Reason: errgo.Newf("macaroon not found in storage"),
- }
- }
- conditions, err = ms[0].VerifySignature(rootKey, ms[1:])
- if err != nil {
- return nil, nil, &VerificationError{
- Reason: errgo.Mask(err),
- }
- }
- return ops, conditions, nil
-}
-
-func (o *Oven) decodeMacaroonId(id []byte) (storageId []byte, ops []Op, err error) {
- base64Decoded := false
- if id[0] == 'A' {
- // The first byte is not a version number and it's 'A', which is the
- // base64 encoding of the top 6 bits (all zero) of the version number 2 or 3,
- // so we assume that it's the base64 encoding of a new-style
- // macaroon id, so we base64 decode it.
- //
- // Note that old-style ids always start with an ASCII character >= 4
- // (> 32 in fact) so this logic won't be triggered for those.
- dec := make([]byte, base64.RawURLEncoding.DecodedLen(len(id)))
- n, err := base64.RawURLEncoding.Decode(dec, id)
- if err == nil {
- // Set the id only on success - if it's a bad encoding, we'll get a not-found error
- // which is fine because "not found" is a correct description of the issue - we
- // can't find the root key for the given id.
- id = dec[0:n]
- base64Decoded = true
- }
- }
- // Trim any extraneous information from the id before retrieving
- // it from storage, including the UUID that's added when
- // creating macaroons to make all macaroons unique even if
- // they're using the same root key.
- switch id[0] {
- case byte(Version2):
- // Skip the UUID at the start of the id.
- storageId = id[1+16:]
- case byte(Version3):
- var id1 macaroonpb.MacaroonId
- if err := id1.UnmarshalBinary(id[1:]); err != nil {
- return nil, nil, errgo.Notef(err, "cannot unmarshal macaroon id")
- }
- if len(id1.Ops) == 0 || len(id1.Ops[0].Actions) == 0 {
- return nil, nil, errgo.Newf("no operations found in macaroon")
- }
- ops = make([]Op, 0, len(id1.Ops))
- for _, op := range id1.Ops {
- for _, action := range op.Actions {
- ops = append(ops, Op{
- Entity: op.Entity,
- Action: action,
- })
- }
- }
- return id1.StorageId, ops, nil
- }
- if !base64Decoded && isLowerCaseHexChar(id[0]) {
- // It's an old-style id, probably with a hyphenated UUID,
- // so trim that off.
- if i := bytes.LastIndexByte(id, '-'); i >= 0 {
- storageId = id[0:i]
- }
- }
- if op := o.p.LegacyMacaroonOp; op != (Op{}) {
- ops = []Op{op}
- }
- return storageId, ops, nil
-}
-
-// NewMacaroon takes a macaroon with the given version from the oven, associates it with the given operations
-// and attaches the given caveats. There must be at least one operation specified.
-func (o *Oven) NewMacaroon(ctx context.Context, version Version, caveats []checkers.Caveat, ops ...Op) (*Macaroon, error) {
- if len(ops) == 0 {
- return nil, errgo.Newf("cannot mint a macaroon associated with no operations")
- }
- ops = CanonicalOps(ops)
- rootKey, storageId, err := o.p.RootKeyStoreForOps(ops).RootKey(ctx)
- if err != nil {
- return nil, errgo.Mask(err)
- }
- id, err := o.newMacaroonId(ctx, ops, storageId)
- if err != nil {
- return nil, errgo.Mask(err)
- }
- idBytesNoVersion, err := id.MarshalBinary()
- if err != nil {
- return nil, errgo.Mask(err)
- }
- idBytes := make([]byte, len(idBytesNoVersion)+1)
- idBytes[0] = byte(LatestVersion)
- // TODO We could use a proto.Buffer to avoid this copy.
- copy(idBytes[1:], idBytesNoVersion)
-
- if MacaroonVersion(version) < macaroon.V2 {
- // The old macaroon format required valid text for the macaroon id,
- // so base64-encode it.
- b64data := make([]byte, base64.RawURLEncoding.EncodedLen(len(idBytes)))
- base64.RawURLEncoding.Encode(b64data, idBytes)
- idBytes = b64data
- }
- m, err := NewMacaroon(rootKey, idBytes, o.p.Location, version, o.p.Namespace)
- if err != nil {
- return nil, errgo.Notef(err, "cannot create macaroon with version %v", version)
- }
- if err := o.AddCaveats(ctx, m, caveats); err != nil {
- return nil, errgo.Mask(err)
- }
- return m, nil
-}
-
-// AddCaveat adds a caveat to the given macaroon.
-func (o *Oven) AddCaveat(ctx context.Context, m *Macaroon, cav checkers.Caveat) error {
- return m.AddCaveat(ctx, cav, o.p.Key, o.p.Locator)
-}
-
-// AddCaveats adds all the caveats to the given macaroon.
-func (o *Oven) AddCaveats(ctx context.Context, m *Macaroon, caveats []checkers.Caveat) error {
- return m.AddCaveats(ctx, caveats, o.p.Key, o.p.Locator)
-}
-
-// Key returns the oven's private/public key pair.
-func (o *Oven) Key() *KeyPair {
- return o.p.Key
-}
-
-// Locator returns the third party locator that the
-// oven was created with.
-func (o *Oven) Locator() ThirdPartyLocator {
- return o.p.Locator
-}
-
-// CanonicalOps returns the given operations slice sorted
-// with duplicates removed.
-func CanonicalOps(ops []Op) []Op {
- canonOps := opsByValue(ops)
- needNewSlice := false
- for i := 1; i < len(ops); i++ {
- if !canonOps.Less(i-1, i) {
- needNewSlice = true
- break
- }
- }
- if !needNewSlice {
- return ops
- }
- canonOps = make([]Op, len(ops))
- copy(canonOps, ops)
- sort.Sort(canonOps)
-
- // Note we know that there's at least one operation here
- // because we'd have returned earlier if the slice was empty.
- j := 0
- for _, op := range canonOps[1:] {
- if op != canonOps[j] {
- j++
- canonOps[j] = op
- }
- }
- return canonOps[0 : j+1]
-}
-
-func (o *Oven) newMacaroonId(ctx context.Context, ops []Op, storageId []byte) (*macaroonpb.MacaroonId, error) {
- uuid := uuidGen.Next()
- nonce := uuid[0:16]
- return &macaroonpb.MacaroonId{
- Nonce: nonce,
- StorageId: storageId,
- Ops: macaroonIdOps(ops),
- }, nil
-}
-
-// macaroonIdOps returns operations suitable for serializing
-// as part of a *macaroonpb.MacaroonId. It assumes that
-// ops has been canonicalized and that there's at least
-// one operation.
-func macaroonIdOps(ops []Op) []*macaroonpb.Op {
- idOps := make([]macaroonpb.Op, 0, len(ops))
- idOps = append(idOps, macaroonpb.Op{
- Entity: ops[0].Entity,
- Actions: []string{ops[0].Action},
- })
- i := 0
- idOp := &idOps[0]
- for _, op := range ops[1:] {
- if op.Entity != idOp.Entity {
- idOps = append(idOps, macaroonpb.Op{
- Entity: op.Entity,
- Actions: []string{op.Action},
- })
- i++
- idOp = &idOps[i]
- continue
- }
- if op.Action != idOp.Actions[len(idOp.Actions)-1] {
- idOp.Actions = append(idOp.Actions, op.Action)
- }
- }
- idOpPtrs := make([]*macaroonpb.Op, len(idOps))
- for i := range idOps {
- idOpPtrs[i] = &idOps[i]
- }
- return idOpPtrs
-}
-
-type opsByValue []Op
-
-func (o opsByValue) Less(i, j int) bool {
- o0, o1 := o[i], o[j]
- if o0.Entity != o1.Entity {
- return o0.Entity < o1.Entity
- }
- return o0.Action < o1.Action
-}
-
-func (o opsByValue) Swap(i, j int) {
- o[i], o[j] = o[j], o[i]
-}
-
-func (o opsByValue) Len() int {
- return len(o)
-}
-
-func isLowerCaseHexChar(c byte) bool {
- switch {
- case '0' <= c && c <= '9':
- return true
- case 'a' <= c && c <= 'f':
- return true
- }
- return false
-}
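// Editorial sketch, not part of the vendored sources: minting an
// operation-scoped macaroon with the Oven defined above. The location,
// entity and action values are placeholders; with no RootKeyStoreForOps
// supplied, the default in-memory root key store is used.
//
//	oven := bakery.NewOven(bakery.OvenParams{Location: "https://example.com"})
//	m, err := oven.NewMacaroon(
//		context.Background(),
//		bakery.LatestVersion,
//		nil, // no first-party caveats at mint time
//		bakery.Op{Entity: "photo-123", Action: "read"},
//	)
//	if err != nil {
//		log.Fatal(err)
//	}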
diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/slice.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/slice.go
deleted file mode 100644
index 20c5fcc7..00000000
--- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/slice.go
+++ /dev/null
@@ -1,134 +0,0 @@
-package bakery
-
-import (
- "context"
- "fmt"
- "time"
-
- "gopkg.in/errgo.v1"
- "gopkg.in/macaroon.v2"
-
- "github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checkers"
-)
-
-// Slice holds a slice of unbound macaroons.
-type Slice []*Macaroon
-
-// Bind prepares the macaroon slice for use in a request. This must be
-// done before presenting the macaroons to a service for use as
-// authorization tokens. The result will only be valid
-// if s contains discharge macaroons for all third party
-// caveats.
-//
-// All the macaroons in the returned slice will be copies
-// of this in s, not references.
-func (s Slice) Bind() macaroon.Slice {
- if len(s) == 0 {
- return nil
- }
- ms := make(macaroon.Slice, len(s))
- ms[0] = s[0].M().Clone()
- rootSig := ms[0].Signature()
- for i, m := range s[1:] {
- m1 := m.M().Clone()
- m1.Bind(rootSig)
- ms[i+1] = m1
- }
- return ms
-}
-
-// Purge returns a new slice holding all macaroons in s
-// that expire after the given time.
-func (ms Slice) Purge(t time.Time) Slice {
- ms1 := make(Slice, 0, len(ms))
- for i, m := range ms {
- et, ok := checkers.ExpiryTime(m.Namespace(), m.M().Caveats())
- if !ok || et.After(t) {
- ms1 = append(ms1, m)
- } else if i == 0 {
- // The primary macaroon has expired, so all its discharges
- // have expired too.
- // TODO purge all discharge macaroons when the macaroon
- // containing their third-party caveat expires.
- return nil
- }
- }
- return ms1
-}
-
-// DischargeAll discharges all the third party caveats in the slice for
-// which discharge macaroons are not already present, using getDischarge
-// to acquire the discharge macaroons. It always returns the slice with
-// any acquired discharge macaroons added, even on error. It returns an
-// error if all the discharges could not be acquired.
-//
-// Note that this differs from the top-level DischargeAll function in that it can be given several existing
-// discharges, and that the resulting discharges are not bound to the primary,
-// so it's still possible to add caveats and reacquire expired discharges
-// without reacquiring the primary macaroon.
-func (ms Slice) DischargeAll(ctx context.Context, getDischarge func(ctx context.Context, cav macaroon.Caveat, encryptedCaveat []byte) (*Macaroon, error), localKey *KeyPair) (Slice, error) {
- if len(ms) == 0 {
- return nil, errgo.Newf("no macaroons to discharge")
- }
- ms1 := make(Slice, len(ms))
- copy(ms1, ms)
- type needCaveat struct {
- // cav holds the caveat that needs discharge.
- cav macaroon.Caveat
- // encryptedCaveat holds the encrypted caveat
- // if it was held externally.
- encryptedCaveat []byte
- }
- var need []needCaveat
- // have holds the ids of all the discharge macaroons already in the slice.
- have := make(map[string]bool)
- for _, m := range ms[1:] {
- have[string(m.M().Id())] = true
- }
- // addCaveats adds any required third party caveats to the need slice
- // that aren't already present.
- addCaveats := func(m *Macaroon) {
- for _, cav := range m.M().Caveats() {
- if len(cav.VerificationId) == 0 || have[string(cav.Id)] {
- continue
- }
- need = append(need, needCaveat{
- cav: cav,
- encryptedCaveat: m.caveatData[string(cav.Id)],
- })
- }
- }
- for _, m := range ms {
- addCaveats(m)
- }
- var errs []error
- for len(need) > 0 {
- cav := need[0]
- need = need[1:]
- var dm *Macaroon
- var err error
- if localKey != nil && cav.cav.Location == "local" {
- // TODO use a small caveat id.
- dm, err = Discharge(ctx, DischargeParams{
- Key: localKey,
- Checker: localDischargeChecker,
- Caveat: cav.encryptedCaveat,
- Id: cav.cav.Id,
- Locator: emptyLocator{},
- })
- } else {
- dm, err = getDischarge(ctx, cav.cav, cav.encryptedCaveat)
- }
- if err != nil {
- errs = append(errs, errgo.NoteMask(err, fmt.Sprintf("cannot get discharge from %q", cav.cav.Location), errgo.Any))
- continue
- }
- ms1 = append(ms1, dm)
- addCaveats(dm)
- }
- if errs != nil {
- // TODO log other errors? Return them all?
- return ms1, errgo.Mask(errs[0], errgo.Any)
- }
- return ms1, nil
-}
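// Editorial sketch, not part of the vendored sources: acquiring any missing
// discharges for a Slice and binding the result for presentation. The
// getDischarge function is a hypothetical caller-supplied implementation of
// the signature documented above.
//
//	getDischarge := func(ctx context.Context, cav macaroon.Caveat, encryptedCaveat []byte) (*bakery.Macaroon, error) {
//		// ... obtain a discharge from the third party at cav.Location ...
//	}
//	ms, err := bakery.Slice{m}.DischargeAll(ctx, getDischarge, nil)
//	if err != nil {
//		log.Fatal(err)
//	}
//	bound := ms.Bind() // macaroon.Slice ready to present to the target service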
diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/store.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/store.go
deleted file mode 100644
index b8b19408..00000000
--- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/store.go
+++ /dev/null
@@ -1,63 +0,0 @@
-package bakery
-
-import (
- "context"
- "sync"
-)
-
-// RootKeyStore defines a store for macaroon root keys.
-type RootKeyStore interface {
- // Get returns the root key for the given id.
- // If the item is not there, it returns ErrNotFound.
- Get(ctx context.Context, id []byte) ([]byte, error)
-
- // RootKey returns the root key to be used for making a new
- // macaroon, and an id that can be used to look it up later with
- // the Get method.
- //
- // Note that the root keys should remain available for as long
- // as the macaroons using them are valid.
- //
- // Note that there is no need for it to return a new root key
- // for every call - keys may be reused, although some key
- // cycling over time is advisable.
- RootKey(ctx context.Context) (rootKey []byte, id []byte, err error)
-}
-
-// NewMemRootKeyStore returns an implementation of
-// RootKeyStore that generates a single key and always
-// returns that from RootKey. The same id ("0") is always
-// used.
-func NewMemRootKeyStore() RootKeyStore {
- return new(memRootKeyStore)
-}
-
-type memRootKeyStore struct {
- mu sync.Mutex
- key []byte
-}
-
-// Get implements RootKeyStore.Get.
-func (s *memRootKeyStore) Get(_ context.Context, id []byte) ([]byte, error) {
- s.mu.Lock()
- defer s.mu.Unlock()
- if len(id) != 1 || id[0] != '0' || s.key == nil {
- return nil, ErrNotFound
- }
- return s.key, nil
-}
-
-// RootKey implements RootKeyStore.RootKey by always returning the same root
-// key.
-func (s *memRootKeyStore) RootKey(context.Context) (rootKey, id []byte, err error) {
- s.mu.Lock()
- defer s.mu.Unlock()
- if s.key == nil {
- newKey, err := randomBytes(24)
- if err != nil {
- return nil, nil, err
- }
- s.key = newKey
- }
- return s.key, []byte("0"), nil
-}
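// Editorial sketch, not part of the vendored sources: the in-memory store
// always hands back the same root key under the fixed id "0".
//
//	store := bakery.NewMemRootKeyStore()
//	rootKey, id, err := store.RootKey(ctx) // generates the key on first use
//	same, err := store.Get(ctx, id)        // returns the identical key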
diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/version.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/version.go
deleted file mode 100644
index 9f8e87bb..00000000
--- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/version.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package bakery
-
-import "gopkg.in/macaroon.v2"
-
-// Version represents a version of the bakery protocol.
-type Version int
-
-const (
- // In version 0, discharge-required errors use status 407.
- Version0 Version = 0
- // In version 1, discharge-required errors use status 401.
- Version1 Version = 1
- // In version 2, binary macaroons and caveat ids are supported.
- Version2 Version = 2
- // In version 3, we support operations associated with macaroons
- // and external third party caveats.
- Version3 Version = 3
- LatestVersion = Version3
-)
-
-// MacaroonVersion returns the macaroon version that should
-// be used with the given bakery Version.
-func MacaroonVersion(v Version) macaroon.Version {
- switch v {
- case Version0, Version1:
- return macaroon.V1
- default:
- return macaroon.V2
- }
-}
diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/browser.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/browser.go
deleted file mode 100644
index 8cc2e2a3..00000000
--- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/browser.go
+++ /dev/null
@@ -1,200 +0,0 @@
-package httpbakery
-
-import (
- "context"
- "fmt"
- "net/http"
- "net/url"
- "os"
-
- "github.com/juju/webbrowser"
- "gopkg.in/errgo.v1"
- "gopkg.in/httprequest.v1"
-
- "github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery"
-)
-
-const WebBrowserInteractionKind = "browser-window"
-
-// WaitTokenResponse holds the response type
-// returned, JSON-encoded, from the waitToken
-// URL passed to SetBrowserInteraction.
-type WaitTokenResponse struct {
- Kind string `json:"kind"`
- // Token holds the token value when it's well-formed UTF-8.
- Token string `json:"token,omitempty"`
- // Token64 holds the token value, base64 encoded, when it's
- // not well-formed UTF-8.
- Token64 string `json:"token64,omitempty"`
-}
-
-// WaitResponse holds the type that should be returned
-// by an HTTP response made to a LegacyWaitURL
-// (See the ErrorInfo type).
-type WaitResponse struct {
- Macaroon *bakery.Macaroon
-}
-
-// WebBrowserInteractionInfo holds the information
-// expected in the browser-window interaction
-// entry in an interaction-required error.
-type WebBrowserInteractionInfo struct {
- // VisitURL holds the URL to be visited in a web browser.
- VisitURL string
-
- // WaitTokenURL holds a URL that will block on GET
- // until the browser interaction has completed.
- // On success, the response is expected to hold a WaitTokenResponse
- // in its body holding the token to be returned from the
- // Interact method.
- WaitTokenURL string
-}
-
-var (
- _ Interactor = WebBrowserInteractor{}
- _ LegacyInteractor = WebBrowserInteractor{}
-)
-
-// OpenWebBrowser opens a web browser at the
-// given URL. If the OS is not recognised, the URL
-// is just printed to standard output.
-func OpenWebBrowser(url *url.URL) error {
- err := webbrowser.Open(url)
- if err == nil {
- fmt.Fprintf(os.Stderr, "Opening an authorization web page in your browser.\n")
- fmt.Fprintf(os.Stderr, "If it does not open, please open this URL:\n%s\n", url)
- return nil
- }
- if err == webbrowser.ErrNoBrowser {
- fmt.Fprintf(os.Stderr, "Please open this URL in your browser to authorize:\n%s\n", url)
- return nil
- }
- return err
-}
-
-// SetWebBrowserInteraction adds information about web-browser-based
-// interaction to the given error, which should be an
-// interaction-required error that's about to be returned from a
-// discharge request.
-//
-// The visitURL parameter holds a URL that should be visited by the user
-// in a web browser; the waitTokenURL parameter holds a URL that can be
-// long-polled to acquire the resulting discharge token.
-//
-// Use SetLegacyInteraction to add support for legacy clients
-// that don't understand the newer InteractionMethods field.
-func SetWebBrowserInteraction(e *Error, visitURL, waitTokenURL string) {
- e.SetInteraction(WebBrowserInteractionKind, WebBrowserInteractionInfo{
- VisitURL: visitURL,
- WaitTokenURL: waitTokenURL,
- })
-}
-
-// SetLegacyInteraction adds information about web-browser-based
-// interaction (or other kinds of legacy-protocol interaction) to the
-// given error, which should be an interaction-required error that's
-// about to be returned from a discharge request.
-//
-// The visitURL parameter holds a URL that should be visited by the user
-// in a web browser (or with an "Accept: application/json" header to
-// find out the set of legacy interaction methods).
-//
-// The waitURL parameter holds a URL that can be long-polled
-// to acquire the discharge macaroon.
-func SetLegacyInteraction(e *Error, visitURL, waitURL string) {
- if e.Info == nil {
- e.Info = new(ErrorInfo)
- }
- e.Info.LegacyVisitURL = visitURL
- e.Info.LegacyWaitURL = waitURL
-}
-
-// WebBrowserInteractor handles web-browser-based
-// interaction-required errors by opening a web
-// browser to allow the user to prove their
-// credentials interactively.
-//
-// It implements the Interactor interface, so instances
-// can be used with Client.AddInteractor.
-type WebBrowserInteractor struct {
- // OpenWebBrowser is used to visit a page in
- // the user's web browser. If it's nil, the
- // OpenWebBrowser function will be used.
- OpenWebBrowser func(*url.URL) error
-}
-
-// Kind implements Interactor.Kind.
-func (WebBrowserInteractor) Kind() string {
- return WebBrowserInteractionKind
-}
-
-// Interact implements Interactor.Interact by opening a new web page.
-func (wi WebBrowserInteractor) Interact(ctx context.Context, client *Client, location string, irErr *Error) (*DischargeToken, error) {
- var p WebBrowserInteractionInfo
- if err := irErr.InteractionMethod(wi.Kind(), &p); err != nil {
- return nil, errgo.Mask(err, errgo.Is(ErrInteractionMethodNotFound))
- }
- visitURL, err := relativeURL(location, p.VisitURL)
- if err != nil {
- return nil, errgo.Notef(err, "cannot make relative visit URL")
- }
- waitTokenURL, err := relativeURL(location, p.WaitTokenURL)
- if err != nil {
- return nil, errgo.Notef(err, "cannot make relative wait URL")
- }
- if err := wi.openWebBrowser(visitURL); err != nil {
- return nil, errgo.Mask(err)
- }
- return waitForToken(ctx, client, waitTokenURL)
-}
-
-func (wi WebBrowserInteractor) openWebBrowser(u *url.URL) error {
- open := wi.OpenWebBrowser
- if open == nil {
- open = OpenWebBrowser
- }
- if err := open(u); err != nil {
- return errgo.Mask(err)
- }
- return nil
-}
-
-// waitForToken returns a token from the waitToken URL.
-func waitForToken(ctx context.Context, client *Client, waitTokenURL *url.URL) (*DischargeToken, error) {
- // TODO integrate this with waitForMacaroon somehow?
- req, err := http.NewRequest("GET", waitTokenURL.String(), nil)
- if err != nil {
- return nil, errgo.Mask(err)
- }
- req = req.WithContext(ctx)
- httpResp, err := client.Client.Do(req)
- if err != nil {
- return nil, errgo.Notef(err, "cannot get %q", waitTokenURL)
- }
- defer httpResp.Body.Close()
- if httpResp.StatusCode != http.StatusOK {
- err := unmarshalError(httpResp)
- return nil, errgo.NoteMask(err, "cannot acquire discharge token", errgo.Any)
- }
- var resp WaitTokenResponse
- if err := httprequest.UnmarshalJSONResponse(httpResp, &resp); err != nil {
- return nil, errgo.Notef(err, "cannot unmarshal wait response")
- }
- tokenVal, err := maybeBase64Decode(resp.Token, resp.Token64)
- if err != nil {
- return nil, errgo.Notef(err, "bad discharge token")
- }
- // TODO check that kind and value are non-empty?
- return &DischargeToken{
- Kind: resp.Kind,
- Value: tokenVal,
- }, nil
-}
-
-// LegacyInteract implements LegacyInteractor by opening a web browser page.
-func (wi WebBrowserInteractor) LegacyInteract(ctx context.Context, client *Client, location string, visitURL *url.URL) error {
- if err := wi.openWebBrowser(visitURL); err != nil {
- return errgo.Mask(err)
- }
- return nil
-}
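// Editorial sketch, not part of the vendored sources: enabling
// browser-window interaction on an httpbakery client. A custom
// OpenWebBrowser function could be supplied instead, e.g. for tests.
//
//	client := httpbakery.NewClient()
//	client.AddInteractor(httpbakery.WebBrowserInteractor{})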
diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/checkers.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/checkers.go
deleted file mode 100644
index befc0e17..00000000
--- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/checkers.go
+++ /dev/null
@@ -1,157 +0,0 @@
-package httpbakery
-
-import (
- "context"
- "net"
- "net/http"
-
- "gopkg.in/errgo.v1"
-
- "github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checkers"
-)
-
-type httpRequestKey struct{}
-
-// ContextWithRequest returns the context with information from the
-// given request attached as context. This is used by the httpbakery
-// checkers (see RegisterCheckers for details).
-func ContextWithRequest(ctx context.Context, req *http.Request) context.Context {
- return context.WithValue(ctx, httpRequestKey{}, req)
-}
-
-func requestFromContext(ctx context.Context) *http.Request {
- req, _ := ctx.Value(httpRequestKey{}).(*http.Request)
- return req
-}
-
-const (
- // CondClientIPAddr holds the first party caveat condition
- // that checks a client's IP address.
- CondClientIPAddr = "client-ip-addr"
-
- // CondClientOrigin holds the first party caveat condition that
- // checks a client's origin header.
- CondClientOrigin = "origin"
-)
-
-// CheckersNamespace holds the URI of the HTTP checkers schema.
-const CheckersNamespace = "http"
-
-var allCheckers = map[string]checkers.Func{
- CondClientIPAddr: ipAddrCheck,
- CondClientOrigin: clientOriginCheck,
-}
-
-// RegisterCheckers registers all the HTTP checkers with the given checker.
-// Current checkers include:
-//
-// client-ip-addr
-//
-// The client-ip-addr caveat checks that the HTTP request has
-// the given remote IP address.
-//
-// origin
-//
-// The origin caveat checks that the HTTP Origin header has
-// the given value.
-func RegisterCheckers(c *checkers.Checker) {
- c.Namespace().Register(CheckersNamespace, "http")
- for cond, check := range allCheckers {
- c.Register(cond, CheckersNamespace, check)
- }
-}
-
-// NewChecker returns a new checker with the standard
-// and HTTP checkers registered in it.
-func NewChecker() *checkers.Checker {
- c := checkers.New(nil)
- RegisterCheckers(c)
- return c
-}
-
-// ipAddrCheck implements the IP client address checker
-// for an HTTP request.
-func ipAddrCheck(ctx context.Context, cond, args string) error {
- req := requestFromContext(ctx)
- if req == nil {
- return errgo.Newf("no IP address found in context")
- }
- ip := net.ParseIP(args)
- if ip == nil {
- return errgo.Newf("cannot parse IP address in caveat")
- }
- if req.RemoteAddr == "" {
- return errgo.Newf("client has no remote address")
- }
- reqIP, err := requestIPAddr(req)
- if err != nil {
- return errgo.Mask(err)
- }
- if !reqIP.Equal(ip) {
- return errgo.Newf("client IP address mismatch, got %s", reqIP)
- }
- return nil
-}
-
-// clientOriginCheck implements the Origin header checker
-// for an HTTP request.
-func clientOriginCheck(ctx context.Context, cond, args string) error {
- req := requestFromContext(ctx)
- if req == nil {
- return errgo.Newf("no origin found in context")
- }
- // Note that web browsers may not provide the origin header when it's
- // not a cross-site request with a GET method. There's nothing we
- // can do about that, so just allow all requests with an empty origin.
- if reqOrigin := req.Header.Get("Origin"); reqOrigin != "" && reqOrigin != args {
- return errgo.Newf("request has invalid Origin header; got %q", reqOrigin)
- }
- return nil
-}
-
-// SameClientIPAddrCaveat returns a caveat that will check that
-// the remote IP address is the same as that in the given HTTP request.
-func SameClientIPAddrCaveat(req *http.Request) checkers.Caveat {
- if req.RemoteAddr == "" {
- return checkers.ErrorCaveatf("client has no remote IP address")
- }
- ip, err := requestIPAddr(req)
- if err != nil {
- return checkers.ErrorCaveatf("%v", err)
- }
- return ClientIPAddrCaveat(ip)
-}
-
-// ClientIPAddrCaveat returns a caveat that will check whether the
-// client's IP address is as provided.
-func ClientIPAddrCaveat(addr net.IP) checkers.Caveat {
- if len(addr) != net.IPv4len && len(addr) != net.IPv6len {
- return checkers.ErrorCaveatf("bad IP address %d", []byte(addr))
- }
- return httpCaveat(CondClientIPAddr, addr.String())
-}
-
-// ClientOriginCaveat returns a caveat that will check whether the
-// client's Origin header in its HTTP request is as provided.
-func ClientOriginCaveat(origin string) checkers.Caveat {
- return httpCaveat(CondClientOrigin, origin)
-}
-
-func httpCaveat(cond, arg string) checkers.Caveat {
- return checkers.Caveat{
- Condition: checkers.Condition(cond, arg),
- Namespace: CheckersNamespace,
- }
-}
-
-func requestIPAddr(req *http.Request) (net.IP, error) {
- reqHost, _, err := net.SplitHostPort(req.RemoteAddr)
- if err != nil {
- return nil, errgo.Newf("cannot parse host port in remote address: %v", err)
- }
- ip := net.ParseIP(reqHost)
- if ip == nil {
- return nil, errgo.Newf("invalid IP address in remote address %q", req.RemoteAddr)
- }
- return ip, nil
-}
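// Editorial sketch, not part of the vendored sources: server-side use of the
// HTTP checkers. The request must be attached to the context so that the
// client-ip-addr and origin conditions can inspect it; the IP address shown
// is a placeholder.
//
//	checker := httpbakery.NewChecker()
//	ctx := httpbakery.ContextWithRequest(req.Context(), req)
//	cav := httpbakery.ClientIPAddrCaveat(net.ParseIP("203.0.113.7"))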
diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/client.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/client.go
deleted file mode 100644
index 212f57f0..00000000
--- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/client.go
+++ /dev/null
@@ -1,727 +0,0 @@
-package httpbakery
-
-import (
- "context"
- "encoding/base64"
- "encoding/json"
- "fmt"
- "net/http"
- "net/http/cookiejar"
- "net/url"
- "strings"
- "time"
-
- "golang.org/x/net/publicsuffix"
- "gopkg.in/errgo.v1"
- "gopkg.in/httprequest.v1"
- "gopkg.in/macaroon.v2"
-
- "github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery"
- "github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checkers"
-)
-
-var unmarshalError = httprequest.ErrorUnmarshaler(&Error{})
-
-// maxDischargeRetries holds the maximum number of times that an HTTP
-// request will be retried after a third party caveat has been successfully
-// discharged.
-const maxDischargeRetries = 3
-
-// DischargeError represents the error when a third party discharge
-// is refused by a server.
-type DischargeError struct {
- // Reason holds the underlying remote error that caused the
- // discharge to fail.
- Reason *Error
-}
-
-func (e *DischargeError) Error() string {
- return fmt.Sprintf("third party refused discharge: %v", e.Reason)
-}
-
-// IsDischargeError reports whether err is a *DischargeError.
-func IsDischargeError(err error) bool {
- _, ok := err.(*DischargeError)
- return ok
-}
-
-// InteractionError wraps an error returned by an interaction method.
-type InteractionError struct {
- // Reason holds the actual error returned by the interaction method.
- Reason error
-}
-
-func (e *InteractionError) Error() string {
- return fmt.Sprintf("cannot start interactive session: %v", e.Reason)
-}
-
-// IsInteractionError reports whether err is an *InteractionError.
-func IsInteractionError(err error) bool {
- _, ok := err.(*InteractionError)
- return ok
-}
-
-// NewHTTPClient returns an http.Client that ensures
-// that headers are sent to the server even when the
-// server redirects a GET request. The returned client
-// also contains an empty in-memory cookie jar.
-//
-// See https://github.com/golang/go/issues/4677
-func NewHTTPClient() *http.Client {
- c := *http.DefaultClient
- c.CheckRedirect = func(req *http.Request, via []*http.Request) error {
- if len(via) >= 10 {
- return fmt.Errorf("too many redirects")
- }
- if len(via) == 0 {
- return nil
- }
- for attr, val := range via[0].Header {
- if attr == "Cookie" {
- // Cookies are added automatically anyway.
- continue
- }
- if _, ok := req.Header[attr]; !ok {
- req.Header[attr] = val
- }
- }
- return nil
- }
- jar, err := cookiejar.New(&cookiejar.Options{
- PublicSuffixList: publicsuffix.List,
- })
- if err != nil {
- panic(err)
- }
- c.Jar = jar
- return &c
-}
-
-// Client holds the context for making HTTP requests
-// that automatically acquire and discharge macaroons.
-type Client struct {
- // Client holds the HTTP client to use. It should have a cookie
- // jar configured, and when redirecting it should preserve the
- // headers (see NewHTTPClient).
- *http.Client
-
- // InteractionMethods holds a slice of supported interaction
- // methods, with preferred methods earlier in the slice.
- // On receiving an interaction-required error when discharging,
- // the Kind method of each Interactor in turn will be called
- // and, if the error indicates that the interaction kind is supported,
- // the Interact method will be called to complete the discharge.
- InteractionMethods []Interactor
-
- // Key holds the client's key. If set, the client will try to
- // discharge third party caveats with the special location
- // "local" by using this key. See bakery.DischargeAllWithKey and
-// bakery.LocalThirdPartyCaveat for more information.
- Key *bakery.KeyPair
-
- // Logger is used to log information about client activities.
- // If it is nil, bakery.DefaultLogger("httpbakery") will be used.
- Logger bakery.Logger
-}
-
-// An Interactor represents a way of persuading a discharger
-// that it should grant a discharge macaroon.
-type Interactor interface {
- // Kind returns the interaction method name. This corresponds to the
- // key in the Error.InteractionMethods type.
- Kind() string
-
- // Interact performs the interaction, and returns a token that can be
- // used to acquire the discharge macaroon. The location provides
- // the third party caveat location to make it possible to use
- // relative URLs.
- //
- // If the given interaction isn't supported by the client for
- // the given location, it may return an error with an
- // ErrInteractionMethodNotFound cause which will cause the
- // interactor to be ignored that time.
- Interact(ctx context.Context, client *Client, location string, interactionRequiredErr *Error) (*DischargeToken, error)
-}
-
-// DischargeToken holds a token that is intended
-// to persuade a discharger to discharge a third
-// party caveat.
-type DischargeToken struct {
- // Kind holds the kind of the token. By convention this
- // matches the name of the interaction method used to
- // obtain the token, but that's not required.
- Kind string `json:"kind"`
-
- // Value holds the value of the token.
- Value []byte `json:"value"`
-}
-
-// LegacyInteractor may optionally be implemented by Interactor
-// implementations that implement the legacy interaction-required
-// error protocols.
-type LegacyInteractor interface {
- // LegacyInteract implements the "visit" half of a legacy discharge
- // interaction. The "wait" half will be implemented by httpbakery.
- // The location is the location specified by the third party
- // caveat.
- LegacyInteract(ctx context.Context, client *Client, location string, visitURL *url.URL) error
-}
-
-// NewClient returns a new Client containing an HTTP client
-// created with NewHTTPClient and leaves all other fields zero.
-func NewClient() *Client {
- return &Client{
- Client: NewHTTPClient(),
- }
-}
-
-// AddInteractor is a convenience method that appends the given
-// interactor to c.InteractionMethods.
-// For example, to enable web-browser interaction on
-// a client c, do:
-//
-// c.AddInteractor(httpbakery.WebBrowserWindowInteractor)
-func (c *Client) AddInteractor(i Interactor) {
- c.InteractionMethods = append(c.InteractionMethods, i)
-}
-
-// DischargeAll attempts to acquire discharge macaroons for all the
-// third party caveats in m, and returns a slice containing all
-// of them bound to m.
-//
-// If the discharge fails because a third party refuses to discharge a
-// caveat, the returned error will have a cause of type *DischargeError.
-// If the discharge fails because an interaction method returns an error,
-// the returned error will have a cause of *InteractionError.
-//
-// The returned macaroon slice will not be stored in the client
-// cookie jar (see SetCookie if you need to do that).
-func (c *Client) DischargeAll(ctx context.Context, m *bakery.Macaroon) (macaroon.Slice, error) {
- return bakery.DischargeAllWithKey(ctx, m, c.AcquireDischarge, c.Key)
-}
-
-// DischargeAllUnbound is like DischargeAll except that it does not
-// bind the resulting macaroons.
-func (c *Client) DischargeAllUnbound(ctx context.Context, ms bakery.Slice) (bakery.Slice, error) {
- return ms.DischargeAll(ctx, c.AcquireDischarge, c.Key)
-}
-
-// Do is like DoWithContext, except the context is automatically derived.
-// If using go version 1.7 or later the context will be taken from the
-// given request, otherwise context.Background() will be used.
-func (c *Client) Do(req *http.Request) (*http.Response, error) {
- return c.do(contextFromRequest(req), req, nil)
-}
-
-// DoWithContext sends the given HTTP request and returns its response.
-// If the request fails with a discharge-required error, any required
-// discharge macaroons will be acquired, and the request will be repeated
-// with those attached.
-//
-// If the required discharges were refused by a third party, an error
-// with a *DischargeError cause will be returned.
-//
-// If interaction is required by the user, the client's InteractionMethods
-// will be used to perform interaction. An error
-// with a *InteractionError cause will be returned if this interaction
-// fails. See WebBrowserWindowInteractor for a possible implementation of
-// an Interactor for an interaction method.
-//
-// DoWithContext may add headers to req.Header.
-func (c *Client) DoWithContext(ctx context.Context, req *http.Request) (*http.Response, error) {
- return c.do(ctx, req, nil)
-}
-
-// DoWithCustomError is like Do except it allows a client
-// to specify a custom error function, getError, which is called on the
-// HTTP response and may return a non-nil error if the response holds an
-// error. If the cause of the returned error is a *Error value and its
-// code is ErrDischargeRequired, the macaroon in its Info field will be
-// discharged and the request will be repeated with the discharged
-// macaroon. If getError returns nil, it should leave the response body
-// unchanged.
-//
-// If getError is nil, DefaultGetError will be used.
-//
-// This method can be useful when dealing with APIs that
-// return their errors in a format incompatible with Error, but the
-// need for it should be avoided when creating new APIs,
-// as it makes the endpoints less amenable to generic tools.
-func (c *Client) DoWithCustomError(req *http.Request, getError func(resp *http.Response) error) (*http.Response, error) {
- return c.do(contextFromRequest(req), req, getError)
-}
-
-func (c *Client) do(ctx context.Context, req *http.Request, getError func(resp *http.Response) error) (*http.Response, error) {
- c.logDebugf(ctx, "client do %s %s {", req.Method, req.URL)
- resp, err := c.do1(ctx, req, getError)
- c.logDebugf(ctx, "} -> error %#v", err)
- return resp, err
-}
-
-func (c *Client) do1(ctx context.Context, req *http.Request, getError func(resp *http.Response) error) (*http.Response, error) {
- if getError == nil {
- getError = DefaultGetError
- }
- if c.Client.Jar == nil {
- return nil, errgo.New("no cookie jar supplied in HTTP client")
- }
- rreq, ok := newRetryableRequest(c.Client, req)
- if !ok {
- return nil, fmt.Errorf("request body is not seekable")
- }
- defer rreq.close()
-
- req.Header.Set(BakeryProtocolHeader, fmt.Sprint(bakery.LatestVersion))
-
- // Make several attempts to do the request, because we might have
- // to get through several layers of security. We only retry if
- // we get a DischargeRequiredError and succeed in discharging
- // the macaroon in it.
- retry := 0
- for {
- resp, err := c.do2(ctx, rreq, getError)
- if err == nil || !isDischargeRequiredError(err) {
- return resp, errgo.Mask(err, errgo.Any)
- }
- if retry++; retry > maxDischargeRetries {
- return nil, errgo.NoteMask(err, fmt.Sprintf("too many (%d) discharge requests", retry-1), errgo.Any)
- }
- if err1 := c.HandleError(ctx, req.URL, err); err1 != nil {
- return nil, errgo.Mask(err1, errgo.Any)
- }
- c.logDebugf(ctx, "discharge succeeded; retry %d", retry)
- }
-}
-
-func (c *Client) do2(ctx context.Context, rreq *retryableRequest, getError func(resp *http.Response) error) (*http.Response, error) {
- httpResp, err := rreq.do(ctx)
- if err != nil {
- return nil, errgo.Mask(err, errgo.Any)
- }
- err = getError(httpResp)
- if err == nil {
- c.logInfof(ctx, "HTTP response OK (status %v)", httpResp.Status)
- return httpResp, nil
- }
- httpResp.Body.Close()
- return nil, errgo.Mask(err, errgo.Any)
-}
-
-// HandleError tries to resolve the given error, which should be a
-// response to the given URL, by discharging any macaroon contained in
-// it. That is, if the error cause is an *Error and its code is
-// ErrDischargeRequired, then it will try to discharge
-// err.Info.Macaroon. If the discharge succeeds, the discharged macaroon
-// will be saved to the client's cookie jar and HandleError will return
-// nil.
-//
-// For any other kind of error, the original error will be returned.
-func (c *Client) HandleError(ctx context.Context, reqURL *url.URL, err error) error {
- respErr, ok := errgo.Cause(err).(*Error)
- if !ok {
- return err
- }
- if respErr.Code != ErrDischargeRequired {
- return respErr
- }
- if respErr.Info == nil || respErr.Info.Macaroon == nil {
- return errgo.New("no macaroon found in discharge-required response")
- }
- mac := respErr.Info.Macaroon
- macaroons, err := bakery.DischargeAllWithKey(ctx, mac, c.AcquireDischarge, c.Key)
- if err != nil {
- return errgo.Mask(err, errgo.Any)
- }
- var cookiePath string
- if path := respErr.Info.MacaroonPath; path != "" {
- relURL, err := parseURLPath(path)
- if err != nil {
- c.logInfof(ctx, "ignoring invalid path in discharge-required response: %v", err)
- } else {
- cookiePath = reqURL.ResolveReference(relURL).Path
- }
- }
- // TODO use a namespace taken from the error response.
- cookie, err := NewCookie(nil, macaroons)
- if err != nil {
- return errgo.Notef(err, "cannot make cookie")
- }
- cookie.Path = cookiePath
- if name := respErr.Info.CookieNameSuffix; name != "" {
- cookie.Name = "macaroon-" + name
- }
- c.Jar.SetCookies(reqURL, []*http.Cookie{cookie})
- return nil
-}
-
-// DefaultGetError is the default error unmarshaler used by Client.Do.
-func DefaultGetError(httpResp *http.Response) error {
- if httpResp.StatusCode != http.StatusProxyAuthRequired && httpResp.StatusCode != http.StatusUnauthorized {
- return nil
- }
- // Check for the new protocol discharge error.
- if httpResp.StatusCode == http.StatusUnauthorized && httpResp.Header.Get("WWW-Authenticate") != "Macaroon" {
- return nil
- }
- if httpResp.Header.Get("Content-Type") != "application/json" {
- return nil
- }
- var resp Error
- if err := json.NewDecoder(httpResp.Body).Decode(&resp); err != nil {
- return fmt.Errorf("cannot unmarshal error response: %v", err)
- }
- return &resp
-}
-
-func parseURLPath(path string) (*url.URL, error) {
- u, err := url.Parse(path)
- if err != nil {
- return nil, errgo.Mask(err)
- }
- if u.Scheme != "" ||
- u.Opaque != "" ||
- u.User != nil ||
- u.Host != "" ||
- u.RawQuery != "" ||
- u.Fragment != "" {
- return nil, errgo.Newf("URL path %q is not clean", path)
- }
- return u, nil
-}
-
-// PermanentExpiryDuration holds the length of time a cookie
-// holding a macaroon with no time-before caveat will be
-// stored.
-const PermanentExpiryDuration = 100 * 365 * 24 * time.Hour
-
-// NewCookie takes a slice of macaroons and returns them
-// encoded as a cookie. The slice should contain a single primary
-// macaroon in its first element, and any discharges after that.
-//
-// The given namespace specifies the first party caveat namespace,
-// used for deriving the expiry time of the cookie.
-func NewCookie(ns *checkers.Namespace, ms macaroon.Slice) (*http.Cookie, error) {
- if len(ms) == 0 {
- return nil, errgo.New("no macaroons in cookie")
- }
- // TODO(rog) marshal cookie as binary if version allows.
- data, err := json.Marshal(ms)
- if err != nil {
- return nil, errgo.Notef(err, "cannot marshal macaroons")
- }
- cookie := &http.Cookie{
- Name: fmt.Sprintf("macaroon-%x", ms[0].Signature()),
- Value: base64.StdEncoding.EncodeToString(data),
- }
- expires, found := checkers.MacaroonsExpiryTime(ns, ms)
- if !found {
- // The macaroon doesn't expire - use a very long expiry
- // time for the cookie.
- expires = time.Now().Add(PermanentExpiryDuration)
- } else if expires.Sub(time.Now()) < time.Minute {
- // The macaroon might have expired already, or it's
- // got a short duration, so treat it as a session cookie
- // by setting Expires to the zero time.
- expires = time.Time{}
- }
- cookie.Expires = expires
- // TODO(rog) other fields.
- return cookie, nil
-}
-
-// SetCookie sets a cookie for the given URL on the given cookie jar
-// that will hold the given macaroon slice. The macaroon slice should
-// contain a single primary macaroon in its first element, and any
-// discharges after that.
-//
-// The given namespace specifies the first party caveat namespace,
-// used for deriving the expiry time of the cookie.
-func SetCookie(jar http.CookieJar, url *url.URL, ns *checkers.Namespace, ms macaroon.Slice) error {
- cookie, err := NewCookie(ns, ms)
- if err != nil {
- return errgo.Mask(err)
- }
- jar.SetCookies(url, []*http.Cookie{cookie})
- return nil
-}
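// Editorial sketch, not part of the vendored sources: storing a bound
// macaroon slice in a client's cookie jar so that later requests to the
// same URL present it automatically. The URL is a placeholder and the nil
// namespace means no expiry time can be derived from the caveats.
//
//	u, err := url.Parse("https://example.com/api/")
//	if err != nil {
//		log.Fatal(err)
//	}
//	if err := httpbakery.SetCookie(client.Jar, u, nil, boundMacaroons); err != nil {
//		log.Fatal(err)
//	}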
-
-// MacaroonsForURL returns any macaroons associated with the
-// given URL in the given cookie jar.
-func MacaroonsForURL(jar http.CookieJar, u *url.URL) []macaroon.Slice {
- return cookiesToMacaroons(jar.Cookies(u))
-}
-
-func appendURLElem(u, elem string) string {
- if strings.HasSuffix(u, "/") {
- return u + elem
- }
- return u + "/" + elem
-}
-
-// AcquireDischarge acquires a discharge macaroon, treating the caveat location as an HTTP URL.
-// It fits the getDischarge argument type required by bakery.DischargeAll.
-func (c *Client) AcquireDischarge(ctx context.Context, cav macaroon.Caveat, payload []byte) (*bakery.Macaroon, error) {
- m, err := c.acquireDischarge(ctx, cav, payload, nil)
- if err == nil {
- return m, nil
- }
- cause, ok := errgo.Cause(err).(*Error)
- if !ok {
- return nil, errgo.NoteMask(err, "cannot acquire discharge", IsInteractionError)
- }
- if cause.Code != ErrInteractionRequired {
- return nil, &DischargeError{
- Reason: cause,
- }
- }
- if cause.Info == nil {
- return nil, errgo.Notef(err, "interaction-required response with no info")
- }
- // Make sure the location has a trailing slash so that
- // the relative URL calculations work correctly even when
- // cav.Location doesn't have a trailing slash.
- loc := appendURLElem(cav.Location, "")
- token, m, err := c.interact(ctx, loc, cause, payload)
- if err != nil {
- return nil, errgo.Mask(err, IsDischargeError, IsInteractionError)
- }
- if m != nil {
- // We've acquired the macaroon directly via legacy interaction.
- return m, nil
- }
-
- // Try to acquire the discharge again, but this time with
- // the token acquired by the interaction method.
- m, err = c.acquireDischarge(ctx, cav, payload, token)
- if err != nil {
- return nil, errgo.Mask(err, IsDischargeError, IsInteractionError)
- }
- return m, nil
-}
-
-// acquireDischarge is like AcquireDischarge except that it also
-// takes a token acquired from an interaction method.
-func (c *Client) acquireDischarge(
- ctx context.Context,
- cav macaroon.Caveat,
- payload []byte,
- token *DischargeToken,
-) (*bakery.Macaroon, error) {
- dclient := newDischargeClient(cav.Location, c)
- var req dischargeRequest
- req.Id, req.Id64 = maybeBase64Encode(cav.Id)
- if token != nil {
- req.Token, req.Token64 = maybeBase64Encode(token.Value)
- req.TokenKind = token.Kind
- }
- req.Caveat = base64.RawURLEncoding.EncodeToString(payload)
- resp, err := dclient.Discharge(ctx, &req)
- if err == nil {
- return resp.Macaroon, nil
- }
- return nil, errgo.Mask(err, errgo.Any)
-}
-
-// interact gathers a macaroon by directing the user to interact with a
-// web page. The irErr argument holds the interaction-required
-// error response.
-func (c *Client) interact(ctx context.Context, location string, irErr *Error, payload []byte) (*DischargeToken, *bakery.Macaroon, error) {
- if len(c.InteractionMethods) == 0 {
- return nil, nil, &InteractionError{
- Reason: errgo.New("interaction required but not possible"),
- }
- }
- if irErr.Info.InteractionMethods == nil && irErr.Info.LegacyVisitURL != "" {
- // It's an old-style error; deal with it differently.
- m, err := c.legacyInteract(ctx, location, irErr)
- if err != nil {
- return nil, nil, errgo.Mask(err, IsDischargeError, IsInteractionError)
- }
- return nil, m, nil
- }
- for _, interactor := range c.InteractionMethods {
- c.logDebugf(ctx, "checking interaction method %q", interactor.Kind())
- if _, ok := irErr.Info.InteractionMethods[interactor.Kind()]; ok {
- c.logDebugf(ctx, "found possible interaction method %q", interactor.Kind())
- token, err := interactor.Interact(ctx, c, location, irErr)
- if err != nil {
- if errgo.Cause(err) == ErrInteractionMethodNotFound {
- continue
- }
- return nil, nil, errgo.Mask(err, IsDischargeError, IsInteractionError)
- }
- if token == nil {
- return nil, nil, errgo.New("interaction method returned an empty token")
- }
- return token, nil, nil
- } else {
- c.logDebugf(ctx, "interaction method %q not found in %#v", interactor.Kind(), irErr.Info.InteractionMethods)
- }
- }
- return nil, nil, &InteractionError{
- Reason: errgo.Newf("no supported interaction method"),
- }
-}
-
-func (c *Client) legacyInteract(ctx context.Context, location string, irErr *Error) (*bakery.Macaroon, error) {
- visitURL, err := relativeURL(location, irErr.Info.LegacyVisitURL)
- if err != nil {
- return nil, errgo.Mask(err)
- }
- waitURL, err := relativeURL(location, irErr.Info.LegacyWaitURL)
- if err != nil {
- return nil, errgo.Mask(err)
- }
- methodURLs := map[string]*url.URL{
- "interactive": visitURL,
- }
- if len(c.InteractionMethods) > 1 || c.InteractionMethods[0].Kind() != WebBrowserInteractionKind {
- // We have several possible methods or we only support a non-window
- // method, so we need to fetch the possible methods supported by the discharger.
- methodURLs = legacyGetInteractionMethods(ctx, c.logger(), c, visitURL)
- }
- for _, interactor := range c.InteractionMethods {
- kind := interactor.Kind()
- if kind == WebBrowserInteractionKind {
- // This is the old name for browser-window interaction.
- kind = "interactive"
- }
- interactor, ok := interactor.(LegacyInteractor)
- if !ok {
- // Legacy interaction mode isn't supported.
- continue
- }
- visitURL, ok := methodURLs[kind]
- if !ok {
- continue
- }
- visitURL, err := relativeURL(location, visitURL.String())
- if err != nil {
- return nil, errgo.Mask(err)
- }
- if err := interactor.LegacyInteract(ctx, c, location, visitURL); err != nil {
- return nil, &InteractionError{
- Reason: errgo.Mask(err, errgo.Any),
- }
- }
- return waitForMacaroon(ctx, c, waitURL)
- }
- return nil, &InteractionError{
- Reason: errgo.Newf("no methods supported"),
- }
-}
-
-func (c *Client) logDebugf(ctx context.Context, f string, a ...interface{}) {
- c.logger().Debugf(ctx, f, a...)
-}
-
-func (c *Client) logInfof(ctx context.Context, f string, a ...interface{}) {
- c.logger().Infof(ctx, f, a...)
-}
-
-func (c *Client) logger() bakery.Logger {
- if c.Logger != nil {
- return c.Logger
- }
- return bakery.DefaultLogger("httpbakery")
-}
-
-// waitForMacaroon returns a macaroon from a legacy wait endpoint.
-func waitForMacaroon(ctx context.Context, client *Client, waitURL *url.URL) (*bakery.Macaroon, error) {
- req, err := http.NewRequest("GET", waitURL.String(), nil)
- if err != nil {
- return nil, errgo.Mask(err)
- }
- req = req.WithContext(ctx)
- httpResp, err := client.Client.Do(req)
- if err != nil {
- return nil, errgo.Notef(err, "cannot get %q", waitURL)
- }
- defer httpResp.Body.Close()
- if httpResp.StatusCode != http.StatusOK {
- err := unmarshalError(httpResp)
- if err1, ok := err.(*Error); ok {
- err = &DischargeError{
- Reason: err1,
- }
- }
- return nil, errgo.NoteMask(err, "failed to acquire macaroon after waiting", errgo.Any)
- }
- var resp WaitResponse
- if err := httprequest.UnmarshalJSONResponse(httpResp, &resp); err != nil {
- return nil, errgo.Notef(err, "cannot unmarshal wait response")
- }
- return resp.Macaroon, nil
-}
-
-// relativeURL resolves the new URL reference against the base URL.
-func relativeURL(base, new string) (*url.URL, error) {
- if new == "" {
- return nil, errgo.Newf("empty URL")
- }
- baseURL, err := url.Parse(base)
- if err != nil {
- return nil, errgo.Notef(err, "cannot parse URL")
- }
- newURL, err := url.Parse(new)
- if err != nil {
- return nil, errgo.Notef(err, "cannot parse URL")
- }
- return baseURL.ResolveReference(newURL), nil
-}
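-
-// For example, following url.URL.ResolveReference semantics (which is why
-// AcquireDischarge appends a trailing slash to the caveat location):
-//
-//	relativeURL("https://example.com/a/b", "c")  // https://example.com/a/c
-//	relativeURL("https://example.com/a/b/", "c") // https://example.com/a/b/c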
-
-// TODO(rog) move a lot of the code below into server.go, as it's
-// much more about server side than client side.
-
-// MacaroonsHeader is the key of the HTTP header that can be used to provide a
-// macaroon for request authorization.
-const MacaroonsHeader = "Macaroons"
-
-// RequestMacaroons returns any collections of macaroons from the header and
-// cookies found in the request. By convention, each slice will contain a
-// primary macaroon followed by its discharges.
-func RequestMacaroons(req *http.Request) []macaroon.Slice {
- mss := cookiesToMacaroons(req.Cookies())
- for _, h := range req.Header[MacaroonsHeader] {
- ms, err := decodeMacaroonSlice(h)
- if err != nil {
- // Ignore invalid macaroons.
- continue
- }
- mss = append(mss, ms)
- }
- return mss
-}
-
-// cookiesToMacaroons returns a slice of any macaroons found
-// in the given slice of cookies.
-func cookiesToMacaroons(cookies []*http.Cookie) []macaroon.Slice {
- var mss []macaroon.Slice
- for _, cookie := range cookies {
- if !strings.HasPrefix(cookie.Name, "macaroon-") {
- continue
- }
- ms, err := decodeMacaroonSlice(cookie.Value)
- if err != nil {
- // Ignore invalid macaroons.
- continue
- }
- mss = append(mss, ms)
- }
- return mss
-}
-
-// decodeMacaroonSlice decodes a base64-JSON-encoded slice of macaroons from
-// the given string.
-func decodeMacaroonSlice(value string) (macaroon.Slice, error) {
- data, err := macaroon.Base64Decode([]byte(value))
- if err != nil {
- return nil, errgo.NoteMask(err, "cannot base64-decode macaroons")
- }
- // TODO(rog) accept binary encoded macaroon cookies.
- var ms macaroon.Slice
- if err := json.Unmarshal(data, &ms); err != nil {
- return nil, errgo.NoteMask(err, "cannot unmarshal macaroons")
- }
- return ms, nil
-}
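-
-// A sketch of the inverse encoding (an illustrative assumption, not part of
-// the original source): a client holding a macaroon.Slice ms can present it
-// in the header rather than as a cookie:
-//
-//	data, err := json.Marshal(ms)
-//	if err == nil {
-//		req.Header.Add(MacaroonsHeader, base64.RawURLEncoding.EncodeToString(data))
-//	}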
diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/context_go17.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/context_go17.go
deleted file mode 100644
index 6ae98530..00000000
--- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/context_go17.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// +build go1.7
-
-package httpbakery
-
-import (
- "context"
- "net/http"
-)
-
-func contextFromRequest(req *http.Request) context.Context {
- return req.Context()
-}
diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/context_prego17.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/context_prego17.go
deleted file mode 100644
index aecca0d3..00000000
--- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/context_prego17.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// +build !go1.7
-
-package httpbakery
-
-import (
- "context"
- "net/http"
-)
-
-func contextFromRequest(req *http.Request) context.Context {
- return context.Background()
-}
diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/discharge.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/discharge.go
deleted file mode 100644
index fa88bfa1..00000000
--- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/discharge.go
+++ /dev/null
@@ -1,367 +0,0 @@
-package httpbakery
-
-import (
- "context"
- "encoding/base64"
- "net/http"
- "path"
- "unicode/utf8"
-
- "github.com/julienschmidt/httprouter"
- "gopkg.in/errgo.v1"
- "gopkg.in/httprequest.v1"
- "gopkg.in/macaroon.v2"
-
- "github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery"
- "github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checkers"
-)
-
-// ThirdPartyCaveatChecker is used to check third party caveats.
-// This interface is deprecated and included only for backward
-// compatibility; ThirdPartyCaveatCheckerP should be used instead.
-type ThirdPartyCaveatChecker interface {
- // CheckThirdPartyCaveat is like ThirdPartyCaveatCheckerP.CheckThirdPartyCaveat
- // except that it uses separate arguments instead of a struct arg.
- CheckThirdPartyCaveat(ctx context.Context, info *bakery.ThirdPartyCaveatInfo, req *http.Request, token *DischargeToken) ([]checkers.Caveat, error)
-}
-
-// ThirdPartyCaveatCheckerP is used to check third party caveats.
-// The "P" stands for "Params" - this was added after ThirdPartyCaveatChecker
-// which can't be removed without breaking backwards compatibility.
-type ThirdPartyCaveatCheckerP interface {
- // CheckThirdPartyCaveat is used to check whether a client
- // making the given request should be allowed a discharge for
- // the p.Info.Condition. On success, the caveat will be discharged,
- // with any returned caveats also added to the discharge
- // macaroon.
- //
- // The p.Token field, if non-nil, is a token obtained from
- // Interactor.Interact as the result of a discharge interaction
- // after an interaction required error.
- //
-	// Note that when used in the context of a discharge handler
- // created by Discharger, any returned errors will be marshaled
- // as documented in DischargeHandler.ErrorMapper.
- CheckThirdPartyCaveat(ctx context.Context, p ThirdPartyCaveatCheckerParams) ([]checkers.Caveat, error)
-}
-
-// ThirdPartyCaveatCheckerParams holds the parameters passed to
-// ThirdPartyCaveatCheckerP.CheckThirdPartyCaveat.
-type ThirdPartyCaveatCheckerParams struct {
- // Caveat holds information about the caveat being discharged.
- Caveat *bakery.ThirdPartyCaveatInfo
-
- // Token holds the discharge token provided by the client, if any.
- Token *DischargeToken
-
- // Req holds the HTTP discharge request.
- Request *http.Request
-
- // Response holds the HTTP response writer. Implementations
- // must not call its WriteHeader or Write methods.
- Response http.ResponseWriter
-}
-
-// ThirdPartyCaveatCheckerFunc implements ThirdPartyCaveatChecker
-// by calling a function.
-type ThirdPartyCaveatCheckerFunc func(ctx context.Context, req *http.Request, info *bakery.ThirdPartyCaveatInfo, token *DischargeToken) ([]checkers.Caveat, error)
-
-func (f ThirdPartyCaveatCheckerFunc) CheckThirdPartyCaveat(ctx context.Context, info *bakery.ThirdPartyCaveatInfo, req *http.Request, token *DischargeToken) ([]checkers.Caveat, error) {
- return f(ctx, req, info, token)
-}
-
-// ThirdPartyCaveatCheckerPFunc implements ThirdPartyCaveatCheckerP
-// by calling a function.
-type ThirdPartyCaveatCheckerPFunc func(ctx context.Context, p ThirdPartyCaveatCheckerParams) ([]checkers.Caveat, error)
-
-func (f ThirdPartyCaveatCheckerPFunc) CheckThirdPartyCaveat(ctx context.Context, p ThirdPartyCaveatCheckerParams) ([]checkers.Caveat, error) {
- return f(ctx, p)
-}
-
-// newDischargeClient returns a discharge client that addresses the
-// third party discharger at the given location URL and uses
-// the given client to make HTTP requests.
-//
-// If client is nil, http.DefaultClient is used.
-func newDischargeClient(location string, client httprequest.Doer) *dischargeClient {
- if client == nil {
- client = http.DefaultClient
- }
- return &dischargeClient{
- Client: httprequest.Client{
- BaseURL: location,
- Doer: client,
- UnmarshalError: unmarshalError,
- },
- }
-}
-
-// DischargerParams holds parameters for creating a new Discharger.
-type DischargerParams struct {
- // CheckerP is used to actually check the caveats.
- // This will be used in preference to Checker.
- CheckerP ThirdPartyCaveatCheckerP
-
- // Checker is used to actually check the caveats.
- // This should be considered deprecated and will be ignored if CheckerP is set.
- Checker ThirdPartyCaveatChecker
-
- // Key holds the key pair of the discharger.
- Key *bakery.KeyPair
-
- // Locator is used to find public keys when adding
- // third-party caveats on discharge macaroons.
- // If this is nil, no third party caveats may be added.
- Locator bakery.ThirdPartyLocator
-
- // ErrorToResponse is used to convert errors returned by the third
- // party caveat checker to the form that will be JSON-marshaled
- // on the wire. If zero, this defaults to ErrorToResponse.
- // If set, it should handle errors that it does not understand
- // by falling back to calling ErrorToResponse to ensure
- // that the standard bakery errors are marshaled in the expected way.
- ErrorToResponse func(ctx context.Context, err error) (int, interface{})
-}
-
-// Discharger represents a third-party caveat discharger that
-// can discharge caveats in an HTTP server.
-//
-// The name space served by dischargers is as follows.
-// All parameters can be provided either as URL attributes
-// or form attributes. The result is always formatted as a JSON
-// object.
-//
-// On failure, all endpoints return an error described by
-// the Error type.
-//
-// POST /discharge
-// params:
-// id: all-UTF-8 third party caveat id
-// id64: non-padded URL-base64 encoded caveat id
-// macaroon-id: (optional) id to give to discharge macaroon (defaults to id)
-// token: (optional) value of discharge token
-// token64: (optional) base64-encoded value of discharge token.
-// token-kind: (mandatory if token or token64 provided) discharge token kind.
-// result on success (http.StatusOK):
-// {
-// Macaroon *macaroon.Macaroon
-// }
-//
-// GET /publickey
-// result:
-// public key of service
-// expiry time of key
-type Discharger struct {
- p DischargerParams
-}
-
-// NewDischarger returns a new third-party caveat discharger
-// using the given parameters.
-func NewDischarger(p DischargerParams) *Discharger {
- if p.ErrorToResponse == nil {
- p.ErrorToResponse = ErrorToResponse
- }
- if p.Locator == nil {
- p.Locator = emptyLocator{}
- }
- if p.CheckerP == nil {
- p.CheckerP = ThirdPartyCaveatCheckerPFunc(func(ctx context.Context, cp ThirdPartyCaveatCheckerParams) ([]checkers.Caveat, error) {
- return p.Checker.CheckThirdPartyCaveat(ctx, cp.Caveat, cp.Request, cp.Token)
- })
- }
- return &Discharger{
- p: p,
- }
-}
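-
-// A minimal wiring sketch (assumed usage; the unconditional checker is for
-// illustration only):
-//
-//	key, err := bakery.GenerateKey()
-//	if err != nil {
-//		// handle error
-//	}
-//	d := NewDischarger(DischargerParams{
-//		Key: key,
-//		CheckerP: ThirdPartyCaveatCheckerPFunc(func(ctx context.Context, p ThirdPartyCaveatCheckerParams) ([]checkers.Caveat, error) {
-//			return nil, nil // discharge unconditionally
-//		}),
-//	})
-//	d.AddMuxHandlers(http.NewServeMux(), "/")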
-
-type emptyLocator struct{}
-
-func (emptyLocator) ThirdPartyInfo(ctx context.Context, loc string) (bakery.ThirdPartyInfo, error) {
- return bakery.ThirdPartyInfo{}, bakery.ErrNotFound
-}
-
-// AddMuxHandlers adds handlers to the given ServeMux to provide
-// a third-party caveat discharge service.
-func (d *Discharger) AddMuxHandlers(mux *http.ServeMux, rootPath string) {
- for _, h := range d.Handlers() {
- // Note: this only works because we don't have any wildcard
- // patterns in the discharger paths.
- mux.Handle(path.Join(rootPath, h.Path), mkHTTPHandler(h.Handle))
- }
-}
-
-// Handlers returns a slice of handlers that can handle a third-party
-// caveat discharge service when added to an httprouter.Router.
-// TODO provide some way of customizing the context so that
-// ErrorToResponse can see a request-specific context.
-func (d *Discharger) Handlers() []httprequest.Handler {
- f := func(p httprequest.Params) (dischargeHandler, context.Context, error) {
- return dischargeHandler{
- discharger: d,
- }, p.Context, nil
- }
- srv := httprequest.Server{
- ErrorMapper: d.p.ErrorToResponse,
- }
- return srv.Handlers(f)
-}
-
-//go:generate httprequest-generate-client github.com/go-macaroon-bakery/macaroon-bakery/v3-unstable/httpbakery dischargeHandler dischargeClient
-
-// dischargeHandler is the type used to define the httprequest handler
-// methods for a discharger.
-type dischargeHandler struct {
- discharger *Discharger
-}
-
-// dischargeRequest is a request to create a macaroon that discharges the
-// supplied third-party caveat. Discharging caveats will normally be
-// handled by the bakery; it would be unusual to use this type directly in
-// client software.
-type dischargeRequest struct {
- httprequest.Route `httprequest:"POST /discharge"`
- Id string `httprequest:"id,form,omitempty"`
- Id64 string `httprequest:"id64,form,omitempty"`
- Caveat string `httprequest:"caveat64,form,omitempty"`
- Token string `httprequest:"token,form,omitempty"`
- Token64 string `httprequest:"token64,form,omitempty"`
- TokenKind string `httprequest:"token-kind,form,omitempty"`
-}
-
-// dischargeResponse contains the response from a /discharge POST request.
-type dischargeResponse struct {
- Macaroon *bakery.Macaroon `json:",omitempty"`
-}
-
-// Discharge discharges a third party caveat.
-func (h dischargeHandler) Discharge(p httprequest.Params, r *dischargeRequest) (*dischargeResponse, error) {
- id, err := maybeBase64Decode(r.Id, r.Id64)
- if err != nil {
- return nil, errgo.Notef(err, "bad caveat id")
- }
- var caveat []byte
- if r.Caveat != "" {
- // Note that it's important that when r.Caveat is empty,
- // we leave DischargeParams.Caveat as nil (Base64Decode
- // always returns a non-nil byte slice).
- caveat1, err := macaroon.Base64Decode([]byte(r.Caveat))
- if err != nil {
-			return nil, errgo.Notef(err, "bad base64-encoded caveat")
- }
- caveat = caveat1
- }
- tokenVal, err := maybeBase64Decode(r.Token, r.Token64)
- if err != nil {
- return nil, errgo.Notef(err, "bad discharge token")
- }
- var token *DischargeToken
- if len(tokenVal) != 0 {
- if r.TokenKind == "" {
-			return nil, errgo.Newf("discharge token provided without token kind")
- }
- token = &DischargeToken{
- Kind: r.TokenKind,
- Value: tokenVal,
- }
- }
- m, err := bakery.Discharge(p.Context, bakery.DischargeParams{
- Id: id,
- Caveat: caveat,
- Key: h.discharger.p.Key,
- Checker: bakery.ThirdPartyCaveatCheckerFunc(
- func(ctx context.Context, cav *bakery.ThirdPartyCaveatInfo) ([]checkers.Caveat, error) {
- return h.discharger.p.CheckerP.CheckThirdPartyCaveat(ctx, ThirdPartyCaveatCheckerParams{
- Caveat: cav,
- Request: p.Request,
- Response: p.Response,
- Token: token,
- })
- },
- ),
- Locator: h.discharger.p.Locator,
- })
- if err != nil {
- return nil, errgo.NoteMask(err, "cannot discharge", errgo.Any)
- }
- return &dischargeResponse{m}, nil
-}
-
-// publicKeyRequest specifies the /publickey endpoint.
-type publicKeyRequest struct {
- httprequest.Route `httprequest:"GET /publickey"`
-}
-
-// publicKeyResponse is the response to a /publickey GET request.
-type publicKeyResponse struct {
- PublicKey *bakery.PublicKey
-}
-
-// dischargeInfoRequest specifies the /discharge/info endpoint.
-type dischargeInfoRequest struct {
- httprequest.Route `httprequest:"GET /discharge/info"`
-}
-
-// dischargeInfoResponse is the response to a /discharge/info GET
-// request.
-type dischargeInfoResponse struct {
- PublicKey *bakery.PublicKey
- Version bakery.Version
-}
-
-// PublicKey returns the public key of the discharge service.
-func (h dischargeHandler) PublicKey(*publicKeyRequest) (publicKeyResponse, error) {
- return publicKeyResponse{
- PublicKey: &h.discharger.p.Key.Public,
- }, nil
-}
-
-// DischargeInfo returns information on the discharger.
-func (h dischargeHandler) DischargeInfo(*dischargeInfoRequest) (dischargeInfoResponse, error) {
- return dischargeInfoResponse{
- PublicKey: &h.discharger.p.Key.Public,
- Version: bakery.LatestVersion,
- }, nil
-}
-
-// mkHTTPHandler converts an httprouter handler to an http.Handler,
-// assuming that the httprouter handler has no wildcard path
-// parameters.
-func mkHTTPHandler(h httprouter.Handle) http.Handler {
- return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
- h(w, req, nil)
- })
-}
-
-// maybeBase64Encode encodes b as is if it's
-// OK to be passed as a URL form parameter,
-// or encodes it as base64 otherwise.
-func maybeBase64Encode(b []byte) (s, s64 string) {
- if utf8.Valid(b) {
- valid := true
- for _, c := range b {
- if c < 32 || c == 127 {
- valid = false
- break
- }
- }
- if valid {
- return string(b), ""
- }
- }
- return "", base64.RawURLEncoding.EncodeToString(b)
-}
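-
-// Worked examples (derived from the checks above):
-//
-//	maybeBase64Encode([]byte("hello"))   // ("hello", "")
-//	maybeBase64Encode([]byte{0x01, 'x'}) // ("", "AXg"); the control byte forces base64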
-
-// maybeBase64Decode implements the inverse of maybeBase64Encode.
-func maybeBase64Decode(s, s64 string) ([]byte, error) {
- if s64 != "" {
- data, err := macaroon.Base64Decode([]byte(s64))
- if err != nil {
- return nil, errgo.Mask(err)
- }
- if len(data) == 0 {
- return nil, nil
- }
- return data, nil
- }
- return []byte(s), nil
-}
diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/dischargeclient_generated.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/dischargeclient_generated.go
deleted file mode 100644
index 3a738f38..00000000
--- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/dischargeclient_generated.go
+++ /dev/null
@@ -1,35 +0,0 @@
-// The code in this file was automatically generated by running httprequest-generate-client.
-// DO NOT EDIT
-
-package httpbakery
-
-import (
- "context"
-
- "gopkg.in/httprequest.v1"
-)
-
-type dischargeClient struct {
- Client httprequest.Client
-}
-
-// Discharge discharges a third party caveat.
-func (c *dischargeClient) Discharge(ctx context.Context, p *dischargeRequest) (*dischargeResponse, error) {
- var r *dischargeResponse
- err := c.Client.Call(ctx, p, &r)
- return r, err
-}
-
-// DischargeInfo returns information on the discharger.
-func (c *dischargeClient) DischargeInfo(ctx context.Context, p *dischargeInfoRequest) (dischargeInfoResponse, error) {
- var r dischargeInfoResponse
- err := c.Client.Call(ctx, p, &r)
- return r, err
-}
-
-// PublicKey returns the public key of the discharge service.
-func (c *dischargeClient) PublicKey(ctx context.Context, p *publicKeyRequest) (publicKeyResponse, error) {
- var r publicKeyResponse
- err := c.Client.Call(ctx, p, &r)
- return r, err
-}
diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/error.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/error.go
deleted file mode 100644
index 0ccc0794..00000000
--- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/error.go
+++ /dev/null
@@ -1,359 +0,0 @@
-package httpbakery
-
-import (
- "context"
- "encoding/json"
- "fmt"
- "net/http"
- "strconv"
-
- "gopkg.in/errgo.v1"
- "gopkg.in/httprequest.v1"
-
- "github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery"
- "github.com/go-macaroon-bakery/macaroon-bakery/v3/internal/httputil"
-)
-
-// ErrorCode holds an error code that classifies
-// an error returned from a bakery HTTP handler.
-type ErrorCode string
-
-func (e ErrorCode) Error() string {
- return string(e)
-}
-
-func (e ErrorCode) ErrorCode() ErrorCode {
- return e
-}
-
-const (
- ErrBadRequest = ErrorCode("bad request")
- ErrDischargeRequired = ErrorCode("macaroon discharge required")
- ErrInteractionRequired = ErrorCode("interaction required")
-	ErrInteractionMethodNotFound = ErrorCode("discharger does not provide a supported interaction method")
- ErrPermissionDenied = ErrorCode("permission denied")
-)
-
-var httpReqServer = httprequest.Server{
- ErrorMapper: ErrorToResponse,
-}
-
-// WriteError writes the given bakery error to w.
-func WriteError(ctx context.Context, w http.ResponseWriter, err error) {
- httpReqServer.WriteError(ctx, w, err)
-}
-
-// Error holds the type of a response from an httpbakery HTTP request,
-// marshaled as JSON.
-//
-// Note: Do not construct Error values with ErrDischargeRequired or
-// ErrInteractionRequired codes directly - use the
-// NewDischargeRequiredError or NewInteractionRequiredError
-// functions instead.
-type Error struct {
- Code ErrorCode `json:",omitempty"`
- Message string `json:",omitempty"`
- Info *ErrorInfo `json:",omitempty"`
-
- // version holds the protocol version that was used
- // to create the error (see NewDischargeRequiredError).
- version bakery.Version
-}
-
-// ErrorInfo holds additional information provided
-// by an error.
-type ErrorInfo struct {
- // Macaroon may hold a macaroon that, when
- // discharged, may allow access to a service.
- // This field is associated with the ErrDischargeRequired
- // error code.
- Macaroon *bakery.Macaroon `json:",omitempty"`
-
- // MacaroonPath holds the URL path to be associated
- // with the macaroon. The macaroon is potentially
- // valid for all URLs under the given path.
- // If it is empty, the macaroon will be associated with
- // the original URL from which the error was returned.
- MacaroonPath string `json:",omitempty"`
-
- // CookieNameSuffix holds the desired cookie name suffix to be
-	// associated with the macaroon. The actual name used will be
-	// ("macaroon-" + CookieNameSuffix). Clients may ignore this field -
- // older clients will always use ("macaroon-" +
- // macaroon.Signature() in hex).
- CookieNameSuffix string `json:",omitempty"`
-
- // The following fields are associated with the
- // ErrInteractionRequired error code.
-
- // InteractionMethods holds the set of methods that the
- // third party supports for completing the discharge.
- // See InteractionMethod for a more convenient
- // accessor method.
- InteractionMethods map[string]*json.RawMessage `json:",omitempty"`
-
- // LegacyVisitURL holds a URL that the client should visit
- // in a web browser to authenticate themselves.
-	// This is deprecated - it is superseded by the InteractionMethods
- // field.
- LegacyVisitURL string `json:"VisitURL,omitempty"`
-
- // LegacyWaitURL holds a URL that the client should visit
- // to acquire the discharge macaroon. A GET on
- // this URL will block until the client has authenticated,
- // and then it will return the discharge macaroon.
-	// This is deprecated - it is superseded by the InteractionMethods
- // field.
- LegacyWaitURL string `json:"WaitURL,omitempty"`
-}
-
-// SetInteraction sets the information for a particular
-// interaction kind to v. The error should be an interaction-required
-// error. This method will panic if v cannot be JSON-marshaled.
-// It is expected that interaction implementations will
-// implement type-safe wrappers for this method,
-// so you should not need to call it directly.
-func (e *Error) SetInteraction(kind string, v interface{}) {
- if e.Info == nil {
- e.Info = new(ErrorInfo)
- }
- if e.Info.InteractionMethods == nil {
- e.Info.InteractionMethods = make(map[string]*json.RawMessage)
- }
- data, err := json.Marshal(v)
- if err != nil {
- panic(err)
- }
- m := json.RawMessage(data)
- e.Info.InteractionMethods[kind] = &m
-}
-
-// InteractionMethod checks whether the error is an InteractionRequired error
-// that implements the method with the given name, and JSON-unmarshals the
-// method-specific data into x.
-func (e *Error) InteractionMethod(kind string, x interface{}) error {
- if e.Info == nil || e.Code != ErrInteractionRequired {
- return errgo.Newf("not an interaction-required error (code %v)", e.Code)
- }
- entry := e.Info.InteractionMethods[kind]
- if entry == nil {
- return errgo.WithCausef(nil, ErrInteractionMethodNotFound, "interaction method %q not found", kind)
- }
- if err := json.Unmarshal(*entry, x); err != nil {
- return errgo.Notef(err, "cannot unmarshal data for interaction method %q", kind)
- }
- return nil
-}
-
-func (e *Error) Error() string {
- return e.Message
-}
-
-func (e *Error) ErrorCode() ErrorCode {
- return e.Code
-}
-
-// ErrorInfo returns additional information
-// about the error.
-// TODO return interface{} here?
-func (e *Error) ErrorInfo() *ErrorInfo {
- return e.Info
-}
-
-// ErrorToResponse returns the HTTP status and an error body to be
-// marshaled as JSON for the given error. This allows a third party
-// package to integrate bakery errors into their error responses when
-// they encounter an error with a *bakery.Error cause.
-func ErrorToResponse(ctx context.Context, err error) (int, interface{}) {
- errorBody := errorResponseBody(err)
- var body interface{} = errorBody
- status := http.StatusInternalServerError
- switch errorBody.Code {
- case ErrBadRequest:
- status = http.StatusBadRequest
- case ErrPermissionDenied:
- status = http.StatusUnauthorized
- case ErrDischargeRequired, ErrInteractionRequired:
- switch errorBody.version {
- case bakery.Version0:
- status = http.StatusProxyAuthRequired
- case bakery.Version1, bakery.Version2, bakery.Version3:
- status = http.StatusUnauthorized
- body = httprequest.CustomHeader{
- Body: body,
- SetHeaderFunc: setAuthenticateHeader,
- }
- default:
- panic(fmt.Sprintf("out of range version number %v", errorBody.version))
- }
- }
- return status, body
-}
-
-func setAuthenticateHeader(h http.Header) {
- h.Set("WWW-Authenticate", "Macaroon")
-}
-
-type errorInfoer interface {
- ErrorInfo() *ErrorInfo
-}
-
-type errorCoder interface {
- ErrorCode() ErrorCode
-}
-
-// errorResponseBody returns an appropriate error
-// response body for the provided error.
-func errorResponseBody(err error) *Error {
- var errResp Error
- cause := errgo.Cause(err)
- if cause, ok := cause.(*Error); ok {
- // It's an Error already. Preserve the wrapped
- // error message but copy everything else.
- errResp = *cause
- errResp.Message = err.Error()
- return &errResp
- }
-
-	// It's not an *Error. Preserve as much info as
- // we can find.
- errResp.Message = err.Error()
- if coder, ok := cause.(errorCoder); ok {
- errResp.Code = coder.ErrorCode()
- }
- if infoer, ok := cause.(errorInfoer); ok {
- errResp.Info = infoer.ErrorInfo()
- }
- return &errResp
-}
-
-// NewInteractionRequiredError returns an error of type *Error
-// that requests an interaction from the client in response
-// to the given request. The originalErr value describes the original
-// error - if it is nil, a default message will be provided.
-//
-// This function should be used in preference to creating the Error value
-// directly, as it sets the bakery protocol version correctly in the error.
-//
-// The returned error does not support any interaction kinds.
-// Use kind-specific SetInteraction methods (for example
-// WebBrowserInteractor.SetInteraction) to add supported
-// interaction kinds.
-//
-// Note that WebBrowserInteractor.SetInteraction should always be called
-// for legacy clients to maintain backwards compatibility.
-func NewInteractionRequiredError(originalErr error, req *http.Request) *Error {
- if originalErr == nil {
- originalErr = ErrInteractionRequired
- }
- return &Error{
- Message: originalErr.Error(),
- version: RequestVersion(req),
- Code: ErrInteractionRequired,
- }
-}
-
-type DischargeRequiredErrorParams struct {
- // Macaroon holds the macaroon that needs to be discharged
- // by the client.
- Macaroon *bakery.Macaroon
-
- // OriginalError holds the reason that the discharge-required
- // error was created. If it's nil, ErrDischargeRequired will
- // be used.
- OriginalError error
-
- // CookiePath holds the path for the client to give the cookie
- // holding the discharged macaroon. If it's empty, then a
- // relative path from the request URL path to / will be used if
- // Request is provided, or "/" otherwise.
- CookiePath string
-
- // CookieNameSuffix holds the suffix for the client
- // to give the cookie holding the discharged macaroon
- // (after the "macaroon-" prefix).
- // If it's empty, "auth" will be used.
- CookieNameSuffix string
-
- // Request holds the request that the error is in response to.
- // It is used to form the cookie path if CookiePath is empty.
- Request *http.Request
-}
-
-// NewDischargeRequiredError returns an error of type *Error
-// that contains a macaroon to the client and acts as a request that the
-// macaroon be discharged to authorize the request.
-//
-// The client is responsible for discharging the macaroon and
-// storing it as a cookie (or including it in a Macaroons header)
-// to be used for the subsequent request.
-func NewDischargeRequiredError(p DischargeRequiredErrorParams) error {
- if p.OriginalError == nil {
- p.OriginalError = ErrDischargeRequired
- }
- if p.CookiePath == "" {
- p.CookiePath = "/"
- if p.Request != nil {
- path, err := httputil.RelativeURLPath(p.Request.URL.Path, "/")
- if err == nil {
- p.CookiePath = path
- }
- }
- }
- if p.CookieNameSuffix == "" {
- p.CookieNameSuffix = "auth"
- }
- return &Error{
- version: p.Macaroon.Version(),
- Message: p.OriginalError.Error(),
- Code: ErrDischargeRequired,
- Info: &ErrorInfo{
- Macaroon: p.Macaroon,
- MacaroonPath: p.CookiePath,
- CookieNameSuffix: p.CookieNameSuffix,
- },
- }
-}
-
-// BakeryProtocolHeader is the header that HTTP clients should set
-// to determine the bakery protocol version. If it is 0 or missing,
-// a discharge-required error response will be returned with HTTP status 407;
-// if it is 1, the response will have status 401 with the WWW-Authenticate
-// header set to "Macaroon".
-const BakeryProtocolHeader = "Bakery-Protocol-Version"
-
-// RequestVersion determines the bakery protocol version from a client
-// request. If the protocol cannot be determined, or is invalid, the
-// original version of the protocol is used. If a later version is
-// found, the latest known version is used, which is OK because versions
-// are backward compatible.
-//
-// TODO as there are no known version 0 clients, default to version 1
-// instead.
-func RequestVersion(req *http.Request) bakery.Version {
- vs := req.Header.Get(BakeryProtocolHeader)
- if vs == "" {
- // No header - use backward compatibility mode.
- return bakery.Version0
- }
- x, err := strconv.Atoi(vs)
- if err != nil || x < 0 {
- // Badly formed header - use backward compatibility mode.
- return bakery.Version0
- }
- v := bakery.Version(x)
- if v > bakery.LatestVersion {
- // Later version than we know about - use the
- // latest version that we can.
- return bakery.LatestVersion
- }
- return v
-}
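-
-// For example (behavior implied by the checks above):
-//
-//	no header or empty value        -> bakery.Version0 (407 response)
-//	Bakery-Protocol-Version: 1      -> bakery.Version1 (401 + WWW-Authenticate)
-//	Bakery-Protocol-Version: 99     -> bakery.LatestVersion (clamped)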
-
-func isDischargeRequiredError(err error) bool {
- respErr, ok := errgo.Cause(err).(*Error)
- if !ok {
- return false
- }
- return respErr.Code == ErrDischargeRequired
-}
diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/keyring.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/keyring.go
deleted file mode 100644
index b22610bb..00000000
--- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/keyring.go
+++ /dev/null
@@ -1,113 +0,0 @@
-package httpbakery
-
-import (
- "context"
- "net/http"
- "net/url"
-
- "gopkg.in/errgo.v1"
- "gopkg.in/httprequest.v1"
-
- "github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery"
-)
-
-var _ bakery.ThirdPartyLocator = (*ThirdPartyLocator)(nil)
-
-// NewThirdPartyLocator returns a new third party
-// locator that uses the given client to find
-// information about third parties and
-// uses the given cache as a backing.
-//
-// If cache is nil, a new cache will be created.
-//
-// If client is nil, http.DefaultClient will be used.
-func NewThirdPartyLocator(client httprequest.Doer, cache *bakery.ThirdPartyStore) *ThirdPartyLocator {
- if cache == nil {
- cache = bakery.NewThirdPartyStore()
- }
- if client == nil {
- client = http.DefaultClient
- }
- return &ThirdPartyLocator{
- client: client,
- cache: cache,
- }
-}
-
-// AllowInsecureThirdPartyLocator holds whether ThirdPartyLocator allows
-// insecure HTTP connections for fetching third party information.
-// It is provided for testing purposes and should not be used
-// in production code.
-var AllowInsecureThirdPartyLocator = false
-
-// ThirdPartyLocator represents a locator that can interrogate
-// third party discharge services for information. By default it refuses
-// to use insecure URLs.
-type ThirdPartyLocator struct {
- client httprequest.Doer
- allowInsecure bool
- cache *bakery.ThirdPartyStore
-}
-
-// AllowInsecure allows insecure URLs. This can be useful
-// for testing purposes. See also AllowInsecureThirdPartyLocator.
-func (kr *ThirdPartyLocator) AllowInsecure() {
- kr.allowInsecure = true
-}
-
-// ThirdPartyLocator implements bakery.ThirdPartyLocator
-// by first looking in the backing cache and, if that fails,
-// making an HTTP request to find the information associated
-// with the given discharge location.
-//
-// It refuses to fetch information from non-HTTPS URLs.
-func (kr *ThirdPartyLocator) ThirdPartyInfo(ctx context.Context, loc string) (bakery.ThirdPartyInfo, error) {
-	// If the cache has an entry, we can use it regardless of URL scheme.
- // This allows entries for notionally insecure URLs to be added by other means (for
- // example via a config file).
- info, err := kr.cache.ThirdPartyInfo(ctx, loc)
- if err == nil {
- return info, nil
- }
- u, err := url.Parse(loc)
- if err != nil {
- return bakery.ThirdPartyInfo{}, errgo.Notef(err, "invalid discharge URL %q", loc)
- }
- if u.Scheme != "https" && !kr.allowInsecure && !AllowInsecureThirdPartyLocator {
- return bakery.ThirdPartyInfo{}, errgo.Newf("untrusted discharge URL %q", loc)
- }
- info, err = ThirdPartyInfoForLocation(ctx, kr.client, loc)
- if err != nil {
- return bakery.ThirdPartyInfo{}, errgo.Mask(err)
- }
- kr.cache.AddInfo(loc, info)
- return info, nil
-}
-
-// ThirdPartyInfoForLocation returns information on the third party
-// discharge server running at the given location URL. Note that this is
-// insecure if an http: URL scheme is used. If client is nil,
-// http.DefaultClient will be used.
-func ThirdPartyInfoForLocation(ctx context.Context, client httprequest.Doer, url string) (bakery.ThirdPartyInfo, error) {
- dclient := newDischargeClient(url, client)
- info, err := dclient.DischargeInfo(ctx, &dischargeInfoRequest{})
- if err == nil {
- return bakery.ThirdPartyInfo{
- PublicKey: *info.PublicKey,
- Version: info.Version,
- }, nil
- }
- derr, ok := errgo.Cause(err).(*httprequest.DecodeResponseError)
- if !ok || derr.Response.StatusCode != http.StatusNotFound {
- return bakery.ThirdPartyInfo{}, errgo.Mask(err)
- }
- // The new endpoint isn't there, so try the old one.
- pkResp, err := dclient.PublicKey(ctx, &publicKeyRequest{})
- if err != nil {
- return bakery.ThirdPartyInfo{}, errgo.Mask(err)
- }
- return bakery.ThirdPartyInfo{
- PublicKey: *pkResp.PublicKey,
- Version: bakery.Version1,
- }, nil
-}
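-
-// A minimal wiring sketch (assumed usage, not part of the original source):
-//
-//	locator := NewThirdPartyLocator(nil, nil) // http.DefaultClient and a fresh cache
-//	b := bakery.New(bakery.BakeryParams{
-//		Locator: locator,
-//		// other parameters elided
-//	})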
diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/oven.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/oven.go
deleted file mode 100644
index c301ad13..00000000
--- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/oven.go
+++ /dev/null
@@ -1,88 +0,0 @@
-package httpbakery
-
-import (
- "context"
- "net/http"
- "time"
-
- "gopkg.in/errgo.v1"
-
- "github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery"
- "github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checkers"
-)
-
-// Oven is like bakery.Oven except it provides a method for
-// translating errors returned by bakery.AuthChecker into
-// errors suitable for passing to WriteError.
-type Oven struct {
- // Oven holds the bakery Oven used to create
- // new macaroons to put in discharge-required errors.
- *bakery.Oven
-
- // AuthnExpiry holds the expiry time of macaroons that
- // are created for authentication. As these are generally
- // applicable to all endpoints in an API, this is usually
- // longer than AuthzExpiry. If this is zero, DefaultAuthnExpiry
- // will be used.
- AuthnExpiry time.Duration
-
- // AuthzExpiry holds the expiry time of macaroons that are
- // created for authorization. As these are generally applicable
- // to specific operations, they generally don't need
- // a long lifespan, so this is usually shorter than AuthnExpiry.
- // If this is zero, DefaultAuthzExpiry will be used.
- AuthzExpiry time.Duration
-}
-
-// Default expiry times for macaroons created by Oven.Error.
-const (
- DefaultAuthnExpiry = 7 * 24 * time.Hour
- DefaultAuthzExpiry = 5 * time.Minute
-)
-
-// Error processes an error as returned from bakery.AuthChecker
-// into an error suitable for returning as a response to req
-// with WriteError.
-//
-// Specifically, it translates bakery.ErrPermissionDenied into
-// ErrPermissionDenied and bakery.DischargeRequiredError
-// into an Error with an ErrDischargeRequired code, using
-// oven.Oven to mint the macaroon in it.
-func (oven *Oven) Error(ctx context.Context, req *http.Request, err error) error {
- cause := errgo.Cause(err)
- if cause == bakery.ErrPermissionDenied {
- return errgo.WithCausef(err, ErrPermissionDenied, "")
- }
- derr, ok := cause.(*bakery.DischargeRequiredError)
- if !ok {
- return errgo.Mask(err)
- }
- // TODO it's possible to have more than two levels here - think
- // about some naming scheme for the cookies that allows that.
- expiryDuration := oven.AuthzExpiry
- if expiryDuration == 0 {
- expiryDuration = DefaultAuthzExpiry
- }
- cookieName := "authz"
- if derr.ForAuthentication {
- // Authentication macaroons are a bit different, so use
- // a different cookie name so both can be presented together.
- cookieName = "authn"
- expiryDuration = oven.AuthnExpiry
- if expiryDuration == 0 {
- expiryDuration = DefaultAuthnExpiry
- }
- }
- m, err := oven.Oven.NewMacaroon(ctx, RequestVersion(req), derr.Caveats, derr.Ops...)
- if err != nil {
- return errgo.Notef(err, "cannot mint new macaroon")
- }
- if err := m.AddCaveat(ctx, checkers.TimeBeforeCaveat(time.Now().Add(expiryDuration)), nil, nil); err != nil {
- return errgo.Notef(err, "cannot add time-before caveat")
- }
- return NewDischargeRequiredError(DischargeRequiredErrorParams{
- Macaroon: m,
- CookieNameSuffix: cookieName,
- Request: req,
- })
-}
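-
-// A handler-side sketch (assumed usage; checker, w, req, ctx and op are
-// illustrative names, not from the original source):
-//
-//	_, err := checker.Auth(RequestMacaroons(req)...).Allow(ctx, op)
-//	if err != nil {
-//		WriteError(ctx, w, oven.Error(ctx, req, err))
-//		return
-//	}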
diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/request.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/request.go
deleted file mode 100644
index 2f936d7c..00000000
--- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/request.go
+++ /dev/null
@@ -1,197 +0,0 @@
-package httpbakery
-
-import (
- "bytes"
- "context"
- "io"
- "net/http"
- "reflect"
- "sync"
- "sync/atomic"
-
- "gopkg.in/errgo.v1"
-)
-
-// newRetryableRequest wraps an HTTP request so that it can
-// be retried without incurring race conditions and reports
-// whether the request can be retried.
-// The client instance will be used to make the request
-// when the do method is called.
-//
-// Because http.NewRequest often wraps its request bodies
-// with ioutil.NopCloser, which hides whether the body is
-// seekable, we extract the seeker from inside the nopCloser if
-// possible.
-//
-// We also work around Go issue 12796 by preventing concurrent
-// reads to the underlying reader after the request body has
-// been closed by Client.Do.
-//
-// The returned value should be closed after use.
-func newRetryableRequest(client *http.Client, req *http.Request) (*retryableRequest, bool) {
- if req.Body == nil {
- return &retryableRequest{
- client: client,
- ref: 1,
- req: req,
- origCookie: req.Header.Get("Cookie"),
- }, true
- }
- body := seekerFromBody(req.Body)
- if body == nil {
- return nil, false
- }
- return &retryableRequest{
- client: client,
- ref: 1,
- req: req,
- body: body,
- origCookie: req.Header.Get("Cookie"),
- }, true
-}
-
-type retryableRequest struct {
- client *http.Client
- ref int32
- origCookie string
- body readSeekCloser
- readStopper *readStopper
- req *http.Request
-}
-
-// do performs the HTTP request.
-func (rreq *retryableRequest) do(ctx context.Context) (*http.Response, error) {
- req, err := rreq.prepare()
- if err != nil {
- return nil, errgo.Mask(err)
- }
- return rreq.client.Do(req.WithContext(ctx))
-}
-
-// prepare returns a new HTTP request object
-// by copying the original request and seeking
-// back to the start of the original body if needed.
-//
-// It needs to make a copy of the request because
-// the HTTP code can access the Request.Body field
-// after Client.Do has returned, which means we can't
-// replace it for the second request.
-func (rreq *retryableRequest) prepare() (*http.Request, error) {
- req := new(http.Request)
- *req = *rreq.req
- // Make sure that the original cookie header is still in place
- // so that we only end up with the cookies that are actually
- // added by the HTTP cookie logic, and not the ones that were
- // added in previous requests too.
- req.Header.Set("Cookie", rreq.origCookie)
- if rreq.body == nil {
- // No need for any of the seek shenanigans.
- return req, nil
- }
- if rreq.readStopper != nil {
- // We've made a previous request. Close its request
- // body so it can't interfere with the new request's body
- // and then seek back to the start.
- rreq.readStopper.Close()
- if _, err := rreq.body.Seek(0, 0); err != nil {
- return nil, errgo.Notef(err, "cannot seek to start of request body")
- }
- }
- atomic.AddInt32(&rreq.ref, 1)
- // Replace the request body with a new readStopper so that
- // we can stop a second request from interfering with current
- // request's body.
- rreq.readStopper = &readStopper{
- rreq: rreq,
- r: rreq.body,
- }
- req.Body = rreq.readStopper
- return req, nil
-}
-
-// close closes the request. It closes the underlying reader
-// when all references have gone.
-func (req *retryableRequest) close() error {
- if atomic.AddInt32(&req.ref, -1) == 0 && req.body != nil {
- // We've closed it for the last time, so actually close
- // the original body.
- return req.body.Close()
- }
- return nil
-}
-
-// readStopper works around an issue with the net/http
-// package (see http://golang.org/issue/12796).
-// Because the first HTTP request might not have finished
-// reading from its body when it returns, we need to
-// ensure that the second request does not race on Read,
-// so this type implements a Reader that prevents all Read
-// calls to the underlying Reader after Close has been called.
-type readStopper struct {
- rreq *retryableRequest
- mu sync.Mutex
- r io.ReadSeeker
-}
-
-func (r *readStopper) Read(buf []byte) (int, error) {
- r.mu.Lock()
- defer r.mu.Unlock()
- if r.r == nil {
- // Note: we have to use io.EOF here because otherwise
- // another connection can in rare circumstances be
- // polluted by the error returned here. Although this
- // means the file may appear truncated to the server,
- // that shouldn't matter because the body will only
- // be closed after the server has replied.
- return 0, io.EOF
- }
- return r.r.Read(buf)
-}
-
-func (r *readStopper) Close() error {
- r.mu.Lock()
- alreadyClosed := r.r == nil
- r.r = nil
- r.mu.Unlock()
- if alreadyClosed {
- return nil
- }
- return r.rreq.close()
-}
-
-var nopCloserType = reflect.TypeOf(io.NopCloser(nil))
-var nopCloserWriterToType = reflect.TypeOf(io.NopCloser(bytes.NewReader([]byte{})))
-
-type readSeekCloser interface {
- io.ReadSeeker
- io.Closer
-}
-
-// seekerFromBody tries to obtain a seekable reader
-// from the given request body.
-func seekerFromBody(r io.ReadCloser) readSeekCloser {
- if r, ok := r.(readSeekCloser); ok {
- return r
- }
- rv := reflect.ValueOf(r)
- if rv.Type() != nopCloserType && rv.Type() != nopCloserWriterToType {
- return nil
- }
- // It's a value created by nopCloser. Extract the
- // underlying Reader. Note that this works
-	// because the io.nopCloser type exports
- // its Reader field.
- rs, ok := rv.Field(0).Interface().(io.ReadSeeker)
- if !ok {
- return nil
- }
- return readSeekerWithNopClose{rs}
-}
-
-type readSeekerWithNopClose struct {
- io.ReadSeeker
-}
-
-func (r readSeekerWithNopClose) Close() error {
- return nil
-}
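-
-// A small sketch of what makes a request retryable (assumed usage;
-// url, payload and streamBody are illustrative names):
-//
-//	req, _ := http.NewRequest("POST", url, bytes.NewReader(payload))
-//	rreq, ok := newRetryableRequest(client, req)
-//	// ok == true: NewRequest wraps *bytes.Reader in a NopCloser, and
-//	// seekerFromBody recovers the underlying io.ReadSeeker.
-//
-//	req2, _ := http.NewRequest("POST", url, streamBody) // body with no Seek method
-//	_, ok2 := newRetryableRequest(client, req2)
-//	// ok2 == false: the body cannot be rewound for a retry.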
diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/visitor.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/visitor.go
deleted file mode 100644
index 047ebbad..00000000
--- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/visitor.go
+++ /dev/null
@@ -1,68 +0,0 @@
-package httpbakery
-
-import (
- "context"
- "net/http"
- "net/url"
-
- "gopkg.in/errgo.v1"
- "gopkg.in/httprequest.v1"
-
- "github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery"
-)
-
-// TODO(rog) rename this file.
-
-// legacyGetInteractionMethods queries a URL as found in an
-// ErrInteractionRequired VisitURL field to find available interaction
-// methods.
-//
-// It does this by sending a GET request to the URL with the Accept
-// header set to "application/json" and parsing the resulting
-// response as a map[string]string.
-//
-// It uses the given Doer to execute the HTTP GET request.
-func legacyGetInteractionMethods(ctx context.Context, logger bakery.Logger, client httprequest.Doer, u *url.URL) map[string]*url.URL {
- methodURLs, err := legacyGetInteractionMethods1(ctx, client, u)
- if err != nil {
- // When a discharger doesn't support retrieving interaction methods,
- // we expect to get an error, because it's probably returning an HTML
- // page not JSON.
- if logger != nil {
- logger.Debugf(ctx, "ignoring error: cannot get interaction methods: %v; %s", err, errgo.Details(err))
- }
- methodURLs = make(map[string]*url.URL)
- }
- if methodURLs["interactive"] == nil {
- // There's no "interactive" method returned, but we know
- // the server does actually support it, because all dischargers
- // are required to, so fill it in with the original URL.
- methodURLs["interactive"] = u
- }
- return methodURLs
-}
-
-func legacyGetInteractionMethods1(ctx context.Context, client httprequest.Doer, u *url.URL) (map[string]*url.URL, error) {
- httpReqClient := &httprequest.Client{
- Doer: client,
- }
- req, err := http.NewRequest("GET", u.String(), nil)
- if err != nil {
- return nil, errgo.Notef(err, "cannot create request")
- }
- req.Header.Set("Accept", "application/json")
- var methodURLStrs map[string]string
- if err := httpReqClient.Do(ctx, req, &methodURLStrs); err != nil {
- return nil, errgo.Mask(err)
- }
- // Make all the URLs relative to the request URL.
- methodURLs := make(map[string]*url.URL)
- for m, urlStr := range methodURLStrs {
- relURL, err := url.Parse(urlStr)
- if err != nil {
- return nil, errgo.Notef(err, "invalid URL for interaction method %q", m)
- }
- methodURLs[m] = u.ResolveReference(relURL)
- }
- return methodURLs, nil
-}
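-
-// For example, a legacy discharger might answer the JSON GET with a body
-// such as (paths are illustrative):
-//
-//	{"interactive": "/visit", "agent": "/agent-visit"}
-//
-// and each URL is then resolved relative to the queried visit URL.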
diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/internal/httputil/relativeurl.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/internal/httputil/relativeurl.go
deleted file mode 100644
index a9431fa6..00000000
--- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/internal/httputil/relativeurl.go
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2016 Canonical Ltd.
-// Licensed under the LGPLv3, see LICENCE file for details.
-
-// Note: this code was copied from github.com/juju/utils.
-
-// Package httputil holds utility functions related to net/http.
-package httputil
-
-import (
- "errors"
- "strings"
-)
-
-// RelativeURLPath returns a relative URL path that is lexically
-// equivalent to targpath when interpreted by url.URL.ResolveReference.
-// On success, the returned path will always be non-empty and relative
-// to basePath, even if basePath and targPath share no elements.
-//
-// It is assumed that both basePath and targPath are normalized
-// (have no . or .. elements).
-//
-// An error is returned if basePath or targPath are not absolute paths.
-func RelativeURLPath(basePath, targPath string) (string, error) {
- if !strings.HasPrefix(basePath, "/") {
- return "", errors.New("non-absolute base URL")
- }
- if !strings.HasPrefix(targPath, "/") {
- return "", errors.New("non-absolute target URL")
- }
- baseParts := strings.Split(basePath, "/")
- targParts := strings.Split(targPath, "/")
-
- // For the purposes of dotdot, the last element of
- // the paths are irrelevant. We save the last part
- // of the target path for later.
- lastElem := targParts[len(targParts)-1]
- baseParts = baseParts[0 : len(baseParts)-1]
- targParts = targParts[0 : len(targParts)-1]
-
- // Find the common prefix between the two paths:
- var i int
- for ; i < len(baseParts); i++ {
- if i >= len(targParts) || baseParts[i] != targParts[i] {
- break
- }
- }
- dotdotCount := len(baseParts) - i
- targOnly := targParts[i:]
- result := make([]string, 0, dotdotCount+len(targOnly)+1)
- for i := 0; i < dotdotCount; i++ {
- result = append(result, "..")
- }
- result = append(result, targOnly...)
- result = append(result, lastElem)
- final := strings.Join(result, "/")
- if final == "" {
- // If the final result is empty, the last element must
- // have been empty, so the target was slash terminated
- // and there were no previous elements, so "."
- // is appropriate.
- final = "."
- }
- return final, nil
-}
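-
-// Worked examples (derived from the algorithm above):
-//
-//	RelativeURLPath("/a/b/c", "/a/b/x") // "x"
-//	RelativeURLPath("/a/b/c", "/a/d/e") // "../d/e"
-//	RelativeURLPath("/a/b/", "/a/b/")   // "."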
diff --git a/vendor/github.com/go-macaroon-bakery/macaroonpb/LICENSE b/vendor/github.com/go-macaroon-bakery/macaroonpb/LICENSE
deleted file mode 100644
index 67c4fb56..00000000
--- a/vendor/github.com/go-macaroon-bakery/macaroonpb/LICENSE
+++ /dev/null
@@ -1,187 +0,0 @@
-Copyright © 2014, Roger Peppe, Canonical Inc.
-
-This software is licensed under the LGPLv3, included below.
-
-As a special exception to the GNU Lesser General Public License version 3
-("LGPL3"), the copyright holders of this Library give you permission to
-convey to a third party a Combined Work that links statically or dynamically
-to this Library without providing any Minimal Corresponding Source or
-Minimal Application Code as set out in 4d or providing the installation
-information set out in section 4e, provided that you comply with the other
-provisions of LGPL3 and provided that you meet, for the Application the
-terms and conditions of the license(s) which apply to the Application.
-
-Except as stated in this special exception, the provisions of LGPL3 will
-continue to comply in full to this Library. If you modify this Library, you
-may apply this exception to your version of this Library, but you are not
-obliged to do so. If you do not wish to do so, delete this exception
-statement from your version. This exception does not (and cannot) modify any
-license terms which apply to the Application, with which you must still
-comply.
-
-
- GNU LESSER GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc.
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
-
- This version of the GNU Lesser General Public License incorporates
-the terms and conditions of version 3 of the GNU General Public
-License, supplemented by the additional permissions listed below.
-
- 0. Additional Definitions.
-
- As used herein, "this License" refers to version 3 of the GNU Lesser
-General Public License, and the "GNU GPL" refers to version 3 of the GNU
-General Public License.
-
- "The Library" refers to a covered work governed by this License,
-other than an Application or a Combined Work as defined below.
-
- An "Application" is any work that makes use of an interface provided
-by the Library, but which is not otherwise based on the Library.
-Defining a subclass of a class defined by the Library is deemed a mode
-of using an interface provided by the Library.
-
- A "Combined Work" is a work produced by combining or linking an
-Application with the Library. The particular version of the Library
-with which the Combined Work was made is also called the "Linked
-Version".
-
- The "Minimal Corresponding Source" for a Combined Work means the
-Corresponding Source for the Combined Work, excluding any source code
-for portions of the Combined Work that, considered in isolation, are
-based on the Application, and not on the Linked Version.
-
- The "Corresponding Application Code" for a Combined Work means the
-object code and/or source code for the Application, including any data
-and utility programs needed for reproducing the Combined Work from the
-Application, but excluding the System Libraries of the Combined Work.
-
- 1. Exception to Section 3 of the GNU GPL.
-
- You may convey a covered work under sections 3 and 4 of this License
-without being bound by section 3 of the GNU GPL.
-
- 2. Conveying Modified Versions.
-
- If you modify a copy of the Library, and, in your modifications, a
-facility refers to a function or data to be supplied by an Application
-that uses the facility (other than as an argument passed when the
-facility is invoked), then you may convey a copy of the modified
-version:
-
- a) under this License, provided that you make a good faith effort to
- ensure that, in the event an Application does not supply the
- function or data, the facility still operates, and performs
- whatever part of its purpose remains meaningful, or
-
- b) under the GNU GPL, with none of the additional permissions of
- this License applicable to that copy.
-
- 3. Object Code Incorporating Material from Library Header Files.
-
- The object code form of an Application may incorporate material from
-a header file that is part of the Library. You may convey such object
-code under terms of your choice, provided that, if the incorporated
-material is not limited to numerical parameters, data structure
-layouts and accessors, or small macros, inline functions and templates
-(ten or fewer lines in length), you do both of the following:
-
- a) Give prominent notice with each copy of the object code that the
- Library is used in it and that the Library and its use are
- covered by this License.
-
- b) Accompany the object code with a copy of the GNU GPL and this license
- document.
-
- 4. Combined Works.
-
- You may convey a Combined Work under terms of your choice that,
-taken together, effectively do not restrict modification of the
-portions of the Library contained in the Combined Work and reverse
-engineering for debugging such modifications, if you also do each of
-the following:
-
- a) Give prominent notice with each copy of the Combined Work that
- the Library is used in it and that the Library and its use are
- covered by this License.
-
- b) Accompany the Combined Work with a copy of the GNU GPL and this license
- document.
-
- c) For a Combined Work that displays copyright notices during
- execution, include the copyright notice for the Library among
- these notices, as well as a reference directing the user to the
- copies of the GNU GPL and this license document.
-
- d) Do one of the following:
-
- 0) Convey the Minimal Corresponding Source under the terms of this
- License, and the Corresponding Application Code in a form
- suitable for, and under terms that permit, the user to
- recombine or relink the Application with a modified version of
- the Linked Version to produce a modified Combined Work, in the
- manner specified by section 6 of the GNU GPL for conveying
- Corresponding Source.
-
- 1) Use a suitable shared library mechanism for linking with the
- Library. A suitable mechanism is one that (a) uses at run time
- a copy of the Library already present on the user's computer
- system, and (b) will operate properly with a modified version
- of the Library that is interface-compatible with the Linked
- Version.
-
- e) Provide Installation Information, but only if you would otherwise
- be required to provide such information under section 6 of the
- GNU GPL, and only to the extent that such information is
- necessary to install and execute a modified version of the
- Combined Work produced by recombining or relinking the
- Application with a modified version of the Linked Version. (If
- you use option 4d0, the Installation Information must accompany
- the Minimal Corresponding Source and Corresponding Application
- Code. If you use option 4d1, you must provide the Installation
- Information in the manner specified by section 6 of the GNU GPL
- for conveying Corresponding Source.)
-
- 5. Combined Libraries.
-
- You may place library facilities that are a work based on the
-Library side by side in a single library together with other library
-facilities that are not Applications and are not covered by this
-License, and convey such a combined library under terms of your
-choice, if you do both of the following:
-
- a) Accompany the combined library with a copy of the same work based
- on the Library, uncombined with any other library facilities,
- conveyed under the terms of this License.
-
- b) Give prominent notice with the combined library that part of it
- is a work based on the Library, and explaining where to find the
- accompanying uncombined form of the same work.
-
- 6. Revised Versions of the GNU Lesser General Public License.
-
- The Free Software Foundation may publish revised and/or new versions
-of the GNU Lesser General Public License from time to time. Such new
-versions will be similar in spirit to the present version, but may
-differ in detail to address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Library as you received it specifies that a certain numbered version
-of the GNU Lesser General Public License "or any later version"
-applies to it, you have the option of following the terms and
-conditions either of that published version or of any later version
-published by the Free Software Foundation. If the Library as you
-received it does not specify a version number of the GNU Lesser
-General Public License, you may choose any version of the GNU Lesser
-General Public License ever published by the Free Software Foundation.
-
- If the Library as you received it specifies that a proxy can decide
-whether future versions of the GNU Lesser General Public License shall
-apply, that proxy's public statement of acceptance of any version is
-permanent authorization for you to choose that version for the
-Library.
diff --git a/vendor/github.com/go-macaroon-bakery/macaroonpb/README.md b/vendor/github.com/go-macaroon-bakery/macaroonpb/README.md
deleted file mode 100644
index 4d03b8a8..00000000
--- a/vendor/github.com/go-macaroon-bakery/macaroonpb/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
-# Macaroon ID Protocol Buffers
-
-This module defines the serialization format of macaroon identifiers for
-macaroons created by the macaroon-bakery. For the most part this encoding
-is considered an internal implementation detail of the macaroon-bakery
-and external applications should not rely on any of the details of this
-encoding being maintained between different bakery versions.
-
-This is broken out into a separate module as the protobuf implementation
-works in such a way that one cannot have multiple definitions of a
-message in any particular application's dependency tree. This module
-therefore provides a common definition for use by multiple versions of
-the macaroon-bakery to facilitate easier migration in client applications.
diff --git a/vendor/github.com/go-macaroon-bakery/macaroonpb/id.go b/vendor/github.com/go-macaroon-bakery/macaroonpb/id.go
deleted file mode 100644
index f7ddc18b..00000000
--- a/vendor/github.com/go-macaroon-bakery/macaroonpb/id.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// Package macaroonpb defines the serialization details of macaroon ids
-// used in the macaroon-bakery.
-package macaroonpb
-
-import (
- "github.com/golang/protobuf/proto"
-)
-
-//go:generate protoc --go_out . id.proto
-
-// MarshalBinary implements encoding.BinaryMarshal.
-func (id *MacaroonId) MarshalBinary() ([]byte, error) {
- return proto.Marshal(id)
-}
-
-// UnmarshalBinary implements encoding.UnmarshalBinary.
-func (id *MacaroonId) UnmarshalBinary(data []byte) error {
- return proto.Unmarshal(data, id)
-}
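
Note: the two methods deleted above simply delegate to protobuf. For context on the API being dropped from the vendor tree, a minimal round-trip sketch (illustrative only; the field values are invented):

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/go-macaroon-bakery/macaroonpb"
    )

    func main() {
    	// Encode a macaroon id to its protobuf wire form via MarshalBinary.
    	id := &macaroonpb.MacaroonId{
    		Nonce:     []byte("example-nonce"),
    		StorageId: []byte("root-key-1"),
    		Ops:       []*macaroonpb.Op{{Entity: "repo", Actions: []string{"read"}}},
    	}
    	data, err := id.MarshalBinary()
    	if err != nil {
    		log.Fatal(err)
    	}

    	// Decode it back with UnmarshalBinary.
    	var decoded macaroonpb.MacaroonId
    	if err := decoded.UnmarshalBinary(data); err != nil {
    		log.Fatal(err)
    	}
    	fmt.Printf("nonce=%q ops=%d\n", decoded.GetNonce(), len(decoded.GetOps()))
    }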
diff --git a/vendor/github.com/go-macaroon-bakery/macaroonpb/id.pb.go b/vendor/github.com/go-macaroon-bakery/macaroonpb/id.pb.go
deleted file mode 100644
index 41b69d9d..00000000
--- a/vendor/github.com/go-macaroon-bakery/macaroonpb/id.pb.go
+++ /dev/null
@@ -1,238 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// versions:
-// protoc-gen-go v1.23.0
-// protoc v3.12.3
-// source: id.proto
-
-package macaroonpb
-
-import (
- proto "github.com/golang/protobuf/proto"
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
- reflect "reflect"
- sync "sync"
-)
-
-const (
- // Verify that this generated code is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
- // Verify that runtime/protoimpl is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
-)
-
-// This is a compile-time assertion that a sufficiently up-to-date version
-// of the legacy proto package is being used.
-const _ = proto.ProtoPackageIsVersion4
-
-type MacaroonId struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Nonce []byte `protobuf:"bytes,1,opt,name=nonce,proto3" json:"nonce,omitempty"`
- StorageId []byte `protobuf:"bytes,2,opt,name=storageId,proto3" json:"storageId,omitempty"`
- Ops []*Op `protobuf:"bytes,3,rep,name=ops,proto3" json:"ops,omitempty"`
-}
-
-func (x *MacaroonId) Reset() {
- *x = MacaroonId{}
- if protoimpl.UnsafeEnabled {
- mi := &file_id_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *MacaroonId) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*MacaroonId) ProtoMessage() {}
-
-func (x *MacaroonId) ProtoReflect() protoreflect.Message {
- mi := &file_id_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use MacaroonId.ProtoReflect.Descriptor instead.
-func (*MacaroonId) Descriptor() ([]byte, []int) {
- return file_id_proto_rawDescGZIP(), []int{0}
-}
-
-func (x *MacaroonId) GetNonce() []byte {
- if x != nil {
- return x.Nonce
- }
- return nil
-}
-
-func (x *MacaroonId) GetStorageId() []byte {
- if x != nil {
- return x.StorageId
- }
- return nil
-}
-
-func (x *MacaroonId) GetOps() []*Op {
- if x != nil {
- return x.Ops
- }
- return nil
-}
-
-type Op struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Entity string `protobuf:"bytes,1,opt,name=entity,proto3" json:"entity,omitempty"`
- Actions []string `protobuf:"bytes,2,rep,name=actions,proto3" json:"actions,omitempty"`
-}
-
-func (x *Op) Reset() {
- *x = Op{}
- if protoimpl.UnsafeEnabled {
- mi := &file_id_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Op) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Op) ProtoMessage() {}
-
-func (x *Op) ProtoReflect() protoreflect.Message {
- mi := &file_id_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Op.ProtoReflect.Descriptor instead.
-func (*Op) Descriptor() ([]byte, []int) {
- return file_id_proto_rawDescGZIP(), []int{1}
-}
-
-func (x *Op) GetEntity() string {
- if x != nil {
- return x.Entity
- }
- return ""
-}
-
-func (x *Op) GetActions() []string {
- if x != nil {
- return x.Actions
- }
- return nil
-}
-
-var File_id_proto protoreflect.FileDescriptor
-
-var file_id_proto_rawDesc = []byte{
- 0x0a, 0x08, 0x69, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x57, 0x0a, 0x0a, 0x4d, 0x61,
- 0x63, 0x61, 0x72, 0x6f, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x6f, 0x6e, 0x63,
- 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x12, 0x1c,
- 0x0a, 0x09, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x49, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28,
- 0x0c, 0x52, 0x09, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x49, 0x64, 0x12, 0x15, 0x0a, 0x03,
- 0x6f, 0x70, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x03, 0x2e, 0x4f, 0x70, 0x52, 0x03,
- 0x6f, 0x70, 0x73, 0x22, 0x36, 0x0a, 0x02, 0x4f, 0x70, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x74,
- 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74,
- 0x79, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03,
- 0x28, 0x09, 0x52, 0x07, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x0e, 0x5a, 0x0c, 0x2e,
- 0x3b, 0x6d, 0x61, 0x63, 0x61, 0x72, 0x6f, 0x6f, 0x6e, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x33,
-}
-
-var (
- file_id_proto_rawDescOnce sync.Once
- file_id_proto_rawDescData = file_id_proto_rawDesc
-)
-
-func file_id_proto_rawDescGZIP() []byte {
- file_id_proto_rawDescOnce.Do(func() {
- file_id_proto_rawDescData = protoimpl.X.CompressGZIP(file_id_proto_rawDescData)
- })
- return file_id_proto_rawDescData
-}
-
-var file_id_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
-var file_id_proto_goTypes = []interface{}{
- (*MacaroonId)(nil), // 0: MacaroonId
- (*Op)(nil), // 1: Op
-}
-var file_id_proto_depIdxs = []int32{
- 1, // 0: MacaroonId.ops:type_name -> Op
- 1, // [1:1] is the sub-list for method output_type
- 1, // [1:1] is the sub-list for method input_type
- 1, // [1:1] is the sub-list for extension type_name
- 1, // [1:1] is the sub-list for extension extendee
- 0, // [0:1] is the sub-list for field type_name
-}
-
-func init() { file_id_proto_init() }
-func file_id_proto_init() {
- if File_id_proto != nil {
- return
- }
- if !protoimpl.UnsafeEnabled {
- file_id_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*MacaroonId); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_id_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Op); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
- type x struct{}
- out := protoimpl.TypeBuilder{
- File: protoimpl.DescBuilder{
- GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_id_proto_rawDesc,
- NumEnums: 0,
- NumMessages: 2,
- NumExtensions: 0,
- NumServices: 0,
- },
- GoTypes: file_id_proto_goTypes,
- DependencyIndexes: file_id_proto_depIdxs,
- MessageInfos: file_id_proto_msgTypes,
- }.Build()
- File_id_proto = out.File
- file_id_proto_rawDesc = nil
- file_id_proto_goTypes = nil
- file_id_proto_depIdxs = nil
-}
diff --git a/vendor/github.com/go-macaroon-bakery/macaroonpb/id.proto b/vendor/github.com/go-macaroon-bakery/macaroonpb/id.proto
deleted file mode 100644
index bfe891ee..00000000
--- a/vendor/github.com/go-macaroon-bakery/macaroonpb/id.proto
+++ /dev/null
@@ -1,14 +0,0 @@
-syntax="proto3";
-
-option go_package = ".;macaroonpb";
-
-message MacaroonId {
- bytes nonce = 1;
- bytes storageId = 2;
- repeated Op ops = 3;
-}
-
-message Op {
- string entity = 1;
- repeated string actions = 2;
-}
diff --git a/vendor/github.com/go-openapi/analysis/.golangci.yml b/vendor/github.com/go-openapi/analysis/.golangci.yml
index e24a6c14..22f8d21c 100644
--- a/vendor/github.com/go-openapi/analysis/.golangci.yml
+++ b/vendor/github.com/go-openapi/analysis/.golangci.yml
@@ -4,53 +4,58 @@ linters-settings:
golint:
min-confidence: 0
gocyclo:
- min-complexity: 40
- gocognit:
- min-complexity: 40
+ min-complexity: 45
maligned:
suggest-new: true
dupl:
- threshold: 150
+ threshold: 200
goconst:
min-len: 2
- min-occurrences: 4
+ min-occurrences: 3
linters:
enable-all: true
disable:
- maligned
+ - unparam
- lll
- - gochecknoglobals
- gochecknoinits
- # scopelint is useful, but also reports false positives
- # that unfortunately can't be disabled. So we disable the
- # linter rather than changing code that works.
- # see: https://github.com/kyoh86/scopelint/issues/4
- - scopelint
+ - gochecknoglobals
+ - funlen
- godox
- gocognit
- #- whitespace
+ - whitespace
- wsl
- - funlen
- - testpackage
- wrapcheck
- #- nlreturn
+ - testpackage
+ - nlreturn
- gomnd
- - goerr113
- exhaustivestruct
- #- errorlint
- #- nestif
- - gofumpt
+ - goerr113
+ - errorlint
+ - nestif
- godot
- - gci
- - dogsled
+ - gofumpt
- paralleltest
- tparallel
- thelper
- ifshort
- - forbidigo
- - cyclop
- - varnamelen
- exhaustruct
+ - varnamelen
+ - gci
+ - depguard
+ - errchkjson
+ - inamedparam
- nonamedreturns
+ - musttag
+ - ireturn
+ - forcetypeassert
+ - cyclop
+ # deprecated linters
+ - deadcode
+ - interfacer
+ - scopelint
+ - varcheck
+ - structcheck
+ - golint
- nosnakecase
diff --git a/vendor/github.com/go-openapi/analysis/README.md b/vendor/github.com/go-openapi/analysis/README.md
index aad6da10..e005d4b3 100644
--- a/vendor/github.com/go-openapi/analysis/README.md
+++ b/vendor/github.com/go-openapi/analysis/README.md
@@ -1,8 +1,5 @@
-# OpenAPI initiative analysis
+# OpenAPI analysis [](https://github.com/go-openapi/analysis/actions?query=workflow%3A"go+test") [](https://codecov.io/gh/go-openapi/analysis)
-[](https://travis-ci.org/go-openapi/analysis)
-[](https://ci.appveyor.com/project/casualjim/go-openapi/analysis/branch/master)
-[](https://codecov.io/gh/go-openapi/analysis)
[](https://slackin.goswagger.io)
[](https://raw.githubusercontent.com/go-openapi/analysis/master/LICENSE)
[](https://pkg.go.dev/github.com/go-openapi/analysis)
@@ -13,12 +10,12 @@ A foundational library to analyze an OAI specification document for easier reaso
## What's inside?
-* A analyzer providing methods to walk the functional content of a specification
+* An analyzer providing methods to walk the functional content of a specification
* A spec flattener producing a self-contained document bundle, while preserving `$ref`s
* A spec merger ("mixin") to merge several spec documents into a primary spec
 * A spec "fixer" ensuring that response descriptions are non-empty
-[Documentation](https://godoc.org/github.com/go-openapi/analysis)
+[Documentation](https://pkg.go.dev/github.com/go-openapi/analysis)
## FAQ
@@ -28,4 +25,3 @@ A foundational library to analyze an OAI specification document for easier reaso
> This package currently only supports OpenAPI 2.0 (aka Swagger 2.0).
> There is no plan to make it evolve toward supporting OpenAPI 3.x.
> This [discussion thread](https://github.com/go-openapi/spec/issues/21) relates the full story.
->
diff --git a/vendor/github.com/go-openapi/analysis/appveyor.yml b/vendor/github.com/go-openapi/analysis/appveyor.yml
deleted file mode 100644
index c2f6fd73..00000000
--- a/vendor/github.com/go-openapi/analysis/appveyor.yml
+++ /dev/null
@@ -1,32 +0,0 @@
-version: "0.1.{build}"
-
-clone_folder: C:\go-openapi\analysis
-shallow_clone: true # for startup speed
-pull_requests:
- do_not_increment_build_number: true
-
-#skip_tags: true
-#skip_branch_with_pr: true
-
-# appveyor.yml
-build: off
-
-environment:
- GOPATH: c:\gopath
-
-stack: go 1.16
-
-test_script:
- - go test -v -timeout 20m ./...
-
-deploy: off
-
-notifications:
- - provider: Slack
- incoming_webhook: https://hooks.slack.com/services/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
- auth_token:
- secure: Sf7kZf7ZGbnwWUMpffHwMu5A0cHkLK2MYY32LNTPj4+/3qC3Ghl7+9v4TSLOqOlCwdRNjOGblAq7s+GDJed6/xgRQl1JtCi1klzZNrYX4q01pgTPvvGcwbBkIYgeMaPeIRcK9OZnud7sRXdttozgTOpytps2U6Js32ip7uj5mHSg2ub0FwoSJwlS6dbezZ8+eDhoha0F/guY99BEwx8Bd+zROrT2TFGsSGOFGN6wFc7moCqTHO/YkWib13a2QNXqOxCCVBy/lt76Wp+JkeFppjHlzs/2lP3EAk13RIUAaesdEUHvIHrzCyNJEd3/+KO2DzsWOYfpktd+KBCvgaYOsoo7ubdT3IROeAegZdCgo/6xgCEsmFc9ZcqCfN5yNx2A+BZ2Vwmpws+bQ1E1+B5HDzzaiLcYfG4X2O210QVGVDLWsv1jqD+uPYeHY2WRfh5ZsIUFvaqgUEnwHwrK44/8REAhQavt1QAj5uJpsRd7CkRVPWRNK+yIky+wgbVUFEchRNmS55E7QWf+W4+4QZkQi7vUTMc9nbTUu2Es9NfvfudOpM2wZbn98fjpb/qq/nRv6Bk+ca+7XD5/IgNLMbWp2ouDdzbiHLCOfDUiHiDJhLfFZx9Bwo7ZwfzeOlbrQX66bx7xRKYmOe4DLrXhNcpbsMa8qbfxlZRCmYbubB/Y8h4=
- channel: bots
- on_build_success: false
- on_build_failure: true
- on_build_status_changed: true
diff --git a/vendor/github.com/go-openapi/analysis/doc.go b/vendor/github.com/go-openapi/analysis/doc.go
index d5294c09..e8d9f9b1 100644
--- a/vendor/github.com/go-openapi/analysis/doc.go
+++ b/vendor/github.com/go-openapi/analysis/doc.go
@@ -16,27 +16,27 @@
Package analysis provides methods to work with a Swagger specification document from
package go-openapi/spec.
-Analyzing a specification
+## Analyzing a specification
An analysed specification object (type Spec) provides methods to work with swagger definition.
-Flattening or expanding a specification
+## Flattening or expanding a specification
Flattening a specification bundles all remote $ref in the main spec document.
Depending on flattening options, additional preprocessing may take place:
- full flattening: replacing all inline complex constructs by a named entry in #/definitions
- expand: replace all $ref's in the document by their expanded content
-Merging several specifications
+## Merging several specifications
 Mixing in several specifications merges all Swagger constructs, and warns about any conflicts found.
-Fixing a specification
+## Fixing a specification
 Unmarshalling a specification with golang json unmarshalling may lead to
 unwanted results for fields that are present but empty.
-Analyzing a Swagger schema
+## Analyzing a Swagger schema
Swagger schemas are analyzed to determine their complexity and qualify their content.
*/
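
As a rough illustration of the package surface this doc comment describes (a sketch only: loads.Spec comes from the sibling go-openapi/loads package, the file name is hypothetical, and AllPaths/OperationIDs are assumed accessors of the analyzed Spec):

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/go-openapi/analysis"
    	"github.com/go-openapi/loads"
    )

    func main() {
    	// Load a Swagger 2.0 document and wrap it in an analyzed Spec.
    	doc, err := loads.Spec("swagger.yml") // hypothetical input file
    	if err != nil {
    		log.Fatal(err)
    	}
    	an := analysis.New(doc.Spec())

    	// Walk the functional content of the specification.
    	for path := range an.AllPaths() {
    		fmt.Println("path:", path)
    	}
    	fmt.Println("operations:", an.OperationIDs())
    }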
diff --git a/vendor/github.com/go-openapi/analysis/flatten.go b/vendor/github.com/go-openapi/analysis/flatten.go
index 0576220f..ebedcc9d 100644
--- a/vendor/github.com/go-openapi/analysis/flatten.go
+++ b/vendor/github.com/go-openapi/analysis/flatten.go
@@ -62,28 +62,26 @@ func newContext() *context {
//
// There is a minimal and a full flattening mode.
//
-//
// Minimally flattening a spec means:
-// - Expanding parameters, responses, path items, parameter items and header items (references to schemas are left
-// unscathed)
-// - Importing external (http, file) references so they become internal to the document
-// - Moving every JSON pointer to a $ref to a named definition (i.e. the reworked spec does not contain pointers
-// like "$ref": "#/definitions/myObject/allOfs/1")
+// - Expanding parameters, responses, path items, parameter items and header items (references to schemas are left
+// unscathed)
+// - Importing external (http, file) references so they become internal to the document
+// - Moving every JSON pointer to a $ref to a named definition (i.e. the reworked spec does not contain pointers
+// like "$ref": "#/definitions/myObject/allOfs/1")
//
// A minimally flattened spec thus guarantees the following properties:
-// - all $refs point to a local definition (i.e. '#/definitions/...')
-// - definitions are unique
+// - all $refs point to a local definition (i.e. '#/definitions/...')
+// - definitions are unique
//
// NOTE: arbitrary JSON pointers (other than $refs to top level definitions) are rewritten as definitions if they
// represent a complex schema or express commonality in the spec.
// Otherwise, they are simply expanded.
// Self-referencing JSON pointers cannot resolve to a type and trigger an error.
//
-//
// Minimal flattening is necessary and sufficient for codegen rendering using go-swagger.
//
// Fully flattening a spec means:
-// - Moving every complex inline schema to be a definition with an auto-generated name in a depth-first fashion.
+// - Moving every complex inline schema to be a definition with an auto-generated name in a depth-first fashion.
//
// By complex, we mean every JSON object with some properties.
// Arrays, when they do not define a tuple,
@@ -93,22 +91,21 @@ func newContext() *context {
// have been created.
//
// Available flattening options:
-// - Minimal: stops flattening after minimal $ref processing, leaving schema constructs untouched
-// - Expand: expand all $ref's in the document (inoperant if Minimal set to true)
-// - Verbose: croaks about name conflicts detected
-// - RemoveUnused: removes unused parameters, responses and definitions after expansion/flattening
+// - Minimal: stops flattening after minimal $ref processing, leaving schema constructs untouched
+//   - Expand: expand all $ref's in the document (inoperative if Minimal is set to true)
+// - Verbose: croaks about name conflicts detected
+// - RemoveUnused: removes unused parameters, responses and definitions after expansion/flattening
//
// NOTE: expansion removes all $ref save circular $ref, which remain in place
//
// TODO: additional options
-// - ProgagateNameExtensions: ensure that created entries properly follow naming rules when their parent have set a
-// x-go-name extension
-// - LiftAllOfs:
-// - limit the flattening of allOf members when simple objects
-// - merge allOf with validation only
-// - merge allOf with extensions only
-// - ...
-//
+// - ProgagateNameExtensions: ensure that created entries properly follow naming rules when their parent have set a
+// x-go-name extension
+// - LiftAllOfs:
+// - limit the flattening of allOf members when simple objects
+// - merge allOf with validation only
+// - merge allOf with extensions only
+// - ...
func Flatten(opts FlattenOpts) error {
debugLog("FlattenOpts: %#v", opts)
@@ -270,6 +267,12 @@ func nameInlinedSchemas(opts *FlattenOpts) error {
}
func removeUnused(opts *FlattenOpts) {
+ for removeUnusedSinglePass(opts) {
+ // continue until no unused definition remains
+ }
+}
+
+func removeUnusedSinglePass(opts *FlattenOpts) (hasRemoved bool) {
expected := make(map[string]struct{})
for k := range opts.Swagger().Definitions {
expected[path.Join(definitionsPath, jsonpointer.Escape(k))] = struct{}{}
@@ -280,6 +283,7 @@ func removeUnused(opts *FlattenOpts) {
}
for k := range expected {
+ hasRemoved = true
debugLog("removing unused definition %s", path.Base(k))
if opts.Verbose {
log.Printf("info: removing unused definition: %s", path.Base(k))
@@ -288,6 +292,8 @@ func removeUnused(opts *FlattenOpts) {
}
opts.Spec.reload() // re-analyze
+
+ return hasRemoved
}
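
The loop added above turns removal into a small fixed-point iteration: deleting a definition can orphan other definitions that only it referenced, so single passes repeat until one removes nothing. A stripped-down sketch of the same pattern, with toy types rather than the package's actual data model:

    package main

    import "fmt"

    // sweep deletes every definition that nothing references and reports
    // whether it removed anything; refs maps a definition to what it uses.
    func sweep(defs map[string]bool, refs map[string][]string) bool {
    	used := map[string]bool{}
    	for def := range defs {
    		for _, r := range refs[def] {
    			used[r] = true
    		}
    	}
    	removed := false
    	for def := range defs {
    		if !used[def] {
    			delete(defs, def)
    			removed = true
    		}
    	}
    	return removed
    }

    func main() {
    	// "b" is only referenced by "a", and "a" by nothing: removing "a"
    	// on the first pass orphans "b", which the second pass cleans up.
    	defs := map[string]bool{"a": true, "b": true}
    	refs := map[string][]string{"a": {"b"}}
    	for sweep(defs, refs) {
    		// continue until no unused definition remains
    	}
    	fmt.Println(len(defs)) // 0
    }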
func importKnownRef(entry sortref.RefRevIdx, refStr, newName string, opts *FlattenOpts) error {
@@ -334,7 +340,7 @@ func importNewRef(entry sortref.RefRevIdx, refStr string, opts *FlattenOpts) err
}
// generate a unique name - isOAIGen means that a naming conflict was resolved by changing the name
- newName, isOAIGen = uniqifyName(opts.Swagger().Definitions, nameFromRef(entry.Ref))
+ newName, isOAIGen = uniqifyName(opts.Swagger().Definitions, nameFromRef(entry.Ref, opts))
debugLog("new name for [%s]: %s - with name conflict:%t", strings.Join(entry.Keys, ", "), newName, isOAIGen)
opts.flattenContext.resolved[refStr] = newName
@@ -488,9 +494,9 @@ func stripPointersAndOAIGen(opts *FlattenOpts) error {
// stripOAIGen strips the spec from unnecessary OAIGen constructs, initially created to dedupe flattened definitions.
//
// A dedupe is deemed unnecessary whenever:
-// - the only conflict is with its (single) parent: OAIGen is merged into its parent (reinlining)
-// - there is a conflict with multiple parents: merge OAIGen in first parent, the rewrite other parents to point to
-// the first parent.
+// - the only conflict is with its (single) parent: OAIGen is merged into its parent (reinlining)
+//   - there is a conflict with multiple parents: merge OAIGen into the first parent, then rewrite the other parents to point to
+// the first parent.
//
 // This function returns true whenever it re-inlined a complex schema, so the caller may choose to iterate
// pointer and name resolution again.
@@ -652,6 +658,7 @@ func namePointers(opts *FlattenOpts) error {
refsToReplace := make(map[string]SchemaRef, len(opts.Spec.references.schemas))
for k, ref := range opts.Spec.references.allRefs {
+ debugLog("name pointers: %q => %#v", k, ref)
if path.Dir(ref.String()) == definitionsPath {
 // this is a ref to a top-level definition: ok
continue
@@ -769,6 +776,10 @@ func flattenAnonPointer(key string, v SchemaRef, refsToReplace map[string]Schema
// identifying edge case when the namer did nothing because we point to a non-schema object
// no definition is created and we expand the $ref for all callers
+ debugLog("decide what to do with the schema pointed to: asch.IsSimpleSchema=%t, len(callers)=%d, parts.IsSharedParam=%t, parts.IsSharedResponse=%t",
+ asch.IsSimpleSchema, len(callers), parts.IsSharedParam(), parts.IsSharedResponse(),
+ )
+
if (!asch.IsSimpleSchema || len(callers) > 1) && !parts.IsSharedParam() && !parts.IsSharedResponse() {
debugLog("replace JSON pointer at [%s] by definition: %s", key, v.Ref.String())
if err := namer.Name(v.Ref.String(), v.Schema, asch); err != nil {
@@ -791,6 +802,7 @@ func flattenAnonPointer(key string, v SchemaRef, refsToReplace map[string]Schema
return nil
}
+ // everything that is a simple schema and not factorizable is expanded
debugLog("expand JSON pointer for key=%s", key)
if err := replace.UpdateRefWithSchema(opts.Swagger(), key, v.Schema); err != nil {
diff --git a/vendor/github.com/go-openapi/analysis/flatten_name.go b/vendor/github.com/go-openapi/analysis/flatten_name.go
index 3ad2ccfb..c7d7938e 100644
--- a/vendor/github.com/go-openapi/analysis/flatten_name.go
+++ b/vendor/github.com/go-openapi/analysis/flatten_name.go
@@ -33,12 +33,14 @@ func (isn *InlineSchemaNamer) Name(key string, schema *spec.Schema, aschema *Ana
}
// create unique name
- newName, isOAIGen := uniqifyName(isn.Spec.Definitions, swag.ToJSONName(name))
+ mangle := mangler(isn.opts)
+ newName, isOAIGen := uniqifyName(isn.Spec.Definitions, mangle(name))
// clone schema
sch := schutils.Clone(schema)
// replace values on schema
+ debugLog("rewriting schema to ref: key=%s with new name: %s", key, newName)
if err := replace.RewriteSchemaToRef(isn.Spec, key,
spec.MustCreateRef(path.Join(definitionsPath, newName))); err != nil {
return fmt.Errorf("error while creating definition %q from inline schema: %w", newName, err)
@@ -149,13 +151,15 @@ func namesFromKey(parts sortref.SplitKey, aschema *AnalyzedSchema, operations ma
startIndex int
)
- if parts.IsOperation() {
+ switch {
+ case parts.IsOperation():
baseNames, startIndex = namesForOperation(parts, operations)
- }
-
- // definitions
- if parts.IsDefinition() {
+ case parts.IsDefinition():
baseNames, startIndex = namesForDefinition(parts)
+ default:
+ // this is a non-standard pointer: build a name by concatenating its parts
+ baseNames = [][]string{parts}
+ startIndex = len(baseNames) + 1
}
result := make([]string, 0, len(baseNames))
@@ -169,6 +173,7 @@ func namesFromKey(parts sortref.SplitKey, aschema *AnalyzedSchema, operations ma
}
sort.Strings(result)
+ debugLog("names from parts: %v => %v", parts, result)
return result
}
@@ -256,10 +261,20 @@ func partAdder(aschema *AnalyzedSchema) sortref.PartAdder {
}
}
-func nameFromRef(ref spec.Ref) string {
+func mangler(o *FlattenOpts) func(string) string {
+ if o.KeepNames {
+ return func(in string) string { return in }
+ }
+
+ return swag.ToJSONName
+}
+
+func nameFromRef(ref spec.Ref, o *FlattenOpts) string {
+ mangle := mangler(o)
+
u := ref.GetURL()
if u.Fragment != "" {
- return swag.ToJSONName(path.Base(u.Fragment))
+ return mangle(path.Base(u.Fragment))
}
if u.Path != "" {
@@ -267,19 +282,19 @@ func nameFromRef(ref spec.Ref) string {
if bn != "" && bn != "/" {
ext := path.Ext(bn)
if ext != "" {
- return swag.ToJSONName(bn[:len(bn)-len(ext)])
+ return mangle(bn[:len(bn)-len(ext)])
}
- return swag.ToJSONName(bn)
+ return mangle(bn)
}
}
- return swag.ToJSONName(strings.ReplaceAll(u.Host, ".", " "))
+ return mangle(strings.ReplaceAll(u.Host, ".", " "))
}
// GenLocation indicates from which section of the specification (models or operations) a definition has been created.
//
-// This is reflected in the output spec with a "x-go-gen-location" extension. At the moment, this is is provided
+// This is reflected in the output spec with a "x-go-gen-location" extension. At the moment, this is provided
// for information only.
func GenLocation(parts sortref.SplitKey) string {
switch {
diff --git a/vendor/github.com/go-openapi/analysis/flatten_options.go b/vendor/github.com/go-openapi/analysis/flatten_options.go
index c5bb97b0..c943fe1e 100644
--- a/vendor/github.com/go-openapi/analysis/flatten_options.go
+++ b/vendor/github.com/go-openapi/analysis/flatten_options.go
@@ -26,6 +26,7 @@ type FlattenOpts struct {
Verbose bool // enable some reporting on possible name conflicts detected
RemoveUnused bool // When true, remove unused parameters, responses and definitions after expansion/flattening
ContinueOnError bool // Continue when spec expansion issues are found
+ KeepNames bool // Do not attempt to jsonify names from references when flattening
/* Extra keys */
_ struct{} // require keys
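
A sketch of how the new KeepNames knob would be used from client code (assuming the go-openapi/loads package for loading; the file name is hypothetical):

    package main

    import (
    	"log"

    	"github.com/go-openapi/analysis"
    	"github.com/go-openapi/loads"
    )

    func main() {
    	doc, err := loads.Spec("swagger.yml") // hypothetical input document
    	if err != nil {
    		log.Fatal(err)
    	}
    	if err := analysis.Flatten(analysis.FlattenOpts{
    		Spec:      analysis.New(doc.Spec()),
    		BasePath:  "swagger.yml",
    		Minimal:   true, // stop after minimal $ref processing
    		KeepNames: true, // new: keep names as authored instead of applying swag.ToJSONName
    	}); err != nil {
    		log.Fatal(err)
    	}
    }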
diff --git a/vendor/github.com/go-openapi/analysis/internal/debug/debug.go b/vendor/github.com/go-openapi/analysis/internal/debug/debug.go
index ec0fec02..39f55a97 100644
--- a/vendor/github.com/go-openapi/analysis/internal/debug/debug.go
+++ b/vendor/github.com/go-openapi/analysis/internal/debug/debug.go
@@ -29,7 +29,7 @@ var (
// GetLogger provides a prefix debug logger
func GetLogger(prefix string, debug bool) func(string, ...interface{}) {
if debug {
- logger := log.New(output, fmt.Sprintf("%s:", prefix), log.LstdFlags)
+ logger := log.New(output, prefix+":", log.LstdFlags)
return func(msg string, args ...interface{}) {
_, file1, pos1, _ := runtime.Caller(1)
@@ -37,5 +37,5 @@ func GetLogger(prefix string, debug bool) func(string, ...interface{}) {
}
}
- return func(msg string, args ...interface{}) {}
+ return func(_ string, _ ...interface{}) {}
}
diff --git a/vendor/github.com/go-openapi/analysis/internal/flatten/replace/replace.go b/vendor/github.com/go-openapi/analysis/internal/flatten/replace/replace.go
index 26c2a05a..c0f43e72 100644
--- a/vendor/github.com/go-openapi/analysis/internal/flatten/replace/replace.go
+++ b/vendor/github.com/go-openapi/analysis/internal/flatten/replace/replace.go
@@ -1,6 +1,7 @@
package replace
import (
+ "encoding/json"
"fmt"
"net/url"
"os"
@@ -40,6 +41,8 @@ func RewriteSchemaToRef(sp *spec.Swagger, key string, ref spec.Ref) error {
if refable.Schema != nil {
refable.Schema = &spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}}
}
+ case map[string]interface{}: // this happens e.g. if a schema points to an extension unmarshaled as map[string]interface{}
+ return rewriteParentRef(sp, key, ref)
default:
return fmt.Errorf("no schema with ref found at %s for %T", key, value)
}
@@ -120,6 +123,9 @@ func rewriteParentRef(sp *spec.Swagger, key string, ref spec.Ref) error {
case spec.SchemaProperties:
container[entry] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}}
+ case *interface{}:
+ *container = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}}
+
// NOTE: can't have case *spec.SchemaOrBool = parent in this case is *Schema
default:
@@ -318,8 +324,8 @@ type DeepestRefResult struct {
}
// DeepestRef finds the first definition ref, from a cascade of nested refs which are not definitions.
-// - if no definition is found, returns the deepest ref.
-// - pointers to external files are expanded
+// - if no definition is found, returns the deepest ref.
+// - pointers to external files are expanded
//
// NOTE: all external $ref's are assumed to be already expanded at this stage.
func DeepestRef(sp *spec.Swagger, opts *spec.ExpandOptions, ref spec.Ref) (*DeepestRefResult, error) {
@@ -385,8 +391,9 @@ DOWNREF:
err := asSchema.UnmarshalJSON(asJSON)
if err != nil {
return nil,
- fmt.Errorf("invalid type for resolved JSON pointer %s. Expected a schema a, got: %T",
- currentRef.String(), value)
+ fmt.Errorf("invalid type for resolved JSON pointer %s. Expected a schema a, got: %T (%v)",
+ currentRef.String(), value, err,
+ )
}
warnings = append(warnings, fmt.Sprintf("found $ref %q (response) interpreted as schema", currentRef.String()))
@@ -402,8 +409,9 @@ DOWNREF:
var asSchema spec.Schema
if err := asSchema.UnmarshalJSON(asJSON); err != nil {
return nil,
- fmt.Errorf("invalid type for resolved JSON pointer %s. Expected a schema a, got: %T",
- currentRef.String(), value)
+ fmt.Errorf("invalid type for resolved JSON pointer %s. Expected a schema a, got: %T (%v)",
+ currentRef.String(), value, err,
+ )
}
warnings = append(warnings, fmt.Sprintf("found $ref %q (parameter) interpreted as schema", currentRef.String()))
@@ -414,9 +422,25 @@ DOWNREF:
currentRef = asSchema.Ref
default:
- return nil,
- fmt.Errorf("unhandled type to resolve JSON pointer %s. Expected a Schema, got: %T",
- currentRef.String(), value)
+ // fallback: attempts to resolve the pointer as a schema
+ if refable == nil {
+ break DOWNREF
+ }
+
+ asJSON, _ := json.Marshal(refable)
+ var asSchema spec.Schema
+ if err := asSchema.UnmarshalJSON(asJSON); err != nil {
+ return nil,
+ fmt.Errorf("unhandled type to resolve JSON pointer %s. Expected a Schema, got: %T (%v)",
+ currentRef.String(), value, err,
+ )
+ }
+ warnings = append(warnings, fmt.Sprintf("found $ref %q (%T) interpreted as schema", currentRef.String(), refable))
+
+ if asSchema.Ref.String() == "" {
+ break DOWNREF
+ }
+ currentRef = asSchema.Ref
}
}
diff --git a/vendor/github.com/go-openapi/analysis/internal/flatten/sortref/keys.go b/vendor/github.com/go-openapi/analysis/internal/flatten/sortref/keys.go
index 18e552ea..ac80fc2e 100644
--- a/vendor/github.com/go-openapi/analysis/internal/flatten/sortref/keys.go
+++ b/vendor/github.com/go-openapi/analysis/internal/flatten/sortref/keys.go
@@ -69,7 +69,7 @@ func KeyParts(key string) SplitKey {
return res
}
-// SplitKey holds of the parts of a /-separated key, soi that their location may be determined.
+// SplitKey holds the parts of a /-separated key, so that their location may be determined.
type SplitKey []string
// IsDefinition is true when the split key is in the #/definitions section of a spec
diff --git a/vendor/github.com/go-openapi/analysis/mixin.go b/vendor/github.com/go-openapi/analysis/mixin.go
index b2530526..7785a29b 100644
--- a/vendor/github.com/go-openapi/analysis/mixin.go
+++ b/vendor/github.com/go-openapi/analysis/mixin.go
@@ -53,7 +53,7 @@ import (
// collisions.
func Mixin(primary *spec.Swagger, mixins ...*spec.Swagger) []string {
skipped := make([]string, 0, len(mixins))
- opIds := getOpIds(primary)
+ opIDs := getOpIDs(primary)
initPrimary(primary)
for i, m := range mixins {
@@ -74,7 +74,7 @@ func Mixin(primary *spec.Swagger, mixins ...*spec.Swagger) []string {
skipped = append(skipped, mergeDefinitions(primary, m)...)
// merging paths requires a map of operationIDs to work with
- skipped = append(skipped, mergePaths(primary, m, opIds, i)...)
+ skipped = append(skipped, mergePaths(primary, m, opIDs, i)...)
skipped = append(skipped, mergeParameters(primary, m)...)
@@ -84,9 +84,9 @@ func Mixin(primary *spec.Swagger, mixins ...*spec.Swagger) []string {
return skipped
}
-// getOpIds extracts all the paths..operationIds from the given
+// getOpIDs extracts all the paths..operationIds from the given
// spec and returns them as the keys in a map with 'true' values.
-func getOpIds(s *spec.Swagger) map[string]bool {
+func getOpIDs(s *spec.Swagger) map[string]bool {
rv := make(map[string]bool)
if s.Paths == nil {
return rv
@@ -179,7 +179,7 @@ func mergeDefinitions(primary *spec.Swagger, m *spec.Swagger) (skipped []string)
return
}
-func mergePaths(primary *spec.Swagger, m *spec.Swagger, opIds map[string]bool, mixIndex int) (skipped []string) {
+func mergePaths(primary *spec.Swagger, m *spec.Swagger, opIDs map[string]bool, mixIndex int) (skipped []string) {
if m.Paths != nil {
for k, v := range m.Paths.Paths {
if _, exists := primary.Paths.Paths[k]; exists {
@@ -198,10 +198,10 @@ func mergePaths(primary *spec.Swagger, m *spec.Swagger, opIds map[string]bool, m
 // all the provided specs are already unique.
piops := pathItemOps(v)
for _, piop := range piops {
- if opIds[piop.ID] {
+ if opIDs[piop.ID] {
piop.ID = fmt.Sprintf("%v%v%v", piop.ID, "Mixin", mixIndex)
}
- opIds[piop.ID] = true
+ opIDs[piop.ID] = true
}
primary.Paths.Paths[k] = v
}
@@ -367,7 +367,7 @@ func mergeSwaggerProps(primary *spec.Swagger, m *spec.Swagger) []string {
return skipped
}
-// nolint: unparam
+//nolint:unparam
func mergeExternalDocs(primary *spec.ExternalDocumentation, m *spec.ExternalDocumentation) []string {
if primary.Description == "" {
primary.Description = m.Description
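
For reference, a hedged sketch of the Mixin entry point these renames touch (file names are hypothetical; error handling is abbreviated):

    package main

    import (
    	"log"

    	"github.com/go-openapi/analysis"
    	"github.com/go-openapi/loads"
    )

    func main() {
    	primary, err := loads.Spec("primary.yml")
    	if err != nil {
    		log.Fatal(err)
    	}
    	extra, err := loads.Spec("extra.yml")
    	if err != nil {
    		log.Fatal(err)
    	}

    	// Mixin mutates the primary spec in place and returns one
    	// human-readable message per construct it had to skip.
    	for _, msg := range analysis.Mixin(primary.Spec(), extra.Spec()) {
    		log.Println("mixin: skipped:", msg)
    	}
    }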
diff --git a/vendor/github.com/go-openapi/analysis/schema.go b/vendor/github.com/go-openapi/analysis/schema.go
index fc055095..ab190db5 100644
--- a/vendor/github.com/go-openapi/analysis/schema.go
+++ b/vendor/github.com/go-openapi/analysis/schema.go
@@ -1,7 +1,7 @@
package analysis
import (
- "fmt"
+ "errors"
"github.com/go-openapi/spec"
"github.com/go-openapi/strfmt"
@@ -19,7 +19,7 @@ type SchemaOpts struct {
// patterns.
func Schema(opts SchemaOpts) (*AnalyzedSchema, error) {
if opts.Schema == nil {
- return nil, fmt.Errorf("no schema to analyze")
+ return nil, errors.New("no schema to analyze")
}
a := &AnalyzedSchema{
@@ -247,10 +247,10 @@ func (a *AnalyzedSchema) isArrayType() bool {
 // isAnalyzedAsComplex determines if an analyzed schema is eligible for flattening (i.e. it is "complex").
 //
 // Complex means the schema is not any of:
-// - a simple type (primitive)
-// - an array of something (items are possibly complex ; if this is the case, items will generate a definition)
-// - a map of something (additionalProperties are possibly complex ; if this is the case, additionalProperties will
-// generate a definition)
+// - a simple type (primitive)
+// - an array of something (items are possibly complex ; if this is the case, items will generate a definition)
+// - a map of something (additionalProperties are possibly complex ; if this is the case, additionalProperties will
+// generate a definition)
func (a *AnalyzedSchema) isAnalyzedAsComplex() bool {
return !a.IsSimpleSchema && !a.IsArray && !a.IsMap
}
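
A short sketch of driving this analysis from client code (the schema literal is invented; Root and BasePath in SchemaOpts can stay zero-valued for a self-contained schema):

    package main

    import (
    	"log"

    	"github.com/go-openapi/analysis"
    	"github.com/go-openapi/spec"
    )

    func main() {
    	sch := spec.StringProperty() // a trivial, self-contained schema
    	an, err := analysis.Schema(analysis.SchemaOpts{Schema: sch})
    	if err != nil {
    		log.Fatal(err)
    	}
    	// The exported flags mirror isAnalyzedAsComplex above:
    	// "complex" == !IsSimpleSchema && !IsArray && !IsMap.
    	log.Printf("simple=%t array=%t map=%t", an.IsSimpleSchema, an.IsArray, an.IsMap)
    }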
diff --git a/vendor/github.com/go-openapi/errors/.golangci.yml b/vendor/github.com/go-openapi/errors/.golangci.yml
index 4e1fc0c7..60798c21 100644
--- a/vendor/github.com/go-openapi/errors/.golangci.yml
+++ b/vendor/github.com/go-openapi/errors/.golangci.yml
@@ -1,48 +1,75 @@
-linters-settings:
- govet:
- check-shadowing: true
- golint:
- min-confidence: 0
- gocyclo:
- min-complexity: 30
- maligned:
- suggest-new: true
- dupl:
- threshold: 100
- goconst:
- min-len: 2
- min-occurrences: 4
+version: "2"
linters:
- enable-all: true
+ default: all
disable:
- - maligned
- - lll
- - gochecknoglobals
- - godox
- - gocognit
- - whitespace
- - wsl
+ - cyclop
+ - depguard
+ - errchkjson
+ - errorlint
+ - exhaustruct
+ - forcetypeassert
- funlen
- gochecknoglobals
- gochecknoinits
- - scopelint
- - wrapcheck
- - exhaustivestruct
- - exhaustive
- - nlreturn
- - testpackage
- - gci
- - gofumpt
- - goerr113
- - gomnd
- - tparallel
- - nestif
+ - gocognit
- godot
- - errorlint
+ - godox
+ - gosmopolitan
+ - inamedparam
+ - intrange # disabled while < go1.22
+ - ireturn
+ - lll
+ - musttag
+ - nestif
+ - nlreturn
+ - noinlineerr
+ - nonamedreturns
- paralleltest
+ - recvcheck
+ - testpackage
+ - thelper
- tparallel
- - cyclop
- - errname
+ - unparam
- varnamelen
- - exhaustruct
- - maintidx
+ - whitespace
+ - wrapcheck
+ - wsl
+ - wsl_v5
+ settings:
+ dupl:
+ threshold: 200
+ goconst:
+ min-len: 2
+ min-occurrences: 3
+ gocyclo:
+ min-complexity: 45
+ exclusions:
+ generated: lax
+ presets:
+ - comments
+ - common-false-positives
+ - legacy
+ - std-error-handling
+ paths:
+ - third_party$
+ - builtin$
+ - examples$
+formatters:
+ enable:
+ - gofmt
+ - goimports
+ exclusions:
+ generated: lax
+ paths:
+ - third_party$
+ - builtin$
+ - examples$
+issues:
+ # Maximum issues count per one linter.
+ # Set to 0 to disable.
+ # Default: 50
+ max-issues-per-linter: 0
+ # Maximum count of issues with the same text.
+ # Set to 0 to disable.
+ # Default: 3
+ max-same-issues: 0
diff --git a/vendor/github.com/go-openapi/errors/README.md b/vendor/github.com/go-openapi/errors/README.md
index 4aac049e..6d57ea55 100644
--- a/vendor/github.com/go-openapi/errors/README.md
+++ b/vendor/github.com/go-openapi/errors/README.md
@@ -1,11 +1,8 @@
-# OpenAPI errors
+# OpenAPI errors [](https://github.com/go-openapi/errors/actions?query=workflow%3A"go+test") [](https://codecov.io/gh/go-openapi/errors)
-[](https://travis-ci.org/go-openapi/errors)
-[](https://codecov.io/gh/go-openapi/errors)
[](https://slackin.goswagger.io)
[](https://raw.githubusercontent.com/go-openapi/errors/master/LICENSE)
[](https://pkg.go.dev/github.com/go-openapi/errors)
-[](https://golangci.com)
[](https://goreportcard.com/report/github.com/go-openapi/errors)
Shared errors and error interface used throughout the various libraries found in the go-openapi toolkit.
diff --git a/vendor/github.com/go-openapi/errors/api.go b/vendor/github.com/go-openapi/errors/api.go
index c13f3435..d6f507f4 100644
--- a/vendor/github.com/go-openapi/errors/api.go
+++ b/vendor/github.com/go-openapi/errors/api.go
@@ -55,9 +55,15 @@ func (a apiError) MarshalJSON() ([]byte, error) {
// New creates a new API error with a code and a message
func New(code int32, message string, args ...interface{}) Error {
if len(args) > 0 {
- return &apiError{code, fmt.Sprintf(message, args...)}
+ return &apiError{
+ code: code,
+ message: fmt.Sprintf(message, args...),
+ }
+ }
+ return &apiError{
+ code: code,
+ message: message,
}
- return &apiError{code, message}
}
// NotFound creates a new not found error
@@ -130,10 +136,14 @@ func flattenComposite(errs *CompositeError) *CompositeError {
// MethodNotAllowed creates a new method not allowed error
func MethodNotAllowed(requested string, allow []string) Error {
msg := fmt.Sprintf("method %s is not allowed, but [%s] are", requested, strings.Join(allow, ","))
- return &MethodNotAllowedError{code: http.StatusMethodNotAllowed, Allowed: allow, message: msg}
+ return &MethodNotAllowedError{
+ code: http.StatusMethodNotAllowed,
+ Allowed: allow,
+ message: msg,
+ }
}
-// ServeError the error handler interface implementation
+// ServeError implements the HTTP error handler interface
func ServeError(rw http.ResponseWriter, r *http.Request, err error) {
rw.Header().Set("Content-Type", "application/json")
switch e := err.(type) {
@@ -175,7 +185,7 @@ func ServeError(rw http.ResponseWriter, r *http.Request, err error) {
}
func asHTTPCode(input int) int {
- if input >= 600 {
+ if input >= maximumValidHTTPCode {
return DefaultHTTPCode
}
return input
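
A usage sketch tying the constructors in this file together (the handler wiring and values are hypothetical):

    package main

    import (
    	"net/http"

    	"github.com/go-openapi/errors"
    )

    func main() {
    	http.HandleFunc("/pets", func(w http.ResponseWriter, r *http.Request) {
    		// New formats its message when extra args are supplied, per the change above.
    		err := errors.New(http.StatusConflict, "pet %q already exists", "rex")
    		// ServeError maps the error code to an HTTP status and renders JSON.
    		errors.ServeError(w, r, err)
    	})
    	_ = http.ListenAndServe(":8080", nil) // hypothetical address
    }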
diff --git a/vendor/github.com/go-openapi/errors/headers.go b/vendor/github.com/go-openapi/errors/headers.go
index dfebe8f9..6ea1151f 100644
--- a/vendor/github.com/go-openapi/errors/headers.go
+++ b/vendor/github.com/go-openapi/errors/headers.go
@@ -21,7 +21,7 @@ import (
)
// Validation represents a failure of a precondition
-type Validation struct {
+type Validation struct { //nolint: errname
code int32
Name string
In string
diff --git a/vendor/github.com/go-openapi/errors/middleware.go b/vendor/github.com/go-openapi/errors/middleware.go
index 963472d1..1b9f3a93 100644
--- a/vendor/github.com/go-openapi/errors/middleware.go
+++ b/vendor/github.com/go-openapi/errors/middleware.go
@@ -22,7 +22,7 @@ import (
// APIVerificationFailed is an error that contains all the missing info for a mismatched section
// between the api registrations and the api spec
-type APIVerificationFailed struct {
+type APIVerificationFailed struct { //nolint: errname
Section string `json:"section,omitempty"`
MissingSpecification []string `json:"missingSpecification,omitempty"`
MissingRegistration []string `json:"missingRegistration,omitempty"`
@@ -35,7 +35,7 @@ func (v *APIVerificationFailed) Error() string {
hasSpecMissing := len(v.MissingSpecification) > 0
if hasRegMissing {
- buf.WriteString(fmt.Sprintf("missing [%s] %s registrations", strings.Join(v.MissingRegistration, ", "), v.Section))
+ fmt.Fprintf(buf, "missing [%s] %s registrations", strings.Join(v.MissingRegistration, ", "), v.Section)
}
if hasRegMissing && hasSpecMissing {
@@ -43,7 +43,7 @@ func (v *APIVerificationFailed) Error() string {
}
if hasSpecMissing {
- buf.WriteString(fmt.Sprintf("missing from spec file [%s] %s", strings.Join(v.MissingSpecification, ", "), v.Section))
+ fmt.Fprintf(buf, "missing from spec file [%s] %s", strings.Join(v.MissingSpecification, ", "), v.Section)
}
return buf.String()
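
Both changes in this file swap buf.WriteString(fmt.Sprintf(...)) for fmt.Fprintf(buf, ...), formatting straight into the buffer instead of building an intermediate string. The same pattern in isolation (values are stand-ins for the struct fields above):

    package main

    import (
    	"bytes"
    	"fmt"
    )

    func main() {
    	section := "operations" // stand-in for v.Section
    	buf := bytes.NewBuffer(nil)
    	// Before: buf.WriteString(fmt.Sprintf(...)) allocates a temporary
    	// string; Fprintf formats directly into the buffer instead.
    	fmt.Fprintf(buf, "missing from spec file [%s] %s", "getPets", section)
    	fmt.Println(buf.String())
    }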
diff --git a/vendor/github.com/go-openapi/errors/parsing.go b/vendor/github.com/go-openapi/errors/parsing.go
index 5096e1ea..34930c08 100644
--- a/vendor/github.com/go-openapi/errors/parsing.go
+++ b/vendor/github.com/go-openapi/errors/parsing.go
@@ -17,6 +17,7 @@ package errors
import (
"encoding/json"
"fmt"
+ "net/http"
)
// ParseError represents a parsing error
@@ -29,6 +30,24 @@ type ParseError struct {
message string
}
+// NewParseError creates a new parse error
+func NewParseError(name, in, value string, reason error) *ParseError {
+ var msg string
+ if in == "" {
+ msg = fmt.Sprintf(parseErrorTemplContentNoIn, name, value, reason)
+ } else {
+ msg = fmt.Sprintf(parseErrorTemplContent, name, in, value, reason)
+ }
+ return &ParseError{
+ code: http.StatusBadRequest,
+ Name: name,
+ In: in,
+ Value: value,
+ Reason: reason,
+ message: msg,
+ }
+}
+
func (e *ParseError) Error() string {
return e.message
}
@@ -58,21 +77,3 @@ const (
parseErrorTemplContent = `parsing %s %s from %q failed, because %s`
parseErrorTemplContentNoIn = `parsing %s from %q failed, because %s`
)
-
-// NewParseError creates a new parse error
-func NewParseError(name, in, value string, reason error) *ParseError {
- var msg string
- if in == "" {
- msg = fmt.Sprintf(parseErrorTemplContentNoIn, name, value, reason)
- } else {
- msg = fmt.Sprintf(parseErrorTemplContent, name, in, value, reason)
- }
- return &ParseError{
- code: 400,
- Name: name,
- In: in,
- Value: value,
- Reason: reason,
- message: msg,
- }
-}
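
The constructor is only moved, not changed: it now sits next to the type and derives its 400 from http.StatusBadRequest. A hedged usage sketch (parameter values are invented, and ParseError's Code accessor is assumed from the Error interface this package exposes):

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/go-openapi/errors"
    )

    func main() {
    	reason := fmt.Errorf("could not parse %q as an integer", "abc")
    	perr := errors.NewParseError("limit", "query", "abc", reason)
    	// perr carries http.StatusBadRequest (400), as set by the constructor.
    	log.Println(perr.Error(), perr.Code())
    }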
diff --git a/vendor/github.com/go-openapi/errors/schema.go b/vendor/github.com/go-openapi/errors/schema.go
index da5f6c78..8f3239df 100644
--- a/vendor/github.com/go-openapi/errors/schema.go
+++ b/vendor/github.com/go-openapi/errors/schema.go
@@ -17,6 +17,7 @@ package errors
import (
"encoding/json"
"fmt"
+ "net/http"
"strings"
)
@@ -32,12 +33,12 @@ const (
patternFail = "%s in %s should match '%s'"
enumFail = "%s in %s should be one of %v"
multipleOfFail = "%s in %s should be a multiple of %v"
- maxIncFail = "%s in %s should be less than or equal to %v"
- maxExcFail = "%s in %s should be less than %v"
+ maximumIncFail = "%s in %s should be less than or equal to %v"
+ maximumExcFail = "%s in %s should be less than %v"
minIncFail = "%s in %s should be greater than or equal to %v"
minExcFail = "%s in %s should be greater than %v"
uniqueFail = "%s in %s shouldn't contain duplicates"
- maxItemsFail = "%s in %s should have at most %d items"
+ maximumItemsFail = "%s in %s should have at most %d items"
minItemsFail = "%s in %s should have at least %d items"
typeFailNoIn = "%s must be of type %s"
typeFailWithDataNoIn = "%s must be of type %s: %q"
@@ -49,12 +50,12 @@ const (
patternFailNoIn = "%s should match '%s'"
enumFailNoIn = "%s should be one of %v"
multipleOfFailNoIn = "%s should be a multiple of %v"
- maxIncFailNoIn = "%s should be less than or equal to %v"
- maxExcFailNoIn = "%s should be less than %v"
+ maximumIncFailNoIn = "%s should be less than or equal to %v"
+ maximumExcFailNoIn = "%s should be less than %v"
minIncFailNoIn = "%s should be greater than or equal to %v"
minExcFailNoIn = "%s should be greater than %v"
uniqueFailNoIn = "%s shouldn't contain duplicates"
- maxItemsFailNoIn = "%s should have at most %d items"
+ maximumItemsFailNoIn = "%s should have at most %d items"
minItemsFailNoIn = "%s should have at least %d items"
noAdditionalItems = "%s in %s can't have additional items"
noAdditionalItemsNoIn = "%s can't have additional items"
@@ -69,14 +70,17 @@ const (
multipleOfMustBePositive = "factor MultipleOf declared for %s must be positive: %v"
)
+const maximumValidHTTPCode = 600
+
// All code responses can be used to differentiate errors for different handling
// by the consuming program
const (
// CompositeErrorCode remains 422 for backwards-compatibility
// and to separate it from validation errors with cause
- CompositeErrorCode = 422
+ CompositeErrorCode = http.StatusUnprocessableEntity
+
// InvalidTypeCode is used for any subclass of invalid types
- InvalidTypeCode = 600 + iota
+ InvalidTypeCode = maximumValidHTTPCode + iota
RequiredFailCode
TooLongFailCode
TooShortFailCode
@@ -120,6 +124,10 @@ func (c *CompositeError) Error() string {
return c.message
}
+func (c *CompositeError) Unwrap() []error {
+ return c.Errors
+}
+
// MarshalJSON implements the JSON encoding interface
func (c CompositeError) MarshalJSON() ([]byte, error) {
return json.Marshal(map[string]interface{}{
@@ -133,7 +141,7 @@ func (c CompositeError) MarshalJSON() ([]byte, error) {
func CompositeValidationError(errors ...error) *CompositeError {
return &CompositeError{
code: CompositeErrorCode,
- Errors: append([]error{}, errors...),
+ Errors: append(make([]error, 0, len(errors)), errors...),
message: "validation failure list",
}
}
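
The new Unwrap() []error hooks CompositeError into Go 1.20+ multi-error unwrapping, so the standard library can now match wrapped causes inside the list. A sketch, assuming the three-argument Required constructor in this version (the sentinel error is invented for illustration):

    package main

    import (
    	stderrors "errors"
    	"log"

    	openapierrors "github.com/go-openapi/errors"
    )

    var errQuota = stderrors.New("quota exceeded") // hypothetical sentinel

    func main() {
    	composite := openapierrors.CompositeValidationError(
    		openapierrors.Required("name", "body", nil),
    		errQuota,
    	)
    	// With Unwrap() []error, stderrors.Is can look inside the composite.
    	if stderrors.Is(composite, errQuota) {
    		log.Println("quota failure detected among validation errors")
    	}
    }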
@@ -294,10 +302,10 @@ func DuplicateItems(name, in string) *Validation {
}
// TooManyItems error for when an array contains too many items
-func TooManyItems(name, in string, max int64, value interface{}) *Validation {
- msg := fmt.Sprintf(maxItemsFail, name, in, max)
+func TooManyItems(name, in string, maximum int64, value interface{}) *Validation {
+ msg := fmt.Sprintf(maximumItemsFail, name, in, maximum)
if in == "" {
- msg = fmt.Sprintf(maxItemsFailNoIn, name, max)
+ msg = fmt.Sprintf(maximumItemsFailNoIn, name, maximum)
}
return &Validation{
@@ -310,10 +318,10 @@ func TooManyItems(name, in string, max int64, value interface{}) *Validation {
}
// TooFewItems error for when an array contains too few items
-func TooFewItems(name, in string, min int64, value interface{}) *Validation {
- msg := fmt.Sprintf(minItemsFail, name, in, min)
+func TooFewItems(name, in string, minimum int64, value interface{}) *Validation {
+ msg := fmt.Sprintf(minItemsFail, name, in, minimum)
if in == "" {
- msg = fmt.Sprintf(minItemsFailNoIn, name, min)
+ msg = fmt.Sprintf(minItemsFailNoIn, name, minimum)
}
return &Validation{
code: MinItemsFailCode,
@@ -324,21 +332,21 @@ func TooFewItems(name, in string, min int64, value interface{}) *Validation {
}
}
-// ExceedsMaximumInt error for when maximum validation fails
-func ExceedsMaximumInt(name, in string, max int64, exclusive bool, value interface{}) *Validation {
+// ExceedsMaximumInt error for when maximum validation fails
+func ExceedsMaximumInt(name, in string, maximum int64, exclusive bool, value interface{}) *Validation {
var message string
if in == "" {
- m := maxIncFailNoIn
+ m := maximumIncFailNoIn
if exclusive {
- m = maxExcFailNoIn
+ m = maximumExcFailNoIn
}
- message = fmt.Sprintf(m, name, max)
+ message = fmt.Sprintf(m, name, maximum)
} else {
- m := maxIncFail
+ m := maximumIncFail
if exclusive {
- m = maxExcFail
+ m = maximumExcFail
}
- message = fmt.Sprintf(m, name, in, max)
+ message = fmt.Sprintf(m, name, in, maximum)
}
return &Validation{
code: MaxFailCode,
@@ -349,21 +357,21 @@ func ExceedsMaximumInt(name, in string, max int64, exclusive bool, value interfa
}
}
-// ExceedsMaximumUint error for when maximum validation fails
-func ExceedsMaximumUint(name, in string, max uint64, exclusive bool, value interface{}) *Validation {
+// ExceedsMaximumUint error for when maximum validation fails
+func ExceedsMaximumUint(name, in string, maximum uint64, exclusive bool, value interface{}) *Validation {
var message string
if in == "" {
- m := maxIncFailNoIn
+ m := maximumIncFailNoIn
if exclusive {
- m = maxExcFailNoIn
+ m = maximumExcFailNoIn
}
- message = fmt.Sprintf(m, name, max)
+ message = fmt.Sprintf(m, name, maximum)
} else {
- m := maxIncFail
+ m := maximumIncFail
if exclusive {
- m = maxExcFail
+ m = maximumExcFail
}
- message = fmt.Sprintf(m, name, in, max)
+ message = fmt.Sprintf(m, name, in, maximum)
}
return &Validation{
code: MaxFailCode,
@@ -374,21 +382,21 @@ func ExceedsMaximumUint(name, in string, max uint64, exclusive bool, value inter
}
}
-// ExceedsMaximum error for when maximum validation fails
-func ExceedsMaximum(name, in string, max float64, exclusive bool, value interface{}) *Validation {
+// ExceedsMaximum error for when maximum validation fails
+func ExceedsMaximum(name, in string, maximum float64, exclusive bool, value interface{}) *Validation {
var message string
if in == "" {
- m := maxIncFailNoIn
+ m := maximumIncFailNoIn
if exclusive {
- m = maxExcFailNoIn
+ m = maximumExcFailNoIn
}
- message = fmt.Sprintf(m, name, max)
+ message = fmt.Sprintf(m, name, maximum)
} else {
- m := maxIncFail
+ m := maximumIncFail
if exclusive {
- m = maxExcFail
+ m = maximumExcFail
}
- message = fmt.Sprintf(m, name, in, max)
+ message = fmt.Sprintf(m, name, in, maximum)
}
return &Validation{
code: MaxFailCode,
@@ -400,20 +408,20 @@ func ExceedsMaximum(name, in string, max float64, exclusive bool, value interfac
}
// ExceedsMinimumInt error for when minimum validation fails
-func ExceedsMinimumInt(name, in string, min int64, exclusive bool, value interface{}) *Validation {
+func ExceedsMinimumInt(name, in string, minimum int64, exclusive bool, value interface{}) *Validation {
var message string
if in == "" {
m := minIncFailNoIn
if exclusive {
m = minExcFailNoIn
}
- message = fmt.Sprintf(m, name, min)
+ message = fmt.Sprintf(m, name, minimum)
} else {
m := minIncFail
if exclusive {
m = minExcFail
}
- message = fmt.Sprintf(m, name, in, min)
+ message = fmt.Sprintf(m, name, in, minimum)
}
return &Validation{
code: MinFailCode,
@@ -425,20 +433,20 @@ func ExceedsMinimumInt(name, in string, min int64, exclusive bool, value interfa
}
// ExceedsMinimumUint error for when minimum validation fails
-func ExceedsMinimumUint(name, in string, min uint64, exclusive bool, value interface{}) *Validation {
+func ExceedsMinimumUint(name, in string, minimum uint64, exclusive bool, value interface{}) *Validation {
var message string
if in == "" {
m := minIncFailNoIn
if exclusive {
m = minExcFailNoIn
}
- message = fmt.Sprintf(m, name, min)
+ message = fmt.Sprintf(m, name, minimum)
} else {
m := minIncFail
if exclusive {
m = minExcFail
}
- message = fmt.Sprintf(m, name, in, min)
+ message = fmt.Sprintf(m, name, in, minimum)
}
return &Validation{
code: MinFailCode,
@@ -450,20 +458,20 @@ func ExceedsMinimumUint(name, in string, min uint64, exclusive bool, value inter
}
// ExceedsMinimum error for when minimum validation fails
-func ExceedsMinimum(name, in string, min float64, exclusive bool, value interface{}) *Validation {
+func ExceedsMinimum(name, in string, minimum float64, exclusive bool, value interface{}) *Validation {
var message string
if in == "" {
m := minIncFailNoIn
if exclusive {
m = minExcFailNoIn
}
- message = fmt.Sprintf(m, name, min)
+ message = fmt.Sprintf(m, name, minimum)
} else {
m := minIncFail
if exclusive {
m = minExcFail
}
- message = fmt.Sprintf(m, name, in, min)
+ message = fmt.Sprintf(m, name, in, minimum)
}
return &Validation{
code: MinFailCode,
@@ -545,12 +553,12 @@ func ReadOnly(name, in string, value interface{}) *Validation {
}
// TooLong error for when a string is too long
-func TooLong(name, in string, max int64, value interface{}) *Validation {
+func TooLong(name, in string, maximum int64, value interface{}) *Validation {
var msg string
if in == "" {
- msg = fmt.Sprintf(tooLongMessageNoIn, name, max)
+ msg = fmt.Sprintf(tooLongMessageNoIn, name, maximum)
} else {
- msg = fmt.Sprintf(tooLongMessage, name, in, max)
+ msg = fmt.Sprintf(tooLongMessage, name, in, maximum)
}
return &Validation{
code: TooLongFailCode,
@@ -562,12 +570,12 @@ func TooLong(name, in string, max int64, value interface{}) *Validation {
}
// TooShort error for when a string is too short
-func TooShort(name, in string, min int64, value interface{}) *Validation {
+func TooShort(name, in string, minimum int64, value interface{}) *Validation {
var msg string
if in == "" {
- msg = fmt.Sprintf(tooShortMessageNoIn, name, min)
+ msg = fmt.Sprintf(tooShortMessageNoIn, name, minimum)
} else {
- msg = fmt.Sprintf(tooShortMessage, name, in, min)
+ msg = fmt.Sprintf(tooShortMessage, name, in, minimum)
}
return &Validation{
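The renames above only touch parameter names, so existing call sites compile unchanged. A minimal sketch of how these constructors are used, with hypothetical parameter names and values:

```go
package main

import (
	"fmt"

	"github.com/go-openapi/errors"
)

func main() {
	// Hypothetical validation of a query parameter against bounds.
	username := "ab"

	// The bound is now named "minimum" instead of "min"; since Go
	// arguments are positional, callers need no change.
	if len(username) < 3 {
		err := errors.TooShort("username", "query", 3, username)
		fmt.Println(err.Error())
	}

	// Same shape for numeric bounds via ExceedsMaximum.
	if rate := 150.0; rate > 100 {
		err := errors.ExceedsMaximum("rate", "query", 100, false, rate)
		fmt.Println(err.Error())
	}
}
```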
diff --git a/vendor/github.com/go-openapi/jsonpointer/.golangci.yml b/vendor/github.com/go-openapi/jsonpointer/.golangci.yml
new file mode 100644
index 00000000..50063062
--- /dev/null
+++ b/vendor/github.com/go-openapi/jsonpointer/.golangci.yml
@@ -0,0 +1,62 @@
+version: "2"
+linters:
+ default: all
+ disable:
+ - cyclop
+ - depguard
+ - errchkjson
+ - errorlint
+ - exhaustruct
+ - forcetypeassert
+ - funlen
+ - gochecknoglobals
+ - gochecknoinits
+ - gocognit
+ - godot
+ - godox
+ - gosmopolitan
+ - inamedparam
+ - ireturn
+ - lll
+ - musttag
+ - nestif
+ - nlreturn
+ - nonamedreturns
+ - paralleltest
+ - testpackage
+ - thelper
+ - tparallel
+ - unparam
+ - varnamelen
+ - whitespace
+ - wrapcheck
+ - wsl
+ settings:
+ dupl:
+ threshold: 200
+ goconst:
+ min-len: 2
+ min-occurrences: 3
+ gocyclo:
+ min-complexity: 45
+ exclusions:
+ generated: lax
+ presets:
+ - comments
+ - common-false-positives
+ - legacy
+ - std-error-handling
+ paths:
+ - third_party$
+ - builtin$
+ - examples$
+formatters:
+ enable:
+ - gofmt
+ - goimports
+ exclusions:
+ generated: lax
+ paths:
+ - third_party$
+ - builtin$
+ - examples$
diff --git a/vendor/github.com/go-openapi/jsonpointer/.travis.yml b/vendor/github.com/go-openapi/jsonpointer/.travis.yml
deleted file mode 100644
index 03a22fe0..00000000
--- a/vendor/github.com/go-openapi/jsonpointer/.travis.yml
+++ /dev/null
@@ -1,15 +0,0 @@
-after_success:
-- bash <(curl -s https://codecov.io/bash)
-go:
-- 1.14.x
-- 1.15.x
-install:
-- GO111MODULE=off go get -u gotest.tools/gotestsum
-env:
-- GO111MODULE=on
-language: go
-notifications:
- slack:
- secure: a5VgoiwB1G/AZqzmephPZIhEB9avMlsWSlVnM1dSAtYAwdrQHGTQxAmpOxYIoSPDhWNN5bfZmjd29++UlTwLcHSR+e0kJhH6IfDlsHj/HplNCJ9tyI0zYc7XchtdKgeMxMzBKCzgwFXGSbQGydXTliDNBo0HOzmY3cou/daMFTP60K+offcjS+3LRAYb1EroSRXZqrk1nuF/xDL3792DZUdPMiFR/L/Df6y74D6/QP4sTkTDFQitz4Wy/7jbsfj8dG6qK2zivgV6/l+w4OVjFkxVpPXogDWY10vVXNVynqxfJ7to2d1I9lNCHE2ilBCkWMIPdyJF7hjF8pKW+82yP4EzRh0vu8Xn0HT5MZpQxdRY/YMxNrWaG7SxsoEaO4q5uhgdzAqLYY3TRa7MjIK+7Ur+aqOeTXn6OKwVi0CjvZ6mIU3WUKSwiwkFZMbjRAkSb5CYwMEfGFO/z964xz83qGt6WAtBXNotqCQpTIiKtDHQeLOMfksHImCg6JLhQcWBVxamVgu0G3Pdh8Y6DyPnxraXY95+QDavbjqv7TeYT9T/FNnrkXaTTK0s4iWE5H4ACU0Qvz0wUYgfQrZv0/Hp7V17+rabUwnzYySHCy9SWX/7OV9Cfh31iMp9ZIffr76xmmThtOEqs8TrTtU6BWI3rWwvA9cXQipZTVtL0oswrGw=
-script:
-- gotestsum -f short-verbose -- -race -coverprofile=coverage.txt -covermode=atomic ./...
diff --git a/vendor/github.com/go-openapi/jsonpointer/README.md b/vendor/github.com/go-openapi/jsonpointer/README.md
index 813788af..0108f1d5 100644
--- a/vendor/github.com/go-openapi/jsonpointer/README.md
+++ b/vendor/github.com/go-openapi/jsonpointer/README.md
@@ -1,6 +1,10 @@
-# gojsonpointer [](https://travis-ci.org/go-openapi/jsonpointer) [](https://codecov.io/gh/go-openapi/jsonpointer) [](https://slackin.goswagger.io)
+# gojsonpointer [](https://github.com/go-openapi/jsonpointer/actions?query=workflow%3A"go+test") [](https://codecov.io/gh/go-openapi/jsonpointer)
+
+[](https://slackin.goswagger.io)
+[](https://raw.githubusercontent.com/go-openapi/jsonpointer/master/LICENSE)
+[](https://pkg.go.dev/github.com/go-openapi/jsonpointer)
+[](https://goreportcard.com/report/github.com/go-openapi/jsonpointer)
-[](https://raw.githubusercontent.com/go-openapi/jsonpointer/master/LICENSE) [](http://godoc.org/github.com/go-openapi/jsonpointer)
An implementation of JSON Pointer - Go language
## Status
diff --git a/vendor/github.com/go-openapi/jsonpointer/errors.go b/vendor/github.com/go-openapi/jsonpointer/errors.go
new file mode 100644
index 00000000..b84343d9
--- /dev/null
+++ b/vendor/github.com/go-openapi/jsonpointer/errors.go
@@ -0,0 +1,18 @@
+package jsonpointer
+
+type pointerError string
+
+func (e pointerError) Error() string {
+ return string(e)
+}
+
+const (
+ // ErrPointer is an error raised by the jsonpointer package
+ ErrPointer pointerError = "JSON pointer error"
+
+ // ErrInvalidStart states that a JSON pointer must start with a separator ("/")
+ ErrInvalidStart pointerError = `JSON pointer must be empty or start with a "` + pointerSeparator
+
+ // ErrUnsupportedValueType indicates that a value of the wrong type is being set
+ ErrUnsupportedValueType pointerError = "only structs, pointers, maps and slices are supported for setting values"
+)
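The new file adopts the string-constant sentinel pattern, and pointer.go (below) now wraps parse failures with errors.Join, so callers can match either sentinel via errors.Is. A self-contained sketch of the pattern, independent of the vendored package:

```go
package main

import (
	"errors"
	"fmt"
)

// pointerError mirrors the type introduced above: a string-based
// error whose constants act as comparable sentinels.
type pointerError string

func (e pointerError) Error() string { return string(e) }

const (
	errPointer      pointerError = "JSON pointer error"
	errInvalidStart pointerError = `JSON pointer must be empty or start with a "/"`
)

func parse(s string) error {
	if s != "" && s[0] != '/' {
		// errors.Join makes both sentinels visible to errors.Is.
		return errors.Join(errInvalidStart, errPointer)
	}
	return nil
}

func main() {
	err := parse("no-leading-slash")
	fmt.Println(errors.Is(err, errPointer))      // true
	fmt.Println(errors.Is(err, errInvalidStart)) // true
}
```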
diff --git a/vendor/github.com/go-openapi/jsonpointer/pointer.go b/vendor/github.com/go-openapi/jsonpointer/pointer.go
index 7df9853d..61362105 100644
--- a/vendor/github.com/go-openapi/jsonpointer/pointer.go
+++ b/vendor/github.com/go-openapi/jsonpointer/pointer.go
@@ -26,6 +26,7 @@
package jsonpointer
import (
+ "encoding/json"
"errors"
"fmt"
"reflect"
@@ -38,8 +39,6 @@ import (
const (
emptyPointer = ``
pointerSeparator = `/`
-
- invalidStart = `JSON pointer must be empty or start with a "` + pointerSeparator
)
var jsonPointableType = reflect.TypeOf(new(JSONPointable)).Elem()
@@ -48,13 +47,13 @@ var jsonSetableType = reflect.TypeOf(new(JSONSetable)).Elem()
// JSONPointable is an interface for structs to implement when they need to customize the
// json pointer process
type JSONPointable interface {
- JSONLookup(string) (interface{}, error)
+ JSONLookup(string) (any, error)
}
// JSONSetable is an interface for structs to implement when they need to customize the
// json pointer process
type JSONSetable interface {
- JSONSet(string, interface{}) error
+ JSONSet(string, any) error
}
// New creates a new json pointer for the given string
@@ -78,12 +77,10 @@ func (p *Pointer) parse(jsonPointerString string) error {
if jsonPointerString != emptyPointer {
if !strings.HasPrefix(jsonPointerString, pointerSeparator) {
- err = errors.New(invalidStart)
+ err = errors.Join(ErrInvalidStart, ErrPointer)
} else {
referenceTokens := strings.Split(jsonPointerString, pointerSeparator)
- for _, referenceToken := range referenceTokens[1:] {
- p.referenceTokens = append(p.referenceTokens, referenceToken)
- }
+ p.referenceTokens = append(p.referenceTokens, referenceTokens[1:]...)
}
}
@@ -91,42 +88,62 @@ func (p *Pointer) parse(jsonPointerString string) error {
}
// Get uses the pointer to retrieve a value from a JSON document
-func (p *Pointer) Get(document interface{}) (interface{}, reflect.Kind, error) {
+func (p *Pointer) Get(document any) (any, reflect.Kind, error) {
return p.get(document, swag.DefaultJSONNameProvider)
}
// Set uses the pointer to set a value from a JSON document
-func (p *Pointer) Set(document interface{}, value interface{}) (interface{}, error) {
+func (p *Pointer) Set(document any, value any) (any, error) {
return document, p.set(document, value, swag.DefaultJSONNameProvider)
}
// GetForToken gets a value for a json pointer token 1 level deep
-func GetForToken(document interface{}, decodedToken string) (interface{}, reflect.Kind, error) {
+func GetForToken(document any, decodedToken string) (any, reflect.Kind, error) {
return getSingleImpl(document, decodedToken, swag.DefaultJSONNameProvider)
}
// SetForToken gets a value for a json pointer token 1 level deep
-func SetForToken(document interface{}, decodedToken string, value interface{}) (interface{}, error) {
+func SetForToken(document any, decodedToken string, value any) (any, error) {
return document, setSingleImpl(document, value, decodedToken, swag.DefaultJSONNameProvider)
}
-func getSingleImpl(node interface{}, decodedToken string, nameProvider *swag.NameProvider) (interface{}, reflect.Kind, error) {
+func isNil(input any) bool {
+ if input == nil {
+ return true
+ }
+
+ kind := reflect.TypeOf(input).Kind()
+ switch kind { //nolint:exhaustive
+ case reflect.Ptr, reflect.Map, reflect.Slice, reflect.Chan:
+ return reflect.ValueOf(input).IsNil()
+ default:
+ return false
+ }
+}
+
+func getSingleImpl(node any, decodedToken string, nameProvider *swag.NameProvider) (any, reflect.Kind, error) {
rValue := reflect.Indirect(reflect.ValueOf(node))
kind := rValue.Kind()
+ if isNil(node) {
+ return nil, kind, fmt.Errorf("nil value has no field %q: %w", decodedToken, ErrPointer)
+ }
- if rValue.Type().Implements(jsonPointableType) {
- r, err := node.(JSONPointable).JSONLookup(decodedToken)
+ switch typed := node.(type) {
+ case JSONPointable:
+ r, err := typed.JSONLookup(decodedToken)
if err != nil {
return nil, kind, err
}
return r, kind, nil
+	case *any: // case of a pointer to interface, which is not resolved by reflect.Indirect
+ return getSingleImpl(*typed, decodedToken, nameProvider)
}
- switch kind {
+ switch kind { //nolint:exhaustive
case reflect.Struct:
nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken)
if !ok {
- return nil, kind, fmt.Errorf("object has no field %q", decodedToken)
+ return nil, kind, fmt.Errorf("object has no field %q: %w", decodedToken, ErrPointer)
}
fld := rValue.FieldByName(nm)
return fld.Interface(), kind, nil
@@ -138,7 +155,7 @@ func getSingleImpl(node interface{}, decodedToken string, nameProvider *swag.Nam
if mv.IsValid() {
return mv.Interface(), kind, nil
}
- return nil, kind, fmt.Errorf("object has no key %q", decodedToken)
+ return nil, kind, fmt.Errorf("object has no key %q: %w", decodedToken, ErrPointer)
case reflect.Slice:
tokenIndex, err := strconv.Atoi(decodedToken)
@@ -147,21 +164,26 @@ func getSingleImpl(node interface{}, decodedToken string, nameProvider *swag.Nam
}
sLength := rValue.Len()
if tokenIndex < 0 || tokenIndex >= sLength {
- return nil, kind, fmt.Errorf("index out of bounds array[0,%d] index '%d'", sLength-1, tokenIndex)
+ return nil, kind, fmt.Errorf("index out of bounds array[0,%d] index '%d': %w", sLength-1, tokenIndex, ErrPointer)
}
elem := rValue.Index(tokenIndex)
return elem.Interface(), kind, nil
default:
- return nil, kind, fmt.Errorf("invalid token reference %q", decodedToken)
+ return nil, kind, fmt.Errorf("invalid token reference %q: %w", decodedToken, ErrPointer)
}
}
-func setSingleImpl(node, data interface{}, decodedToken string, nameProvider *swag.NameProvider) error {
+func setSingleImpl(node, data any, decodedToken string, nameProvider *swag.NameProvider) error {
rValue := reflect.Indirect(reflect.ValueOf(node))
+ // Check for nil to prevent panic when calling rValue.Type()
+ if isNil(node) {
+ return fmt.Errorf("cannot set field %q on nil value: %w", decodedToken, ErrPointer)
+ }
+
if ns, ok := node.(JSONSetable); ok { // pointer impl
return ns.JSONSet(decodedToken, data)
}
@@ -170,11 +192,11 @@ func setSingleImpl(node, data interface{}, decodedToken string, nameProvider *sw
return node.(JSONSetable).JSONSet(decodedToken, data)
}
- switch rValue.Kind() {
+ switch rValue.Kind() { //nolint:exhaustive
case reflect.Struct:
nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken)
if !ok {
- return fmt.Errorf("object has no field %q", decodedToken)
+ return fmt.Errorf("object has no field %q: %w", decodedToken, ErrPointer)
}
fld := rValue.FieldByName(nm)
if fld.IsValid() {
@@ -194,23 +216,23 @@ func setSingleImpl(node, data interface{}, decodedToken string, nameProvider *sw
}
sLength := rValue.Len()
if tokenIndex < 0 || tokenIndex >= sLength {
- return fmt.Errorf("index out of bounds array[0,%d] index '%d'", sLength, tokenIndex)
+ return fmt.Errorf("index out of bounds array[0,%d] index '%d': %w", sLength, tokenIndex, ErrPointer)
}
elem := rValue.Index(tokenIndex)
if !elem.CanSet() {
- return fmt.Errorf("can't set slice index %s to %v", decodedToken, data)
+ return fmt.Errorf("can't set slice index %s to %v: %w", decodedToken, data, ErrPointer)
}
elem.Set(reflect.ValueOf(data))
return nil
default:
- return fmt.Errorf("invalid token reference %q", decodedToken)
+ return fmt.Errorf("invalid token reference %q: %w", decodedToken, ErrPointer)
}
}
-func (p *Pointer) get(node interface{}, nameProvider *swag.NameProvider) (interface{}, reflect.Kind, error) {
+func (p *Pointer) get(node any, nameProvider *swag.NameProvider) (any, reflect.Kind, error) {
if nameProvider == nil {
nameProvider = swag.DefaultJSONNameProvider
@@ -224,15 +246,13 @@ func (p *Pointer) get(node interface{}, nameProvider *swag.NameProvider) (interf
}
for _, token := range p.referenceTokens {
-
decodedToken := Unescape(token)
r, knd, err := getSingleImpl(node, decodedToken, nameProvider)
if err != nil {
return nil, knd, err
}
- node, kind = r, knd
-
+ node = r
}
rValue := reflect.ValueOf(node)
@@ -241,11 +261,14 @@ func (p *Pointer) get(node interface{}, nameProvider *swag.NameProvider) (interf
return node, kind, nil
}
-func (p *Pointer) set(node, data interface{}, nameProvider *swag.NameProvider) error {
+func (p *Pointer) set(node, data any, nameProvider *swag.NameProvider) error {
knd := reflect.ValueOf(node).Kind()
if knd != reflect.Ptr && knd != reflect.Struct && knd != reflect.Map && knd != reflect.Slice && knd != reflect.Array {
- return fmt.Errorf("only structs, pointers, maps and slices are supported for setting values")
+ return errors.Join(
+ ErrUnsupportedValueType,
+ ErrPointer,
+ )
}
if nameProvider == nil {
@@ -267,6 +290,11 @@ func (p *Pointer) set(node, data interface{}, nameProvider *swag.NameProvider) e
return setSingleImpl(node, data, decodedToken, nameProvider)
}
+ // Check for nil during traversal
+ if isNil(node) {
+ return fmt.Errorf("cannot traverse through nil value at %q: %w", decodedToken, ErrPointer)
+ }
+
rValue := reflect.Indirect(reflect.ValueOf(node))
kind := rValue.Kind()
@@ -284,11 +312,11 @@ func (p *Pointer) set(node, data interface{}, nameProvider *swag.NameProvider) e
continue
}
- switch kind {
+ switch kind { //nolint:exhaustive
case reflect.Struct:
nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken)
if !ok {
- return fmt.Errorf("object has no field %q", decodedToken)
+ return fmt.Errorf("object has no field %q: %w", decodedToken, ErrPointer)
}
fld := rValue.FieldByName(nm)
if fld.CanAddr() && fld.Kind() != reflect.Interface && fld.Kind() != reflect.Map && fld.Kind() != reflect.Slice && fld.Kind() != reflect.Ptr {
@@ -302,7 +330,7 @@ func (p *Pointer) set(node, data interface{}, nameProvider *swag.NameProvider) e
mv := rValue.MapIndex(kv)
if !mv.IsValid() {
- return fmt.Errorf("object has no key %q", decodedToken)
+ return fmt.Errorf("object has no key %q: %w", decodedToken, ErrPointer)
}
if mv.CanAddr() && mv.Kind() != reflect.Interface && mv.Kind() != reflect.Map && mv.Kind() != reflect.Slice && mv.Kind() != reflect.Ptr {
node = mv.Addr().Interface()
@@ -317,7 +345,7 @@ func (p *Pointer) set(node, data interface{}, nameProvider *swag.NameProvider) e
}
sLength := rValue.Len()
if tokenIndex < 0 || tokenIndex >= sLength {
- return fmt.Errorf("index out of bounds array[0,%d] index '%d'", sLength, tokenIndex)
+ return fmt.Errorf("index out of bounds array[0,%d] index '%d': %w", sLength, tokenIndex, ErrPointer)
}
elem := rValue.Index(tokenIndex)
@@ -328,7 +356,7 @@ func (p *Pointer) set(node, data interface{}, nameProvider *swag.NameProvider) e
node = elem.Interface()
default:
- return fmt.Errorf("invalid token reference %q", decodedToken)
+ return fmt.Errorf("invalid token reference %q: %w", decodedToken, ErrPointer)
}
}
@@ -363,6 +391,128 @@ func (p *Pointer) String() string {
return pointerString
}
+func (p *Pointer) Offset(document string) (int64, error) {
+ dec := json.NewDecoder(strings.NewReader(document))
+ var offset int64
+ for _, ttk := range p.DecodedTokens() {
+ tk, err := dec.Token()
+ if err != nil {
+ return 0, err
+ }
+ switch tk := tk.(type) {
+ case json.Delim:
+ switch tk {
+ case '{':
+ offset, err = offsetSingleObject(dec, ttk)
+ if err != nil {
+ return 0, err
+ }
+ case '[':
+ offset, err = offsetSingleArray(dec, ttk)
+ if err != nil {
+ return 0, err
+ }
+ default:
+ return 0, fmt.Errorf("invalid token %#v: %w", tk, ErrPointer)
+ }
+ default:
+ return 0, fmt.Errorf("invalid token %#v: %w", tk, ErrPointer)
+ }
+ }
+ return offset, nil
+}
+
+func offsetSingleObject(dec *json.Decoder, decodedToken string) (int64, error) {
+ for dec.More() {
+ offset := dec.InputOffset()
+ tk, err := dec.Token()
+ if err != nil {
+ return 0, err
+ }
+ switch tk := tk.(type) {
+ case json.Delim:
+ switch tk {
+ case '{':
+ if err = drainSingle(dec); err != nil {
+ return 0, err
+ }
+ case '[':
+ if err = drainSingle(dec); err != nil {
+ return 0, err
+ }
+ }
+ case string:
+ if tk == decodedToken {
+ return offset, nil
+ }
+ default:
+ return 0, fmt.Errorf("invalid token %#v: %w", tk, ErrPointer)
+ }
+ }
+ return 0, fmt.Errorf("token reference %q not found: %w", decodedToken, ErrPointer)
+}
+
+func offsetSingleArray(dec *json.Decoder, decodedToken string) (int64, error) {
+ idx, err := strconv.Atoi(decodedToken)
+ if err != nil {
+ return 0, fmt.Errorf("token reference %q is not a number: %v: %w", decodedToken, err, ErrPointer)
+ }
+ var i int
+ for i = 0; i < idx && dec.More(); i++ {
+ tk, err := dec.Token()
+ if err != nil {
+ return 0, err
+ }
+
+ if delim, isDelim := tk.(json.Delim); isDelim {
+ switch delim {
+ case '{':
+ if err = drainSingle(dec); err != nil {
+ return 0, err
+ }
+ case '[':
+ if err = drainSingle(dec); err != nil {
+ return 0, err
+ }
+ }
+ }
+ }
+
+ if !dec.More() {
+ return 0, fmt.Errorf("token reference %q not found: %w", decodedToken, ErrPointer)
+ }
+ return dec.InputOffset(), nil
+}
+
+// drainSingle drains a single level of object or array.
+// The decoder has to guarantee the beginning delim (i.e. '{' or '[') has been consumed.
+func drainSingle(dec *json.Decoder) error {
+ for dec.More() {
+ tk, err := dec.Token()
+ if err != nil {
+ return err
+ }
+ if delim, isDelim := tk.(json.Delim); isDelim {
+ switch delim {
+ case '{':
+ if err = drainSingle(dec); err != nil {
+ return err
+ }
+ case '[':
+ if err = drainSingle(dec); err != nil {
+ return err
+ }
+ }
+ }
+ }
+
+ // Consumes the ending delim
+ if _, err := dec.Token(); err != nil {
+ return err
+ }
+ return nil
+}
+
// Specific JSON pointer encoding here
// ~0 => ~
// ~1 => /
@@ -377,14 +527,14 @@ const (
// Unescape unescapes a json pointer reference token string to the original representation
func Unescape(token string) string {
- step1 := strings.Replace(token, encRefTok1, decRefTok1, -1)
- step2 := strings.Replace(step1, encRefTok0, decRefTok0, -1)
+ step1 := strings.ReplaceAll(token, encRefTok1, decRefTok1)
+ step2 := strings.ReplaceAll(step1, encRefTok0, decRefTok0)
return step2
}
// Escape escapes a pointer reference token string
func Escape(token string) string {
- step1 := strings.Replace(token, decRefTok0, encRefTok0, -1)
- step2 := strings.Replace(step1, decRefTok1, encRefTok1, -1)
+ step1 := strings.ReplaceAll(token, decRefTok0, encRefTok0)
+ step2 := strings.ReplaceAll(step1, decRefTok1, encRefTok1)
return step2
}
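The headline addition here is Offset, which walks the raw document with a json.Decoder and reports the byte offset of the token a pointer designates, honoring the ~0/~1 escapes handled by Escape and Unescape. A minimal sketch (the document content is hypothetical):

```go
package main

import (
	"fmt"

	"github.com/go-openapi/jsonpointer"
)

func main() {
	const doc = `{"paths":{"/items":{"get":{"summary":"list"}}}}`

	// "~1" escapes "/" inside a key name, so this pointer addresses
	// doc["paths"]["/items"]["get"].
	ptr, err := jsonpointer.New("/paths/~1items/get")
	if err != nil {
		panic(err)
	}

	off, err := ptr.Offset(doc)
	if err != nil {
		panic(err)
	}
	fmt.Println(off) // byte offset of the "get" member inside doc
}
```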
diff --git a/vendor/github.com/go-openapi/jsonreference/.golangci.yml b/vendor/github.com/go-openapi/jsonreference/.golangci.yml
index f9381aee..22f8d21c 100644
--- a/vendor/github.com/go-openapi/jsonreference/.golangci.yml
+++ b/vendor/github.com/go-openapi/jsonreference/.golangci.yml
@@ -4,38 +4,58 @@ linters-settings:
golint:
min-confidence: 0
gocyclo:
- min-complexity: 30
+ min-complexity: 45
maligned:
suggest-new: true
dupl:
- threshold: 100
+ threshold: 200
goconst:
min-len: 2
- min-occurrences: 4
+ min-occurrences: 3
+
linters:
enable-all: true
disable:
- maligned
+ - unparam
- lll
+ - gochecknoinits
- gochecknoglobals
+ - funlen
- godox
- gocognit
- whitespace
- wsl
- - funlen
- - gochecknoglobals
- - gochecknoinits
- - scopelint
- wrapcheck
- - exhaustivestruct
- - exhaustive
- - nlreturn
- testpackage
- - gci
- - gofumpt
- - goerr113
+ - nlreturn
- gomnd
- - tparallel
+ - exhaustivestruct
+ - goerr113
+ - errorlint
- nestif
- godot
- - errorlint
+ - gofumpt
+ - paralleltest
+ - tparallel
+ - thelper
+ - ifshort
+ - exhaustruct
+ - varnamelen
+ - gci
+ - depguard
+ - errchkjson
+ - inamedparam
+ - nonamedreturns
+ - musttag
+ - ireturn
+ - forcetypeassert
+ - cyclop
+ # deprecated linters
+ - deadcode
+ - interfacer
+ - scopelint
+ - varcheck
+ - structcheck
+ - golint
+ - nosnakecase
diff --git a/vendor/github.com/go-openapi/jsonreference/.travis.yml b/vendor/github.com/go-openapi/jsonreference/.travis.yml
deleted file mode 100644
index 05482f4b..00000000
--- a/vendor/github.com/go-openapi/jsonreference/.travis.yml
+++ /dev/null
@@ -1,24 +0,0 @@
-after_success:
-- bash <(curl -s https://codecov.io/bash)
-go:
-- 1.14.x
-- 1.x
-install:
-- go get gotest.tools/gotestsum
-jobs:
- include:
- # include linting job, but only for latest go version and amd64 arch
- - go: 1.x
- arch: amd64
- install:
- go get github.com/golangci/golangci-lint/cmd/golangci-lint
- script:
- - golangci-lint run --new-from-rev master
-env:
-- GO111MODULE=on
-language: go
-notifications:
- slack:
- secure: OpQG/36F7DSF00HLm9WZMhyqFCYYyYTsVDObW226cWiR8PWYiNfLZiSEvIzT1Gx4dDjhigKTIqcLhG34CkL5iNXDjm9Yyo2RYhQPlK8NErNqUEXuBqn4RqYHW48VGhEhOyDd4Ei0E2FN5ZbgpvHgtpkdZ6XDi64r3Ac89isP9aPHXQTuv2Jog6b4/OKKiUTftLcTIst0p4Cp3gqOJWf1wnoj+IadWiECNVQT6zb47IYjtyw6+uV8iUjTzdKcRB6Zc6b4Dq7JAg1Zd7Jfxkql3hlKp4PNlRf9Cy7y5iA3G7MLyg3FcPX5z2kmcyPt2jOTRMBWUJ5zIQpOxizAcN8WsT3WWBL5KbuYK6k0PzujrIDLqdxGpNmjkkMfDBT9cKmZpm2FdW+oZgPFJP+oKmAo4u4KJz/vjiPTXgQlN5bmrLuRMCp+AwC5wkIohTqWZVPE2TK6ZSnMYcg/W39s+RP/9mJoyryAvPSpBOLTI+biCgaUCTOAZxNTWpMFc3tPYntc41WWkdKcooZ9JA5DwfcaVFyTGQ3YXz+HvX6G1z/gW0Q/A4dBi9mj2iE1xm7tRTT+4VQ2AXFvSEI1HJpfPgYnwAtwOD1v3Qm2EUHk9sCdtEDR4wVGEPIVn44GnwFMnGKx9JWppMPYwFu3SVDdHt+E+LOlhZUply11Aa+IVrT2KUQ=
-script:
-- gotestsum -f short-verbose -- -race -coverprofile=coverage.txt -covermode=atomic ./...
diff --git a/vendor/github.com/go-openapi/jsonreference/README.md b/vendor/github.com/go-openapi/jsonreference/README.md
index b94753aa..c7fc2049 100644
--- a/vendor/github.com/go-openapi/jsonreference/README.md
+++ b/vendor/github.com/go-openapi/jsonreference/README.md
@@ -1,15 +1,19 @@
-# gojsonreference [](https://travis-ci.org/go-openapi/jsonreference) [](https://codecov.io/gh/go-openapi/jsonreference) [](https://slackin.goswagger.io)
+# gojsonreference [](https://github.com/go-openapi/jsonreference/actions?query=workflow%3A"go+test") [](https://codecov.io/gh/go-openapi/jsonreference)
+
+[](https://slackin.goswagger.io)
+[](https://raw.githubusercontent.com/go-openapi/jsonreference/master/LICENSE)
+[](https://pkg.go.dev/github.com/go-openapi/jsonreference)
+[](https://goreportcard.com/report/github.com/go-openapi/jsonreference)
-[](https://raw.githubusercontent.com/go-openapi/jsonreference/master/LICENSE) [](http://godoc.org/github.com/go-openapi/jsonreference)
An implementation of JSON Reference - Go language
## Status
Feature complete. Stable API
## Dependencies
-https://github.com/go-openapi/jsonpointer
+* https://github.com/go-openapi/jsonpointer
## References
-http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07
-http://tools.ietf.org/html/draft-pbryan-zyp-json-ref-03
+* http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07
+* http://tools.ietf.org/html/draft-pbryan-zyp-json-ref-03
diff --git a/vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go b/vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go
index 8956c308..f0610cf1 100644
--- a/vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go
+++ b/vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go
@@ -7,8 +7,8 @@ import (
)
const (
- defaultHttpPort = ":80"
- defaultHttpsPort = ":443"
+ defaultHTTPPort = ":80"
+ defaultHTTPSPort = ":443"
)
// Regular expressions used by the normalizations
@@ -18,18 +18,24 @@ var rxDupSlashes = regexp.MustCompile(`/{2,}`)
// NormalizeURL will normalize the specified URL
// This was added to replace a previous call to the no longer maintained purell library:
// The call that was used looked like the following:
-// url.Parse(purell.NormalizeURL(parsed, purell.FlagsSafe|purell.FlagRemoveDuplicateSlashes))
+//
+// url.Parse(purell.NormalizeURL(parsed, purell.FlagsSafe|purell.FlagRemoveDuplicateSlashes))
//
// To explain all that was included in the call above, purell.FlagsSafe was really just the following:
-// - FlagLowercaseScheme
-// - FlagLowercaseHost
-// - FlagRemoveDefaultPort
-// - FlagRemoveDuplicateSlashes (and this was mixed in with the |)
+// - FlagLowercaseScheme
+// - FlagLowercaseHost
+// - FlagRemoveDefaultPort
+// - FlagRemoveDuplicateSlashes (and this was mixed in with the |)
+//
+// This also normalizes the URL into its urlencoded form by removing RawPath and RawFragment.
func NormalizeURL(u *url.URL) {
lowercaseScheme(u)
lowercaseHost(u)
removeDefaultPort(u)
removeDuplicateSlashes(u)
+
+ u.RawPath = ""
+ u.RawFragment = ""
}
func lowercaseScheme(u *url.URL) {
@@ -48,7 +54,7 @@ func removeDefaultPort(u *url.URL) {
if len(u.Host) > 0 {
scheme := strings.ToLower(u.Scheme)
u.Host = rxPort.ReplaceAllStringFunc(u.Host, func(val string) string {
- if (scheme == "http" && val == defaultHttpPort) || (scheme == "https" && val == defaultHttpsPort) {
+ if (scheme == "http" && val == defaultHTTPPort) || (scheme == "https" && val == defaultHTTPSPort) {
return ""
}
return val
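NormalizeURL lives in an internal package, so it cannot be imported directly; the standalone sketch below mirrors the documented steps, including the newly added clearing of RawPath and RawFragment:

```go
package main

import (
	"fmt"
	"net/url"
	"strings"
)

func main() {
	u, _ := url.Parse("HTTPS://Example.COM:443//a//b#/definitions/Pet")

	// Lowercase scheme and host, drop the scheme's default port,
	// and collapse duplicate slashes, as the package documents.
	u.Scheme = strings.ToLower(u.Scheme)
	u.Host = strings.ToLower(u.Host)
	if (u.Scheme == "http" && strings.HasSuffix(u.Host, ":80")) ||
		(u.Scheme == "https" && strings.HasSuffix(u.Host, ":443")) {
		u.Host = u.Host[:strings.LastIndex(u.Host, ":")]
	}
	u.Path = strings.ReplaceAll(u.Path, "//", "/")

	// New behavior: reset the encoded forms so the URL round-trips
	// through its url-encoded representation.
	u.RawPath = ""
	u.RawFragment = ""

	fmt.Println(u.String()) // https://example.com/a/b#/definitions/Pet
}
```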
diff --git a/vendor/github.com/go-openapi/loads/.golangci.yml b/vendor/github.com/go-openapi/loads/.golangci.yml
index d48b4a51..22f8d21c 100644
--- a/vendor/github.com/go-openapi/loads/.golangci.yml
+++ b/vendor/github.com/go-openapi/loads/.golangci.yml
@@ -4,41 +4,58 @@ linters-settings:
golint:
min-confidence: 0
gocyclo:
- min-complexity: 30
+ min-complexity: 45
maligned:
suggest-new: true
dupl:
- threshold: 100
+ threshold: 200
goconst:
min-len: 2
- min-occurrences: 4
+ min-occurrences: 3
linters:
enable-all: true
disable:
- maligned
+ - unparam
- lll
- - gochecknoglobals
- gochecknoinits
+ - gochecknoglobals
+ - funlen
- godox
- gocognit
- whitespace
- wsl
- - funlen
- - gochecknoglobals
- - gochecknoinits
- - scopelint
- wrapcheck
- - exhaustivestruct
- - exhaustive
- - nlreturn
- testpackage
- - gci
- - gofumpt
- - goerr113
+ - nlreturn
- gomnd
- - tparallel
+ - exhaustivestruct
+ - goerr113
+ - errorlint
- nestif
- godot
- - errorlint
+ - gofumpt
- paralleltest
+ - tparallel
+ - thelper
+ - ifshort
+ - exhaustruct
+ - varnamelen
+ - gci
+ - depguard
+ - errchkjson
+ - inamedparam
+ - nonamedreturns
+ - musttag
+ - ireturn
+ - forcetypeassert
+ - cyclop
+ # deprecated linters
+ - deadcode
+ - interfacer
+ - scopelint
+ - varcheck
+ - structcheck
+ - golint
+ - nosnakecase
diff --git a/vendor/github.com/go-openapi/loads/README.md b/vendor/github.com/go-openapi/loads/README.md
index df1f6264..f8bd440d 100644
--- a/vendor/github.com/go-openapi/loads/README.md
+++ b/vendor/github.com/go-openapi/loads/README.md
@@ -1,4 +1,4 @@
-# Loads OAI specs [](https://travis-ci.org/go-openapi/loads) [](https://codecov.io/gh/go-openapi/loads) [](https://slackin.goswagger.io) [](https://github.com/go-openapi/loads/actions?query=workflow%3A"Go+Test")
+# Loads OAI specs [](https://github.com/go-openapi/loads/actions?query=workflow%3A"go+test") [](https://codecov.io/gh/go-openapi/loads)
[](https://raw.githubusercontent.com/go-openapi/loads/master/LICENSE) [](http://godoc.org/github.com/go-openapi/loads)
[](https://goreportcard.com/report/github.com/go-openapi/loads)
diff --git a/vendor/github.com/go-openapi/loads/doc.go b/vendor/github.com/go-openapi/loads/doc.go
index 3046da4c..5bcaef5d 100644
--- a/vendor/github.com/go-openapi/loads/doc.go
+++ b/vendor/github.com/go-openapi/loads/doc.go
@@ -12,10 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-/*
-Package loads provides document loading methods for swagger (OAI) specifications.
-
-It is used by other go-openapi packages to load and run analysis on local or remote spec documents.
-
-*/
+// Package loads provides document loading methods for swagger (OAI) specifications.
+//
+// It is used by other go-openapi packages to load and run analysis on local or remote spec documents.
package loads
diff --git a/vendor/github.com/go-openapi/loads/loaders.go b/vendor/github.com/go-openapi/loads/loaders.go
index 44bd32b5..b2d1e034 100644
--- a/vendor/github.com/go-openapi/loads/loaders.go
+++ b/vendor/github.com/go-openapi/loads/loaders.go
@@ -21,7 +21,7 @@ var (
func init() {
jsonLoader := &loader{
DocLoaderWithMatch: DocLoaderWithMatch{
- Match: func(pth string) bool {
+ Match: func(_ string) bool {
return true
},
Fn: JSONDoc,
@@ -86,7 +86,7 @@ func (l *loader) Load(path string) (json.RawMessage, error) {
return nil, erp
}
- var lastErr error = errors.New("no loader matched") // default error if no match was found
+ lastErr := errors.New("no loader matched") // default error if no match was found
for ldr := l; ldr != nil; ldr = ldr.Next {
if ldr.Match != nil && !ldr.Match(path) {
continue
@@ -118,9 +118,8 @@ func JSONDoc(path string) (json.RawMessage, error) {
// This sets the configuration at the package level.
//
// NOTE:
-// * this updates the default loader used by github.com/go-openapi/spec
-// * since this sets package level globals, you shouln't call this concurrently
-//
+// - this updates the default loader used by github.com/go-openapi/spec
+// - since this sets package level globals, you shouldn't call this concurrently
func AddLoader(predicate DocMatcher, load DocLoader) {
loaders = loaders.WithHead(&loader{
DocLoaderWithMatch: DocLoaderWithMatch{
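The `_ string` rename is cosmetic (the JSON matcher accepts any path), while AddLoader still prepends to the package-level loader chain. A hedged sketch of registering a custom loader; the ".custom" extension and file path are hypothetical:

```go
package main

import (
	"encoding/json"
	"os"
	"strings"

	"github.com/go-openapi/loads"
)

func main() {
	// Per the note above, this mutates package-level globals and
	// must not be called concurrently.
	loads.AddLoader(
		func(pth string) bool { return strings.HasSuffix(pth, ".custom") },
		func(pth string) (json.RawMessage, error) {
			b, err := os.ReadFile(pth)
			if err != nil {
				return nil, err
			}
			return json.RawMessage(b), nil
		},
	)

	doc, err := loads.Spec("swagger.custom") // hypothetical spec file
	if err != nil {
		panic(err)
	}
	_ = doc
}
```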
diff --git a/vendor/github.com/go-openapi/loads/spec.go b/vendor/github.com/go-openapi/loads/spec.go
index 93c8d4b8..c9039cd5 100644
--- a/vendor/github.com/go-openapi/loads/spec.go
+++ b/vendor/github.com/go-openapi/loads/spec.go
@@ -38,8 +38,8 @@ type Document struct {
specFilePath string
origSpec *spec.Swagger
schema *spec.Schema
- raw json.RawMessage
pathLoader *loader
+ raw json.RawMessage
}
// JSONSpec loads a spec from a json document
@@ -49,7 +49,14 @@ func JSONSpec(path string, options ...LoaderOption) (*Document, error) {
return nil, err
}
// convert to json
- return Analyzed(data, "", options...)
+ doc, err := Analyzed(data, "", options...)
+ if err != nil {
+ return nil, err
+ }
+
+ doc.specFilePath = path
+
+ return doc, nil
}
// Embedded returns a Document based on embedded specs. No analysis is required
@@ -71,7 +78,6 @@ func Embedded(orig, flat json.RawMessage, options ...LoaderOption) (*Document, e
// Spec loads a new spec document from a local or remote path
func Spec(path string, options ...LoaderOption) (*Document, error) {
-
ldr := loaderFromOptions(options)
b, err := ldr.Load(path)
@@ -84,12 +90,10 @@ func Spec(path string, options ...LoaderOption) (*Document, error) {
return nil, err
}
- if document != nil {
- document.specFilePath = path
- document.pathLoader = ldr
- }
+ document.specFilePath = path
+ document.pathLoader = ldr
- return document, err
+ return document, nil
}
// Analyzed creates a new analyzed spec document for a root json.RawMessage.
@@ -117,7 +121,7 @@ func Analyzed(data json.RawMessage, version string, options ...LoaderOption) (*D
}
d := &Document{
- Analyzer: analysis.New(swspec),
+ Analyzer: analysis.New(swspec), // NOTE: at this moment, analysis does not follow $refs to documents outside the root doc
schema: spec.MustLoadSwagger20Schema(),
spec: swspec,
raw: raw,
@@ -152,9 +156,8 @@ func trimData(in json.RawMessage) (json.RawMessage, error) {
return d, nil
}
-// Expanded expands the ref fields in the spec document and returns a new spec document
+// Expanded expands the $ref fields in the spec document and returns a new spec document
func (d *Document) Expanded(options ...*spec.ExpandOptions) (*Document, error) {
-
swspec := new(spec.Swagger)
if err := json.Unmarshal(d.raw, swspec); err != nil {
return nil, err
@@ -163,6 +166,9 @@ func (d *Document) Expanded(options ...*spec.ExpandOptions) (*Document, error) {
var expandOptions *spec.ExpandOptions
if len(options) > 0 {
expandOptions = options[0]
+ if expandOptions.RelativeBase == "" {
+ expandOptions.RelativeBase = d.specFilePath
+ }
} else {
expandOptions = &spec.ExpandOptions{
RelativeBase: d.specFilePath,
@@ -194,7 +200,7 @@ func (d *Document) Expanded(options ...*spec.ExpandOptions) (*Document, error) {
return dd, nil
}
-// BasePath the base path for this spec
+// BasePath the base path for the API specified by this spec
func (d *Document) BasePath() string {
return d.spec.BasePath
}
@@ -242,8 +248,11 @@ func (d *Document) ResetDefinitions() *Document {
// Pristine creates a new pristine document instance based on the input data
func (d *Document) Pristine() *Document {
- dd, _ := Analyzed(d.Raw(), d.Version())
+ raw, _ := json.Marshal(d.Spec())
+ dd, _ := Analyzed(raw, d.Version())
dd.pathLoader = d.pathLoader
+ dd.specFilePath = d.specFilePath
+
return dd
}
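Two behavioral changes fall out of this file: JSONSpec and Spec now record the spec's file path, and Expanded defaults ExpandOptions.RelativeBase to that path when the caller leaves it empty, so relative $refs resolve against the spec's own directory. A sketch, assuming ./swagger.yml exists:

```go
package main

import (
	"github.com/go-openapi/loads"
	"github.com/go-openapi/spec"
)

func main() {
	doc, err := loads.Spec("./swagger.yml") // hypothetical path
	if err != nil {
		panic(err)
	}

	// RelativeBase is left empty on purpose: it now defaults to the
	// document's own file path instead of breaking relative $refs.
	expanded, err := doc.Expanded(&spec.ExpandOptions{SkipSchemas: false})
	if err != nil {
		panic(err)
	}
	_ = expanded
}
```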
diff --git a/vendor/github.com/go-openapi/runtime/.golangci.yml b/vendor/github.com/go-openapi/runtime/.golangci.yml
index b1aa7928..1c75557b 100644
--- a/vendor/github.com/go-openapi/runtime/.golangci.yml
+++ b/vendor/github.com/go-openapi/runtime/.golangci.yml
@@ -1,44 +1,62 @@
linters-settings:
govet:
- # Using err repeatedly considered as shadowing.
- check-shadowing: false
+ check-shadowing: true
golint:
min-confidence: 0
gocyclo:
- min-complexity: 30
+ min-complexity: 45
maligned:
suggest-new: true
dupl:
- threshold: 100
+ threshold: 200
goconst:
min-len: 2
- min-occurrences: 4
+ min-occurrences: 3
+
linters:
+ enable-all: true
disable:
+ - nilerr # nilerr crashes on this repo
- maligned
+ - unparam
- lll
+ - gochecknoinits
- gochecknoglobals
+ - funlen
- godox
- gocognit
- whitespace
- wsl
- - funlen
- - gochecknoglobals
- - gochecknoinits
- - scopelint
- wrapcheck
- - exhaustivestruct
- - exhaustive
- - nlreturn
- testpackage
- - gci
- - gofumpt
- - goerr113
+ - nlreturn
- gomnd
- - tparallel
+ - exhaustivestruct
+ - goerr113
+ - errorlint
- nestif
- godot
- - errorlint
- - noctx
+ - gofumpt
+ - paralleltest
+ - tparallel
+ - thelper
+ - ifshort
+ - exhaustruct
+ - varnamelen
+ - gci
+ - depguard
+ - errchkjson
+ - inamedparam
+ - nonamedreturns
+ - musttag
+ - ireturn
+ - forcetypeassert
+ - cyclop
+ # deprecated linters
+ - deadcode
- interfacer
- - nilerr
+ - scopelint
+ - varcheck
+ - structcheck
+ - golint
+ - nosnakecase
diff --git a/vendor/github.com/go-openapi/runtime/README.md b/vendor/github.com/go-openapi/runtime/README.md
index 5b1ec649..b07e0ad9 100644
--- a/vendor/github.com/go-openapi/runtime/README.md
+++ b/vendor/github.com/go-openapi/runtime/README.md
@@ -1,7 +1,10 @@
-# runtime [](https://travis-ci.org/go-openapi/runtime) [](https://codecov.io/gh/go-openapi/runtime) [](https://slackin.goswagger.io)
+# runtime [](https://github.com/go-openapi/runtime/actions?query=workflow%3A"go+test") [](https://codecov.io/gh/go-openapi/runtime)
-[](https://raw.githubusercontent.com/go-openapi/runtime/master/LICENSE) [](http://godoc.org/github.com/go-openapi/runtime)
+[](https://slackin.goswagger.io)
+[](https://raw.githubusercontent.com/go-openapi/runtime/master/LICENSE)
+[](https://pkg.go.dev/github.com/go-openapi/runtime)
+[](https://goreportcard.com/report/github.com/go-openapi/runtime)
-# golang Open-API toolkit - runtime
+# go OpenAPI toolkit runtime
-The runtime component for use in codegeneration or as untyped usage.
+The runtime component for use in code generation or for untyped usage.
diff --git a/vendor/github.com/go-openapi/runtime/bytestream.go b/vendor/github.com/go-openapi/runtime/bytestream.go
index 6eb6ceb5..f8fb4822 100644
--- a/vendor/github.com/go-openapi/runtime/bytestream.go
+++ b/vendor/github.com/go-openapi/runtime/bytestream.go
@@ -38,9 +38,16 @@ type byteStreamOpts struct {
Close bool
}
-// ByteStreamConsumer creates a consumer for byte streams,
-// takes a Writer/BinaryUnmarshaler interface or binary slice by reference,
-// and reads from the provided reader
+// ByteStreamConsumer creates a consumer for byte streams.
+//
+// The consumer consumes from a provided reader into the data passed by reference.
+//
+// Supported output underlying types and interfaces, prioritized in this order:
+// - io.ReaderFrom (for maximum control)
+// - io.Writer (performs io.Copy)
+// - encoding.BinaryUnmarshaler
+// - *string
+// - *[]byte
func ByteStreamConsumer(opts ...byteStreamOpt) Consumer {
var vals byteStreamOpts
for _, opt := range opts {
@@ -51,44 +58,70 @@ func ByteStreamConsumer(opts ...byteStreamOpt) Consumer {
if reader == nil {
return errors.New("ByteStreamConsumer requires a reader") // early exit
}
+ if data == nil {
+ return errors.New("nil destination for ByteStreamConsumer")
+ }
- close := defaultCloser
+ closer := defaultCloser
if vals.Close {
- if cl, ok := reader.(io.Closer); ok {
- close = cl.Close
+ if cl, isReaderCloser := reader.(io.Closer); isReaderCloser {
+ closer = cl.Close
}
}
- //nolint:errcheck // closing a reader wouldn't fail.
- defer close()
+ defer func() {
+ _ = closer()
+ }()
- if wrtr, ok := data.(io.Writer); ok {
- _, err := io.Copy(wrtr, reader)
+ if readerFrom, isReaderFrom := data.(io.ReaderFrom); isReaderFrom {
+ _, err := readerFrom.ReadFrom(reader)
return err
}
- buf := new(bytes.Buffer)
+ if writer, isDataWriter := data.(io.Writer); isDataWriter {
+ _, err := io.Copy(writer, reader)
+ return err
+ }
+
+ // buffers input before writing to data
+ var buf bytes.Buffer
_, err := buf.ReadFrom(reader)
if err != nil {
return err
}
b := buf.Bytes()
- if bu, ok := data.(encoding.BinaryUnmarshaler); ok {
- return bu.UnmarshalBinary(b)
- }
+ switch destinationPointer := data.(type) {
+ case encoding.BinaryUnmarshaler:
+ return destinationPointer.UnmarshalBinary(b)
+ case *any:
+ switch (*destinationPointer).(type) {
+ case string:
+ *destinationPointer = string(b)
+
+ return nil
+
+ case []byte:
+ *destinationPointer = b
- if data != nil {
- if str, ok := data.(*string); ok {
- *str = string(b)
return nil
}
- }
+ default:
+			// check for the underlying type to be a pointer to []byte or string
+ if ptr := reflect.TypeOf(data); ptr.Kind() != reflect.Ptr {
+ return errors.New("destination must be a pointer")
+ }
- if t := reflect.TypeOf(data); data != nil && t.Kind() == reflect.Ptr {
v := reflect.Indirect(reflect.ValueOf(data))
- if t = v.Type(); t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Uint8 {
+ t := v.Type()
+
+ switch {
+ case t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Uint8:
v.SetBytes(b)
return nil
+
+ case t.Kind() == reflect.String:
+ v.SetString(string(b))
+ return nil
}
}
@@ -97,67 +130,87 @@ func ByteStreamConsumer(opts ...byteStreamOpt) Consumer {
})
}
-// ByteStreamProducer creates a producer for byte streams,
-// takes a Reader/BinaryMarshaler interface or binary slice,
-// and writes to a writer (essentially a pipe)
+// ByteStreamProducer creates a producer for byte streams.
+//
+// The producer takes input data then writes to an output writer (essentially as a pipe).
+//
+// Supported input underlying types and interfaces, prioritized in this order:
+// - io.WriterTo (for maximum control)
+// - io.Reader (performs io.Copy). A ReadCloser is closed before exiting.
+// - encoding.BinaryMarshaler
+// - error (writes as a string)
+// - []byte
+// - string
+// - struct, other slices: writes as JSON
func ByteStreamProducer(opts ...byteStreamOpt) Producer {
var vals byteStreamOpts
for _, opt := range opts {
opt(&vals)
}
+
return ProducerFunc(func(writer io.Writer, data interface{}) error {
if writer == nil {
return errors.New("ByteStreamProducer requires a writer") // early exit
}
- close := defaultCloser
+ if data == nil {
+ return errors.New("nil data for ByteStreamProducer")
+ }
+
+ closer := defaultCloser
if vals.Close {
- if cl, ok := writer.(io.Closer); ok {
- close = cl.Close
+ if cl, isWriterCloser := writer.(io.Closer); isWriterCloser {
+ closer = cl.Close
}
}
- //nolint:errcheck // TODO: closing a writer would fail.
- defer close()
+ defer func() {
+ _ = closer()
+ }()
- if rc, ok := data.(io.ReadCloser); ok {
+ if rc, isDataCloser := data.(io.ReadCloser); isDataCloser {
defer rc.Close()
}
- if rdr, ok := data.(io.Reader); ok {
- _, err := io.Copy(writer, rdr)
+ switch origin := data.(type) {
+ case io.WriterTo:
+ _, err := origin.WriteTo(writer)
return err
- }
- if bm, ok := data.(encoding.BinaryMarshaler); ok {
- bytes, err := bm.MarshalBinary()
+ case io.Reader:
+ _, err := io.Copy(writer, origin)
+ return err
+
+ case encoding.BinaryMarshaler:
+ bytes, err := origin.MarshalBinary()
if err != nil {
return err
}
_, err = writer.Write(bytes)
return err
- }
- if data != nil {
- if str, ok := data.(string); ok {
- _, err := writer.Write([]byte(str))
- return err
- }
-
- if e, ok := data.(error); ok {
- _, err := writer.Write([]byte(e.Error()))
- return err
- }
+ case error:
+ _, err := writer.Write([]byte(origin.Error()))
+ return err
+ default:
v := reflect.Indirect(reflect.ValueOf(data))
- if t := v.Type(); t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Uint8 {
+ t := v.Type()
+
+ switch {
+ case t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Uint8:
_, err := writer.Write(v.Bytes())
return err
- }
- if t := v.Type(); t.Kind() == reflect.Struct || t.Kind() == reflect.Slice {
+
+ case t.Kind() == reflect.String:
+ _, err := writer.Write([]byte(v.String()))
+ return err
+
+ case t.Kind() == reflect.Struct || t.Kind() == reflect.Slice:
b, err := swag.WriteJSON(data)
if err != nil {
return err
}
+
_, err = writer.Write(b)
return err
}
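A short sketch of the consumer side, exercising two of the newly documented destination types; io.ReaderFrom now takes priority, so a *bytes.Buffer is filled without intermediate buffering:

```go
package main

import (
	"bytes"
	"fmt"
	"strings"

	"github.com/go-openapi/runtime"
)

func main() {
	consumer := runtime.ByteStreamConsumer()

	// *bytes.Buffer implements io.ReaderFrom, the highest-priority branch.
	var buf bytes.Buffer
	if err := consumer.Consume(strings.NewReader("payload"), &buf); err != nil {
		panic(err)
	}
	fmt.Println(buf.String()) // payload

	// A *string destination receives the raw bytes via the reflect path.
	var s string
	if err := consumer.Consume(strings.NewReader("payload"), &s); err != nil {
		panic(err)
	}
	fmt.Println(s) // payload
}
```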
diff --git a/vendor/github.com/go-openapi/runtime/client/keepalive.go b/vendor/github.com/go-openapi/runtime/client/keepalive.go
index bc7b7fa4..7dd6b51c 100644
--- a/vendor/github.com/go-openapi/runtime/client/keepalive.go
+++ b/vendor/github.com/go-openapi/runtime/client/keepalive.go
@@ -48,8 +48,7 @@ func (d *drainingReadCloser) Close() error {
// If the reader side (a HTTP server) is misbehaving, it still may send
// some bytes, but the closer ignores them to keep the underling
// connection open.
- //nolint:errcheck
- io.Copy(io.Discard, d.rdr)
+ _, _ = io.Copy(io.Discard, d.rdr)
}
return d.rdr.Close()
}
diff --git a/vendor/github.com/go-openapi/runtime/client/opentelemetry.go b/vendor/github.com/go-openapi/runtime/client/opentelemetry.go
index 8a38ea3e..256cd1b4 100644
--- a/vendor/github.com/go-openapi/runtime/client/opentelemetry.go
+++ b/vendor/github.com/go-openapi/runtime/client/opentelemetry.go
@@ -11,7 +11,8 @@ import (
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
"go.opentelemetry.io/otel/propagation"
- semconv "go.opentelemetry.io/otel/semconv/v1.12.0"
+ semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
+ "go.opentelemetry.io/otel/semconv/v1.17.0/httpconv"
"go.opentelemetry.io/otel/trace"
)
@@ -131,8 +132,11 @@ func (t *openTelemetryTransport) Submit(op *runtime.ClientOperation) (interface{
op.Reader = runtime.ClientResponseReaderFunc(func(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
if span != nil {
statusCode := response.Code()
- span.SetAttributes(attribute.Int(string(semconv.HTTPStatusCodeKey), statusCode))
- span.SetStatus(semconv.SpanStatusFromHTTPStatusCodeAndSpanKind(statusCode, trace.SpanKindClient))
+ // NOTE: this is replaced by semconv.HTTPResponseStatusCode in semconv v1.21
+ span.SetAttributes(semconv.HTTPStatusCode(statusCode))
+ // NOTE: the conversion from HTTP status code to trace code is no longer available with
+ // semconv v1.21
+ span.SetStatus(httpconv.ServerStatus(statusCode))
}
return reader.ReadResponse(response, consumer)
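The replacement type-checks because httpconv.ServerStatus returns the (code, description) pair that span.SetStatus expects. A minimal sketch of the mapping on its own; exact messages depend on the semconv version:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/semconv/v1.17.0/httpconv"
)

func main() {
	// 5xx maps to an Error span status; the description is typically
	// empty for valid status codes.
	code, msg := httpconv.ServerStatus(502)
	fmt.Println(code, msg)
}
```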
diff --git a/vendor/github.com/go-openapi/runtime/client/request.go b/vendor/github.com/go-openapi/runtime/client/request.go
index 4c00ed3a..c4a891d0 100644
--- a/vendor/github.com/go-openapi/runtime/client/request.go
+++ b/vendor/github.com/go-openapi/runtime/client/request.go
@@ -16,6 +16,7 @@ package client
import (
"bytes"
+ "context"
"fmt"
"io"
"log"
@@ -35,7 +36,7 @@ import (
)
// NewRequest creates a new swagger http client request
-func newRequest(method, pathPattern string, writer runtime.ClientRequestWriter) (*request, error) {
+func newRequest(method, pathPattern string, writer runtime.ClientRequestWriter) *request {
return &request{
pathPattern: pathPattern,
method: method,
@@ -44,7 +45,7 @@ func newRequest(method, pathPattern string, writer runtime.ClientRequestWriter)
query: make(url.Values),
timeout: DefaultTimeout,
getBody: getRequestBuffer,
- }, nil
+ }
}
// Request represents a swagger client request.
@@ -102,7 +103,7 @@ func logClose(err error, pw *io.PipeWriter) {
}
}
-func (r *request) buildHTTP(mediaType, basePath string, producers map[string]runtime.Producer, registry strfmt.Registry, auth runtime.ClientAuthInfoWriter) (*http.Request, error) {
+func (r *request) buildHTTP(mediaType, basePath string, producers map[string]runtime.Producer, registry strfmt.Registry, auth runtime.ClientAuthInfoWriter) (*http.Request, error) { //nolint:gocyclo,maintidx
// build the data
if err := r.writer.WriteToRequest(r, registry); err != nil {
return nil, err
@@ -170,7 +171,7 @@ func (r *request) buildHTTP(mediaType, basePath string, producers map[string]run
// Need to read the data so that we can detect the content type
buf := make([]byte, 512)
size, err := fi.Read(buf)
- if err != nil {
+ if err != nil && err != io.EOF {
logClose(err, pw)
return
}
@@ -317,13 +318,13 @@ DoneChoosingBodySource:
urlPath := path.Join(basePathURL.Path, pathPatternURL.Path)
for k, v := range r.pathParams {
- urlPath = strings.Replace(urlPath, "{"+k+"}", url.PathEscape(v), -1)
+ urlPath = strings.ReplaceAll(urlPath, "{"+k+"}", url.PathEscape(v))
}
if reinstateSlash {
- urlPath = urlPath + "/"
+ urlPath += "/"
}
- req, err := http.NewRequest(r.method, urlPath, body)
+ req, err := http.NewRequestWithContext(context.Background(), r.method, urlPath, body)
if err != nil {
return nil, err
}
@@ -361,7 +362,7 @@ func (r *request) GetMethod() string {
func (r *request) GetPath() string {
path := r.pathPattern
for k, v := range r.pathParams {
- path = strings.Replace(path, "{"+k+"}", v, -1)
+ path = strings.ReplaceAll(path, "{"+k+"}", v)
}
return path
}
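The strings.ReplaceAll switch is behavior-preserving (strings.Replace with n = -1 already replaced every occurrence). A standalone mirror of the placeholder substitution, with hypothetical parameters:

```go
package main

import (
	"fmt"
	"net/url"
	"strings"
)

func main() {
	pathPattern := "/repos/{owner}/{repo}/runners/{id}"
	pathParams := map[string]string{"owner": "cloudbase", "repo": "garm", "id": "42"}

	// Each "{name}" placeholder is replaced by its escaped value,
	// exactly as buildHTTP does above.
	urlPath := pathPattern
	for k, v := range pathParams {
		urlPath = strings.ReplaceAll(urlPath, "{"+k+"}", url.PathEscape(v))
	}
	fmt.Println(urlPath) // /repos/cloudbase/garm/runners/42
}
```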
diff --git a/vendor/github.com/go-openapi/runtime/client/runtime.go b/vendor/github.com/go-openapi/runtime/client/runtime.go
index ccec0413..5bd4d75d 100644
--- a/vendor/github.com/go-openapi/runtime/client/runtime.go
+++ b/vendor/github.com/go-openapi/runtime/client/runtime.go
@@ -22,6 +22,7 @@ import (
"crypto/tls"
"crypto/x509"
"encoding/pem"
+ "errors"
"fmt"
"mime"
"net/http"
@@ -31,13 +32,18 @@ import (
"sync"
"time"
+ "github.com/go-openapi/strfmt"
"github.com/opentracing/opentracing-go"
"github.com/go-openapi/runtime"
"github.com/go-openapi/runtime/logger"
"github.com/go-openapi/runtime/middleware"
"github.com/go-openapi/runtime/yamlpc"
- "github.com/go-openapi/strfmt"
+)
+
+const (
+ schemeHTTP = "http"
+ schemeHTTPS = "https"
)
// TLSClientOptions to configure client authentication with mutual TLS
@@ -70,7 +76,7 @@ type TLSClientOptions struct {
LoadedCA *x509.Certificate
// LoadedCAPool specifies a pool of RootCAs to use when validating the server's TLS certificate.
- // If set, it will be combined with the the other loaded certificates (see LoadedCA and CA).
+ // If set, it will be combined with the other loaded certificates (see LoadedCA and CA).
// If neither LoadedCA or CA is set, the provided pool with override the system
// certificate pool.
// The caller must not use the supplied pool after calling TLSClientAuth.
@@ -112,7 +118,9 @@ type TLSClientOptions struct {
// TLSClientAuth creates a tls.Config for mutual auth
func TLSClientAuth(opts TLSClientOptions) (*tls.Config, error) {
// create client tls config
- cfg := &tls.Config{}
+ cfg := &tls.Config{
+ MinVersion: tls.VersionTLS12,
+ }
// load client cert if specified
if opts.Certificate != "" {
@@ -136,7 +144,7 @@ func TLSClientAuth(opts TLSClientOptions) (*tls.Config, error) {
return nil, fmt.Errorf("tls client priv key: %v", err)
}
default:
- return nil, fmt.Errorf("tls client priv key: unsupported key type")
+ return nil, errors.New("tls client priv key: unsupported key type")
}
block = pem.Block{Type: "PRIVATE KEY", Bytes: keyBytes}
@@ -158,11 +166,12 @@ func TLSClientAuth(opts TLSClientOptions) (*tls.Config, error) {
// When no CA certificate is provided, default to the system cert pool
// that way when a request is made to a server known by the system trust store,
// the name is still verified
- if opts.LoadedCA != nil {
+ switch {
+ case opts.LoadedCA != nil:
caCertPool := basePool(opts.LoadedCAPool)
caCertPool.AddCert(opts.LoadedCA)
cfg.RootCAs = caCertPool
- } else if opts.CA != "" {
+ case opts.CA != "":
// load ca cert
caCert, err := os.ReadFile(opts.CA)
if err != nil {
@@ -171,7 +180,7 @@ func TLSClientAuth(opts TLSClientOptions) (*tls.Config, error) {
caCertPool := basePool(opts.LoadedCAPool)
caCertPool.AppendCertsFromPEM(caCert)
cfg.RootCAs = caCertPool
- } else if opts.LoadedCAPool != nil {
+ case opts.LoadedCAPool != nil:
cfg.RootCAs = opts.LoadedCAPool
}
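A hedged sketch of the resulting behavior: supplying only LoadedCAPool now falls through to the final case, and every config starts from a TLS 1.2 floor:

```go
package main

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"

	apiclient "github.com/go-openapi/runtime/client"
)

func main() {
	// An empty pool stands in for a pre-loaded CA pool here.
	pool := x509.NewCertPool()

	cfg, err := apiclient.TLSClientAuth(apiclient.TLSClientOptions{
		LoadedCAPool: pool,
	})
	if err != nil {
		panic(err)
	}

	fmt.Println(cfg.RootCAs == pool)                // true: pool used as-is
	fmt.Println(cfg.MinVersion == tls.VersionTLS12) // true: new minimum
}
```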
@@ -227,7 +236,7 @@ type Runtime struct {
Host string
BasePath string
Formats strfmt.Registry
- Context context.Context
+ Context context.Context //nolint:containedctx // we precisely want this type to contain the request context
Debug bool
logger logger.Logger
@@ -316,7 +325,7 @@ func (r *Runtime) pickScheme(schemes []string) string {
if v := r.selectScheme(schemes); v != "" {
return v
}
- return "http"
+ return schemeHTTP
}
func (r *Runtime) selectScheme(schemes []string) string {
@@ -327,9 +336,9 @@ func (r *Runtime) selectScheme(schemes []string) string {
scheme := schemes[0]
// prefer https, but skip when not possible
- if scheme != "https" && schLen > 1 {
+ if scheme != schemeHTTPS && schLen > 1 {
for _, sch := range schemes {
- if sch == "https" {
+ if sch == schemeHTTPS {
scheme = sch
break
}
@@ -368,17 +377,14 @@ func (r *Runtime) EnableConnectionReuse() {
}
// takes a client operation and creates equivalent http.Request
-func (r *Runtime) createHttpRequest(operation *runtime.ClientOperation) (*request, *http.Request, error) {
+func (r *Runtime) createHttpRequest(operation *runtime.ClientOperation) (*request, *http.Request, error) { //nolint:revive,stylecheck
params, _, auth := operation.Params, operation.Reader, operation.AuthInfo
- request, err := newRequest(operation.Method, operation.PathPattern, params)
- if err != nil {
- return nil, nil, err
- }
+ request := newRequest(operation.Method, operation.PathPattern, params)
var accept []string
accept = append(accept, operation.ProducesMediaTypes...)
- if err = request.SetHeaderParam(runtime.HeaderAccept, accept...); err != nil {
+ if err := request.SetHeaderParam(runtime.HeaderAccept, accept...); err != nil {
return nil, nil, err
}
@@ -420,7 +426,7 @@ func (r *Runtime) createHttpRequest(operation *runtime.ClientOperation) (*reques
return request, req, nil
}
-func (r *Runtime) CreateHttpRequest(operation *runtime.ClientOperation) (req *http.Request, err error) {
+func (r *Runtime) CreateHttpRequest(operation *runtime.ClientOperation) (req *http.Request, err error) { //nolint:revive,stylecheck
_, req, err = r.createHttpRequest(operation)
return
}
@@ -450,27 +456,36 @@ func (r *Runtime) Submit(operation *runtime.ClientOperation) (interface{}, error
r.logger.Debugf("%s\n", string(b))
}
- var hasTimeout bool
- pctx := operation.Context
- if pctx == nil {
- pctx = r.Context
- } else {
- hasTimeout = true
+ var parentCtx context.Context
+ switch {
+ case operation.Context != nil:
+ parentCtx = operation.Context
+ case r.Context != nil:
+ parentCtx = r.Context
+ default:
+ parentCtx = context.Background()
}
- if pctx == nil {
- pctx = context.Background()
- }
- var ctx context.Context
- var cancel context.CancelFunc
- if hasTimeout {
- ctx, cancel = context.WithCancel(pctx)
+
+ var (
+ ctx context.Context
+ cancel context.CancelFunc
+ )
+ if request.timeout == 0 {
+ // There may be a deadline in the context passed to the operation.
+ // Otherwise, there is no timeout set.
+ ctx, cancel = context.WithCancel(parentCtx)
} else {
- ctx, cancel = context.WithTimeout(pctx, request.timeout)
+ // Sets the timeout passed from request params (by default runtime.DefaultTimeout).
+ // If there is already a deadline in the parent context, the shortest will
+ // apply.
+ ctx, cancel = context.WithTimeout(parentCtx, request.timeout)
}
defer cancel()
- client := operation.Client
- if client == nil {
+ var client *http.Client
+ if operation.Client != nil {
+ client = operation.Client
+ } else {
client = r.client
}
req = req.WithContext(ctx)
@@ -481,7 +496,7 @@ func (r *Runtime) Submit(operation *runtime.ClientOperation) (interface{}, error
defer res.Body.Close()
ct := res.Header.Get(runtime.HeaderContentType)
- if ct == "" { // this should really really never occur
+ if ct == "" { // this should really never occur
ct = r.DefaultMediaType
}
@@ -526,7 +541,7 @@ func (r *Runtime) SetLogger(logger logger.Logger) {
middleware.Logger = logger
}
-type ClientResponseFunc = func(*http.Response) runtime.ClientResponse
+type ClientResponseFunc = func(*http.Response) runtime.ClientResponse //nolint:revive
// SetResponseReader changes the response reader implementation.
func (r *Runtime) SetResponseReader(f ClientResponseFunc) {
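The reworked Submit logic picks the parent context explicitly and only applies a timeout when the request carries one; when both the parent deadline and the request timeout are set, the shorter wins. A stdlib-only sketch of that selection:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

func main() {
	// Parent carries a 5s deadline; the request asks for 30s.
	parent, cancelParent := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancelParent()

	requestTimeout := 30 * time.Second

	var (
		ctx    context.Context
		cancel context.CancelFunc
	)
	if requestTimeout == 0 {
		// No request timeout: defer to whatever deadline the parent has.
		ctx, cancel = context.WithCancel(parent)
	} else {
		// context.WithTimeout keeps the earlier of the two deadlines.
		ctx, cancel = context.WithTimeout(parent, requestTimeout)
	}
	defer cancel()

	deadline, _ := ctx.Deadline()
	fmt.Println(time.Until(deadline) <= 5*time.Second) // true: parent wins
}
```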
diff --git a/vendor/github.com/go-openapi/runtime/client_operation.go b/vendor/github.com/go-openapi/runtime/client_operation.go
index fa21eacf..5a5d6356 100644
--- a/vendor/github.com/go-openapi/runtime/client_operation.go
+++ b/vendor/github.com/go-openapi/runtime/client_operation.go
@@ -30,12 +30,12 @@ type ClientOperation struct {
AuthInfo ClientAuthInfoWriter
Params ClientRequestWriter
Reader ClientResponseReader
- Context context.Context
+ Context context.Context //nolint:containedctx // we precisely want this type to contain the request context
Client *http.Client
}
// A ClientTransport implementor knows how to submit Request objects to some destination
type ClientTransport interface {
- //Submit(string, RequestWriter, ResponseReader, AuthInfoWriter) (interface{}, error)
+ // Submit(string, RequestWriter, ResponseReader, AuthInfoWriter) (interface{}, error)
Submit(*ClientOperation) (interface{}, error)
}
diff --git a/vendor/github.com/go-openapi/runtime/client_request.go b/vendor/github.com/go-openapi/runtime/client_request.go
index d4d2b58f..4ebb2dea 100644
--- a/vendor/github.com/go-openapi/runtime/client_request.go
+++ b/vendor/github.com/go-openapi/runtime/client_request.go
@@ -37,8 +37,8 @@ type ClientRequestWriter interface {
}
// ClientRequest is an interface for things that know how to
-// add information to a swagger client request
-type ClientRequest interface {
+// add information to a swagger client request.
+type ClientRequest interface { //nolint:interfacebloat // a swagger-capable request is quite rich, hence the many getter/setters
SetHeaderParam(string, ...string) error
GetHeaderParams() http.Header
diff --git a/vendor/github.com/go-openapi/runtime/csv.go b/vendor/github.com/go-openapi/runtime/csv.go
index d807bd91..c9597bcd 100644
--- a/vendor/github.com/go-openapi/runtime/csv.go
+++ b/vendor/github.com/go-openapi/runtime/csv.go
@@ -16,62 +16,335 @@ package runtime
import (
"bytes"
+ "context"
+ "encoding"
"encoding/csv"
"errors"
+ "fmt"
"io"
+ "reflect"
+
+ "golang.org/x/sync/errgroup"
)
-// CSVConsumer creates a new CSV consumer
-func CSVConsumer() Consumer {
+// CSVConsumer creates a new CSV consumer.
+//
+// The consumer consumes CSV records from a provided reader into the data passed by reference.
+//
+// CSVOpt options may be specified to alter the default CSV behavior on the reader and the writer side (e.g. separator, skip header, ...).
+// The defaults are those of the standard library's csv.Reader and csv.Writer.
+//
+// Supported output underlying types and interfaces, prioritized in this order:
+// - *csv.Writer
+// - CSVWriter (writer options are ignored)
+// - io.Writer (as raw bytes)
+// - io.ReaderFrom (as raw bytes)
+// - encoding.BinaryUnmarshaler (as raw bytes)
+// - *[][]string (as a collection of records)
+// - *[]byte (as raw bytes)
+// - *string (as raw bytes)
+//
+// The consumer prioritizes situations where buffering the input is not required.
+func CSVConsumer(opts ...CSVOpt) Consumer {
+ o := csvOptsWithDefaults(opts)
+
return ConsumerFunc(func(reader io.Reader, data interface{}) error {
if reader == nil {
return errors.New("CSVConsumer requires a reader")
}
+ if data == nil {
+ return errors.New("nil destination for CSVConsumer")
+ }
csvReader := csv.NewReader(reader)
- writer, ok := data.(io.Writer)
- if !ok {
- return errors.New("data type must be io.Writer")
- }
- csvWriter := csv.NewWriter(writer)
- records, err := csvReader.ReadAll()
- if err != nil {
- return err
- }
- for _, r := range records {
- if err := csvWriter.Write(r); err != nil {
- return err
+ o.applyToReader(csvReader)
+ closer := defaultCloser
+ if o.closeStream {
+ if cl, isReaderCloser := reader.(io.Closer); isReaderCloser {
+ closer = cl.Close
+ }
+ }
+ defer func() {
+ _ = closer()
+ }()
+
+ switch destination := data.(type) {
+ case *csv.Writer:
+ csvWriter := destination
+ o.applyToWriter(csvWriter)
+
+ return pipeCSV(csvWriter, csvReader, o)
+
+ case CSVWriter:
+ csvWriter := destination
+ // no writer options available
+
+ return pipeCSV(csvWriter, csvReader, o)
+
+ case io.Writer:
+ csvWriter := csv.NewWriter(destination)
+ o.applyToWriter(csvWriter)
+
+ return pipeCSV(csvWriter, csvReader, o)
+
+ case io.ReaderFrom:
+ var buf bytes.Buffer
+ csvWriter := csv.NewWriter(&buf)
+ o.applyToWriter(csvWriter)
+ if err := bufferedCSV(csvWriter, csvReader, o); err != nil {
+ return err
+ }
+ _, err := destination.ReadFrom(&buf)
+
+ return err
+
+ case encoding.BinaryUnmarshaler:
+ var buf bytes.Buffer
+ csvWriter := csv.NewWriter(&buf)
+ o.applyToWriter(csvWriter)
+ if err := bufferedCSV(csvWriter, csvReader, o); err != nil {
+ return err
+ }
+
+ return destination.UnmarshalBinary(buf.Bytes())
+
+ default:
+ // support *[][]string, *[]byte, *string
+ if ptr := reflect.TypeOf(data); ptr.Kind() != reflect.Ptr {
+ return errors.New("destination must be a pointer")
+ }
+
+ v := reflect.Indirect(reflect.ValueOf(data))
+ t := v.Type()
+
+ switch {
+ case t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Slice && t.Elem().Elem().Kind() == reflect.String:
+ csvWriter := &csvRecordsWriter{}
+ // writer options are ignored
+ if err := pipeCSV(csvWriter, csvReader, o); err != nil {
+ return err
+ }
+
+ v.Grow(len(csvWriter.records))
+ v.SetCap(len(csvWriter.records)) // in case Grow was unnecessary, trim down the capacity
+ v.SetLen(len(csvWriter.records))
+ reflect.Copy(v, reflect.ValueOf(csvWriter.records))
+
+ return nil
+
+ case t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Uint8:
+ var buf bytes.Buffer
+ csvWriter := csv.NewWriter(&buf)
+ o.applyToWriter(csvWriter)
+ if err := bufferedCSV(csvWriter, csvReader, o); err != nil {
+ return err
+ }
+ v.SetBytes(buf.Bytes())
+
+ return nil
+
+ case t.Kind() == reflect.String:
+ var buf bytes.Buffer
+ csvWriter := csv.NewWriter(&buf)
+ o.applyToWriter(csvWriter)
+ if err := bufferedCSV(csvWriter, csvReader, o); err != nil {
+ return err
+ }
+ v.SetString(buf.String())
+
+ return nil
+
+ default:
+ return fmt.Errorf("%v (%T) is not supported by the CSVConsumer, %s",
+ data, data, "can be resolved by supporting CSVWriter/Writer/BinaryUnmarshaler interface",
+ )
}
}
- csvWriter.Flush()
- return nil
})
}
-// CSVProducer creates a new CSV producer
-func CSVProducer() Producer {
+// CSVProducer creates a new CSV producer.
+//
+// The producer takes input data then writes as CSV to an output writer (essentially as a pipe).
+//
+// Supported input underlying types and interfaces, prioritized in this order:
+// - *csv.Reader
+// - CSVReader (reader options are ignored)
+// - io.Reader
+// - io.WriterTo
+// - encoding.BinaryMarshaler
+// - [][]string
+// - []byte
+// - string
+//
+// The producer prioritizes situations where buffering the input is not required.
+func CSVProducer(opts ...CSVOpt) Producer {
+ o := csvOptsWithDefaults(opts)
+
return ProducerFunc(func(writer io.Writer, data interface{}) error {
if writer == nil {
return errors.New("CSVProducer requires a writer")
}
-
- dataBytes, ok := data.([]byte)
- if !ok {
- return errors.New("data type must be byte array")
+ if data == nil {
+ return errors.New("nil data for CSVProducer")
}
- csvReader := csv.NewReader(bytes.NewBuffer(dataBytes))
- records, err := csvReader.ReadAll()
- if err != nil {
- return err
- }
csvWriter := csv.NewWriter(writer)
- for _, r := range records {
- if err := csvWriter.Write(r); err != nil {
- return err
+ o.applyToWriter(csvWriter)
+ closer := defaultCloser
+ if o.closeStream {
+ if cl, isWriterCloser := writer.(io.Closer); isWriterCloser {
+ closer = cl.Close
+ }
+ }
+ defer func() {
+ _ = closer()
+ }()
+
+ if rc, isDataCloser := data.(io.ReadCloser); isDataCloser {
+ defer rc.Close()
+ }
+
+ switch origin := data.(type) {
+ case *csv.Reader:
+ csvReader := origin
+ o.applyToReader(csvReader)
+
+ return pipeCSV(csvWriter, csvReader, o)
+
+ case CSVReader:
+ csvReader := origin
+ // no reader options available
+
+ return pipeCSV(csvWriter, csvReader, o)
+
+ case io.Reader:
+ csvReader := csv.NewReader(origin)
+ o.applyToReader(csvReader)
+
+ return pipeCSV(csvWriter, csvReader, o)
+
+ case io.WriterTo:
+ // async piping of the writes performed by WriteTo
+ r, w := io.Pipe()
+ csvReader := csv.NewReader(r)
+ o.applyToReader(csvReader)
+
+ pipe, _ := errgroup.WithContext(context.Background())
+ pipe.Go(func() error {
+ _, err := origin.WriteTo(w)
+ _ = w.Close()
+ return err
+ })
+
+ pipe.Go(func() error {
+ defer func() {
+ _ = r.Close()
+ }()
+
+ return pipeCSV(csvWriter, csvReader, o)
+ })
+
+ return pipe.Wait()
+
+ case encoding.BinaryMarshaler:
+ buf, err := origin.MarshalBinary()
+ if err != nil {
+ return err
+ }
+ rdr := bytes.NewBuffer(buf)
+ csvReader := csv.NewReader(rdr)
+
+ return bufferedCSV(csvWriter, csvReader, o)
+
+ default:
+ // support [][]string, []byte, string (or pointers to those)
+ v := reflect.Indirect(reflect.ValueOf(data))
+ t := v.Type()
+
+ switch {
+ case t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Slice && t.Elem().Elem().Kind() == reflect.String:
+ csvReader := &csvRecordsWriter{
+ records: make([][]string, v.Len()),
+ }
+ reflect.Copy(reflect.ValueOf(csvReader.records), v)
+
+ return pipeCSV(csvWriter, csvReader, o)
+
+ case t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Uint8:
+ buf := bytes.NewBuffer(v.Bytes())
+ csvReader := csv.NewReader(buf)
+ o.applyToReader(csvReader)
+
+ return bufferedCSV(csvWriter, csvReader, o)
+
+ case t.Kind() == reflect.String:
+ buf := bytes.NewBufferString(v.String())
+ csvReader := csv.NewReader(buf)
+ o.applyToReader(csvReader)
+
+ return bufferedCSV(csvWriter, csvReader, o)
+
+ default:
+ return fmt.Errorf("%v (%T) is not supported by the CSVProducer, %s",
+ data, data, "can be resolved by supporting CSVReader/Reader/BinaryMarshaler interface",
+ )
}
}
- csvWriter.Flush()
- return nil
})
}
+
+// pipeCSV copies CSV records from a CSV reader to a CSV writer
+func pipeCSV(csvWriter CSVWriter, csvReader CSVReader, opts csvOpts) error {
+ for ; opts.skippedLines > 0; opts.skippedLines-- {
+ _, err := csvReader.Read()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ return nil
+ }
+
+ return err
+ }
+ }
+
+ for {
+ record, err := csvReader.Read()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ break
+ }
+
+ return err
+ }
+
+ if err := csvWriter.Write(record); err != nil {
+ return err
+ }
+ }
+
+ csvWriter.Flush()
+
+ return csvWriter.Error()
+}
+
+// bufferedCSV copies CSV records from a CSV reader to a CSV writer,
+// by first reading all records then writing them at once.
+func bufferedCSV(csvWriter *csv.Writer, csvReader *csv.Reader, opts csvOpts) error {
+ for ; opts.skippedLines > 0; opts.skippedLines-- {
+ _, err := csvReader.Read()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ return nil
+ }
+
+ return err
+ }
+ }
+
+ records, err := csvReader.ReadAll()
+ if err != nil {
+ return err
+ }
+
+ return csvWriter.WriteAll(records)
+}
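
Taken together, the rewritten consumer and producer support round-tripping without buffering where possible. A minimal usage sketch, assuming only the API introduced in this diff plus the standard go-openapi/runtime Consume/Produce interfaces:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/go-openapi/runtime"
)

func main() {
	// Consume CSV into a *[][]string destination, dropping the header line.
	in := bytes.NewBufferString("h1,h2\na,b\nc,d\n")
	var records [][]string
	consumer := runtime.CSVConsumer(runtime.WithCSVSkipLines(1))
	if err := consumer.Consume(in, &records); err != nil {
		panic(err)
	}
	fmt.Println(records) // [[a b] [c d]]

	// Produce the records back out as CSV.
	var out bytes.Buffer
	producer := runtime.CSVProducer()
	if err := producer.Produce(&out, records); err != nil {
		panic(err)
	}
	fmt.Print(out.String()) // a,b then c,d on separate lines
}
```
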
diff --git a/vendor/github.com/go-openapi/runtime/csv_options.go b/vendor/github.com/go-openapi/runtime/csv_options.go
new file mode 100644
index 00000000..c16464c5
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/csv_options.go
@@ -0,0 +1,121 @@
+package runtime
+
+import (
+ "encoding/csv"
+ "io"
+)
+
+// CSVOpts alter the behavior of the CSV consumer or producer.
+type CSVOpt func(*csvOpts)
+
+type csvOpts struct {
+ csvReader csv.Reader
+ csvWriter csv.Writer
+ skippedLines int
+ closeStream bool
+}
+
+// WithCSVReaderOpts specifies the options applied to the csv.Reader
+// used when reading CSV.
+func WithCSVReaderOpts(reader csv.Reader) CSVOpt {
+ return func(o *csvOpts) {
+ o.csvReader = reader
+ }
+}
+
+// WithCSVWriterOpts specifies the options applied to the csv.Writer
+// used when writing CSV.
+func WithCSVWriterOpts(writer csv.Writer) CSVOpt {
+ return func(o *csvOpts) {
+ o.csvWriter = writer
+ }
+}
+
+// WithCSVSkipLines skips the first lines of the input (e.g. header lines).
+func WithCSVSkipLines(skipped int) CSVOpt {
+ return func(o *csvOpts) {
+ o.skippedLines = skipped
+ }
+}
+
+// WithCSVClosesStream closes the underlying stream (when it implements io.Closer)
+// once the consumer or producer is done.
+func WithCSVClosesStream() CSVOpt {
+ return func(o *csvOpts) {
+ o.closeStream = true
+ }
+}
+
+func (o csvOpts) applyToReader(in *csv.Reader) {
+ if o.csvReader.Comma != 0 {
+ in.Comma = o.csvReader.Comma
+ }
+ if o.csvReader.Comment != 0 {
+ in.Comment = o.csvReader.Comment
+ }
+ if o.csvReader.FieldsPerRecord != 0 {
+ in.FieldsPerRecord = o.csvReader.FieldsPerRecord
+ }
+
+ in.LazyQuotes = o.csvReader.LazyQuotes
+ in.TrimLeadingSpace = o.csvReader.TrimLeadingSpace
+ in.ReuseRecord = o.csvReader.ReuseRecord
+}
+
+func (o csvOpts) applyToWriter(in *csv.Writer) {
+ if o.csvWriter.Comma != 0 {
+ in.Comma = o.csvWriter.Comma
+ }
+ in.UseCRLF = o.csvWriter.UseCRLF
+}
+
+func csvOptsWithDefaults(opts []CSVOpt) csvOpts {
+ var o csvOpts
+ for _, apply := range opts {
+ apply(&o)
+ }
+
+ return o
+}
+
+// CSVWriter is the interface of destinations that can receive CSV records.
+type CSVWriter interface {
+ Write([]string) error
+ Flush()
+ Error() error
+}
+
+// CSVReader is the interface of sources that yield CSV records one at a time.
+type CSVReader interface {
+ Read() ([]string, error)
+}
+
+var (
+ _ CSVWriter = &csvRecordsWriter{}
+ _ CSVReader = &csvRecordsWriter{}
+)
+
+// csvRecordsWriter is an internal container to move CSV records back and forth
+type csvRecordsWriter struct {
+ i int
+ records [][]string
+}
+
+func (w *csvRecordsWriter) Write(record []string) error {
+ w.records = append(w.records, record)
+
+ return nil
+}
+
+func (w *csvRecordsWriter) Read() ([]string, error) {
+ if w.i >= len(w.records) {
+ return nil, io.EOF
+ }
+ defer func() {
+ w.i++
+ }()
+
+ return w.records[w.i], nil
+}
+
+func (w *csvRecordsWriter) Flush() {}
+
+func (w *csvRecordsWriter) Error() error {
+ return nil
+}
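
Since the option structs are plain csv.Reader/csv.Writer values, dialect tweaks reuse the standard library fields directly; zero-valued fields keep the csv defaults, as applyToReader/applyToWriter above implement. A sketch under those assumptions:

```go
package csvexample

import (
	"encoding/csv"

	"github.com/go-openapi/runtime"
)

// semicolonIn reads ';'-separated input and closes the source when done.
func semicolonIn() runtime.Consumer {
	return runtime.CSVConsumer(
		runtime.WithCSVReaderOpts(csv.Reader{Comma: ';', TrimLeadingSpace: true}),
		runtime.WithCSVClosesStream(), // only effective when the reader is an io.Closer
	)
}

// tabOut writes '\t'-separated output.
func tabOut() runtime.Producer {
	return runtime.CSVProducer(
		runtime.WithCSVWriterOpts(csv.Writer{Comma: '\t'}),
	)
}
```
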
diff --git a/vendor/github.com/go-openapi/runtime/logger/standard.go b/vendor/github.com/go-openapi/runtime/logger/standard.go
index f7e67ebb..30035a77 100644
--- a/vendor/github.com/go-openapi/runtime/logger/standard.go
+++ b/vendor/github.com/go-openapi/runtime/logger/standard.go
@@ -5,6 +5,8 @@ import (
"os"
)
+var _ Logger = StandardLogger{}
+
type StandardLogger struct{}
func (StandardLogger) Printf(format string, args ...interface{}) {
diff --git a/vendor/github.com/go-openapi/runtime/middleware/context.go b/vendor/github.com/go-openapi/runtime/middleware/context.go
index d21ae4e8..44cecf11 100644
--- a/vendor/github.com/go-openapi/runtime/middleware/context.go
+++ b/vendor/github.com/go-openapi/runtime/middleware/context.go
@@ -18,6 +18,8 @@ import (
stdContext "context"
"fmt"
"net/http"
+ "net/url"
+ "path"
"strings"
"sync"
@@ -35,12 +37,21 @@ import (
// Debug when true turns on verbose logging
var Debug = logger.DebugEnabled()
+
+// Logger is the standard library logger used for printing debug messages
var Logger logger.Logger = logger.StandardLogger{}
-func debugLog(format string, args ...interface{}) {
- if Debug {
- Logger.Printf(format, args...)
+func debugLogfFunc(lg logger.Logger) func(string, ...any) {
+ if logger.DebugEnabled() {
+ if lg == nil {
+ return Logger.Debugf
+ }
+
+ return lg.Debugf
}
+
+ // muted logger
+ return func(_ string, _ ...any) {}
}
// A Builder can create middlewares
@@ -73,10 +84,11 @@ func (fn ResponderFunc) WriteResponse(rw http.ResponseWriter, pr runtime.Produce
// used throughout to store request context with the standard context attached
// to the http.Request
type Context struct {
- spec *loads.Document
- analyzer *analysis.Spec
- api RoutableAPI
- router Router
+ spec *loads.Document
+ analyzer *analysis.Spec
+ api RoutableAPI
+ router Router
+ debugLogf func(string, ...any) // a logging function to debug context and all components using it
}
type routableUntypedAPI struct {
@@ -162,7 +174,7 @@ func (r *routableUntypedAPI) HandlerFor(method, path string) (http.Handler, bool
r.hlock.Unlock()
return handler, ok
}
-func (r *routableUntypedAPI) ServeErrorFor(operationID string) func(http.ResponseWriter, *http.Request, error) {
+func (r *routableUntypedAPI) ServeErrorFor(_ string) func(http.ResponseWriter, *http.Request, error) {
return r.api.ServeError
}
func (r *routableUntypedAPI) ConsumersFor(mediaTypes []string) map[string]runtime.Consumer {
@@ -189,7 +201,9 @@ func (r *routableUntypedAPI) DefaultConsumes() string {
return r.defaultConsumes
}
-// NewRoutableContext creates a new context for a routable API
+// NewRoutableContext creates a new context for a routable API.
+//
+// If a nil Router is provided, the DefaultRouter (denco-based) will be used.
func NewRoutableContext(spec *loads.Document, routableAPI RoutableAPI, routes Router) *Context {
var an *analysis.Spec
if spec != nil {
@@ -199,26 +213,40 @@ func NewRoutableContext(spec *loads.Document, routableAPI RoutableAPI, routes Ro
return NewRoutableContextWithAnalyzedSpec(spec, an, routableAPI, routes)
}
-// NewRoutableContextWithAnalyzedSpec is like NewRoutableContext but takes in input the analysed spec too
+// NewRoutableContextWithAnalyzedSpec is like NewRoutableContext but takes as input an already analysed spec.
+//
+// If a nil Router is provided, the DefaultRouter (denco-based) will be used.
func NewRoutableContextWithAnalyzedSpec(spec *loads.Document, an *analysis.Spec, routableAPI RoutableAPI, routes Router) *Context {
// Either there are no spec doc and analysis, or both of them.
if !((spec == nil && an == nil) || (spec != nil && an != nil)) {
panic(errors.New(http.StatusInternalServerError, "routable context requires either both spec doc and analysis, or none of them"))
}
- ctx := &Context{spec: spec, api: routableAPI, analyzer: an, router: routes}
- return ctx
+ return &Context{
+ spec: spec,
+ api: routableAPI,
+ analyzer: an,
+ router: routes,
+ debugLogf: debugLogfFunc(nil),
+ }
}
-// NewContext creates a new context wrapper
+// NewContext creates a new context wrapper.
+//
+// If a nil Router is provided, the DefaultRouter (denco-based) will be used.
func NewContext(spec *loads.Document, api *untyped.API, routes Router) *Context {
var an *analysis.Spec
if spec != nil {
an = analysis.New(spec.Spec())
}
- ctx := &Context{spec: spec, analyzer: an}
+ ctx := &Context{
+ spec: spec,
+ analyzer: an,
+ router: routes,
+ debugLogf: debugLogfFunc(nil),
+ }
ctx.api = newRoutableUntypedAPI(spec, api, ctx)
- ctx.router = routes
+
return ctx
}
@@ -282,6 +310,13 @@ func (c *Context) BasePath() string {
return c.spec.BasePath()
}
+// SetLogger allows for injecting a logger to catch debug entries.
+//
+// The logger is enabled in DEBUG mode only.
+func (c *Context) SetLogger(lg logger.Logger) {
+ c.debugLogf = debugLogfFunc(lg)
+}
+
// RequiredProduces returns the accepted content types for responses
func (c *Context) RequiredProduces() []string {
return c.analyzer.RequiredProduces()
@@ -299,6 +334,7 @@ func (c *Context) BindValidRequest(request *http.Request, route *MatchedRoute, b
if err != nil {
res = append(res, err)
} else {
+ c.debugLogf("validating content type for %q against [%s]", ct, strings.Join(route.Consumes, ", "))
if err := validateContentType(route.Consumes, ct); err != nil {
res = append(res, err)
}
@@ -397,16 +433,16 @@ func (c *Context) ResponseFormat(r *http.Request, offers []string) (string, *htt
var rCtx = r.Context()
if v, ok := rCtx.Value(ctxResponseFormat).(string); ok {
- debugLog("[%s %s] found response format %q in context", r.Method, r.URL.Path, v)
+ c.debugLogf("[%s %s] found response format %q in context", r.Method, r.URL.Path, v)
return v, r
}
format := NegotiateContentType(r, offers, "")
if format != "" {
- debugLog("[%s %s] set response format %q in context", r.Method, r.URL.Path, format)
+ c.debugLogf("[%s %s] set response format %q in context", r.Method, r.URL.Path, format)
r = r.WithContext(stdContext.WithValue(rCtx, ctxResponseFormat, format))
}
- debugLog("[%s %s] negotiated response format %q", r.Method, r.URL.Path, format)
+ c.debugLogf("[%s %s] negotiated response format %q", r.Method, r.URL.Path, format)
return format, r
}
@@ -469,7 +505,7 @@ func (c *Context) BindAndValidate(request *http.Request, matched *MatchedRoute)
var rCtx = request.Context()
if v, ok := rCtx.Value(ctxBoundParams).(*validation); ok {
- debugLog("got cached validation (valid: %t)", len(v.result) == 0)
+ c.debugLogf("got cached validation (valid: %t)", len(v.result) == 0)
if len(v.result) > 0 {
return v.bound, request, errors.CompositeValidationError(v.result...)
}
@@ -481,7 +517,7 @@ func (c *Context) BindAndValidate(request *http.Request, matched *MatchedRoute)
if len(result.result) > 0 {
return result.bound, request, errors.CompositeValidationError(result.result...)
}
- debugLog("no validation errors found")
+ c.debugLogf("no validation errors found")
return result.bound, request, nil
}
@@ -492,7 +528,7 @@ func (c *Context) NotFound(rw http.ResponseWriter, r *http.Request) {
// Respond renders the response after doing some content negotiation
func (c *Context) Respond(rw http.ResponseWriter, r *http.Request, produces []string, route *MatchedRoute, data interface{}) {
- debugLog("responding to %s %s with produces: %v", r.Method, r.URL.Path, produces)
+ c.debugLogf("responding to %s %s with produces: %v", r.Method, r.URL.Path, produces)
offers := []string{}
for _, mt := range produces {
if mt != c.api.DefaultProduces() {
@@ -501,7 +537,7 @@ func (c *Context) Respond(rw http.ResponseWriter, r *http.Request, produces []st
}
// the default producer is last so more specific producers take precedence
offers = append(offers, c.api.DefaultProduces())
- debugLog("offers: %v", offers)
+ c.debugLogf("offers: %v", offers)
var format string
format, r = c.ResponseFormat(r, offers)
@@ -516,7 +552,7 @@ func (c *Context) Respond(rw http.ResponseWriter, r *http.Request, produces []st
prods := c.api.ProducersFor(normalizeOffers([]string{c.api.DefaultProduces()}))
pr, ok := prods[c.api.DefaultProduces()]
if !ok {
- panic(errors.New(http.StatusInternalServerError, "can't find a producer for "+format))
+ panic(errors.New(http.StatusInternalServerError, cantFindProducer(format)))
}
prod = pr
}
@@ -542,14 +578,14 @@ func (c *Context) Respond(rw http.ResponseWriter, r *http.Request, produces []st
}
if route == nil || route.Operation == nil {
- rw.WriteHeader(200)
- if r.Method == "HEAD" {
+ rw.WriteHeader(http.StatusOK)
+ if r.Method == http.MethodHead {
return
}
producers := c.api.ProducersFor(normalizeOffers(offers))
prod, ok := producers[format]
if !ok {
- panic(errors.New(http.StatusInternalServerError, "can't find a producer for "+format))
+ panic(errors.New(http.StatusInternalServerError, cantFindProducer(format)))
}
if err := prod.Produce(rw, data); err != nil {
panic(err) // let the recovery middleware deal with this
@@ -559,7 +595,7 @@ func (c *Context) Respond(rw http.ResponseWriter, r *http.Request, produces []st
if _, code, ok := route.Operation.SuccessResponse(); ok {
rw.WriteHeader(code)
- if code == 204 || r.Method == "HEAD" {
+ if code == http.StatusNoContent || r.Method == http.MethodHead {
return
}
@@ -570,7 +606,7 @@ func (c *Context) Respond(rw http.ResponseWriter, r *http.Request, produces []st
prods := c.api.ProducersFor(normalizeOffers([]string{c.api.DefaultProduces()}))
pr, ok := prods[c.api.DefaultProduces()]
if !ok {
- panic(errors.New(http.StatusInternalServerError, "can't find a producer for "+format))
+ panic(errors.New(http.StatusInternalServerError, cantFindProducer(format)))
}
prod = pr
}
@@ -584,45 +620,92 @@ func (c *Context) Respond(rw http.ResponseWriter, r *http.Request, produces []st
c.api.ServeErrorFor(route.Operation.ID)(rw, r, errors.New(http.StatusInternalServerError, "can't produce response"))
}
-func (c *Context) APIHandlerSwaggerUI(builder Builder) http.Handler {
+// APIHandlerSwaggerUI returns a handler to serve the API.
+//
+// This handler includes a swagger spec, router and the contract defined in the swagger spec.
+//
+// A spec UI (SwaggerUI) is served at {API base path}/docs and the spec document at /swagger.json
+// (these can be modified with uiOptions).
+func (c *Context) APIHandlerSwaggerUI(builder Builder, opts ...UIOption) http.Handler {
b := builder
if b == nil {
b = PassthroughBuilder
}
- var title string
- sp := c.spec.Spec()
- if sp != nil && sp.Info != nil && sp.Info.Title != "" {
- title = sp.Info.Title
- }
+ specPath, uiOpts, specOpts := c.uiOptionsForHandler(opts)
+ var swaggerUIOpts SwaggerUIOpts
+ fromCommonToAnyOptions(uiOpts, &swaggerUIOpts)
- swaggerUIOpts := SwaggerUIOpts{
- BasePath: c.BasePath(),
- Title: title,
- }
-
- return Spec("", c.spec.Raw(), SwaggerUI(swaggerUIOpts, c.RoutesHandler(b)))
+ return Spec(specPath, c.spec.Raw(), SwaggerUI(swaggerUIOpts, c.RoutesHandler(b)), specOpts...)
}
-// APIHandler returns a handler to serve the API, this includes a swagger spec, router and the contract defined in the swagger spec
-func (c *Context) APIHandler(builder Builder) http.Handler {
+// APIHandlerRapiDoc returns a handler to serve the API.
+//
+// This handler includes a swagger spec, router and the contract defined in the swagger spec.
+//
+// A spec UI (RapiDoc) is served at {API base path}/docs and the spec document at /swagger.json
+// (these can be modified with uiOptions).
+func (c *Context) APIHandlerRapiDoc(builder Builder, opts ...UIOption) http.Handler {
b := builder
if b == nil {
b = PassthroughBuilder
}
+ specPath, uiOpts, specOpts := c.uiOptionsForHandler(opts)
+ var rapidocUIOpts RapiDocOpts
+ fromCommonToAnyOptions(uiOpts, &rapidocUIOpts)
+
+ return Spec(specPath, c.spec.Raw(), RapiDoc(rapidocUIOpts, c.RoutesHandler(b)), specOpts...)
+}
+
+// APIHandler returns a handler to serve the API.
+//
+// This handler includes a swagger spec, router and the contract defined in the swagger spec.
+//
+// A spec UI (Redoc) is served at {API base path}/docs and the spec document at /swagger.json
+// (these can be modified with uiOptions).
+func (c *Context) APIHandler(builder Builder, opts ...UIOption) http.Handler {
+ b := builder
+ if b == nil {
+ b = PassthroughBuilder
+ }
+
+ specPath, uiOpts, specOpts := c.uiOptionsForHandler(opts)
+ var redocOpts RedocOpts
+ fromCommonToAnyOptions(uiOpts, &redocOpts)
+
+ return Spec(specPath, c.spec.Raw(), Redoc(redocOpts, c.RoutesHandler(b)), specOpts...)
+}
+
+func (c Context) uiOptionsForHandler(opts []UIOption) (string, uiOptions, []SpecOption) {
var title string
sp := c.spec.Spec()
if sp != nil && sp.Info != nil && sp.Info.Title != "" {
title = sp.Info.Title
}
- redocOpts := RedocOpts{
- BasePath: c.BasePath(),
- Title: title,
+ // default options (may be overridden)
+ optsForContext := []UIOption{
+ WithUIBasePath(c.BasePath()),
+ WithUITitle(title),
+ }
+ optsForContext = append(optsForContext, opts...)
+ uiOpts := uiOptionsWithDefaults(optsForContext)
+
+ // If spec URL is provided, there is a non-default path to serve the spec.
+ // This makes sure that the UI middleware is aligned with the Spec middleware.
+ u, _ := url.Parse(uiOpts.SpecURL)
+ var specPath string
+ if u != nil {
+ specPath = u.Path
}
- return Spec("", c.spec.Raw(), Redoc(redocOpts, c.RoutesHandler(b)))
+ pth, doc := path.Split(specPath)
+ if pth == "." {
+ pth = ""
+ }
+
+ return pth, uiOpts, []SpecOption{WithSpecDocument(doc)}
}
// RoutesHandler returns a handler to serve the API, just the routes and the contract defined in the swagger spec
@@ -633,3 +716,7 @@ func (c *Context) RoutesHandler(builder Builder) http.Handler {
}
return NewRouter(c, b(NewOperationExecutor(c)))
}
+
+func cantFindProducer(format string) string {
+ return "can't find a producer for " + format
+}
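
A sketch of how a server might consume the new UIOption variants; the context value and port are illustrative, and the option names (WithUITitle, WithUIBasePath) are the ones referenced above:

```go
package main

import (
	"log"
	"net/http"

	"github.com/go-openapi/runtime/middleware"
)

// serveDocs mounts the API plus SwaggerUI at {base path}/docs and the
// spec at /swagger.json; ctx is assumed built with middleware.NewContext.
func serveDocs(ctx *middleware.Context) {
	handler := ctx.APIHandlerSwaggerUI(nil, // nil builder falls back to PassthroughBuilder
		middleware.WithUITitle("GARM API"),
	)
	log.Fatal(http.ListenAndServe(":8080", handler))
}
```
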
diff --git a/vendor/github.com/go-openapi/runtime/middleware/denco/router.go b/vendor/github.com/go-openapi/runtime/middleware/denco/router.go
index 5d2691ec..4377f77a 100644
--- a/vendor/github.com/go-openapi/runtime/middleware/denco/router.go
+++ b/vendor/github.com/go-openapi/runtime/middleware/denco/router.go
@@ -2,6 +2,7 @@
package denco
import (
+ "errors"
"fmt"
"sort"
"strings"
@@ -29,13 +30,13 @@ const (
// Router represents a URL router.
type Router struct {
+ param *doubleArray
// SizeHint expects the maximum number of path parameters in records to Build.
// SizeHint will be used to determine the capacity of the memory to allocate.
// By default, SizeHint will be determined from given records to Build.
SizeHint int
static map[string]interface{}
- param *doubleArray
}
// New returns a new Router.
@@ -51,7 +52,7 @@ func New() *Router {
// params is a slice of Param values arranged in the order in which the parameters appeared.
// e.g. when built routing path is "/path/to/:id/:name" and given path is "/path/to/1/alice". params order is [{"id": "1"}, {"name": "alice"}], not [{"name": "alice"}, {"id": "1"}].
func (rt *Router) Lookup(path string) (data interface{}, params Params, found bool) {
- if data, found := rt.static[path]; found {
+ if data, found = rt.static[path]; found {
return data, nil, true
}
if len(rt.param.node) == 1 {
@@ -71,7 +72,7 @@ func (rt *Router) Lookup(path string) (data interface{}, params Params, found bo
func (rt *Router) Build(records []Record) error {
statics, params := makeRecords(records)
if len(params) > MaxSize {
- return fmt.Errorf("denco: too many records")
+ return errors.New("denco: too many records")
}
if rt.SizeHint < 0 {
rt.SizeHint = 0
@@ -131,7 +132,8 @@ func newDoubleArray() *doubleArray {
// baseCheck contains BASE, CHECK and Extra flags.
// From the top, 22bits of BASE, 2bits of Extra flags and 8bits of CHECK.
//
-// BASE (22bit) | Extra flags (2bit) | CHECK (8bit)
+// BASE (22bit) | Extra flags (2bit) | CHECK (8bit)
+//
// |----------------------|--|--------|
// 32                     10 8        0
type baseCheck uint32
@@ -196,24 +198,29 @@ func (da *doubleArray) lookup(path string, params []Param, idx int) (*node, []Pa
if next := nextIndex(da.bc[idx].Base(), TerminationCharacter); next < len(da.bc) && da.bc[next].Check() == TerminationCharacter {
return da.node[da.bc[next].Base()], params, true
}
+
BACKTRACKING:
for j := len(indices) - 1; j >= 0; j-- {
i, idx := int(indices[j]>>32), int(indices[j]&0xffffffff)
if da.bc[idx].IsSingleParam() {
- idx := nextIndex(da.bc[idx].Base(), ParamCharacter)
- if idx >= len(da.bc) {
+ nextIdx := nextIndex(da.bc[idx].Base(), ParamCharacter)
+ if nextIdx >= len(da.bc) {
break
}
+
next := NextSeparator(path, i)
- params := append(params, Param{Value: path[i:next]})
- if nd, params, found := da.lookup(path[next:], params, idx); found {
- return nd, params, true
+ nextParams := params
+ nextParams = append(nextParams, Param{Value: path[i:next]})
+ if nd, nextNextParams, found := da.lookup(path[next:], nextParams, nextIdx); found {
+ return nd, nextNextParams, true
}
}
+
if da.bc[idx].IsWildcardParam() {
- idx := nextIndex(da.bc[idx].Base(), WildcardCharacter)
- params := append(params, Param{Value: path[i:]})
- return da.node[da.bc[idx].Base()], params, true
+ nextIdx := nextIndex(da.bc[idx].Base(), WildcardCharacter)
+ nextParams := params
+ nextParams = append(nextParams, Param{Value: path[i:]})
+ return da.node[da.bc[nextIdx].Base()], nextParams, true
}
}
return nil, nil, false
@@ -325,7 +332,7 @@ func (da *doubleArray) arrange(records []*record, idx, depth int, usedBase map[i
}
base = da.findBase(siblings, idx, usedBase)
if base > MaxSize {
- return -1, nil, nil, fmt.Errorf("denco: too many elements of internal slice")
+ return -1, nil, nil, errors.New("denco: too many elements of internal slice")
}
da.setBase(idx, base)
return base, siblings, leaf, err
@@ -386,7 +393,7 @@ func makeSiblings(records []*record, depth int) (sib []sibling, leaf *record, er
case pc == c:
continue
default:
- return nil, nil, fmt.Errorf("denco: BUG: routing table hasn't been sorted")
+ return nil, nil, errors.New("denco: BUG: routing table hasn't been sorted")
}
if n > 0 {
sib[n-1].end = i
@@ -431,7 +438,7 @@ func makeRecords(srcs []Record) (statics, params []*record) {
wildcardPrefix := string(SeparatorCharacter) + string(WildcardCharacter)
restconfPrefix := string(PathParamCharacter) + string(ParamCharacter)
for _, r := range srcs {
- if strings.Contains(r.Key, paramPrefix) || strings.Contains(r.Key, wildcardPrefix) ||strings.Contains(r.Key, restconfPrefix){
+ if strings.Contains(r.Key, paramPrefix) || strings.Contains(r.Key, wildcardPrefix) || strings.Contains(r.Key, restconfPrefix) {
r.Key += termChar
params = append(params, &record{Record: r})
} else {
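
For context, the Lookup contract documented above (parameters in order of appearance) can be exercised directly against the vendored denco package; a small sketch:

```go
package main

import (
	"fmt"

	"github.com/go-openapi/runtime/middleware/denco"
)

func main() {
	router := denco.New()
	if err := router.Build([]denco.Record{
		{Key: "/path/to/:id/:name", Value: "user op"},
	}); err != nil {
		panic(err)
	}

	data, params, found := router.Lookup("/path/to/1/alice")
	// params keep appearance order: [{id 1} {name alice}]
	fmt.Println(data, params, found)
}
```
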
diff --git a/vendor/github.com/go-openapi/runtime/middleware/doc.go b/vendor/github.com/go-openapi/runtime/middleware/doc.go
index eaf90606..836a9885 100644
--- a/vendor/github.com/go-openapi/runtime/middleware/doc.go
+++ b/vendor/github.com/go-openapi/runtime/middleware/doc.go
@@ -12,51 +12,52 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-/*Package middleware provides the library with helper functions for serving swagger APIs.
+/*
+Package middleware provides the library with helper functions for serving swagger APIs.
Pseudo middleware handler
- import (
- "net/http"
+ import (
+ "net/http"
- "github.com/go-openapi/errors"
- )
+ "github.com/go-openapi/errors"
+ )
- func newCompleteMiddleware(ctx *Context) http.Handler {
- return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
- // use context to lookup routes
- if matched, ok := ctx.RouteInfo(r); ok {
+ func newCompleteMiddleware(ctx *Context) http.Handler {
+ return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
+ // use context to lookup routes
+ if matched, ok := ctx.RouteInfo(r); ok {
- if matched.NeedsAuth() {
- if _, err := ctx.Authorize(r, matched); err != nil {
- ctx.Respond(rw, r, matched.Produces, matched, err)
- return
- }
- }
+ if matched.NeedsAuth() {
+ if _, err := ctx.Authorize(r, matched); err != nil {
+ ctx.Respond(rw, r, matched.Produces, matched, err)
+ return
+ }
+ }
- bound, validation := ctx.BindAndValidate(r, matched)
- if validation != nil {
- ctx.Respond(rw, r, matched.Produces, matched, validation)
- return
- }
+ bound, validation := ctx.BindAndValidate(r, matched)
+ if validation != nil {
+ ctx.Respond(rw, r, matched.Produces, matched, validation)
+ return
+ }
- result, err := matched.Handler.Handle(bound)
- if err != nil {
- ctx.Respond(rw, r, matched.Produces, matched, err)
- return
- }
+ result, err := matched.Handler.Handle(bound)
+ if err != nil {
+ ctx.Respond(rw, r, matched.Produces, matched, err)
+ return
+ }
- ctx.Respond(rw, r, matched.Produces, matched, result)
- return
- }
+ ctx.Respond(rw, r, matched.Produces, matched, result)
+ return
+ }
- // Not found, check if it exists in the other methods first
- if others := ctx.AllowedMethods(r); len(others) > 0 {
- ctx.Respond(rw, r, ctx.spec.RequiredProduces(), nil, errors.MethodNotAllowed(r.Method, others))
- return
- }
- ctx.Respond(rw, r, ctx.spec.RequiredProduces(), nil, errors.NotFound("path %s was not found", r.URL.Path))
- })
- }
+ // Not found, check if it exists in the other methods first
+ if others := ctx.AllowedMethods(r); len(others) > 0 {
+ ctx.Respond(rw, r, ctx.spec.RequiredProduces(), nil, errors.MethodNotAllowed(r.Method, others))
+ return
+ }
+ ctx.Respond(rw, r, ctx.spec.RequiredProduces(), nil, errors.NotFound("path %s was not found", r.URL.Path))
+ })
+ }
*/
package middleware
diff --git a/vendor/github.com/go-openapi/runtime/middleware/go18.go b/vendor/github.com/go-openapi/runtime/middleware/go18.go
deleted file mode 100644
index 75c762c0..00000000
--- a/vendor/github.com/go-openapi/runtime/middleware/go18.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// +build go1.8
-
-package middleware
-
-import "net/url"
-
-func pathUnescape(path string) (string, error) {
- return url.PathUnescape(path)
-}
diff --git a/vendor/github.com/go-openapi/runtime/middleware/header/header.go b/vendor/github.com/go-openapi/runtime/middleware/header/header.go
index e069743e..df073c87 100644
--- a/vendor/github.com/go-openapi/runtime/middleware/header/header.go
+++ b/vendor/github.com/go-openapi/runtime/middleware/header/header.go
@@ -195,7 +195,8 @@ func ParseAccept2(header http.Header, key string) (specs []AcceptSpec) {
}
// ParseAccept parses Accept* headers.
-func ParseAccept(header http.Header, key string) (specs []AcceptSpec) {
+func ParseAccept(header http.Header, key string) []AcceptSpec {
+ var specs []AcceptSpec
loop:
for _, s := range header[key] {
for {
@@ -218,6 +219,7 @@ loop:
}
}
}
+
specs = append(specs, spec)
s = skipSpace(s)
if !strings.HasPrefix(s, ",") {
@@ -226,7 +228,8 @@ loop:
s = skipSpace(s[1:])
}
}
- return
+
+ return specs
}
func skipSpace(s string) (rest string) {
@@ -306,7 +309,7 @@ func expectTokenOrQuoted(s string) (value string, rest string) {
p := make([]byte, len(s)-1)
j := copy(p, s[:i])
escape := true
- for i = i + 1; i < len(s); i++ {
+ for i++; i < len(s); i++ {
b := s[i]
switch {
case escape:
diff --git a/vendor/github.com/go-openapi/runtime/middleware/parameter.go b/vendor/github.com/go-openapi/runtime/middleware/parameter.go
index 9aaf6595..9c3353a9 100644
--- a/vendor/github.com/go-openapi/runtime/middleware/parameter.go
+++ b/vendor/github.com/go-openapi/runtime/middleware/parameter.go
@@ -34,6 +34,11 @@ import (
const defaultMaxMemory = 32 << 20
+const (
+ typeString = "string"
+ typeArray = "array"
+)
+
var textUnmarshalType = reflect.TypeOf(new(encoding.TextUnmarshaler)).Elem()
func newUntypedParamBinder(param spec.Parameter, spec *spec.Swagger, formats strfmt.Registry) *untypedParamBinder {
@@ -66,7 +71,7 @@ func (p *untypedParamBinder) typeForSchema(tpe, format string, items *spec.Items
case "boolean":
return reflect.TypeOf(true)
- case "string":
+ case typeString:
if tt, ok := p.formats.GetType(format); ok {
return tt
}
@@ -94,7 +99,7 @@ func (p *untypedParamBinder) typeForSchema(tpe, format string, items *spec.Items
return reflect.TypeOf(float64(0))
}
- case "array":
+ case typeArray:
if items == nil {
return nil
}
@@ -119,7 +124,7 @@ func (p *untypedParamBinder) allowsMulti() bool {
func (p *untypedParamBinder) readValue(values runtime.Gettable, target reflect.Value) ([]string, bool, bool, error) {
name, in, cf, tpe := p.parameter.Name, p.parameter.In, p.parameter.CollectionFormat, p.parameter.Type
- if tpe == "array" {
+ if tpe == typeArray {
if cf == "multi" {
if !p.allowsMulti() {
return nil, false, false, errors.InvalidCollectionFormat(name, in, cf)
@@ -208,10 +213,11 @@ func (p *untypedParamBinder) Bind(request *http.Request, routeParams RouteParams
if ffErr != nil {
if p.parameter.Required {
return errors.NewParseError(p.Name, p.parameter.In, "", ffErr)
- } else {
- return nil
}
+
+ return nil
}
+
target.Set(reflect.ValueOf(runtime.File{Data: file, Header: header}))
return nil
}
@@ -263,7 +269,7 @@ func (p *untypedParamBinder) Bind(request *http.Request, routeParams RouteParams
}
func (p *untypedParamBinder) bindValue(data []string, hasKey bool, target reflect.Value) error {
- if p.parameter.Type == "array" {
+ if p.parameter.Type == typeArray {
return p.setSliceFieldValue(target, p.parameter.Default, data, hasKey)
}
var d string
@@ -273,7 +279,7 @@ func (p *untypedParamBinder) bindValue(data []string, hasKey bool, target reflec
return p.setFieldValue(target, p.parameter.Default, d, hasKey)
}
-func (p *untypedParamBinder) setFieldValue(target reflect.Value, defaultValue interface{}, data string, hasKey bool) error {
+func (p *untypedParamBinder) setFieldValue(target reflect.Value, defaultValue interface{}, data string, hasKey bool) error { //nolint:gocyclo
tpe := p.parameter.Type
if p.parameter.Format != "" {
tpe = p.parameter.Format
@@ -317,7 +323,7 @@ func (p *untypedParamBinder) setFieldValue(target reflect.Value, defaultValue in
return nil
}
- switch target.Kind() {
+ switch target.Kind() { //nolint:exhaustive // we want to check only types that map from a swagger parameter
case reflect.Bool:
if data == "" {
if target.CanSet() {
diff --git a/vendor/github.com/go-openapi/runtime/middleware/pre_go18.go b/vendor/github.com/go-openapi/runtime/middleware/pre_go18.go
deleted file mode 100644
index 03385251..00000000
--- a/vendor/github.com/go-openapi/runtime/middleware/pre_go18.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// +build !go1.8
-
-package middleware
-
-import "net/url"
-
-func pathUnescape(path string) (string, error) {
- return url.QueryUnescape(path)
-}
diff --git a/vendor/github.com/go-openapi/runtime/middleware/rapidoc.go b/vendor/github.com/go-openapi/runtime/middleware/rapidoc.go
index 4be330d6..ef75e744 100644
--- a/vendor/github.com/go-openapi/runtime/middleware/rapidoc.go
+++ b/vendor/github.com/go-openapi/runtime/middleware/rapidoc.go
@@ -10,67 +10,57 @@ import (
// RapiDocOpts configures the RapiDoc middlewares
type RapiDocOpts struct {
- // BasePath for the UI path, defaults to: /
+ // BasePath for the UI, defaults to: /
BasePath string
- // Path combines with BasePath for the full UI path, defaults to: docs
+
+ // Path combines with BasePath to construct the path to the UI, defaults to: "docs".
Path string
- // SpecURL the url to find the spec for
+
+ // SpecURL is the URL of the spec document.
+ //
+ // Defaults to: /swagger.json
SpecURL string
- // RapiDocURL for the js that generates the rapidoc site, defaults to: https://cdn.jsdelivr.net/npm/rapidoc/bundles/rapidoc.standalone.js
- RapiDocURL string
+
// Title for the documentation site, defaults to: API documentation
Title string
+
+ // Template specifies a custom template to serve the UI
+ Template string
+
+ // RapiDocURL points to the js asset that generates the rapidoc site.
+ //
+ // Defaults to: https://unpkg.com/rapidoc/dist/rapidoc-min.js
+ RapiDocURL string
}
-// EnsureDefaults in case some options are missing
func (r *RapiDocOpts) EnsureDefaults() {
- if r.BasePath == "" {
- r.BasePath = "/"
- }
- if r.Path == "" {
- r.Path = "docs"
- }
- if r.SpecURL == "" {
- r.SpecURL = "/swagger.json"
- }
+ common := toCommonUIOptions(r)
+ common.EnsureDefaults()
+ fromCommonToAnyOptions(common, r)
+
+ // rapidoc-specifics
if r.RapiDocURL == "" {
r.RapiDocURL = rapidocLatest
}
- if r.Title == "" {
- r.Title = "API documentation"
+ if r.Template == "" {
+ r.Template = rapidocTemplate
}
}
// RapiDoc creates a middleware to serve a documentation site for a swagger spec.
-// This allows for altering the spec before starting the http listener.
//
+// This allows for altering the spec before starting the http listener.
func RapiDoc(opts RapiDocOpts, next http.Handler) http.Handler {
opts.EnsureDefaults()
pth := path.Join(opts.BasePath, opts.Path)
- tmpl := template.Must(template.New("rapidoc").Parse(rapidocTemplate))
+ tmpl := template.Must(template.New("rapidoc").Parse(opts.Template))
+ assets := bytes.NewBuffer(nil)
+ if err := tmpl.Execute(assets, opts); err != nil {
+ panic(fmt.Errorf("cannot execute template: %w", err))
+ }
- buf := bytes.NewBuffer(nil)
- _ = tmpl.Execute(buf, opts)
- b := buf.Bytes()
-
- return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
- if r.URL.Path == pth {
- rw.Header().Set("Content-Type", "text/html; charset=utf-8")
- rw.WriteHeader(http.StatusOK)
-
- _, _ = rw.Write(b)
- return
- }
-
- if next == nil {
- rw.Header().Set("Content-Type", "text/plain")
- rw.WriteHeader(http.StatusNotFound)
- _, _ = rw.Write([]byte(fmt.Sprintf("%q not found", pth)))
- return
- }
- next.ServeHTTP(rw, r)
- })
+ return serveUI(pth, assets.Bytes(), next)
}
const (
@@ -79,7 +69,7 @@ const (
{{ .Title }}
-
+
diff --git a/vendor/github.com/go-openapi/runtime/middleware/redoc.go b/vendor/github.com/go-openapi/runtime/middleware/redoc.go
index 019c8542..b96b01e7 100644
--- a/vendor/github.com/go-openapi/runtime/middleware/redoc.go
+++ b/vendor/github.com/go-openapi/runtime/middleware/redoc.go
@@ -10,67 +10,58 @@ import (
// RedocOpts configures the Redoc middlewares
type RedocOpts struct {
- // BasePath for the UI path, defaults to: /
+ // BasePath for the UI, defaults to: /
BasePath string
- // Path combines with BasePath for the full UI path, defaults to: docs
+
+ // Path combines with BasePath to construct the path to the UI, defaults to: "docs".
Path string
- // SpecURL the url to find the spec for
+
+ // SpecURL is the URL of the spec document.
+ //
+ // Defaults to: /swagger.json
SpecURL string
- // RedocURL for the js that generates the redoc site, defaults to: https://cdn.jsdelivr.net/npm/redoc/bundles/redoc.standalone.js
- RedocURL string
+
// Title for the documentation site, defaults to: API documentation
Title string
+
+ // Template specifies a custom template to serve the UI
+ Template string
+
+ // RedocURL points to the js that generates the redoc site.
+ //
+ // Defaults to: https://cdn.jsdelivr.net/npm/redoc/bundles/redoc.standalone.js
+ RedocURL string
}
// EnsureDefaults in case some options are missing
func (r *RedocOpts) EnsureDefaults() {
- if r.BasePath == "" {
- r.BasePath = "/"
- }
- if r.Path == "" {
- r.Path = "docs"
- }
- if r.SpecURL == "" {
- r.SpecURL = "/swagger.json"
- }
+ common := toCommonUIOptions(r)
+ common.EnsureDefaults()
+ fromCommonToAnyOptions(common, r)
+
+ // redoc-specifics
if r.RedocURL == "" {
r.RedocURL = redocLatest
}
- if r.Title == "" {
- r.Title = "API documentation"
+ if r.Template == "" {
+ r.Template = redocTemplate
}
}
// Redoc creates a middleware to serve a documentation site for a swagger spec.
-// This allows for altering the spec before starting the http listener.
//
+// This allows for altering the spec before starting the http listener.
func Redoc(opts RedocOpts, next http.Handler) http.Handler {
opts.EnsureDefaults()
pth := path.Join(opts.BasePath, opts.Path)
- tmpl := template.Must(template.New("redoc").Parse(redocTemplate))
+ tmpl := template.Must(template.New("redoc").Parse(opts.Template))
+ assets := bytes.NewBuffer(nil)
+ if err := tmpl.Execute(assets, opts); err != nil {
+ panic(fmt.Errorf("cannot execute template: %w", err))
+ }
- buf := bytes.NewBuffer(nil)
- _ = tmpl.Execute(buf, opts)
- b := buf.Bytes()
-
- return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
- if r.URL.Path == pth {
- rw.Header().Set("Content-Type", "text/html; charset=utf-8")
- rw.WriteHeader(http.StatusOK)
-
- _, _ = rw.Write(b)
- return
- }
-
- if next == nil {
- rw.Header().Set("Content-Type", "text/plain")
- rw.WriteHeader(http.StatusNotFound)
- _, _ = rw.Write([]byte(fmt.Sprintf("%q not found", pth)))
- return
- }
- next.ServeHTTP(rw, r)
- })
+ return serveUI(pth, assets.Bytes(), next)
}
const (
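
One behavioral change worth noting: template execution errors now panic at handler construction instead of being silently ignored. A sketch of overriding the Redoc template (the HTML string is a deliberately minimal stand-in, not the shipped template):

```go
package main

import (
	"net/http"

	"github.com/go-openapi/runtime/middleware"
)

// customRedocHTML is a hypothetical minimal page; it only uses fields
// that RedocOpts exposes (Title, SpecURL, RedocURL).
const customRedocHTML = `<!DOCTYPE html><html><head><title>{{ .Title }}</title></head>
<body><redoc spec-url='{{ .SpecURL }}'></redoc>
<script src="{{ .RedocURL }}"></script></body></html>`

// docsHandler serves the custom Redoc page; an invalid template now
// panics here, at construction time, rather than serving a blank page.
func docsHandler(next http.Handler) http.Handler {
	return middleware.Redoc(middleware.RedocOpts{
		Title:    "GARM API",
		Template: customRedocHTML,
	}, next)
}
```
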
diff --git a/vendor/github.com/go-openapi/runtime/middleware/request.go b/vendor/github.com/go-openapi/runtime/middleware/request.go
index 760c3786..82e14366 100644
--- a/vendor/github.com/go-openapi/runtime/middleware/request.go
+++ b/vendor/github.com/go-openapi/runtime/middleware/request.go
@@ -19,10 +19,10 @@ import (
"reflect"
"github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/runtime/logger"
"github.com/go-openapi/spec"
"github.com/go-openapi/strfmt"
-
- "github.com/go-openapi/runtime"
)
// UntypedRequestBinder binds and validates the data from a http request
@@ -31,6 +31,7 @@ type UntypedRequestBinder struct {
Parameters map[string]spec.Parameter
Formats strfmt.Registry
paramBinders map[string]*untypedParamBinder
+ debugLogf func(string, ...any) // a logging function to debug context and all components using it
}
// NewUntypedRequestBinder creates a new binder for reading a request.
@@ -44,6 +45,7 @@ func NewUntypedRequestBinder(parameters map[string]spec.Parameter, spec *spec.Sw
paramBinders: binders,
Spec: spec,
Formats: formats,
+ debugLogf: debugLogfFunc(nil),
}
}
@@ -52,10 +54,10 @@ func (o *UntypedRequestBinder) Bind(request *http.Request, routeParams RoutePara
val := reflect.Indirect(reflect.ValueOf(data))
isMap := val.Kind() == reflect.Map
var result []error
- debugLog("binding %d parameters for %s %s", len(o.Parameters), request.Method, request.URL.EscapedPath())
+ o.debugLogf("binding %d parameters for %s %s", len(o.Parameters), request.Method, request.URL.EscapedPath())
for fieldName, param := range o.Parameters {
binder := o.paramBinders[fieldName]
- debugLog("binding parameter %s for %s %s", fieldName, request.Method, request.URL.EscapedPath())
+ o.debugLogf("binding parameter %s for %s %s", fieldName, request.Method, request.URL.EscapedPath())
var target reflect.Value
if !isMap {
binder.Name = fieldName
@@ -65,7 +67,7 @@ func (o *UntypedRequestBinder) Bind(request *http.Request, routeParams RoutePara
if isMap {
tpe := binder.Type()
if tpe == nil {
- if param.Schema.Type.Contains("array") {
+ if param.Schema.Type.Contains(typeArray) {
tpe = reflect.TypeOf([]interface{}{})
} else {
tpe = reflect.TypeOf(map[string]interface{}{})
@@ -102,3 +104,14 @@ func (o *UntypedRequestBinder) Bind(request *http.Request, routeParams RoutePara
return nil
}
+
+// SetLogger allows for injecting a logger to catch debug entries.
+//
+// The logger is enabled in DEBUG mode only.
+func (o *UntypedRequestBinder) SetLogger(lg logger.Logger) {
+ o.debugLogf = debugLogfFunc(lg)
+}
+
+func (o *UntypedRequestBinder) setDebugLogf(fn func(string, ...any)) {
+ o.debugLogf = fn
+}
diff --git a/vendor/github.com/go-openapi/runtime/middleware/router.go b/vendor/github.com/go-openapi/runtime/middleware/router.go
index 5052031c..3a6aee90 100644
--- a/vendor/github.com/go-openapi/runtime/middleware/router.go
+++ b/vendor/github.com/go-openapi/runtime/middleware/router.go
@@ -17,10 +17,12 @@ package middleware
import (
"fmt"
"net/http"
+ "net/url"
fpath "path"
"regexp"
"strings"
+ "github.com/go-openapi/runtime/logger"
"github.com/go-openapi/runtime/security"
"github.com/go-openapi/swag"
@@ -67,10 +69,10 @@ func (r RouteParams) GetOK(name string) ([]string, bool, bool) {
return nil, false, false
}
-// NewRouter creates a new context aware router middleware
+// NewRouter creates a new context-aware router middleware
func NewRouter(ctx *Context, next http.Handler) http.Handler {
if ctx.router == nil {
- ctx.router = DefaultRouter(ctx.spec, ctx.api)
+ ctx.router = DefaultRouter(ctx.spec, ctx.api, WithDefaultRouterLoggerFunc(ctx.debugLogf))
}
return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
@@ -103,41 +105,75 @@ type RoutableAPI interface {
DefaultConsumes() string
}
-// Router represents a swagger aware router
+// Router represents a swagger-aware router
type Router interface {
Lookup(method, path string) (*MatchedRoute, bool)
OtherMethods(method, path string) []string
}
type defaultRouteBuilder struct {
- spec *loads.Document
- analyzer *analysis.Spec
- api RoutableAPI
- records map[string][]denco.Record
+ spec *loads.Document
+ analyzer *analysis.Spec
+ api RoutableAPI
+ records map[string][]denco.Record
+ debugLogf func(string, ...any) // a logging function to debug context and all components using it
}
type defaultRouter struct {
- spec *loads.Document
- routers map[string]*denco.Router
+ spec *loads.Document
+ routers map[string]*denco.Router
+ debugLogf func(string, ...any) // a logging function to debug context and all components using it
}
-func newDefaultRouteBuilder(spec *loads.Document, api RoutableAPI) *defaultRouteBuilder {
+func newDefaultRouteBuilder(spec *loads.Document, api RoutableAPI, opts ...DefaultRouterOpt) *defaultRouteBuilder {
+ var o defaultRouterOpts
+ for _, apply := range opts {
+ apply(&o)
+ }
+ if o.debugLogf == nil {
+ o.debugLogf = debugLogfFunc(nil) // defaults to standard logger
+ }
+
return &defaultRouteBuilder{
- spec: spec,
- analyzer: analysis.New(spec.Spec()),
- api: api,
- records: make(map[string][]denco.Record),
+ spec: spec,
+ analyzer: analysis.New(spec.Spec()),
+ api: api,
+ records: make(map[string][]denco.Record),
+ debugLogf: o.debugLogf,
}
}
-// DefaultRouter creates a default implemenation of the router
-func DefaultRouter(spec *loads.Document, api RoutableAPI) Router {
- builder := newDefaultRouteBuilder(spec, api)
+// DefaultRouterOpt allows to inject optional behavior to the default router.
+type DefaultRouterOpt func(*defaultRouterOpts)
+
+type defaultRouterOpts struct {
+ debugLogf func(string, ...any)
+}
+
+// WithDefaultRouterLogger sets the debug logger for the default router.
+//
+// This is enabled only in DEBUG mode.
+func WithDefaultRouterLogger(lg logger.Logger) DefaultRouterOpt {
+ return func(o *defaultRouterOpts) {
+ o.debugLogf = debugLogfFunc(lg)
+ }
+}
+
+// WithDefaultRouterLoggerFunc sets a logging debug method for the default router.
+func WithDefaultRouterLoggerFunc(fn func(string, ...any)) DefaultRouterOpt {
+ return func(o *defaultRouterOpts) {
+ o.debugLogf = fn
+ }
+}
+
+// DefaultRouter creates a default implementation of the router
+func DefaultRouter(spec *loads.Document, api RoutableAPI, opts ...DefaultRouterOpt) Router {
+ builder := newDefaultRouteBuilder(spec, api, opts...)
if spec != nil {
for method, paths := range builder.analyzer.Operations() {
for path, operation := range paths {
fp := fpath.Join(spec.BasePath(), path)
- debugLog("adding route %s %s %q", method, fp, operation.ID)
+ builder.debugLogf("adding route %s %s %q", method, fp, operation.ID)
builder.AddRoute(method, fp, operation)
}
}
@@ -319,24 +355,24 @@ func (m *MatchedRoute) NeedsAuth() bool {
func (d *defaultRouter) Lookup(method, path string) (*MatchedRoute, bool) {
mth := strings.ToUpper(method)
- debugLog("looking up route for %s %s", method, path)
+ d.debugLogf("looking up route for %s %s", method, path)
if Debug {
if len(d.routers) == 0 {
- debugLog("there are no known routers")
+ d.debugLogf("there are no known routers")
}
for meth := range d.routers {
- debugLog("got a router for %s", meth)
+ d.debugLogf("got a router for %s", meth)
}
}
if router, ok := d.routers[mth]; ok {
if m, rp, ok := router.Lookup(fpath.Clean(path)); ok && m != nil {
if entry, ok := m.(*routeEntry); ok {
- debugLog("found a route for %s %s with %d parameters", method, path, len(entry.Parameters))
+ d.debugLogf("found a route for %s %s with %d parameters", method, path, len(entry.Parameters))
var params RouteParams
for _, p := range rp {
- v, err := pathUnescape(p.Value)
+ v, err := url.PathUnescape(p.Value)
if err != nil {
- debugLog("failed to escape %q: %v", p.Value, err)
+ d.debugLogf("failed to escape %q: %v", p.Value, err)
v = p.Value
}
// a workaround to handle fragment/composing parameters until they are supported in denco router
@@ -356,10 +392,10 @@ func (d *defaultRouter) Lookup(method, path string) (*MatchedRoute, bool) {
return &MatchedRoute{routeEntry: *entry, Params: params}, true
}
} else {
- debugLog("couldn't find a route by path for %s %s", method, path)
+ d.debugLogf("couldn't find a route by path for %s %s", method, path)
}
} else {
- debugLog("couldn't find a route by method for %s %s", method, path)
+ d.debugLogf("couldn't find a route by method for %s %s", method, path)
}
return nil, false
}
@@ -378,6 +414,10 @@ func (d *defaultRouter) OtherMethods(method, path string) []string {
return methods
}
+func (d *defaultRouter) SetLogger(lg logger.Logger) {
+ d.debugLogf = debugLogfFunc(lg)
+}
+
// convert swagger parameters per path segment into a denco parameter as multiple parameters per segment are not supported in denco
var pathConverter = regexp.MustCompile(`{(.+?)}([^/]*)`)
@@ -413,7 +453,7 @@ func (d *defaultRouteBuilder) AddRoute(method, path string, operation *spec.Oper
bp = bp[:len(bp)-1]
}
- debugLog("operation: %#v", *operation)
+ d.debugLogf("operation: %#v", *operation)
if handler, ok := d.api.HandlerFor(method, strings.TrimPrefix(path, bp)); ok {
consumes := d.analyzer.ConsumesFor(operation)
produces := d.analyzer.ProducesFor(operation)
@@ -428,6 +468,8 @@ func (d *defaultRouteBuilder) AddRoute(method, path string, operation *spec.Oper
produces = append(produces, defProduces)
}
+ requestBinder := NewUntypedRequestBinder(parameters, d.spec.Spec(), d.api.Formats())
+ requestBinder.setDebugLogf(d.debugLogf)
record := denco.NewRecord(pathConverter.ReplaceAllString(path, ":$1"), &routeEntry{
BasePath: bp,
PathPattern: path,
@@ -439,7 +481,7 @@ func (d *defaultRouteBuilder) AddRoute(method, path string, operation *spec.Oper
Producers: d.api.ProducersFor(normalizeOffers(produces)),
Parameters: parameters,
Formats: d.api.Formats(),
- Binder: NewUntypedRequestBinder(parameters, d.spec.Spec(), d.api.Formats()),
+ Binder: requestBinder,
Authenticators: d.buildAuthenticators(operation),
Authorizer: d.api.Authorizer(),
})
@@ -449,11 +491,11 @@ func (d *defaultRouteBuilder) AddRoute(method, path string, operation *spec.Oper
func (d *defaultRouteBuilder) buildAuthenticators(operation *spec.Operation) RouteAuthenticators {
requirements := d.analyzer.SecurityRequirementsFor(operation)
- var auths []RouteAuthenticator
+ auths := make([]RouteAuthenticator, 0, len(requirements))
for _, reqs := range requirements {
- var schemes []string
+ schemes := make([]string, 0, len(reqs))
scopes := make(map[string][]string, len(reqs))
- var scopeSlices [][]string
+ scopeSlices := make([][]string, 0, len(reqs))
for _, req := range reqs {
schemes = append(schemes, req.Name)
scopes[req.Name] = req.Scopes
@@ -482,7 +524,8 @@ func (d *defaultRouteBuilder) Build() *defaultRouter {
routers[method] = router
}
return &defaultRouter{
- spec: d.spec,
- routers: routers,
+ spec: d.spec,
+ routers: routers,
+ debugLogf: d.debugLogf,
}
}
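
A sketch of opting into router debug logs through the new hooks; spec and api are assumed to be the loaded document and routable API, and output only appears when DEBUG is enabled:

```go
package main

import (
	"github.com/go-openapi/loads"
	"github.com/go-openapi/runtime/logger"
	"github.com/go-openapi/runtime/middleware"
)

// newDebugRouter builds the denco-based default router with debug
// entries routed to the standard logger.
func newDebugRouter(spec *loads.Document, api middleware.RoutableAPI) middleware.Router {
	return middleware.DefaultRouter(spec, api,
		middleware.WithDefaultRouterLogger(logger.StandardLogger{}),
	)
}
```
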
diff --git a/vendor/github.com/go-openapi/runtime/middleware/spec.go b/vendor/github.com/go-openapi/runtime/middleware/spec.go
index f0291429..87e17e34 100644
--- a/vendor/github.com/go-openapi/runtime/middleware/spec.go
+++ b/vendor/github.com/go-openapi/runtime/middleware/spec.go
@@ -19,30 +19,84 @@ import (
"path"
)
-// Spec creates a middleware to serve a swagger spec.
-// This allows for altering the spec before starting the http listener.
-// This can be useful if you want to serve the swagger spec from another path than /swagger.json
+const (
+ contentTypeHeader = "Content-Type"
+ applicationJSON = "application/json"
+)
+
+// SpecOption can be applied to the Spec serving middleware
+type SpecOption func(*specOptions)
+
+var defaultSpecOptions = specOptions{
+ Path: "",
+ Document: "swagger.json",
+}
+
+type specOptions struct {
+ Path string
+ Document string
+}
+
+func specOptionsWithDefaults(opts []SpecOption) specOptions {
+ o := defaultSpecOptions
+ for _, apply := range opts {
+ apply(&o)
+ }
+
+ return o
+}
+
+// Spec creates a middleware to serve a swagger spec as a JSON document.
//
-func Spec(basePath string, b []byte, next http.Handler) http.Handler {
+// This allows for altering the spec before starting the http listener.
+//
+// The basePath argument indicates the path of the spec document (defaults to "/").
+// Additional SpecOption can be used to change the name of the document (defaults to "swagger.json").
+func Spec(basePath string, b []byte, next http.Handler, opts ...SpecOption) http.Handler {
if basePath == "" {
basePath = "/"
}
- pth := path.Join(basePath, "swagger.json")
+ o := specOptionsWithDefaults(opts)
+ pth := path.Join(basePath, o.Path, o.Document)
return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
- if r.URL.Path == pth {
- rw.Header().Set("Content-Type", "application/json")
+ if path.Clean(r.URL.Path) == pth {
+ rw.Header().Set(contentTypeHeader, applicationJSON)
rw.WriteHeader(http.StatusOK)
- //#nosec
_, _ = rw.Write(b)
+
return
}
- if next == nil {
- rw.Header().Set("Content-Type", "application/json")
- rw.WriteHeader(http.StatusNotFound)
+ if next != nil {
+ next.ServeHTTP(rw, r)
+
return
}
- next.ServeHTTP(rw, r)
+
+ rw.Header().Set(contentTypeHeader, applicationJSON)
+ rw.WriteHeader(http.StatusNotFound)
})
}
+
+// WithSpecPath sets the path to be joined to the base path of the Spec middleware.
+//
+// This is empty by default.
+func WithSpecPath(pth string) SpecOption {
+ return func(o *specOptions) {
+ o.Path = pth
+ }
+}
+
+// WithSpecDocument sets the name of the JSON document served as a spec.
+//
+// By default, this is "swagger.json"
+func WithSpecDocument(doc string) SpecOption {
+ return func(o *specOptions) {
+ if doc == "" {
+ return
+ }
+
+ o.Document = doc
+ }
+}
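
A sketch combining the two options to move the spec document off the default /swagger.json; the base path and names are illustrative:

```go
package main

import (
	"net/http"

	"github.com/go-openapi/runtime/middleware"
)

// specHandler serves rawSpec at /api/v1/spec/openapi.json and delegates
// every other request to next.
func specHandler(rawSpec []byte, next http.Handler) http.Handler {
	return middleware.Spec("/api/v1", rawSpec, next,
		middleware.WithSpecPath("spec"),
		middleware.WithSpecDocument("openapi.json"),
	)
}
```
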
diff --git a/vendor/github.com/go-openapi/runtime/middleware/swaggerui.go b/vendor/github.com/go-openapi/runtime/middleware/swaggerui.go
index b4dea29e..ec3c10cb 100644
--- a/vendor/github.com/go-openapi/runtime/middleware/swaggerui.go
+++ b/vendor/github.com/go-openapi/runtime/middleware/swaggerui.go
@@ -8,40 +8,65 @@ import (
"path"
)
-// SwaggerUIOpts configures the Swaggerui middlewares
+// SwaggerUIOpts configures the SwaggerUI middleware
type SwaggerUIOpts struct {
- // BasePath for the UI path, defaults to: /
+ // BasePath for the API, defaults to: /
BasePath string
- // Path combines with BasePath for the full UI path, defaults to: docs
+
+ // Path combines with BasePath to construct the path to the UI, defaults to: "docs".
Path string
- // SpecURL the url to find the spec for
+
+ // SpecURL is the URL of the spec document.
+ //
+ // Defaults to: /swagger.json
SpecURL string
+
+ // Title for the documentation site, defaults to: API documentation
+ Title string
+
+ // Template specifies a custom template to serve the UI
+ Template string
+
// OAuthCallbackURL the url called after OAuth2 login
OAuthCallbackURL string
// The three components needed to embed swagger-ui
- SwaggerURL string
+
+ // SwaggerURL points to the js that generates the SwaggerUI site.
+ //
+ // Defaults to: https://unpkg.com/swagger-ui-dist/swagger-ui-bundle.js
+ SwaggerURL string
+
SwaggerPresetURL string
SwaggerStylesURL string
Favicon32 string
Favicon16 string
-
- // Title for the documentation site, default to: API documentation
- Title string
}
// EnsureDefaults in case some options are missing
func (r *SwaggerUIOpts) EnsureDefaults() {
- if r.BasePath == "" {
- r.BasePath = "/"
+ r.ensureDefaults()
+
+ if r.Template == "" {
+ r.Template = swaggeruiTemplate
}
- if r.Path == "" {
- r.Path = "docs"
- }
- if r.SpecURL == "" {
- r.SpecURL = "/swagger.json"
+}
+
+func (r *SwaggerUIOpts) EnsureDefaultsOauth2() {
+ r.ensureDefaults()
+
+ if r.Template == "" {
+ r.Template = swaggerOAuthTemplate
}
+}
+
+func (r *SwaggerUIOpts) ensureDefaults() {
+ common := toCommonUIOptions(r)
+ common.EnsureDefaults()
+ fromCommonToAnyOptions(common, r)
+
+ // swaggerui-specifics
if r.OAuthCallbackURL == "" {
r.OAuthCallbackURL = path.Join(r.BasePath, r.Path, "oauth2-callback")
}
@@ -60,40 +85,22 @@ func (r *SwaggerUIOpts) EnsureDefaults() {
if r.Favicon32 == "" {
r.Favicon32 = swaggerFavicon32Latest
}
- if r.Title == "" {
- r.Title = "API documentation"
- }
}
// SwaggerUI creates a middleware to serve a documentation site for a swagger spec.
+//
// This allows for altering the spec before starting the http listener.
func SwaggerUI(opts SwaggerUIOpts, next http.Handler) http.Handler {
opts.EnsureDefaults()
pth := path.Join(opts.BasePath, opts.Path)
- tmpl := template.Must(template.New("swaggerui").Parse(swaggeruiTemplate))
+ tmpl := template.Must(template.New("swaggerui").Parse(opts.Template))
+ assets := bytes.NewBuffer(nil)
+ if err := tmpl.Execute(assets, opts); err != nil {
+ panic(fmt.Errorf("cannot execute template: %w", err))
+ }
- buf := bytes.NewBuffer(nil)
- _ = tmpl.Execute(buf, &opts)
- b := buf.Bytes()
-
- return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
- if path.Join(r.URL.Path) == pth {
- rw.Header().Set("Content-Type", "text/html; charset=utf-8")
- rw.WriteHeader(http.StatusOK)
-
- _, _ = rw.Write(b)
- return
- }
-
- if next == nil {
- rw.Header().Set("Content-Type", "text/plain")
- rw.WriteHeader(http.StatusNotFound)
- _, _ = rw.Write([]byte(fmt.Sprintf("%q not found", pth)))
- return
- }
- next.ServeHTTP(rw, r)
- })
+ return serveUI(pth, assets.Bytes(), next)
}
const (
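
Usage note: the Template and Title fields added above can be set directly on SwaggerUIOpts; anything left empty falls back to the defaults ensured by EnsureDefaults. A minimal sketch; the title and addresses are placeholders.

    package main

    import (
        "log"
        "net/http"

        "github.com/go-openapi/runtime/middleware"
    )

    func main() {
        opts := middleware.SwaggerUIOpts{
            BasePath: "/",
            Path:     "docs",          // UI served at /docs
            SpecURL:  "/swagger.json", // where the spec document lives
            Title:    "GARM API",      // placeholder title
        }
        // Unmatched paths fall through to the next handler.
        h := middleware.SwaggerUI(opts, http.NotFoundHandler())
        log.Fatal(http.ListenAndServe(":8080", h))
    }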
diff --git a/vendor/github.com/go-openapi/runtime/middleware/swaggerui_oauth2.go b/vendor/github.com/go-openapi/runtime/middleware/swaggerui_oauth2.go
index 576f6003..e81212f7 100644
--- a/vendor/github.com/go-openapi/runtime/middleware/swaggerui_oauth2.go
+++ b/vendor/github.com/go-openapi/runtime/middleware/swaggerui_oauth2.go
@@ -4,37 +4,20 @@ import (
"bytes"
"fmt"
"net/http"
- "path"
"text/template"
)
func SwaggerUIOAuth2Callback(opts SwaggerUIOpts, next http.Handler) http.Handler {
- opts.EnsureDefaults()
+ opts.EnsureDefaultsOauth2()
pth := opts.OAuthCallbackURL
- tmpl := template.Must(template.New("swaggeroauth").Parse(swaggerOAuthTemplate))
+ tmpl := template.Must(template.New("swaggeroauth").Parse(opts.Template))
+ assets := bytes.NewBuffer(nil)
+ if err := tmpl.Execute(assets, opts); err != nil {
+ panic(fmt.Errorf("cannot execute template: %w", err))
+ }
- buf := bytes.NewBuffer(nil)
- _ = tmpl.Execute(buf, &opts)
- b := buf.Bytes()
-
- return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
- if path.Join(r.URL.Path) == pth {
- rw.Header().Set("Content-Type", "text/html; charset=utf-8")
- rw.WriteHeader(http.StatusOK)
-
- _, _ = rw.Write(b)
- return
- }
-
- if next == nil {
- rw.Header().Set("Content-Type", "text/plain")
- rw.WriteHeader(http.StatusNotFound)
- _, _ = rw.Write([]byte(fmt.Sprintf("%q not found", pth)))
- return
- }
- next.ServeHTTP(rw, r)
- })
+ return serveUI(pth, assets.Bytes(), next)
}
const (
diff --git a/vendor/github.com/go-openapi/runtime/middleware/ui_options.go b/vendor/github.com/go-openapi/runtime/middleware/ui_options.go
new file mode 100644
index 00000000..b86efa00
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/middleware/ui_options.go
@@ -0,0 +1,173 @@
+package middleware
+
+import (
+ "bytes"
+ "encoding/gob"
+ "fmt"
+ "net/http"
+ "path"
+ "strings"
+)
+
+const (
+ // constants that are common to all UI-serving middlewares
+ defaultDocsPath = "docs"
+ defaultDocsURL = "/swagger.json"
+ defaultDocsTitle = "API Documentation"
+)
+
+// uiOptions defines common options for UI serving middlewares.
+type uiOptions struct {
+ // BasePath for the UI, defaults to: /
+ BasePath string
+
+ // Path combines with BasePath to construct the path to the UI, defaults to: "docs".
+ Path string
+
+ // SpecURL is the URL of the spec document.
+ //
+ // Defaults to: /swagger.json
+ SpecURL string
+
+ // Title for the documentation site, defaults to: API documentation
+ Title string
+
+ // Template specifies a custom template to serve the UI
+ Template string
+}
+
+// toCommonUIOptions converts any UI options type into the common options, retaining the shared fields.
+//
+// This uses gob encoding/decoding to convert common fields from one struct to another.
+func toCommonUIOptions(opts interface{}) uiOptions {
+ var buf bytes.Buffer
+ enc := gob.NewEncoder(&buf)
+ dec := gob.NewDecoder(&buf)
+ var o uiOptions
+ err := enc.Encode(opts)
+ if err != nil {
+ panic(err)
+ }
+
+ err = dec.Decode(&o)
+ if err != nil {
+ panic(err)
+ }
+
+ return o
+}
+
+func fromCommonToAnyOptions[T any](source uiOptions, target *T) {
+ var buf bytes.Buffer
+ enc := gob.NewEncoder(&buf)
+ dec := gob.NewDecoder(&buf)
+ err := enc.Encode(source)
+ if err != nil {
+ panic(err)
+ }
+
+ err = dec.Decode(target)
+ if err != nil {
+ panic(err)
+ }
+}
+
+// UIOption can be applied to UI serving middleware, such as Context.APIHandler or
+// Context.APIHandlerSwaggerUI to alter the default behavior.
+type UIOption func(*uiOptions)
+
+func uiOptionsWithDefaults(opts []UIOption) uiOptions {
+ var o uiOptions
+ for _, apply := range opts {
+ apply(&o)
+ }
+
+ return o
+}
+
+// WithUIBasePath sets the base path from where to serve the UI assets.
+//
+// By default, Context middleware sets this value to the API base path.
+func WithUIBasePath(base string) UIOption {
+ return func(o *uiOptions) {
+ if !strings.HasPrefix(base, "/") {
+ base = "/" + base
+ }
+ o.BasePath = base
+ }
+}
+
+// WithUIPath sets the path from where to serve the UI assets (i.e. /{basepath}/{path}).
+func WithUIPath(pth string) UIOption {
+ return func(o *uiOptions) {
+ o.Path = pth
+ }
+}
+
+// WithUISpecURL sets the path from where to serve the swagger spec document.
+//
+// This may be specified as a full URL or a path.
+//
+// By default, this is "/swagger.json"
+func WithUISpecURL(specURL string) UIOption {
+ return func(o *uiOptions) {
+ o.SpecURL = specURL
+ }
+}
+
+// WithUITitle sets the title of the UI.
+//
+// By default, Context middleware sets this value to the title found in the API spec.
+func WithUITitle(title string) UIOption {
+ return func(o *uiOptions) {
+ o.Title = title
+ }
+}
+
+// WithTemplate allows setting a custom template for the UI.
+//
+// UI middleware will panic if the template does not parse or execute properly.
+func WithTemplate(tpl string) UIOption {
+ return func(o *uiOptions) {
+ o.Template = tpl
+ }
+}
+
+// EnsureDefaults in case some options are missing
+func (r *uiOptions) EnsureDefaults() {
+ if r.BasePath == "" {
+ r.BasePath = "/"
+ }
+ if r.Path == "" {
+ r.Path = defaultDocsPath
+ }
+ if r.SpecURL == "" {
+ r.SpecURL = defaultDocsURL
+ }
+ if r.Title == "" {
+ r.Title = defaultDocsTitle
+ }
+}
+
+// serveUI creates a middleware that serves a templated asset as text/html.
+func serveUI(pth string, assets []byte, next http.Handler) http.Handler {
+ return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
+ if path.Clean(r.URL.Path) == pth {
+ rw.Header().Set(contentTypeHeader, "text/html; charset=utf-8")
+ rw.WriteHeader(http.StatusOK)
+ _, _ = rw.Write(assets)
+
+ return
+ }
+
+ if next != nil {
+ next.ServeHTTP(rw, r)
+
+ return
+ }
+
+ rw.Header().Set(contentTypeHeader, "text/plain")
+ rw.WriteHeader(http.StatusNotFound)
+ _, _ = rw.Write([]byte(fmt.Sprintf("%q not found", pth)))
+ })
+}
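
Usage note: UIOption values follow the functional-options pattern and apply left to right, so later options override earlier ones. A minimal sketch of composing them; the full Context wiring (e.g. APIHandlerSwaggerUI, per the doc comment above) is only referenced in a comment, since building a Context is out of scope here.

    package main

    import (
        "fmt"

        "github.com/go-openapi/runtime/middleware"
    )

    func main() {
        opts := []middleware.UIOption{
            middleware.WithUIBasePath("api"), // normalized to "/api" by the setter
            middleware.WithUIPath("docs"),
            middleware.WithUISpecURL("/api/swagger.json"),
            middleware.WithUITitle("GARM API"), // placeholder title
        }
        // These would typically be passed through, e.g.
        // ctx.APIHandlerSwaggerUI(builder, opts...).
        fmt.Printf("configured %d UI options\n", len(opts))
    }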
diff --git a/vendor/github.com/go-openapi/runtime/middleware/untyped/api.go b/vendor/github.com/go-openapi/runtime/middleware/untyped/api.go
index 39a85f7d..7b7269bd 100644
--- a/vendor/github.com/go-openapi/runtime/middleware/untyped/api.go
+++ b/vendor/github.com/go-openapi/runtime/middleware/untyped/api.go
@@ -197,30 +197,31 @@ func (d *API) Validate() error {
// validateWith validates the registrations in this API against the provided spec analyzer
func (d *API) validate() error {
- var consumes []string
+ consumes := make([]string, 0, len(d.consumers))
for k := range d.consumers {
consumes = append(consumes, k)
}
- var produces []string
+ produces := make([]string, 0, len(d.producers))
for k := range d.producers {
produces = append(produces, k)
}
- var authenticators []string
+ authenticators := make([]string, 0, len(d.authenticators))
for k := range d.authenticators {
authenticators = append(authenticators, k)
}
- var operations []string
+ operations := make([]string, 0, len(d.operations))
for m, v := range d.operations {
for p := range v {
operations = append(operations, fmt.Sprintf("%s %s", strings.ToUpper(m), p))
}
}
- var definedAuths []string
- for k := range d.spec.Spec().SecurityDefinitions {
+ secDefinitions := d.spec.Spec().SecurityDefinitions
+ definedAuths := make([]string, 0, len(secDefinitions))
+ for k := range secDefinitions {
definedAuths = append(definedAuths, k)
}
@@ -267,7 +268,7 @@ func (d *API) verify(name string, registrations []string, expectations []string)
delete(expected, k)
}
- var unregistered []string
+ unregistered := make([]string, 0, len(expected))
for k := range expected {
unregistered = append(unregistered, k)
}
diff --git a/vendor/github.com/go-openapi/runtime/middleware/validation.go b/vendor/github.com/go-openapi/runtime/middleware/validation.go
index 1f0135b5..0a5356c6 100644
--- a/vendor/github.com/go-openapi/runtime/middleware/validation.go
+++ b/vendor/github.com/go-openapi/runtime/middleware/validation.go
@@ -35,7 +35,6 @@ type validation struct {
// ContentType validates the content type of a request
func validateContentType(allowed []string, actual string) error {
- debugLog("validating content type for %q against [%s]", actual, strings.Join(allowed, ", "))
if len(allowed) == 0 {
return nil
}
@@ -57,13 +56,13 @@ func validateContentType(allowed []string, actual string) error {
}
func validateRequest(ctx *Context, request *http.Request, route *MatchedRoute) *validation {
- debugLog("validating request %s %s", request.Method, request.URL.EscapedPath())
validate := &validation{
context: ctx,
request: request,
route: route,
bound: make(map[string]interface{}),
}
+ validate.debugLogf("validating request %s %s", request.Method, request.URL.EscapedPath())
validate.contentType()
if len(validate.result) == 0 {
@@ -76,8 +75,12 @@ func validateRequest(ctx *Context, request *http.Request, route *MatchedRoute) *
return validate
}
+func (v *validation) debugLogf(format string, args ...any) {
+ v.context.debugLogf(format, args...)
+}
+
func (v *validation) parameters() {
- debugLog("validating request parameters for %s %s", v.request.Method, v.request.URL.EscapedPath())
+ v.debugLogf("validating request parameters for %s %s", v.request.Method, v.request.URL.EscapedPath())
if result := v.route.Binder.Bind(v.request, v.route.Params, v.route.Consumer, v.bound); result != nil {
if result.Error() == "validation failure list" {
for _, e := range result.(*errors.Validation).Value.([]interface{}) {
@@ -91,7 +94,7 @@ func (v *validation) parameters() {
func (v *validation) contentType() {
if len(v.result) == 0 && runtime.HasBody(v.request) {
- debugLog("validating body content type for %s %s", v.request.Method, v.request.URL.EscapedPath())
+ v.debugLogf("validating body content type for %s %s", v.request.Method, v.request.URL.EscapedPath())
ct, _, req, err := v.context.ContentType(v.request)
if err != nil {
v.result = append(v.result, err)
@@ -100,6 +103,7 @@ func (v *validation) contentType() {
}
if len(v.result) == 0 {
+ v.debugLogf("validating content type for %q against [%s]", ct, strings.Join(v.route.Consumes, ", "))
if err := validateContentType(v.route.Consumes, ct); err != nil {
v.result = append(v.result, err)
}
diff --git a/vendor/github.com/go-openapi/runtime/request.go b/vendor/github.com/go-openapi/runtime/request.go
index 078fda17..9e3e1ecb 100644
--- a/vendor/github.com/go-openapi/runtime/request.go
+++ b/vendor/github.com/go-openapi/runtime/request.go
@@ -16,6 +16,8 @@ package runtime
import (
"bufio"
+ "context"
+ "errors"
"io"
"net/http"
"strings"
@@ -96,10 +98,16 @@ func (p *peekingReader) Read(d []byte) (int, error) {
if p == nil {
return 0, io.EOF
}
+ if p.underlying == nil {
+ return 0, io.ErrUnexpectedEOF
+ }
return p.underlying.Read(d)
}
func (p *peekingReader) Close() error {
+ if p.underlying == nil {
+ return errors.New("reader already closed")
+ }
p.underlying = nil
if p.orig != nil {
return p.orig.Close()
@@ -107,9 +115,11 @@ func (p *peekingReader) Close() error {
return nil
}
-// JSONRequest creates a new http request with json headers set
+// JSONRequest creates a new http request with json headers set.
+//
+// It uses context.Background.
func JSONRequest(method, urlStr string, body io.Reader) (*http.Request, error) {
- req, err := http.NewRequest(method, urlStr, body)
+ req, err := http.NewRequestWithContext(context.Background(), method, urlStr, body)
if err != nil {
return nil, err
}
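
Usage note: since JSONRequest now pins context.Background, callers that need cancellation can derive a context afterwards with WithContext. A minimal sketch; the URL and payload are placeholders.

    package main

    import (
        "context"
        "fmt"
        "log"
        "strings"
        "time"

        "github.com/go-openapi/runtime"
    )

    func main() {
        req, err := runtime.JSONRequest("POST", "http://localhost:8080/api/v1/pools", strings.NewReader(`{}`))
        if err != nil {
            log.Fatal(err)
        }
        // JSONRequest always starts from context.Background; attach a deadline
        // afterwards if the caller needs cancellation.
        ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
        defer cancel()
        req = req.WithContext(ctx)

        fmt.Println(req.Header.Get("Content-Type")) // application/json
    }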
diff --git a/vendor/github.com/go-openapi/runtime/security/authenticator.go b/vendor/github.com/go-openapi/runtime/security/authenticator.go
index c3ffdac7..bb30472b 100644
--- a/vendor/github.com/go-openapi/runtime/security/authenticator.go
+++ b/vendor/github.com/go-openapi/runtime/security/authenticator.go
@@ -25,12 +25,13 @@ import (
)
const (
- query = "query"
- header = "header"
+ query = "query"
+ header = "header"
+ accessTokenParam = "access_token"
)
// HttpAuthenticator is a function that authenticates a HTTP request
-func HttpAuthenticator(handler func(*http.Request) (bool, interface{}, error)) runtime.Authenticator {
+func HttpAuthenticator(handler func(*http.Request) (bool, interface{}, error)) runtime.Authenticator { //nolint:revive,stylecheck
return runtime.AuthenticatorFunc(func(params interface{}) (bool, interface{}, error) {
if request, ok := params.(*http.Request); ok {
return handler(request)
@@ -158,7 +159,7 @@ func APIKeyAuth(name, in string, authenticate TokenAuthentication) runtime.Authe
inl := strings.ToLower(in)
if inl != query && inl != header {
// panic because this is most likely a typo
- panic(errors.New(500, "api key auth: in value needs to be either \"query\" or \"header\"."))
+ panic(errors.New(500, "api key auth: in value needs to be either \"query\" or \"header\""))
}
var getToken func(*http.Request) string
@@ -186,7 +187,7 @@ func APIKeyAuthCtx(name, in string, authenticate TokenAuthenticationCtx) runtime
inl := strings.ToLower(in)
if inl != query && inl != header {
// panic because this is most likely a typo
- panic(errors.New(500, "api key auth: in value needs to be either \"query\" or \"header\"."))
+ panic(errors.New(500, "api key auth: in value needs to be either \"query\" or \"header\""))
}
var getToken func(*http.Request) string
@@ -226,12 +227,12 @@ func BearerAuth(name string, authenticate ScopedTokenAuthentication) runtime.Aut
}
if token == "" {
qs := r.Request.URL.Query()
- token = qs.Get("access_token")
+ token = qs.Get(accessTokenParam)
}
//#nosec
ct, _, _ := runtime.ContentType(r.Request.Header)
if token == "" && (ct == "application/x-www-form-urlencoded" || ct == "multipart/form-data") {
- token = r.Request.FormValue("access_token")
+ token = r.Request.FormValue(accessTokenParam)
}
if token == "" {
@@ -256,12 +257,12 @@ func BearerAuthCtx(name string, authenticate ScopedTokenAuthenticationCtx) runti
}
if token == "" {
qs := r.Request.URL.Query()
- token = qs.Get("access_token")
+ token = qs.Get(accessTokenParam)
}
//#nosec
ct, _, _ := runtime.ContentType(r.Request.Header)
if token == "" && (ct == "application/x-www-form-urlencoded" || ct == "multipart/form-data") {
- token = r.Request.FormValue("access_token")
+ token = r.Request.FormValue(accessTokenParam)
}
if token == "" {
diff --git a/vendor/github.com/go-openapi/runtime/yamlpc/yaml.go b/vendor/github.com/go-openapi/runtime/yamlpc/yaml.go
index b30d3771..a1a0a589 100644
--- a/vendor/github.com/go-openapi/runtime/yamlpc/yaml.go
+++ b/vendor/github.com/go-openapi/runtime/yamlpc/yaml.go
@@ -18,8 +18,7 @@ import (
"io"
"github.com/go-openapi/runtime"
-
- "gopkg.in/yaml.v2"
+ "gopkg.in/yaml.v3"
)
// YAMLConsumer creates a consumer for yaml data
diff --git a/vendor/github.com/go-openapi/spec/.gitignore b/vendor/github.com/go-openapi/spec/.gitignore
index dd91ed6a..f47cb204 100644
--- a/vendor/github.com/go-openapi/spec/.gitignore
+++ b/vendor/github.com/go-openapi/spec/.gitignore
@@ -1,2 +1 @@
-secrets.yml
-coverage.out
+*.out
diff --git a/vendor/github.com/go-openapi/spec/.golangci.yml b/vendor/github.com/go-openapi/spec/.golangci.yml
index 835d55e7..22f8d21c 100644
--- a/vendor/github.com/go-openapi/spec/.golangci.yml
+++ b/vendor/github.com/go-openapi/spec/.golangci.yml
@@ -11,7 +11,7 @@ linters-settings:
threshold: 200
goconst:
min-len: 2
- min-occurrences: 2
+ min-occurrences: 3
linters:
enable-all: true
@@ -40,3 +40,22 @@ linters:
- tparallel
- thelper
- ifshort
+ - exhaustruct
+ - varnamelen
+ - gci
+ - depguard
+ - errchkjson
+ - inamedparam
+ - nonamedreturns
+ - musttag
+ - ireturn
+ - forcetypeassert
+ - cyclop
+ # deprecated linters
+ - deadcode
+ - interfacer
+ - scopelint
+ - varcheck
+ - structcheck
+ - golint
+ - nosnakecase
diff --git a/vendor/github.com/go-openapi/spec/README.md b/vendor/github.com/go-openapi/spec/README.md
index 18782c6d..7fd2810c 100644
--- a/vendor/github.com/go-openapi/spec/README.md
+++ b/vendor/github.com/go-openapi/spec/README.md
@@ -1,8 +1,5 @@
-# OAI object model
+# OpenAPI v2 object model [](https://github.com/go-openapi/spec/actions?query=workflow%3A"go+test") [](https://codecov.io/gh/go-openapi/spec)
-[](https://travis-ci.org/go-openapi/spec)
-
-[](https://codecov.io/gh/go-openapi/spec)
[](https://slackin.goswagger.io)
[](https://raw.githubusercontent.com/go-openapi/spec/master/LICENSE)
[](https://pkg.go.dev/github.com/go-openapi/spec)
@@ -32,3 +29,26 @@ The object model for OpenAPI specification documents.
> This [discussion thread](https://github.com/go-openapi/spec/issues/21) relates the full story.
>
> An early attempt to support Swagger 3 may be found at: https://github.com/go-openapi/spec3
+
+* Does the unmarshaling support YAML?
+
+> Not directly. The exposed types know only how to unmarshal from JSON.
+>
+> In order to load a YAML document as a Swagger spec, you need to use the loaders provided by
+> github.com/go-openapi/loads
+>
+> Take a look at the example there: https://pkg.go.dev/github.com/go-openapi/loads#example-Spec
+>
+> See also https://github.com/go-openapi/spec/issues/164
+
+* How can I validate a spec?
+
+> Validation is provided by [the validate package](http://github.com/go-openapi/validate)
+
+* Why do we have an `ID` field for `Schema` which is not part of the swagger spec?
+
+> We found jsonschema compatibility more important, since `id` in jsonschema influences
+> how `$ref`s are resolved.
+> This `id` does not conflict with any property named `id`.
+>
+> See also https://github.com/go-openapi/spec/issues/23
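
To make the YAML answer above concrete, a minimal sketch using github.com/go-openapi/loads; the file path is a placeholder.

    package main

    import (
        "fmt"
        "log"

        "github.com/go-openapi/loads"
    )

    func main() {
        // loads converts the YAML document to JSON before handing it to spec.
        doc, err := loads.Spec("./swagger.yaml") // placeholder path
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("swagger version:", doc.Version())
    }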
diff --git a/vendor/github.com/go-openapi/spec/appveyor.yml b/vendor/github.com/go-openapi/spec/appveyor.yml
deleted file mode 100644
index 09035939..00000000
--- a/vendor/github.com/go-openapi/spec/appveyor.yml
+++ /dev/null
@@ -1,32 +0,0 @@
-version: "0.1.{build}"
-
-clone_folder: C:\go-openapi\spec
-shallow_clone: true # for startup speed
-pull_requests:
- do_not_increment_build_number: true
-
-#skip_tags: true
-#skip_branch_with_pr: true
-
-# appveyor.yml
-build: off
-
-environment:
- GOPATH: c:\gopath
-
-stack: go 1.15
-
-test_script:
- - go test -v -timeout 20m ./...
-
-deploy: off
-
-notifications:
- - provider: Slack
- incoming_webhook: https://hooks.slack.com/services/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
- auth_token:
- secure: Sf7kZf7ZGbnwWUMpffHwMu5A0cHkLK2MYY32LNTPj4+/3qC3Ghl7+9v4TSLOqOlCwdRNjOGblAq7s+GDJed6/xgRQl1JtCi1klzZNrYX4q01pgTPvvGcwbBkIYgeMaPeIRcK9OZnud7sRXdttozgTOpytps2U6Js32ip7uj5mHSg2ub0FwoSJwlS6dbezZ8+eDhoha0F/guY99BEwx8Bd+zROrT2TFGsSGOFGN6wFc7moCqTHO/YkWib13a2QNXqOxCCVBy/lt76Wp+JkeFppjHlzs/2lP3EAk13RIUAaesdEUHvIHrzCyNJEd3/+KO2DzsWOYfpktd+KBCvgaYOsoo7ubdT3IROeAegZdCgo/6xgCEsmFc9ZcqCfN5yNx2A+BZ2Vwmpws+bQ1E1+B5HDzzaiLcYfG4X2O210QVGVDLWsv1jqD+uPYeHY2WRfh5ZsIUFvaqgUEnwHwrK44/8REAhQavt1QAj5uJpsRd7CkRVPWRNK+yIky+wgbVUFEchRNmS55E7QWf+W4+4QZkQi7vUTMc9nbTUu2Es9NfvfudOpM2wZbn98fjpb/qq/nRv6Bk+ca+7XD5/IgNLMbWp2ouDdzbiHLCOfDUiHiDJhLfFZx9Bwo7ZwfzeOlbrQX66bx7xRKYmOe4DLrXhNcpbsMa8qbfxlZRCmYbubB/Y8h4=
- channel: bots
- on_build_success: false
- on_build_failure: true
- on_build_status_changed: true
diff --git a/vendor/github.com/go-openapi/spec/bindata.go b/vendor/github.com/go-openapi/spec/bindata.go
deleted file mode 100644
index afc83850..00000000
--- a/vendor/github.com/go-openapi/spec/bindata.go
+++ /dev/null
@@ -1,297 +0,0 @@
-// Code generated by go-bindata. DO NOT EDIT.
-// sources:
-// schemas/jsonschema-draft-04.json (4.357kB)
-// schemas/v2/schema.json (40.248kB)
-
-package spec
-
-import (
- "bytes"
- "compress/gzip"
- "crypto/sha256"
- "fmt"
- "io"
- "io/ioutil"
- "os"
- "path/filepath"
- "strings"
- "time"
-)
-
-func bindataRead(data []byte, name string) ([]byte, error) {
- gz, err := gzip.NewReader(bytes.NewBuffer(data))
- if err != nil {
- return nil, fmt.Errorf("read %q: %v", name, err)
- }
-
- var buf bytes.Buffer
- _, err = io.Copy(&buf, gz)
- clErr := gz.Close()
-
- if err != nil {
- return nil, fmt.Errorf("read %q: %v", name, err)
- }
- if clErr != nil {
- return nil, err
- }
-
- return buf.Bytes(), nil
-}
-
-type asset struct {
- bytes []byte
- info os.FileInfo
- digest [sha256.Size]byte
-}
-
-type bindataFileInfo struct {
- name string
- size int64
- mode os.FileMode
- modTime time.Time
-}
-
-func (fi bindataFileInfo) Name() string {
- return fi.name
-}
-func (fi bindataFileInfo) Size() int64 {
- return fi.size
-}
-func (fi bindataFileInfo) Mode() os.FileMode {
- return fi.mode
-}
-func (fi bindataFileInfo) ModTime() time.Time {
- return fi.modTime
-}
-func (fi bindataFileInfo) IsDir() bool {
- return false
-}
-func (fi bindataFileInfo) Sys() interface{} {
- return nil
-}
-
-var _jsonschemaDraft04Json = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xc4\x57\x3d\x6f\xdb\x3c\x10\xde\xf3\x2b\x08\x26\x63\xf2\x2a\x2f\xd0\xc9\x5b\xd1\x2e\x01\x5a\x34\x43\x37\x23\x03\x6d\x9d\x6c\x06\x14\xa9\x50\x54\x60\xc3\xd0\x7f\x2f\x28\x4a\x14\x29\x91\x92\x2d\xa7\x8d\x97\x28\xbc\xaf\xe7\x8e\xf7\xc5\xd3\x0d\x42\x08\x61\x9a\xe2\x15\xc2\x7b\xa5\x8a\x55\x92\xbc\x96\x82\x3f\x94\xdb\x3d\xe4\xe4\x3f\x21\x77\x49\x2a\x49\xa6\x1e\x1e\xbf\x24\xe6\xec\x16\xdf\x1b\xa1\x3b\xf3\xff\x02\xc9\x14\xca\xad\xa4\x85\xa2\x82\x6b\xe9\x6f\x42\x02\x32\x2c\x28\x07\x45\x5a\x15\x3d\x77\x46\x39\xd5\xcc\x25\x5e\x21\x83\xb8\x21\x18\xb6\xaf\x52\x92\xa3\x47\x68\x88\xea\x58\x80\x56\x4e\x1a\xf2\xbd\x4f\xcc\x29\x7f\x52\x90\x6b\x7d\xff\x0f\x48\xb4\x3d\x3f\x21\x7c\x27\x21\xd3\x2a\x6e\x31\xaa\x2d\x53\xdd\xf3\xe3\x42\x94\x54\xd1\x77\x78\xe2\x0a\x76\x20\xe3\x20\x68\xcb\x30\x86\x41\xf3\x2a\xc7\x2b\xf4\x78\x8e\xfe\xef\x90\x91\x8a\xa9\xc7\xb1\x1d\xc2\xd8\x2f\x0d\x75\xed\xc1\x4e\x9c\xc8\x25\x43\xac\xa8\xbe\xd7\xcc\xa9\xd1\xa9\x21\xa0\x1a\xbd\x04\x61\x94\x34\x2f\x18\xfc\x3e\x16\x50\x8e\x4d\x03\x6f\x1c\x58\xdb\x48\x23\xbc\x11\x82\x01\xe1\xfa\xd3\x3a\x8e\x30\xaf\x18\x33\x7f\xf3\x8d\x39\x11\x9b\x57\xd8\x2a\xfd\x55\x2a\x49\xf9\x0e\xc7\xec\x37\xd4\x25\xf7\xec\x5c\x66\xc7\xd7\x99\xaa\xcf\x4f\x89\x8a\xd3\xb7\x0a\x3a\xaa\x92\x15\xf4\x30\x6f\x1c\xb0\xd6\x46\xe7\x98\x39\x2d\xa4\x28\x40\x2a\x3a\x88\x9e\x29\xba\x88\x37\x2d\xca\x60\x38\xfa\xba\x5b\x20\xac\xa8\x62\xb0\x4c\xd4\xaf\xda\x45\x0a\xba\x5c\x3b\xb9\xc7\x79\xc5\x14\x2d\x18\x34\x19\x1c\x51\xdb\x25\x4d\xb4\x7e\x06\x14\x38\x6c\x59\x55\xd2\x77\xf8\x69\x59\xfc\x7b\x73\xed\x93\x43\xcb\x32\x6d\x3c\x28\xdc\x1b\x9a\xd3\x62\xab\xc2\x27\xf7\x41\xc9\x08\x2b\x23\x08\xad\x13\x57\x21\x9c\xd3\x72\x0d\x42\x72\xf8\x01\x7c\xa7\xf6\x83\xce\x39\xd7\x82\x3c\x1f\x2f\xd6\x60\x1b\xa2\xdf\x35\x89\x52\x20\xe7\x73\x74\xe0\x66\x26\x64\x4e\xb4\x97\x58\xc2\x0e\x0e\xe1\x60\x92\x34\x6d\xa0\x10\xd6\xb5\x83\x61\x27\xe6\x47\xd3\x89\xbd\x63\xfd\x3b\x8d\x03\x3d\x6c\x42\x2d\x5b\x70\xee\xe8\xdf\x4b\xf4\x66\x4e\xe1\x01\x45\x17\x80\x74\xad\x4f\xc3\xf3\xae\xc6\x1d\xc6\xd7\xc2\xce\xc9\xe1\x29\x30\x86\x2f\x4a\xa6\x4b\x15\x84\x73\xc9\x6f\xfd\x7f\xa5\x6e\x9e\xbd\xf1\xb0\xd4\xdd\x45\x5a\xc2\x3e\x4b\x78\xab\xa8\x84\x74\x4a\x91\x3b\x92\x23\x05\xf2\x1c\x1e\x7b\xf3\x09\xf8\xcf\xab\x24\xb6\x60\xa2\xe8\x4c\x9f\x75\x77\xaa\x8c\xe6\x01\x45\x36\x86\xcf\xc3\x63\x3a\xea\xd4\x8d\x7e\x06\xac\x14\x0a\xe0\x29\xf0\xed\x07\x22\x1a\x65\xda\x44\xae\xa2\x73\x1a\xe6\x90\x69\xa2\x8c\x46\xb2\x2f\xde\x49\x38\x08\xed\xfe\xfd\x41\xaf\x9f\xa9\x55\xd7\xdd\x22\x8d\xfa\x45\x63\xc5\x0f\x80\xf3\xb4\x08\xd6\x79\x30\x9e\x93\xee\x59\xa6\xd0\x4b\xee\x22\xe3\x33\xc1\x3a\x27\x68\x36\x78\x7e\x87\x0a\x06\xd5\x2e\x20\xd3\xaf\x15\xfb\xd8\x3b\x73\x14\xbb\x92\xed\x05\x5d\x2e\x29\x38\x2c\x94\xe4\x42\x45\x5e\xd3\xb5\x7d\xdf\x47\xca\x38\xb4\x5c\xaf\xfb\x7d\xdd\x6d\xf4\xa1\x2d\x77\xdd\x2f\xce\x6d\xc4\x7b\x8b\x4e\x67\xa9\x6f\xfe\x04\x00\x00\xff\xff\xb1\xd1\x27\x78\x05\x11\x00\x00")
-
-func jsonschemaDraft04JsonBytes() ([]byte, error) {
- return bindataRead(
- _jsonschemaDraft04Json,
- "jsonschema-draft-04.json",
- )
-}
-
-func jsonschemaDraft04Json() (*asset, error) {
- bytes, err := jsonschemaDraft04JsonBytes()
- if err != nil {
- return nil, err
- }
-
- info := bindataFileInfo{name: "jsonschema-draft-04.json", size: 4357, mode: os.FileMode(0640), modTime: time.Unix(1568963823, 0)}
- a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe1, 0x48, 0x9d, 0xb, 0x47, 0x55, 0xf0, 0x27, 0x93, 0x30, 0x25, 0x91, 0xd3, 0xfc, 0xb8, 0xf0, 0x7b, 0x68, 0x93, 0xa8, 0x2a, 0x94, 0xf2, 0x48, 0x95, 0xf8, 0xe4, 0xed, 0xf1, 0x1b, 0x82, 0xe2}}
- return a, nil
-}
-
-var _v2SchemaJson = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x5d\x4f\x93\xdb\x36\xb2\xbf\xfb\x53\xa0\x14\x57\xd9\xae\xd8\x92\xe3\xf7\x2e\xcf\x97 [... gzipped v2/schema.json payload, 40.248 kB uncompressed, a single line in the source; blob elided ...] \xd2\x32\x5a\x28\x38\x9d\x00\x00")
-
-func v2SchemaJsonBytes() ([]byte, error) {
- return bindataRead(
- _v2SchemaJson,
- "v2/schema.json",
- )
-}
-
-func v2SchemaJson() (*asset, error) {
- bytes, err := v2SchemaJsonBytes()
- if err != nil {
- return nil, err
- }
-
- info := bindataFileInfo{name: "v2/schema.json", size: 40248, mode: os.FileMode(0640), modTime: time.Unix(1568964748, 0)}
- a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xab, 0x88, 0x5e, 0xf, 0xbf, 0x17, 0x74, 0x0, 0xb2, 0x5a, 0x7f, 0xbc, 0x58, 0xcd, 0xc, 0x25, 0x73, 0xd5, 0x29, 0x1c, 0x7a, 0xd0, 0xce, 0x79, 0xd4, 0x89, 0x31, 0x27, 0x90, 0xf2, 0xff, 0xe6}}
- return a, nil
-}
-
-// Asset loads and returns the asset for the given name.
-// It returns an error if the asset could not be found or
-// could not be loaded.
-func Asset(name string) ([]byte, error) {
- canonicalName := strings.Replace(name, "\\", "/", -1)
- if f, ok := _bindata[canonicalName]; ok {
- a, err := f()
- if err != nil {
- return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err)
- }
- return a.bytes, nil
- }
- return nil, fmt.Errorf("Asset %s not found", name)
-}
-
-// AssetString returns the asset contents as a string (instead of a []byte).
-func AssetString(name string) (string, error) {
- data, err := Asset(name)
- return string(data), err
-}
-
-// MustAsset is like Asset but panics when Asset would return an error.
-// It simplifies safe initialization of global variables.
-func MustAsset(name string) []byte {
- a, err := Asset(name)
- if err != nil {
- panic("asset: Asset(" + name + "): " + err.Error())
- }
-
- return a
-}
-
-// MustAssetString is like AssetString but panics when Asset would return an
-// error. It simplifies safe initialization of global variables.
-func MustAssetString(name string) string {
- return string(MustAsset(name))
-}
-
-// AssetInfo loads and returns the asset info for the given name.
-// It returns an error if the asset could not be found or
-// could not be loaded.
-func AssetInfo(name string) (os.FileInfo, error) {
- canonicalName := strings.Replace(name, "\\", "/", -1)
- if f, ok := _bindata[canonicalName]; ok {
- a, err := f()
- if err != nil {
- return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err)
- }
- return a.info, nil
- }
- return nil, fmt.Errorf("AssetInfo %s not found", name)
-}
-
-// AssetDigest returns the digest of the file with the given name. It returns an
-// error if the asset could not be found or the digest could not be loaded.
-func AssetDigest(name string) ([sha256.Size]byte, error) {
- canonicalName := strings.Replace(name, "\\", "/", -1)
- if f, ok := _bindata[canonicalName]; ok {
- a, err := f()
- if err != nil {
- return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s can't read by error: %v", name, err)
- }
- return a.digest, nil
- }
- return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s not found", name)
-}
-
-// Digests returns a map of all known files and their checksums.
-func Digests() (map[string][sha256.Size]byte, error) {
- mp := make(map[string][sha256.Size]byte, len(_bindata))
- for name := range _bindata {
- a, err := _bindata[name]()
- if err != nil {
- return nil, err
- }
- mp[name] = a.digest
- }
- return mp, nil
-}
-
-// AssetNames returns the names of the assets.
-func AssetNames() []string {
- names := make([]string, 0, len(_bindata))
- for name := range _bindata {
- names = append(names, name)
- }
- return names
-}
-
-// _bindata is a table, holding each asset generator, mapped to its name.
-var _bindata = map[string]func() (*asset, error){
- "jsonschema-draft-04.json": jsonschemaDraft04Json,
-
- "v2/schema.json": v2SchemaJson,
-}
-
-// AssetDir returns the file names below a certain
-// directory embedded in the file by go-bindata.
-// For example if you run go-bindata on data/... and data contains the
-// following hierarchy:
-// data/
-// foo.txt
-// img/
-// a.png
-// b.png
-// then AssetDir("data") would return []string{"foo.txt", "img"},
-// AssetDir("data/img") would return []string{"a.png", "b.png"},
-// AssetDir("foo.txt") and AssetDir("notexist") would return an error, and
-// AssetDir("") will return []string{"data"}.
-func AssetDir(name string) ([]string, error) {
- node := _bintree
- if len(name) != 0 {
- canonicalName := strings.Replace(name, "\\", "/", -1)
- pathList := strings.Split(canonicalName, "/")
- for _, p := range pathList {
- node = node.Children[p]
- if node == nil {
- return nil, fmt.Errorf("Asset %s not found", name)
- }
- }
- }
- if node.Func != nil {
- return nil, fmt.Errorf("Asset %s not found", name)
- }
- rv := make([]string, 0, len(node.Children))
- for childName := range node.Children {
- rv = append(rv, childName)
- }
- return rv, nil
-}
-
-type bintree struct {
- Func func() (*asset, error)
- Children map[string]*bintree
-}
-
-var _bintree = &bintree{nil, map[string]*bintree{
- "jsonschema-draft-04.json": {jsonschemaDraft04Json, map[string]*bintree{}},
- "v2": {nil, map[string]*bintree{
- "schema.json": {v2SchemaJson, map[string]*bintree{}},
- }},
-}}
-
-// RestoreAsset restores an asset under the given directory.
-func RestoreAsset(dir, name string) error {
- data, err := Asset(name)
- if err != nil {
- return err
- }
- info, err := AssetInfo(name)
- if err != nil {
- return err
- }
- err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))
- if err != nil {
- return err
- }
- err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())
- if err != nil {
- return err
- }
- return os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())
-}
-
-// RestoreAssets restores an asset under the given directory recursively.
-func RestoreAssets(dir, name string) error {
- children, err := AssetDir(name)
- // File
- if err != nil {
- return RestoreAsset(dir, name)
- }
- // Dir
- for _, child := range children {
- err = RestoreAssets(dir, filepath.Join(name, child))
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-func _filePath(dir, name string) string {
- canonicalName := strings.Replace(name, "\\", "/", -1)
- return filepath.Join(append([]string{dir}, strings.Split(canonicalName, "/")...)...)
-}
diff --git a/vendor/github.com/go-openapi/spec/embed.go b/vendor/github.com/go-openapi/spec/embed.go
new file mode 100644
index 00000000..1f428475
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/embed.go
@@ -0,0 +1,17 @@
+package spec
+
+import (
+ "embed"
+ "path"
+)
+
+//go:embed schemas/*.json schemas/*/*.json
+var assets embed.FS
+
+func jsonschemaDraft04JSONBytes() ([]byte, error) {
+ return assets.ReadFile(path.Join("schemas", "jsonschema-draft-04.json"))
+}
+
+func v2SchemaJSONBytes() ([]byte, error) {
+ return assets.ReadFile(path.Join("schemas", "v2", "schema.json"))
+}
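
Migration note: the go:embed assets above replace the deleted go-bindata accessors in bindata.go. A minimal sketch of the same pattern in user code, assuming a local schemas/ directory exists at build time.

    package main

    import (
        "embed"
        "fmt"
        "log"
    )

    // Assumes a schemas/ directory exists next to this file at build time.
    //
    //go:embed schemas/*.json
    var assets embed.FS

    func main() {
        // Equivalent of the removed go-bindata Asset("jsonschema-draft-04.json"):
        // a direct read from the embedded filesystem, with no generated tables.
        b, err := assets.ReadFile("schemas/jsonschema-draft-04.json")
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(len(b), "bytes")
    }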
diff --git a/vendor/github.com/go-openapi/spec/expander.go b/vendor/github.com/go-openapi/spec/expander.go
index d4ea889d..b81a5699 100644
--- a/vendor/github.com/go-openapi/spec/expander.go
+++ b/vendor/github.com/go-openapi/spec/expander.go
@@ -27,7 +27,6 @@ import (
// all relative $ref's will be resolved from there.
//
// PathLoader injects a document loading method. By default, this resolves to the function provided by the SpecLoader package variable.
-//
type ExpandOptions struct {
RelativeBase string // the path to the root document to expand. This is a file, not a directory
SkipSchemas bool // do not expand schemas, just paths, parameters and responses
@@ -58,7 +57,7 @@ func ExpandSpec(spec *Swagger, options *ExpandOptions) error {
if !options.SkipSchemas {
for key, definition := range spec.Definitions {
parentRefs := make([]string, 0, 10)
- parentRefs = append(parentRefs, fmt.Sprintf("#/definitions/%s", key))
+ parentRefs = append(parentRefs, "#/definitions/"+key)
def, err := expandSchema(definition, parentRefs, resolver, specBasePath)
if resolver.shouldStopOnError(err) {
@@ -103,15 +102,21 @@ const rootBase = ".root"
// baseForRoot loads in the cache the root document and produces a fake ".root" base path entry
// for further $ref resolution
-//
-// Setting the cache is optional and this parameter may safely be left to nil.
func baseForRoot(root interface{}, cache ResolutionCache) string {
- if root == nil {
- return ""
- }
-
// cache the root document to resolve $ref's
normalizedBase := normalizeBase(rootBase)
+
+ if root == nil {
+ // ensure that we never leave a nil root: always cache the root base pseudo-document
+ cachedRoot, found := cache.Get(normalizedBase)
+ if found && cachedRoot != nil {
+ // the cache is already preloaded with a root
+ return normalizedBase
+ }
+
+ root = map[string]interface{}{}
+ }
+
cache.Set(normalizedBase, root)
return normalizedBase
@@ -208,7 +213,19 @@ func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader, ba
}
if target.Ref.String() != "" {
- return expandSchemaRef(target, parentRefs, resolver, basePath)
+ if !resolver.options.SkipSchemas {
+ return expandSchemaRef(target, parentRefs, resolver, basePath)
+ }
+
+ // when "expand" with SkipSchema, we just rebase the existing $ref without replacing
+ // the full schema.
+ rebasedRef, err := NewRef(normalizeURI(target.Ref.String(), basePath))
+ if err != nil {
+ return nil, err
+ }
+ target.Ref = denormalizeRef(&rebasedRef, resolver.context.basePath, resolver.context.rootID)
+
+ return &target, nil
}
for k := range target.Definitions {
@@ -520,21 +537,25 @@ func getRefAndSchema(input interface{}) (*Ref, *Schema, error) {
}
func expandParameterOrResponse(input interface{}, resolver *schemaLoader, basePath string) error {
- ref, _, err := getRefAndSchema(input)
+ ref, sch, err := getRefAndSchema(input)
if err != nil {
return err
}
- if ref == nil {
+ if ref == nil && sch == nil { // nothing to do
return nil
}
parentRefs := make([]string, 0, 10)
- if err = resolver.deref(input, parentRefs, basePath); resolver.shouldStopOnError(err) {
- return err
+ if ref != nil {
+ // dereference this $ref
+ if err = resolver.deref(input, parentRefs, basePath); resolver.shouldStopOnError(err) {
+ return err
+ }
+
+ ref, sch, _ = getRefAndSchema(input)
}
- ref, sch, _ := getRefAndSchema(input)
if ref.String() != "" {
transitiveResolver := resolver.transitiveResolver(basePath, *ref)
basePath = resolver.updateBasePath(transitiveResolver, basePath)
@@ -546,6 +567,7 @@ func expandParameterOrResponse(input interface{}, resolver *schemaLoader, basePa
if ref != nil {
*ref = Ref{}
}
+
return nil
}
@@ -555,38 +577,29 @@ func expandParameterOrResponse(input interface{}, resolver *schemaLoader, basePa
return ern
}
- switch {
- case resolver.isCircular(&rebasedRef, basePath, parentRefs...):
+ if resolver.isCircular(&rebasedRef, basePath, parentRefs...) {
// this is a circular $ref: stop expansion
if !resolver.options.AbsoluteCircularRef {
sch.Ref = denormalizeRef(&rebasedRef, resolver.context.basePath, resolver.context.rootID)
} else {
sch.Ref = rebasedRef
}
- case !resolver.options.SkipSchemas:
- // schema expanded to a $ref in another root
- sch.Ref = rebasedRef
- debugLog("rebased to: %s", sch.Ref.String())
- default:
- // skip schema expansion but rebase $ref to schema
- sch.Ref = denormalizeRef(&rebasedRef, resolver.context.basePath, resolver.context.rootID)
}
}
+ // $ref expansion or rebasing is performed by expandSchema below
if ref != nil {
*ref = Ref{}
}
// expand schema
- if !resolver.options.SkipSchemas {
- s, err := expandSchema(*sch, parentRefs, resolver, basePath)
- if resolver.shouldStopOnError(err) {
- return err
- }
- if s == nil {
- // guard for when continuing on error
- return nil
- }
+ // yes, we do it even if options.SkipSchemas is true: we have to go down that rabbit hole and rebase nested $ref's
+ s, err := expandSchema(*sch, parentRefs, resolver, basePath)
+ if resolver.shouldStopOnError(err) {
+ return err
+ }
+
+ if s != nil { // guard for when continuing on error
*sch = *s
}
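
Behavior note: with SkipSchemas set, expansion still dereferences parameters and responses, while schemas keep a (rebased) $ref instead of being inlined. A minimal sketch against an in-memory spec; the document content is illustrative, and the exact $ref rendering depends on the resolver's rebasing.

    package main

    import (
        "encoding/json"
        "fmt"
        "log"

        "github.com/go-openapi/spec"
    )

    func main() {
        raw := []byte(`{
          "swagger": "2.0",
          "info": {"title": "demo", "version": "1.0"},
          "paths": {"/items": {"get": {"responses": {"200": {"$ref": "#/responses/ok"}}}}},
          "responses": {"ok": {"description": "OK", "schema": {"$ref": "#/definitions/item"}}},
          "definitions": {"item": {"type": "object"}}
        }`)
        var sw spec.Swagger
        if err := json.Unmarshal(raw, &sw); err != nil {
            log.Fatal(err)
        }
        // SkipSchemas expands paths/parameters/responses but leaves schemas as
        // (rebased) $ref's instead of inlining them.
        if err := spec.ExpandSpec(&sw, &spec.ExpandOptions{SkipSchemas: true}); err != nil {
            log.Fatal(err)
        }
        out, _ := json.Marshal(sw.Paths.Paths["/items"].Get.Responses.StatusCodeResponses[200])
        fmt.Println(string(out))
    }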
diff --git a/vendor/github.com/go-openapi/spec/info.go b/vendor/github.com/go-openapi/spec/info.go
index c458b49b..582f0fd4 100644
--- a/vendor/github.com/go-openapi/spec/info.go
+++ b/vendor/github.com/go-openapi/spec/info.go
@@ -16,6 +16,7 @@ package spec
import (
"encoding/json"
+ "strconv"
"strings"
"github.com/go-openapi/jsonpointer"
@@ -40,6 +41,24 @@ func (e Extensions) GetString(key string) (string, bool) {
return "", false
}
+// GetInt gets an int value from the extensions
+func (e Extensions) GetInt(key string) (int, bool) {
+ realKey := strings.ToLower(key)
+
+ if v, ok := e.GetString(realKey); ok {
+ if r, err := strconv.Atoi(v); err == nil {
+ return r, true
+ }
+ }
+
+ if v, ok := e[realKey]; ok {
+ if r, rOk := v.(float64); rOk {
+ return int(r), true
+ }
+ }
+ return -1, false
+}
+
// GetBool gets a string value from the extensions
func (e Extensions) GetBool(key string) (bool, bool) {
if v, ok := e[strings.ToLower(key)]; ok {
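
Usage note: GetInt accepts both string-encoded values and plain JSON numbers (which decode to float64); the x-order sorting in properties.go below relies on it. A minimal sketch.

    package main

    import (
        "fmt"

        "github.com/go-openapi/spec"
    )

    func main() {
        ext := spec.Extensions{"x-order": "3"} // string form, parsed with strconv.Atoi
        if v, ok := ext.GetInt("X-Order"); ok { // lookup is case-insensitive
            fmt.Println(v) // 3
        }

        ext["x-order"] = float64(7) // JSON numbers decode to float64
        if v, ok := ext.GetInt("x-order"); ok {
            fmt.Println(v) // 7
        }
    }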
diff --git a/vendor/github.com/go-openapi/spec/normalizer_nonwindows.go b/vendor/github.com/go-openapi/spec/normalizer_nonwindows.go
index 2df07231..f19f1a8f 100644
--- a/vendor/github.com/go-openapi/spec/normalizer_nonwindows.go
+++ b/vendor/github.com/go-openapi/spec/normalizer_nonwindows.go
@@ -40,5 +40,5 @@ func repairURI(in string) (*url.URL, string) {
return u, ""
}
-func fixWindowsURI(u *url.URL, in string) {
+func fixWindowsURI(_ *url.URL, _ string) {
}
diff --git a/vendor/github.com/go-openapi/spec/operation.go b/vendor/github.com/go-openapi/spec/operation.go
index 995ce6ac..a69cca88 100644
--- a/vendor/github.com/go-openapi/spec/operation.go
+++ b/vendor/github.com/go-openapi/spec/operation.go
@@ -217,9 +217,12 @@ func (o *Operation) AddParam(param *Parameter) *Operation {
for i, p := range o.Parameters {
if p.Name == param.Name && p.In == param.In {
- params := append(o.Parameters[:i], *param)
+ params := make([]Parameter, 0, len(o.Parameters)+1)
+ params = append(params, o.Parameters[:i]...)
+ params = append(params, *param)
params = append(params, o.Parameters[i+1:]...)
o.Parameters = params
+
return o
}
}
diff --git a/vendor/github.com/go-openapi/spec/parameter.go b/vendor/github.com/go-openapi/spec/parameter.go
index 2b2b89b6..bd4f1cdb 100644
--- a/vendor/github.com/go-openapi/spec/parameter.go
+++ b/vendor/github.com/go-openapi/spec/parameter.go
@@ -84,27 +84,27 @@ type ParamProps struct {
// Parameter a unique parameter is defined by a combination of a [name](#parameterName) and [location](#parameterIn).
//
// There are five possible parameter types.
-// * Path - Used together with [Path Templating](#pathTemplating), where the parameter value is actually part
-// of the operation's URL. This does not include the host or base path of the API. For example, in `/items/{itemId}`,
-// the path parameter is `itemId`.
-// * Query - Parameters that are appended to the URL. For example, in `/items?id=###`, the query parameter is `id`.
-// * Header - Custom headers that are expected as part of the request.
-// * Body - The payload that's appended to the HTTP request. Since there can only be one payload, there can only be
-// _one_ body parameter. The name of the body parameter has no effect on the parameter itself and is used for
-// documentation purposes only. Since Form parameters are also in the payload, body and form parameters cannot exist
-// together for the same operation.
-// * Form - Used to describe the payload of an HTTP request when either `application/x-www-form-urlencoded` or
-// `multipart/form-data` are used as the content type of the request (in Swagger's definition,
-// the [`consumes`](#operationConsumes) property of an operation). This is the only parameter type that can be used
-// to send files, thus supporting the `file` type. Since form parameters are sent in the payload, they cannot be
-// declared together with a body parameter for the same operation. Form parameters have a different format based on
-// the content-type used (for further details, consult http://www.w3.org/TR/html401/interact/forms.html#h-17.13.4).
-// * `application/x-www-form-urlencoded` - Similar to the format of Query parameters but as a payload.
-// For example, `foo=1&bar=swagger` - both `foo` and `bar` are form parameters. This is normally used for simple
-// parameters that are being transferred.
-// * `multipart/form-data` - each parameter takes a section in the payload with an internal header.
-// For example, for the header `Content-Disposition: form-data; name="submit-name"` the name of the parameter is
-// `submit-name`. This type of form parameters is more commonly used for file transfers.
+// - Path - Used together with [Path Templating](#pathTemplating), where the parameter value is actually part
+// of the operation's URL. This does not include the host or base path of the API. For example, in `/items/{itemId}`,
+// the path parameter is `itemId`.
+// - Query - Parameters that are appended to the URL. For example, in `/items?id=###`, the query parameter is `id`.
+// - Header - Custom headers that are expected as part of the request.
+// - Body - The payload that's appended to the HTTP request. Since there can only be one payload, there can only be
+// _one_ body parameter. The name of the body parameter has no effect on the parameter itself and is used for
+// documentation purposes only. Since Form parameters are also in the payload, body and form parameters cannot exist
+// together for the same operation.
+// - Form - Used to describe the payload of an HTTP request when either `application/x-www-form-urlencoded` or
+// `multipart/form-data` are used as the content type of the request (in Swagger's definition,
+// the [`consumes`](#operationConsumes) property of an operation). This is the only parameter type that can be used
+// to send files, thus supporting the `file` type. Since form parameters are sent in the payload, they cannot be
+// declared together with a body parameter for the same operation. Form parameters have a different format based on
+// the content-type used (for further details, consult http://www.w3.org/TR/html401/interact/forms.html#h-17.13.4).
+// - `application/x-www-form-urlencoded` - Similar to the format of Query parameters but as a payload.
+// For example, `foo=1&bar=swagger` - both `foo` and `bar` are form parameters. This is normally used for simple
+// parameters that are being transferred.
+// - `multipart/form-data` - each parameter takes a section in the payload with an internal header.
+// For example, for the header `Content-Disposition: form-data; name="submit-name"` the name of the parameter is
+// `submit-name`. This type of form parameters is more commonly used for file transfers.
//
// For more information: http://goo.gl/8us55a#parameterObject
type Parameter struct {
diff --git a/vendor/github.com/go-openapi/spec/properties.go b/vendor/github.com/go-openapi/spec/properties.go
index 2af13787..91d2435f 100644
--- a/vendor/github.com/go-openapi/spec/properties.go
+++ b/vendor/github.com/go-openapi/spec/properties.go
@@ -42,8 +42,8 @@ func (items OrderSchemaItems) MarshalJSON() ([]byte, error) {
func (items OrderSchemaItems) Len() int { return len(items) }
func (items OrderSchemaItems) Swap(i, j int) { items[i], items[j] = items[j], items[i] }
func (items OrderSchemaItems) Less(i, j int) (ret bool) {
- ii, oki := items[i].Extensions.GetString("x-order")
- ij, okj := items[j].Extensions.GetString("x-order")
+ ii, oki := items[i].Extensions.GetInt("x-order")
+ ij, okj := items[j].Extensions.GetInt("x-order")
if oki {
if okj {
defer func() {
@@ -56,7 +56,7 @@ func (items OrderSchemaItems) Less(i, j int) (ret bool) {
ret = reflect.ValueOf(ii).String() < reflect.ValueOf(ij).String()
}
}()
- return reflect.ValueOf(ii).Int() < reflect.ValueOf(ij).Int()
+ return ii < ij
}
return true
} else if okj {
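Switching `Less` from `GetString` plus reflection to `GetInt` also changes the comparison semantics: previously a string-typed `x-order` panicked in `reflect.Value.Int()` and fell back (via the deferred `recover`) to lexicographic comparison, which mis-orders multi-digit values. A quick illustration of the difference:

```go
package main

import (
	"fmt"
	"sort"
)

func main() {
	// The old fallback compared x-order values as strings.
	orders := []string{"10", "2", "1"}
	sort.Strings(orders)
	fmt.Println(orders) // [1 10 2]: lexicographic, "10" sorts before "2"

	// The new GetInt path compares them as integers.
	ints := []int{10, 2, 1}
	sort.Ints(ints)
	fmt.Println(ints) // [1 2 10]
}
```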
diff --git a/vendor/github.com/go-openapi/spec/responses.go b/vendor/github.com/go-openapi/spec/responses.go
index 4efb6f86..16c3076f 100644
--- a/vendor/github.com/go-openapi/spec/responses.go
+++ b/vendor/github.com/go-openapi/spec/responses.go
@@ -19,6 +19,7 @@ import (
"fmt"
"reflect"
"strconv"
+ "strings"
"github.com/go-openapi/swag"
)
@@ -62,6 +63,7 @@ func (r *Responses) UnmarshalJSON(data []byte) error {
if err := json.Unmarshal(data, &r.ResponsesProps); err != nil {
return err
}
+
if err := json.Unmarshal(data, &r.VendorExtensible); err != nil {
return err
}
@@ -107,20 +109,31 @@ func (r ResponsesProps) MarshalJSON() ([]byte, error) {
// UnmarshalJSON unmarshals responses from JSON
func (r *ResponsesProps) UnmarshalJSON(data []byte) error {
- var res map[string]Response
+ var res map[string]json.RawMessage
if err := json.Unmarshal(data, &res); err != nil {
- return nil
+ return err
}
+
if v, ok := res["default"]; ok {
- r.Default = &v
+ var defaultRes Response
+ if err := json.Unmarshal(v, &defaultRes); err != nil {
+ return err
+ }
+ r.Default = &defaultRes
delete(res, "default")
}
for k, v := range res {
- if nk, err := strconv.Atoi(k); err == nil {
- if r.StatusCodeResponses == nil {
- r.StatusCodeResponses = map[int]Response{}
+ if !strings.HasPrefix(k, "x-") {
+ var statusCodeResp Response
+ if err := json.Unmarshal(v, &statusCodeResp); err != nil {
+ return err
+ }
+ if nk, err := strconv.Atoi(k); err == nil {
+ if r.StatusCodeResponses == nil {
+ r.StatusCodeResponses = map[int]Response{}
+ }
+ r.StatusCodeResponses[nk] = statusCodeResp
}
- r.StatusCodeResponses[nk] = v
}
}
return nil
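The reworked `UnmarshalJSON` defers per-key decoding by going through `map[string]json.RawMessage`, so vendor-extension keys (`x-...`) are skipped instead of being force-decoded as `Response`, and a genuine decode error is now returned rather than swallowed (the old code returned `nil` on error). A hedged sketch of the same decode order, with simplified types:

```go
package main

import (
	"encoding/json"
	"fmt"
	"strconv"
	"strings"
)

type response struct {
	Description string `json:"description"`
}

func main() {
	data := []byte(`{
		"default":    {"description": "fallback"},
		"200":        {"description": "ok"},
		"x-internal": true
	}`)

	// Step 1: defer per-key decoding by unmarshaling into raw messages.
	var raw map[string]json.RawMessage
	if err := json.Unmarshal(data, &raw); err != nil {
		panic(err) // previously this error was swallowed (return nil)
	}

	// Step 2: "default" is decoded on its own, then removed.
	var def response
	if v, ok := raw["default"]; ok {
		if err := json.Unmarshal(v, &def); err != nil {
			panic(err)
		}
		delete(raw, "default")
	}

	// Step 3: skip vendor extensions; only numeric keys become responses.
	byCode := map[int]response{}
	for k, v := range raw {
		if strings.HasPrefix(k, "x-") {
			continue // "x-internal": true no longer breaks decoding
		}
		var r response
		if err := json.Unmarshal(v, &r); err != nil {
			panic(err)
		}
		if code, err := strconv.Atoi(k); err == nil {
			byCode[code] = r
		}
	}
	fmt.Println(def.Description, byCode[200].Description) // fallback ok
}
```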
diff --git a/vendor/github.com/go-openapi/spec/schema_loader.go b/vendor/github.com/go-openapi/spec/schema_loader.go
index b81175af..0059b99a 100644
--- a/vendor/github.com/go-openapi/spec/schema_loader.go
+++ b/vendor/github.com/go-openapi/spec/schema_loader.go
@@ -168,14 +168,7 @@ func (r *schemaLoader) load(refURL *url.URL) (interface{}, url.URL, bool, error)
normalized := normalizeBase(pth)
debugLog("loading doc from: %s", normalized)
- unescaped, err := url.PathUnescape(normalized)
- if err != nil {
- return nil, url.URL{}, false, err
- }
-
- u := url.URL{Path: unescaped}
-
- data, fromCache := r.cache.Get(u.RequestURI())
+ data, fromCache := r.cache.Get(normalized)
if fromCache {
return data, toFetch, fromCache, nil
}
diff --git a/vendor/github.com/go-openapi/spec/schemas/jsonschema-draft-04.json b/vendor/github.com/go-openapi/spec/schemas/jsonschema-draft-04.json
new file mode 100644
index 00000000..bcbb8474
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/schemas/jsonschema-draft-04.json
@@ -0,0 +1,149 @@
+{
+ "id": "http://json-schema.org/draft-04/schema#",
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "description": "Core schema meta-schema",
+ "definitions": {
+ "schemaArray": {
+ "type": "array",
+ "minItems": 1,
+ "items": { "$ref": "#" }
+ },
+ "positiveInteger": {
+ "type": "integer",
+ "minimum": 0
+ },
+ "positiveIntegerDefault0": {
+ "allOf": [ { "$ref": "#/definitions/positiveInteger" }, { "default": 0 } ]
+ },
+ "simpleTypes": {
+ "enum": [ "array", "boolean", "integer", "null", "number", "object", "string" ]
+ },
+ "stringArray": {
+ "type": "array",
+ "items": { "type": "string" },
+ "minItems": 1,
+ "uniqueItems": true
+ }
+ },
+ "type": "object",
+ "properties": {
+ "id": {
+ "type": "string"
+ },
+ "$schema": {
+ "type": "string"
+ },
+ "title": {
+ "type": "string"
+ },
+ "description": {
+ "type": "string"
+ },
+ "default": {},
+ "multipleOf": {
+ "type": "number",
+ "minimum": 0,
+ "exclusiveMinimum": true
+ },
+ "maximum": {
+ "type": "number"
+ },
+ "exclusiveMaximum": {
+ "type": "boolean",
+ "default": false
+ },
+ "minimum": {
+ "type": "number"
+ },
+ "exclusiveMinimum": {
+ "type": "boolean",
+ "default": false
+ },
+ "maxLength": { "$ref": "#/definitions/positiveInteger" },
+ "minLength": { "$ref": "#/definitions/positiveIntegerDefault0" },
+ "pattern": {
+ "type": "string",
+ "format": "regex"
+ },
+ "additionalItems": {
+ "anyOf": [
+ { "type": "boolean" },
+ { "$ref": "#" }
+ ],
+ "default": {}
+ },
+ "items": {
+ "anyOf": [
+ { "$ref": "#" },
+ { "$ref": "#/definitions/schemaArray" }
+ ],
+ "default": {}
+ },
+ "maxItems": { "$ref": "#/definitions/positiveInteger" },
+ "minItems": { "$ref": "#/definitions/positiveIntegerDefault0" },
+ "uniqueItems": {
+ "type": "boolean",
+ "default": false
+ },
+ "maxProperties": { "$ref": "#/definitions/positiveInteger" },
+ "minProperties": { "$ref": "#/definitions/positiveIntegerDefault0" },
+ "required": { "$ref": "#/definitions/stringArray" },
+ "additionalProperties": {
+ "anyOf": [
+ { "type": "boolean" },
+ { "$ref": "#" }
+ ],
+ "default": {}
+ },
+ "definitions": {
+ "type": "object",
+ "additionalProperties": { "$ref": "#" },
+ "default": {}
+ },
+ "properties": {
+ "type": "object",
+ "additionalProperties": { "$ref": "#" },
+ "default": {}
+ },
+ "patternProperties": {
+ "type": "object",
+ "additionalProperties": { "$ref": "#" },
+ "default": {}
+ },
+ "dependencies": {
+ "type": "object",
+ "additionalProperties": {
+ "anyOf": [
+ { "$ref": "#" },
+ { "$ref": "#/definitions/stringArray" }
+ ]
+ }
+ },
+ "enum": {
+ "type": "array",
+ "minItems": 1,
+ "uniqueItems": true
+ },
+ "type": {
+ "anyOf": [
+ { "$ref": "#/definitions/simpleTypes" },
+ {
+ "type": "array",
+ "items": { "$ref": "#/definitions/simpleTypes" },
+ "minItems": 1,
+ "uniqueItems": true
+ }
+ ]
+ },
+ "format": { "type": "string" },
+ "allOf": { "$ref": "#/definitions/schemaArray" },
+ "anyOf": { "$ref": "#/definitions/schemaArray" },
+ "oneOf": { "$ref": "#/definitions/schemaArray" },
+ "not": { "$ref": "#" }
+ },
+ "dependencies": {
+ "exclusiveMaximum": [ "maximum" ],
+ "exclusiveMinimum": [ "minimum" ]
+ },
+ "default": {}
+}
diff --git a/vendor/github.com/go-openapi/spec/schemas/v2/schema.json b/vendor/github.com/go-openapi/spec/schemas/v2/schema.json
new file mode 100644
index 00000000..ebe10ed3
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/schemas/v2/schema.json
@@ -0,0 +1,1607 @@
+{
+ "title": "A JSON Schema for Swagger 2.0 API.",
+ "id": "http://swagger.io/v2/schema.json#",
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "type": "object",
+ "required": [
+ "swagger",
+ "info",
+ "paths"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ },
+ "properties": {
+ "swagger": {
+ "type": "string",
+ "enum": [
+ "2.0"
+ ],
+ "description": "The Swagger version of this document."
+ },
+ "info": {
+ "$ref": "#/definitions/info"
+ },
+ "host": {
+ "type": "string",
+ "pattern": "^[^{}/ :\\\\]+(?::\\d+)?$",
+ "description": "The host (name or ip) of the API. Example: 'swagger.io'"
+ },
+ "basePath": {
+ "type": "string",
+ "pattern": "^/",
+ "description": "The base path to the API. Example: '/api'."
+ },
+ "schemes": {
+ "$ref": "#/definitions/schemesList"
+ },
+ "consumes": {
+ "description": "A list of MIME types accepted by the API.",
+ "allOf": [
+ {
+ "$ref": "#/definitions/mediaTypeList"
+ }
+ ]
+ },
+ "produces": {
+ "description": "A list of MIME types the API can produce.",
+ "allOf": [
+ {
+ "$ref": "#/definitions/mediaTypeList"
+ }
+ ]
+ },
+ "paths": {
+ "$ref": "#/definitions/paths"
+ },
+ "definitions": {
+ "$ref": "#/definitions/definitions"
+ },
+ "parameters": {
+ "$ref": "#/definitions/parameterDefinitions"
+ },
+ "responses": {
+ "$ref": "#/definitions/responseDefinitions"
+ },
+ "security": {
+ "$ref": "#/definitions/security"
+ },
+ "securityDefinitions": {
+ "$ref": "#/definitions/securityDefinitions"
+ },
+ "tags": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/tag"
+ },
+ "uniqueItems": true
+ },
+ "externalDocs": {
+ "$ref": "#/definitions/externalDocs"
+ }
+ },
+ "definitions": {
+ "info": {
+ "type": "object",
+ "description": "General information about the API.",
+ "required": [
+ "version",
+ "title"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ },
+ "properties": {
+ "title": {
+ "type": "string",
+ "description": "A unique and precise title of the API."
+ },
+ "version": {
+ "type": "string",
+ "description": "A semantic version number of the API."
+ },
+ "description": {
+ "type": "string",
+ "description": "A longer description of the API. Should be different from the title. GitHub Flavored Markdown is allowed."
+ },
+ "termsOfService": {
+ "type": "string",
+ "description": "The terms of service for the API."
+ },
+ "contact": {
+ "$ref": "#/definitions/contact"
+ },
+ "license": {
+ "$ref": "#/definitions/license"
+ }
+ }
+ },
+ "contact": {
+ "type": "object",
+ "description": "Contact information for the owners of the API.",
+ "additionalProperties": false,
+ "properties": {
+ "name": {
+ "type": "string",
+ "description": "The identifying name of the contact person/organization."
+ },
+ "url": {
+ "type": "string",
+ "description": "The URL pointing to the contact information.",
+ "format": "uri"
+ },
+ "email": {
+ "type": "string",
+ "description": "The email address of the contact person/organization.",
+ "format": "email"
+ }
+ },
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ }
+ },
+ "license": {
+ "type": "object",
+ "required": [
+ "name"
+ ],
+ "additionalProperties": false,
+ "properties": {
+ "name": {
+ "type": "string",
+ "description": "The name of the license type. It's encouraged to use an OSI compatible license."
+ },
+ "url": {
+ "type": "string",
+ "description": "The URL pointing to the license.",
+ "format": "uri"
+ }
+ },
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ }
+ },
+ "paths": {
+ "type": "object",
+ "description": "Relative paths to the individual endpoints. They must be relative to the 'basePath'.",
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ },
+ "^/": {
+ "$ref": "#/definitions/pathItem"
+ }
+ },
+ "additionalProperties": false
+ },
+ "definitions": {
+ "type": "object",
+ "additionalProperties": {
+ "$ref": "#/definitions/schema"
+ },
+ "description": "One or more JSON objects describing the schemas being consumed and produced by the API."
+ },
+ "parameterDefinitions": {
+ "type": "object",
+ "additionalProperties": {
+ "$ref": "#/definitions/parameter"
+ },
+ "description": "One or more JSON representations for parameters"
+ },
+ "responseDefinitions": {
+ "type": "object",
+ "additionalProperties": {
+ "$ref": "#/definitions/response"
+ },
+ "description": "One or more JSON representations for responses"
+ },
+ "externalDocs": {
+ "type": "object",
+ "additionalProperties": false,
+ "description": "information about external documentation",
+ "required": [
+ "url"
+ ],
+ "properties": {
+ "description": {
+ "type": "string"
+ },
+ "url": {
+ "type": "string",
+ "format": "uri"
+ }
+ },
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ }
+ },
+ "examples": {
+ "type": "object",
+ "additionalProperties": true
+ },
+ "mimeType": {
+ "type": "string",
+ "description": "The MIME type of the HTTP message."
+ },
+ "operation": {
+ "type": "object",
+ "required": [
+ "responses"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ },
+ "properties": {
+ "tags": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ },
+ "uniqueItems": true
+ },
+ "summary": {
+ "type": "string",
+ "description": "A brief summary of the operation."
+ },
+ "description": {
+ "type": "string",
+ "description": "A longer description of the operation, GitHub Flavored Markdown is allowed."
+ },
+ "externalDocs": {
+ "$ref": "#/definitions/externalDocs"
+ },
+ "operationId": {
+ "type": "string",
+ "description": "A unique identifier of the operation."
+ },
+ "produces": {
+ "description": "A list of MIME types the API can produce.",
+ "allOf": [
+ {
+ "$ref": "#/definitions/mediaTypeList"
+ }
+ ]
+ },
+ "consumes": {
+ "description": "A list of MIME types the API can consume.",
+ "allOf": [
+ {
+ "$ref": "#/definitions/mediaTypeList"
+ }
+ ]
+ },
+ "parameters": {
+ "$ref": "#/definitions/parametersList"
+ },
+ "responses": {
+ "$ref": "#/definitions/responses"
+ },
+ "schemes": {
+ "$ref": "#/definitions/schemesList"
+ },
+ "deprecated": {
+ "type": "boolean",
+ "default": false
+ },
+ "security": {
+ "$ref": "#/definitions/security"
+ }
+ }
+ },
+ "pathItem": {
+ "type": "object",
+ "additionalProperties": false,
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ },
+ "properties": {
+ "$ref": {
+ "type": "string"
+ },
+ "get": {
+ "$ref": "#/definitions/operation"
+ },
+ "put": {
+ "$ref": "#/definitions/operation"
+ },
+ "post": {
+ "$ref": "#/definitions/operation"
+ },
+ "delete": {
+ "$ref": "#/definitions/operation"
+ },
+ "options": {
+ "$ref": "#/definitions/operation"
+ },
+ "head": {
+ "$ref": "#/definitions/operation"
+ },
+ "patch": {
+ "$ref": "#/definitions/operation"
+ },
+ "parameters": {
+ "$ref": "#/definitions/parametersList"
+ }
+ }
+ },
+ "responses": {
+ "type": "object",
+ "description": "Response objects names can either be any valid HTTP status code or 'default'.",
+ "minProperties": 1,
+ "additionalProperties": false,
+ "patternProperties": {
+ "^([0-9]{3})$|^(default)$": {
+ "$ref": "#/definitions/responseValue"
+ },
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ },
+ "not": {
+ "type": "object",
+ "additionalProperties": false,
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ }
+ }
+ },
+ "responseValue": {
+ "oneOf": [
+ {
+ "$ref": "#/definitions/response"
+ },
+ {
+ "$ref": "#/definitions/jsonReference"
+ }
+ ]
+ },
+ "response": {
+ "type": "object",
+ "required": [
+ "description"
+ ],
+ "properties": {
+ "description": {
+ "type": "string"
+ },
+ "schema": {
+ "oneOf": [
+ {
+ "$ref": "#/definitions/schema"
+ },
+ {
+ "$ref": "#/definitions/fileSchema"
+ }
+ ]
+ },
+ "headers": {
+ "$ref": "#/definitions/headers"
+ },
+ "examples": {
+ "$ref": "#/definitions/examples"
+ }
+ },
+ "additionalProperties": false,
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ }
+ },
+ "headers": {
+ "type": "object",
+ "additionalProperties": {
+ "$ref": "#/definitions/header"
+ }
+ },
+ "header": {
+ "type": "object",
+ "additionalProperties": false,
+ "required": [
+ "type"
+ ],
+ "properties": {
+ "type": {
+ "type": "string",
+ "enum": [
+ "string",
+ "number",
+ "integer",
+ "boolean",
+ "array"
+ ]
+ },
+ "format": {
+ "type": "string"
+ },
+ "items": {
+ "$ref": "#/definitions/primitivesItems"
+ },
+ "collectionFormat": {
+ "$ref": "#/definitions/collectionFormat"
+ },
+ "default": {
+ "$ref": "#/definitions/default"
+ },
+ "maximum": {
+ "$ref": "#/definitions/maximum"
+ },
+ "exclusiveMaximum": {
+ "$ref": "#/definitions/exclusiveMaximum"
+ },
+ "minimum": {
+ "$ref": "#/definitions/minimum"
+ },
+ "exclusiveMinimum": {
+ "$ref": "#/definitions/exclusiveMinimum"
+ },
+ "maxLength": {
+ "$ref": "#/definitions/maxLength"
+ },
+ "minLength": {
+ "$ref": "#/definitions/minLength"
+ },
+ "pattern": {
+ "$ref": "#/definitions/pattern"
+ },
+ "maxItems": {
+ "$ref": "#/definitions/maxItems"
+ },
+ "minItems": {
+ "$ref": "#/definitions/minItems"
+ },
+ "uniqueItems": {
+ "$ref": "#/definitions/uniqueItems"
+ },
+ "enum": {
+ "$ref": "#/definitions/enum"
+ },
+ "multipleOf": {
+ "$ref": "#/definitions/multipleOf"
+ },
+ "description": {
+ "type": "string"
+ }
+ },
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ }
+ },
+ "vendorExtension": {
+ "description": "Any property starting with x- is valid.",
+ "additionalProperties": true,
+ "additionalItems": true
+ },
+ "bodyParameter": {
+ "type": "object",
+ "required": [
+ "name",
+ "in",
+ "schema"
+ ],
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ },
+ "properties": {
+ "description": {
+ "type": "string",
+ "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed."
+ },
+ "name": {
+ "type": "string",
+ "description": "The name of the parameter."
+ },
+ "in": {
+ "type": "string",
+ "description": "Determines the location of the parameter.",
+ "enum": [
+ "body"
+ ]
+ },
+ "required": {
+ "type": "boolean",
+ "description": "Determines whether or not this parameter is required or optional.",
+ "default": false
+ },
+ "schema": {
+ "$ref": "#/definitions/schema"
+ }
+ },
+ "additionalProperties": false
+ },
+ "headerParameterSubSchema": {
+ "additionalProperties": false,
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ },
+ "properties": {
+ "required": {
+ "type": "boolean",
+ "description": "Determines whether or not this parameter is required or optional.",
+ "default": false
+ },
+ "in": {
+ "type": "string",
+ "description": "Determines the location of the parameter.",
+ "enum": [
+ "header"
+ ]
+ },
+ "description": {
+ "type": "string",
+ "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed."
+ },
+ "name": {
+ "type": "string",
+ "description": "The name of the parameter."
+ },
+ "type": {
+ "type": "string",
+ "enum": [
+ "string",
+ "number",
+ "boolean",
+ "integer",
+ "array"
+ ]
+ },
+ "format": {
+ "type": "string"
+ },
+ "items": {
+ "$ref": "#/definitions/primitivesItems"
+ },
+ "collectionFormat": {
+ "$ref": "#/definitions/collectionFormat"
+ },
+ "default": {
+ "$ref": "#/definitions/default"
+ },
+ "maximum": {
+ "$ref": "#/definitions/maximum"
+ },
+ "exclusiveMaximum": {
+ "$ref": "#/definitions/exclusiveMaximum"
+ },
+ "minimum": {
+ "$ref": "#/definitions/minimum"
+ },
+ "exclusiveMinimum": {
+ "$ref": "#/definitions/exclusiveMinimum"
+ },
+ "maxLength": {
+ "$ref": "#/definitions/maxLength"
+ },
+ "minLength": {
+ "$ref": "#/definitions/minLength"
+ },
+ "pattern": {
+ "$ref": "#/definitions/pattern"
+ },
+ "maxItems": {
+ "$ref": "#/definitions/maxItems"
+ },
+ "minItems": {
+ "$ref": "#/definitions/minItems"
+ },
+ "uniqueItems": {
+ "$ref": "#/definitions/uniqueItems"
+ },
+ "enum": {
+ "$ref": "#/definitions/enum"
+ },
+ "multipleOf": {
+ "$ref": "#/definitions/multipleOf"
+ }
+ }
+ },
+ "queryParameterSubSchema": {
+ "additionalProperties": false,
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ },
+ "properties": {
+ "required": {
+ "type": "boolean",
+ "description": "Determines whether or not this parameter is required or optional.",
+ "default": false
+ },
+ "in": {
+ "type": "string",
+ "description": "Determines the location of the parameter.",
+ "enum": [
+ "query"
+ ]
+ },
+ "description": {
+ "type": "string",
+ "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed."
+ },
+ "name": {
+ "type": "string",
+ "description": "The name of the parameter."
+ },
+ "allowEmptyValue": {
+ "type": "boolean",
+ "default": false,
+ "description": "allows sending a parameter by name only or with an empty value."
+ },
+ "type": {
+ "type": "string",
+ "enum": [
+ "string",
+ "number",
+ "boolean",
+ "integer",
+ "array"
+ ]
+ },
+ "format": {
+ "type": "string"
+ },
+ "items": {
+ "$ref": "#/definitions/primitivesItems"
+ },
+ "collectionFormat": {
+ "$ref": "#/definitions/collectionFormatWithMulti"
+ },
+ "default": {
+ "$ref": "#/definitions/default"
+ },
+ "maximum": {
+ "$ref": "#/definitions/maximum"
+ },
+ "exclusiveMaximum": {
+ "$ref": "#/definitions/exclusiveMaximum"
+ },
+ "minimum": {
+ "$ref": "#/definitions/minimum"
+ },
+ "exclusiveMinimum": {
+ "$ref": "#/definitions/exclusiveMinimum"
+ },
+ "maxLength": {
+ "$ref": "#/definitions/maxLength"
+ },
+ "minLength": {
+ "$ref": "#/definitions/minLength"
+ },
+ "pattern": {
+ "$ref": "#/definitions/pattern"
+ },
+ "maxItems": {
+ "$ref": "#/definitions/maxItems"
+ },
+ "minItems": {
+ "$ref": "#/definitions/minItems"
+ },
+ "uniqueItems": {
+ "$ref": "#/definitions/uniqueItems"
+ },
+ "enum": {
+ "$ref": "#/definitions/enum"
+ },
+ "multipleOf": {
+ "$ref": "#/definitions/multipleOf"
+ }
+ }
+ },
+ "formDataParameterSubSchema": {
+ "additionalProperties": false,
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ },
+ "properties": {
+ "required": {
+ "type": "boolean",
+ "description": "Determines whether or not this parameter is required or optional.",
+ "default": false
+ },
+ "in": {
+ "type": "string",
+ "description": "Determines the location of the parameter.",
+ "enum": [
+ "formData"
+ ]
+ },
+ "description": {
+ "type": "string",
+ "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed."
+ },
+ "name": {
+ "type": "string",
+ "description": "The name of the parameter."
+ },
+ "allowEmptyValue": {
+ "type": "boolean",
+ "default": false,
+ "description": "allows sending a parameter by name only or with an empty value."
+ },
+ "type": {
+ "type": "string",
+ "enum": [
+ "string",
+ "number",
+ "boolean",
+ "integer",
+ "array",
+ "file"
+ ]
+ },
+ "format": {
+ "type": "string"
+ },
+ "items": {
+ "$ref": "#/definitions/primitivesItems"
+ },
+ "collectionFormat": {
+ "$ref": "#/definitions/collectionFormatWithMulti"
+ },
+ "default": {
+ "$ref": "#/definitions/default"
+ },
+ "maximum": {
+ "$ref": "#/definitions/maximum"
+ },
+ "exclusiveMaximum": {
+ "$ref": "#/definitions/exclusiveMaximum"
+ },
+ "minimum": {
+ "$ref": "#/definitions/minimum"
+ },
+ "exclusiveMinimum": {
+ "$ref": "#/definitions/exclusiveMinimum"
+ },
+ "maxLength": {
+ "$ref": "#/definitions/maxLength"
+ },
+ "minLength": {
+ "$ref": "#/definitions/minLength"
+ },
+ "pattern": {
+ "$ref": "#/definitions/pattern"
+ },
+ "maxItems": {
+ "$ref": "#/definitions/maxItems"
+ },
+ "minItems": {
+ "$ref": "#/definitions/minItems"
+ },
+ "uniqueItems": {
+ "$ref": "#/definitions/uniqueItems"
+ },
+ "enum": {
+ "$ref": "#/definitions/enum"
+ },
+ "multipleOf": {
+ "$ref": "#/definitions/multipleOf"
+ }
+ }
+ },
+ "pathParameterSubSchema": {
+ "additionalProperties": false,
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ },
+ "required": [
+ "required"
+ ],
+ "properties": {
+ "required": {
+ "type": "boolean",
+ "enum": [
+ true
+ ],
+ "description": "Determines whether or not this parameter is required or optional."
+ },
+ "in": {
+ "type": "string",
+ "description": "Determines the location of the parameter.",
+ "enum": [
+ "path"
+ ]
+ },
+ "description": {
+ "type": "string",
+ "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed."
+ },
+ "name": {
+ "type": "string",
+ "description": "The name of the parameter."
+ },
+ "type": {
+ "type": "string",
+ "enum": [
+ "string",
+ "number",
+ "boolean",
+ "integer",
+ "array"
+ ]
+ },
+ "format": {
+ "type": "string"
+ },
+ "items": {
+ "$ref": "#/definitions/primitivesItems"
+ },
+ "collectionFormat": {
+ "$ref": "#/definitions/collectionFormat"
+ },
+ "default": {
+ "$ref": "#/definitions/default"
+ },
+ "maximum": {
+ "$ref": "#/definitions/maximum"
+ },
+ "exclusiveMaximum": {
+ "$ref": "#/definitions/exclusiveMaximum"
+ },
+ "minimum": {
+ "$ref": "#/definitions/minimum"
+ },
+ "exclusiveMinimum": {
+ "$ref": "#/definitions/exclusiveMinimum"
+ },
+ "maxLength": {
+ "$ref": "#/definitions/maxLength"
+ },
+ "minLength": {
+ "$ref": "#/definitions/minLength"
+ },
+ "pattern": {
+ "$ref": "#/definitions/pattern"
+ },
+ "maxItems": {
+ "$ref": "#/definitions/maxItems"
+ },
+ "minItems": {
+ "$ref": "#/definitions/minItems"
+ },
+ "uniqueItems": {
+ "$ref": "#/definitions/uniqueItems"
+ },
+ "enum": {
+ "$ref": "#/definitions/enum"
+ },
+ "multipleOf": {
+ "$ref": "#/definitions/multipleOf"
+ }
+ }
+ },
+ "nonBodyParameter": {
+ "type": "object",
+ "required": [
+ "name",
+ "in",
+ "type"
+ ],
+ "oneOf": [
+ {
+ "$ref": "#/definitions/headerParameterSubSchema"
+ },
+ {
+ "$ref": "#/definitions/formDataParameterSubSchema"
+ },
+ {
+ "$ref": "#/definitions/queryParameterSubSchema"
+ },
+ {
+ "$ref": "#/definitions/pathParameterSubSchema"
+ }
+ ]
+ },
+ "parameter": {
+ "oneOf": [
+ {
+ "$ref": "#/definitions/bodyParameter"
+ },
+ {
+ "$ref": "#/definitions/nonBodyParameter"
+ }
+ ]
+ },
+ "schema": {
+ "type": "object",
+ "description": "A deterministic version of a JSON Schema object.",
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ },
+ "properties": {
+ "$ref": {
+ "type": "string"
+ },
+ "format": {
+ "type": "string"
+ },
+ "title": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/title"
+ },
+ "description": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/description"
+ },
+ "default": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/default"
+ },
+ "multipleOf": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf"
+ },
+ "maximum": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum"
+ },
+ "exclusiveMaximum": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum"
+ },
+ "minimum": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum"
+ },
+ "exclusiveMinimum": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum"
+ },
+ "maxLength": {
+ "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger"
+ },
+ "minLength": {
+ "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0"
+ },
+ "pattern": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern"
+ },
+ "maxItems": {
+ "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger"
+ },
+ "minItems": {
+ "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0"
+ },
+ "uniqueItems": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems"
+ },
+ "maxProperties": {
+ "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger"
+ },
+ "minProperties": {
+ "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0"
+ },
+ "required": {
+ "$ref": "http://json-schema.org/draft-04/schema#/definitions/stringArray"
+ },
+ "enum": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/enum"
+ },
+ "additionalProperties": {
+ "anyOf": [
+ {
+ "$ref": "#/definitions/schema"
+ },
+ {
+ "type": "boolean"
+ }
+ ],
+ "default": {}
+ },
+ "type": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/type"
+ },
+ "items": {
+ "anyOf": [
+ {
+ "$ref": "#/definitions/schema"
+ },
+ {
+ "type": "array",
+ "minItems": 1,
+ "items": {
+ "$ref": "#/definitions/schema"
+ }
+ }
+ ],
+ "default": {}
+ },
+ "allOf": {
+ "type": "array",
+ "minItems": 1,
+ "items": {
+ "$ref": "#/definitions/schema"
+ }
+ },
+ "properties": {
+ "type": "object",
+ "additionalProperties": {
+ "$ref": "#/definitions/schema"
+ },
+ "default": {}
+ },
+ "discriminator": {
+ "type": "string"
+ },
+ "readOnly": {
+ "type": "boolean",
+ "default": false
+ },
+ "xml": {
+ "$ref": "#/definitions/xml"
+ },
+ "externalDocs": {
+ "$ref": "#/definitions/externalDocs"
+ },
+ "example": {}
+ },
+ "additionalProperties": false
+ },
+ "fileSchema": {
+ "type": "object",
+ "description": "A deterministic version of a JSON Schema object.",
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ },
+ "required": [
+ "type"
+ ],
+ "properties": {
+ "format": {
+ "type": "string"
+ },
+ "title": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/title"
+ },
+ "description": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/description"
+ },
+ "default": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/default"
+ },
+ "required": {
+ "$ref": "http://json-schema.org/draft-04/schema#/definitions/stringArray"
+ },
+ "type": {
+ "type": "string",
+ "enum": [
+ "file"
+ ]
+ },
+ "readOnly": {
+ "type": "boolean",
+ "default": false
+ },
+ "externalDocs": {
+ "$ref": "#/definitions/externalDocs"
+ },
+ "example": {}
+ },
+ "additionalProperties": false
+ },
+ "primitivesItems": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "type": {
+ "type": "string",
+ "enum": [
+ "string",
+ "number",
+ "integer",
+ "boolean",
+ "array"
+ ]
+ },
+ "format": {
+ "type": "string"
+ },
+ "items": {
+ "$ref": "#/definitions/primitivesItems"
+ },
+ "collectionFormat": {
+ "$ref": "#/definitions/collectionFormat"
+ },
+ "default": {
+ "$ref": "#/definitions/default"
+ },
+ "maximum": {
+ "$ref": "#/definitions/maximum"
+ },
+ "exclusiveMaximum": {
+ "$ref": "#/definitions/exclusiveMaximum"
+ },
+ "minimum": {
+ "$ref": "#/definitions/minimum"
+ },
+ "exclusiveMinimum": {
+ "$ref": "#/definitions/exclusiveMinimum"
+ },
+ "maxLength": {
+ "$ref": "#/definitions/maxLength"
+ },
+ "minLength": {
+ "$ref": "#/definitions/minLength"
+ },
+ "pattern": {
+ "$ref": "#/definitions/pattern"
+ },
+ "maxItems": {
+ "$ref": "#/definitions/maxItems"
+ },
+ "minItems": {
+ "$ref": "#/definitions/minItems"
+ },
+ "uniqueItems": {
+ "$ref": "#/definitions/uniqueItems"
+ },
+ "enum": {
+ "$ref": "#/definitions/enum"
+ },
+ "multipleOf": {
+ "$ref": "#/definitions/multipleOf"
+ }
+ },
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ }
+ },
+ "security": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/securityRequirement"
+ },
+ "uniqueItems": true
+ },
+ "securityRequirement": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ },
+ "uniqueItems": true
+ }
+ },
+ "xml": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "name": {
+ "type": "string"
+ },
+ "namespace": {
+ "type": "string"
+ },
+ "prefix": {
+ "type": "string"
+ },
+ "attribute": {
+ "type": "boolean",
+ "default": false
+ },
+ "wrapped": {
+ "type": "boolean",
+ "default": false
+ }
+ },
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ }
+ },
+ "tag": {
+ "type": "object",
+ "additionalProperties": false,
+ "required": [
+ "name"
+ ],
+ "properties": {
+ "name": {
+ "type": "string"
+ },
+ "description": {
+ "type": "string"
+ },
+ "externalDocs": {
+ "$ref": "#/definitions/externalDocs"
+ }
+ },
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ }
+ },
+ "securityDefinitions": {
+ "type": "object",
+ "additionalProperties": {
+ "oneOf": [
+ {
+ "$ref": "#/definitions/basicAuthenticationSecurity"
+ },
+ {
+ "$ref": "#/definitions/apiKeySecurity"
+ },
+ {
+ "$ref": "#/definitions/oauth2ImplicitSecurity"
+ },
+ {
+ "$ref": "#/definitions/oauth2PasswordSecurity"
+ },
+ {
+ "$ref": "#/definitions/oauth2ApplicationSecurity"
+ },
+ {
+ "$ref": "#/definitions/oauth2AccessCodeSecurity"
+ }
+ ]
+ }
+ },
+ "basicAuthenticationSecurity": {
+ "type": "object",
+ "additionalProperties": false,
+ "required": [
+ "type"
+ ],
+ "properties": {
+ "type": {
+ "type": "string",
+ "enum": [
+ "basic"
+ ]
+ },
+ "description": {
+ "type": "string"
+ }
+ },
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ }
+ },
+ "apiKeySecurity": {
+ "type": "object",
+ "additionalProperties": false,
+ "required": [
+ "type",
+ "name",
+ "in"
+ ],
+ "properties": {
+ "type": {
+ "type": "string",
+ "enum": [
+ "apiKey"
+ ]
+ },
+ "name": {
+ "type": "string"
+ },
+ "in": {
+ "type": "string",
+ "enum": [
+ "header",
+ "query"
+ ]
+ },
+ "description": {
+ "type": "string"
+ }
+ },
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ }
+ },
+ "oauth2ImplicitSecurity": {
+ "type": "object",
+ "additionalProperties": false,
+ "required": [
+ "type",
+ "flow",
+ "authorizationUrl"
+ ],
+ "properties": {
+ "type": {
+ "type": "string",
+ "enum": [
+ "oauth2"
+ ]
+ },
+ "flow": {
+ "type": "string",
+ "enum": [
+ "implicit"
+ ]
+ },
+ "scopes": {
+ "$ref": "#/definitions/oauth2Scopes"
+ },
+ "authorizationUrl": {
+ "type": "string",
+ "format": "uri"
+ },
+ "description": {
+ "type": "string"
+ }
+ },
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ }
+ },
+ "oauth2PasswordSecurity": {
+ "type": "object",
+ "additionalProperties": false,
+ "required": [
+ "type",
+ "flow",
+ "tokenUrl"
+ ],
+ "properties": {
+ "type": {
+ "type": "string",
+ "enum": [
+ "oauth2"
+ ]
+ },
+ "flow": {
+ "type": "string",
+ "enum": [
+ "password"
+ ]
+ },
+ "scopes": {
+ "$ref": "#/definitions/oauth2Scopes"
+ },
+ "tokenUrl": {
+ "type": "string",
+ "format": "uri"
+ },
+ "description": {
+ "type": "string"
+ }
+ },
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ }
+ },
+ "oauth2ApplicationSecurity": {
+ "type": "object",
+ "additionalProperties": false,
+ "required": [
+ "type",
+ "flow",
+ "tokenUrl"
+ ],
+ "properties": {
+ "type": {
+ "type": "string",
+ "enum": [
+ "oauth2"
+ ]
+ },
+ "flow": {
+ "type": "string",
+ "enum": [
+ "application"
+ ]
+ },
+ "scopes": {
+ "$ref": "#/definitions/oauth2Scopes"
+ },
+ "tokenUrl": {
+ "type": "string",
+ "format": "uri"
+ },
+ "description": {
+ "type": "string"
+ }
+ },
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ }
+ },
+ "oauth2AccessCodeSecurity": {
+ "type": "object",
+ "additionalProperties": false,
+ "required": [
+ "type",
+ "flow",
+ "authorizationUrl",
+ "tokenUrl"
+ ],
+ "properties": {
+ "type": {
+ "type": "string",
+ "enum": [
+ "oauth2"
+ ]
+ },
+ "flow": {
+ "type": "string",
+ "enum": [
+ "accessCode"
+ ]
+ },
+ "scopes": {
+ "$ref": "#/definitions/oauth2Scopes"
+ },
+ "authorizationUrl": {
+ "type": "string",
+ "format": "uri"
+ },
+ "tokenUrl": {
+ "type": "string",
+ "format": "uri"
+ },
+ "description": {
+ "type": "string"
+ }
+ },
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ }
+ },
+ "oauth2Scopes": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "mediaTypeList": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/mimeType"
+ },
+ "uniqueItems": true
+ },
+ "parametersList": {
+ "type": "array",
+ "description": "The parameters needed to send a valid API call.",
+ "additionalItems": false,
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/definitions/parameter"
+ },
+ {
+ "$ref": "#/definitions/jsonReference"
+ }
+ ]
+ },
+ "uniqueItems": true
+ },
+ "schemesList": {
+ "type": "array",
+ "description": "The transfer protocol of the API.",
+ "items": {
+ "type": "string",
+ "enum": [
+ "http",
+ "https",
+ "ws",
+ "wss"
+ ]
+ },
+ "uniqueItems": true
+ },
+ "collectionFormat": {
+ "type": "string",
+ "enum": [
+ "csv",
+ "ssv",
+ "tsv",
+ "pipes"
+ ],
+ "default": "csv"
+ },
+ "collectionFormatWithMulti": {
+ "type": "string",
+ "enum": [
+ "csv",
+ "ssv",
+ "tsv",
+ "pipes",
+ "multi"
+ ],
+ "default": "csv"
+ },
+ "title": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/title"
+ },
+ "description": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/description"
+ },
+ "default": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/default"
+ },
+ "multipleOf": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf"
+ },
+ "maximum": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum"
+ },
+ "exclusiveMaximum": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum"
+ },
+ "minimum": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum"
+ },
+ "exclusiveMinimum": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum"
+ },
+ "maxLength": {
+ "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger"
+ },
+ "minLength": {
+ "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0"
+ },
+ "pattern": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern"
+ },
+ "maxItems": {
+ "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger"
+ },
+ "minItems": {
+ "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0"
+ },
+ "uniqueItems": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems"
+ },
+ "enum": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/enum"
+ },
+ "jsonReference": {
+ "type": "object",
+ "required": [
+ "$ref"
+ ],
+ "additionalProperties": false,
+ "properties": {
+ "$ref": {
+ "type": "string"
+ }
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/go-openapi/spec/spec.go b/vendor/github.com/go-openapi/spec/spec.go
index 7d38b6e6..876aa127 100644
--- a/vendor/github.com/go-openapi/spec/spec.go
+++ b/vendor/github.com/go-openapi/spec/spec.go
@@ -26,7 +26,7 @@ import (
const (
// SwaggerSchemaURL the url for the swagger 2.0 schema to validate specs
SwaggerSchemaURL = "http://swagger.io/v2/schema.json#"
- // JSONSchemaURL the url for the json schema schema
+ // JSONSchemaURL the url for the json schema
JSONSchemaURL = "http://json-schema.org/draft-04/schema#"
)
@@ -41,7 +41,7 @@ func MustLoadJSONSchemaDraft04() *Schema {
// JSONSchemaDraft04 loads the json schema document for json schema draft04
func JSONSchemaDraft04() (*Schema, error) {
- b, err := Asset("jsonschema-draft-04.json")
+ b, err := jsonschemaDraft04JSONBytes()
if err != nil {
return nil, err
}
@@ -65,7 +65,7 @@ func MustLoadSwagger20Schema() *Schema {
// Swagger20Schema loads the swagger 2.0 schema from the embedded assets
func Swagger20Schema() (*Schema, error) {
- b, err := Asset("v2/schema.json")
+ b, err := v2SchemaJSONBytes()
if err != nil {
return nil, err
}
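Both loaders now call generated byte accessors instead of go-bindata's `Asset`, backed by the two plain JSON schema files added above. A sketch of the likely wiring; whether upstream uses `go:embed` as shown here or generated Go code is an assumption, and only the accessor names are taken from the diff:

```go
// Illustrative sketch only: the accessor names match the diff, but the
// embedding mechanism is assumed.
package spec

import _ "embed"

//go:embed schemas/jsonschema-draft-04.json
var jsonschemaDraft04JSON []byte

//go:embed schemas/v2/schema.json
var v2SchemaJSON []byte

func jsonschemaDraft04JSONBytes() ([]byte, error) { return jsonschemaDraft04JSON, nil }

func v2SchemaJSONBytes() ([]byte, error) { return v2SchemaJSON, nil }
```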
diff --git a/vendor/github.com/go-openapi/spec/swagger.go b/vendor/github.com/go-openapi/spec/swagger.go
index 44722ffd..1590fd17 100644
--- a/vendor/github.com/go-openapi/spec/swagger.go
+++ b/vendor/github.com/go-openapi/spec/swagger.go
@@ -253,7 +253,7 @@ func (s SchemaOrBool) MarshalJSON() ([]byte, error) {
// UnmarshalJSON converts this bool or schema object from a JSON structure
func (s *SchemaOrBool) UnmarshalJSON(data []byte) error {
var nw SchemaOrBool
- if len(data) >= 4 {
+ if len(data) > 0 {
if data[0] == '{' {
var sch Schema
if err := json.Unmarshal(data, &sch); err != nil {
@@ -261,7 +261,7 @@ func (s *SchemaOrBool) UnmarshalJSON(data []byte) error {
}
nw.Schema = &sch
}
- nw.Allows = !(data[0] == 'f' && data[1] == 'a' && data[2] == 'l' && data[3] == 's' && data[4] == 'e')
+ nw.Allows = !bytes.Equal(data, []byte("false"))
}
*s = nw
return nil
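The stricter `SchemaOrBool` handling accepts any non-empty payload and treats only the exact token `false` as disallowing. The old guard `len(data) >= 4` skipped short but valid payloads such as `{}` or `1` entirely, leaving `Allows` at its false zero value, and the byte-by-byte prefix check was fragile besides. A small check of the new behavior, as an assumed simplification:

```go
package main

import (
	"bytes"
	"fmt"
)

// allows mirrors, in simplified form, the new Allows computation:
// anything non-empty other than the exact token "false" allows.
func allows(data []byte) bool {
	if len(data) == 0 {
		return false // zero value, as when no JSON was seen at all
	}
	return !bytes.Equal(data, []byte("false"))
}

func main() {
	fmt.Println(allows([]byte("true")))  // true
	fmt.Println(allows([]byte("{}")))    // true; skipped by the old len(data) >= 4 guard
	fmt.Println(allows([]byte("false"))) // false
}
```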
diff --git a/vendor/github.com/go-openapi/spec/url_go18.go b/vendor/github.com/go-openapi/spec/url_go18.go
deleted file mode 100644
index 60b78515..00000000
--- a/vendor/github.com/go-openapi/spec/url_go18.go
+++ /dev/null
@@ -1,8 +0,0 @@
-//go:build !go1.19
-// +build !go1.19
-
-package spec
-
-import "net/url"
-
-var parseURL = url.Parse
diff --git a/vendor/github.com/go-openapi/spec/url_go19.go b/vendor/github.com/go-openapi/spec/url_go19.go
index 392e3e63..5bdfe40b 100644
--- a/vendor/github.com/go-openapi/spec/url_go19.go
+++ b/vendor/github.com/go-openapi/spec/url_go19.go
@@ -1,6 +1,3 @@
-//go:build go1.19
-// +build go1.19
-
package spec
import "net/url"
diff --git a/vendor/github.com/go-openapi/strfmt/.golangci.yml b/vendor/github.com/go-openapi/strfmt/.golangci.yml
index be4899cb..22f8d21c 100644
--- a/vendor/github.com/go-openapi/strfmt/.golangci.yml
+++ b/vendor/github.com/go-openapi/strfmt/.golangci.yml
@@ -4,56 +4,58 @@ linters-settings:
golint:
min-confidence: 0
gocyclo:
- min-complexity: 31
+ min-complexity: 45
maligned:
suggest-new: true
dupl:
- threshold: 100
+ threshold: 200
goconst:
min-len: 2
- min-occurrences: 4
+ min-occurrences: 3
linters:
- enable:
- - revive
- - goimports
- - gosec
+ enable-all: true
+ disable:
+ - maligned
- unparam
- - unconvert
- - predeclared
- - prealloc
- - misspell
-
- # disable:
- # - maligned
- # - lll
- # - gochecknoinits
- # - gochecknoglobals
- # - godox
- # - gocognit
- # - whitespace
- # - wsl
- # - funlen
- # - wrapcheck
- # - testpackage
- # - nlreturn
- # - gofumpt
- # - goerr113
- # - gci
- # - gomnd
- # - godot
- # - exhaustivestruct
- # - paralleltest
- # - varnamelen
- # - ireturn
- # - exhaustruct
- # #- thelper
-
-issues:
- exclude-rules:
- - path: bson.go
- text: "should be .*ObjectID"
- linters:
- - golint
- - stylecheck
-
+ - lll
+ - gochecknoinits
+ - gochecknoglobals
+ - funlen
+ - godox
+ - gocognit
+ - whitespace
+ - wsl
+ - wrapcheck
+ - testpackage
+ - nlreturn
+ - gomnd
+ - exhaustivestruct
+ - goerr113
+ - errorlint
+ - nestif
+ - godot
+ - gofumpt
+ - paralleltest
+ - tparallel
+ - thelper
+ - ifshort
+ - exhaustruct
+ - varnamelen
+ - gci
+ - depguard
+ - errchkjson
+ - inamedparam
+ - nonamedreturns
+ - musttag
+ - ireturn
+ - forcetypeassert
+ - cyclop
+ # deprecated linters
+ - deadcode
+ - interfacer
+ - scopelint
+ - varcheck
+ - structcheck
+ - golint
+ - nosnakecase
diff --git a/vendor/github.com/go-openapi/strfmt/README.md b/vendor/github.com/go-openapi/strfmt/README.md
index 0cf89d77..f6b39c6c 100644
--- a/vendor/github.com/go-openapi/strfmt/README.md
+++ b/vendor/github.com/go-openapi/strfmt/README.md
@@ -1,8 +1,7 @@
-# Strfmt [](https://travis-ci.org/go-openapi/strfmt) [](https://codecov.io/gh/go-openapi/strfmt) [](https://slackin.goswagger.io)
-
+# Strfmt [](https://github.com/go-openapi/strfmt/actions?query=workflow%3A"go+test") [](https://codecov.io/gh/go-openapi/strfmt)
+[](https://slackin.goswagger.io)
[](https://raw.githubusercontent.com/go-openapi/strfmt/master/LICENSE)
[](http://godoc.org/github.com/go-openapi/strfmt)
-[](https://golangci.com)
[](https://goreportcard.com/report/github.com/go-openapi/strfmt)
This package exposes a registry of data types to support string formats in the go-openapi toolkit.
diff --git a/vendor/github.com/go-openapi/strfmt/bson.go b/vendor/github.com/go-openapi/strfmt/bson.go
index a8a3604a..cfa9a526 100644
--- a/vendor/github.com/go-openapi/strfmt/bson.go
+++ b/vendor/github.com/go-openapi/strfmt/bson.go
@@ -39,10 +39,10 @@ func IsBSONObjectID(str string) bool {
// ObjectId represents a BSON object ID (alias to go.mongodb.org/mongo-driver/bson/primitive.ObjectID)
//
// swagger:strfmt bsonobjectid
-type ObjectId bsonprim.ObjectID //nolint:revive
+type ObjectId bsonprim.ObjectID //nolint:revive,stylecheck
// NewObjectId creates a ObjectId from a Hex String
-func NewObjectId(hex string) ObjectId { //nolint:revive
+func NewObjectId(hex string) ObjectId { //nolint:revive,stylecheck
oid, err := bsonprim.ObjectIDFromHex(hex)
if err != nil {
panic(err)
@@ -135,7 +135,7 @@ func (id *ObjectId) UnmarshalBSON(data []byte) error {
// BSON document if the error is nil.
func (id ObjectId) MarshalBSONValue() (bsontype.Type, []byte, error) {
oid := bsonprim.ObjectID(id)
- return bsontype.ObjectID, oid[:], nil
+ return bson.TypeObjectID, oid[:], nil
}
// UnmarshalBSONValue is an interface implemented by types that can unmarshal a
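The `bsontype.ObjectID` to `bson.TypeObjectID` swap (and the matching `bson.TypeDateTime`/`bson.TypeNull` changes in time.go below) migrates to the type-tag constants the mongo driver now exports from the `bson` package directly; the underlying values are identical. A tiny check, assuming a vendored driver version that still exposes both spellings:

```go
package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/bsontype"
)

func main() {
	// Same BSON type tag (0x07); only the exporting package changed.
	fmt.Println(bson.TypeObjectID == bsontype.ObjectID) // true
	fmt.Printf("0x%02x\n", byte(bson.TypeObjectID))     // 0x07
}
```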
diff --git a/vendor/github.com/go-openapi/strfmt/default.go b/vendor/github.com/go-openapi/strfmt/default.go
index a89a4de3..28137140 100644
--- a/vendor/github.com/go-openapi/strfmt/default.go
+++ b/vendor/github.com/go-openapi/strfmt/default.go
@@ -25,6 +25,7 @@ import (
"strings"
"github.com/asaskevich/govalidator"
+ "github.com/google/uuid"
"go.mongodb.org/mongo-driver/bson"
)
@@ -57,24 +58,35 @@ const (
// - long top-level domain names (e.g. example.london) are permitted
// - symbol unicode points are permitted (e.g. emoji) (not for top-level domain)
HostnamePattern = `^([a-zA-Z0-9\p{S}\p{L}]((-?[a-zA-Z0-9\p{S}\p{L}]{0,62})?)|([a-zA-Z0-9\p{S}\p{L}](([a-zA-Z0-9-\p{S}\p{L}]{0,61}[a-zA-Z0-9\p{S}\p{L}])?)(\.)){1,}([a-zA-Z\p{L}]){2,63})$`
- // UUIDPattern Regex for UUID that allows uppercase
- UUIDPattern = `(?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{12}$`
- // UUID3Pattern Regex for UUID3 that allows uppercase
- UUID3Pattern = `(?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?3[0-9a-f]{3}-?[0-9a-f]{4}-?[0-9a-f]{12}$`
- // UUID4Pattern Regex for UUID4 that allows uppercase
- UUID4Pattern = `(?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?4[0-9a-f]{3}-?[89ab][0-9a-f]{3}-?[0-9a-f]{12}$`
- // UUID5Pattern Regex for UUID5 that allows uppercase
- UUID5Pattern = `(?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?5[0-9a-f]{3}-?[89ab][0-9a-f]{3}-?[0-9a-f]{12}$`
+
// json null type
jsonNull = "null"
)
+const (
+ // UUIDPattern Regex for UUID that allows uppercase
+ //
+ // Deprecated: strfmt no longer uses regular expressions to validate UUIDs.
+ UUIDPattern = `(?i)(^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$)|(^[0-9a-f]{32}$)`
+
+ // UUID3Pattern Regex for UUID3 that allows uppercase
+ //
+ // Deprecated: strfmt no longer uses regular expressions to validate UUIDs.
+ UUID3Pattern = `(?i)(^[0-9a-f]{8}-[0-9a-f]{4}-3[0-9a-f]{3}-[0-9a-f]{4}-[0-9a-f]{12}$)|(^[0-9a-f]{12}3[0-9a-f]{3}?[0-9a-f]{16}$)`
+
+ // UUID4Pattern Regex for UUID4 that allows uppercase
+ //
+ // Deprecated: strfmt no longer uses regular expressions to validate UUIDs.
+ UUID4Pattern = `(?i)(^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$)|(^[0-9a-f]{12}4[0-9a-f]{3}[89ab][0-9a-f]{15}$)`
+
+ // UUID5Pattern Regex for UUID5 that allows uppercase
+ //
+ // Deprecated: strfmt no longer uses regular expressions to validate UUIDs.
+ UUID5Pattern = `(?i)(^[0-9a-f]{8}-[0-9a-f]{4}-5[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$)|(^[0-9a-f]{12}5[0-9a-f]{3}[89ab][0-9a-f]{15}$)`
+)
+
var (
rxHostname = regexp.MustCompile(HostnamePattern)
- rxUUID = regexp.MustCompile(UUIDPattern)
- rxUUID3 = regexp.MustCompile(UUID3Pattern)
- rxUUID4 = regexp.MustCompile(UUID4Pattern)
- rxUUID5 = regexp.MustCompile(UUID5Pattern)
)
// IsHostname returns true when the string is a valid hostname
@@ -99,24 +111,28 @@ func IsHostname(str string) bool {
return valid
}
-// IsUUID returns true is the string matches a UUID, upper case is allowed
+// IsUUID returns true if the string matches a UUID (in any version, including v6 and v7), upper case is allowed
func IsUUID(str string) bool {
- return rxUUID.MatchString(str)
+ _, err := uuid.Parse(str)
+ return err == nil
}
-// IsUUID3 returns true is the string matches a UUID, upper case is allowed
+// IsUUID3 returns true if the string matches a UUID v3, upper case is allowed
func IsUUID3(str string) bool {
- return rxUUID3.MatchString(str)
+ id, err := uuid.Parse(str)
+ return err == nil && id.Version() == uuid.Version(3)
}
-// IsUUID4 returns true is the string matches a UUID, upper case is allowed
+// IsUUID4 returns true if the string matches a UUID v4, upper case is allowed
func IsUUID4(str string) bool {
- return rxUUID4.MatchString(str)
+ id, err := uuid.Parse(str)
+ return err == nil && id.Version() == uuid.Version(4)
}
-// IsUUID5 returns true is the string matches a UUID, upper case is allowed
+// IsUUID5 returns true if the string matches a UUID v5, upper case is allowed
func IsUUID5(str string) bool {
- return rxUUID5.MatchString(str)
+ id, err := uuid.Parse(str)
+ return err == nil && id.Version() == uuid.Version(5)
}
// IsEmail validates an email address.
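Validation now delegates to `github.com/google/uuid` (imported in the hunk above) instead of the regexes, which both broadens the accepted encodings (braced and `urn:uuid:` forms, and any version including v6/v7 for plain `IsUUID`) and pins the version checks to the parsed version nibble. A usage sketch of the same parser-based check:

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

// isUUID4 follows the pattern the diff adopts: parse, then check version.
func isUUID4(s string) bool {
	id, err := uuid.Parse(s)
	return err == nil && id.Version() == uuid.Version(4)
}

func main() {
	fmt.Println(isUUID4("9c5b94b1-35ad-49bb-b118-8e8fc24abf80"))          // true
	fmt.Println(isUUID4("urn:uuid:9c5b94b1-35ad-49bb-b118-8e8fc24abf80")) // true: URN form parses too
	fmt.Println(isUUID4("00000000-0000-0000-0000-000000000000"))          // false: version nibble is 0
}
```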
diff --git a/vendor/github.com/go-openapi/strfmt/format.go b/vendor/github.com/go-openapi/strfmt/format.go
index ad3b3c35..888e107c 100644
--- a/vendor/github.com/go-openapi/strfmt/format.go
+++ b/vendor/github.com/go-openapi/strfmt/format.go
@@ -16,6 +16,7 @@ package strfmt
import (
"encoding"
+ stderrors "errors"
"fmt"
"reflect"
"strings"
@@ -94,7 +95,7 @@ func NewSeededFormats(seeds []knownFormat, normalizer NameNormalizer) Registry {
}
// MapStructureHookFunc is a decode hook function for mapstructure
-func (f *defaultFormats) MapStructureHookFunc() mapstructure.DecodeHookFunc { //nolint:gocyclo,cyclop
+func (f *defaultFormats) MapStructureHookFunc() mapstructure.DecodeHookFunc {
return func(from reflect.Type, to reflect.Type, obj interface{}) (interface{}, error) {
if from.Kind() != reflect.String {
return obj, nil
@@ -117,7 +118,7 @@ func (f *defaultFormats) MapStructureHookFunc() mapstructure.DecodeHookFunc { //
case "datetime":
input := data
if len(input) == 0 {
- return nil, fmt.Errorf("empty string is an invalid datetime format")
+ return nil, stderrors.New("empty string is an invalid datetime format")
}
return ParseDateTime(input)
case "duration":
diff --git a/vendor/github.com/go-openapi/strfmt/time.go b/vendor/github.com/go-openapi/strfmt/time.go
index 9bef4c3b..f08ba4da 100644
--- a/vendor/github.com/go-openapi/strfmt/time.go
+++ b/vendor/github.com/go-openapi/strfmt/time.go
@@ -76,6 +76,8 @@ const (
ISO8601TimeWithReducedPrecisionLocaltime = "2006-01-02T15:04"
// ISO8601TimeUniversalSortableDateTimePattern represents a ISO8601 universal sortable date time pattern.
ISO8601TimeUniversalSortableDateTimePattern = "2006-01-02 15:04:05"
+ // ISO8601TimeUniversalSortableDateTimePatternShortForm is the short (date-only) form of ISO8601TimeUniversalSortableDateTimePattern.
+ ISO8601TimeUniversalSortableDateTimePatternShortForm = "2006-01-02"
// DateTimePattern pattern to match for the date-time format from http://tools.ietf.org/html/rfc3339#section-5.6
DateTimePattern = `^([0-9]{2}):([0-9]{2}):([0-9]{2})(.[0-9]+)?(z|([+-][0-9]{2}:[0-9]{2}))$`
)
@@ -84,7 +86,7 @@ var (
rxDateTime = regexp.MustCompile(DateTimePattern)
// DateTimeFormats is the collection of formats used by ParseDateTime()
- DateTimeFormats = []string{RFC3339Micro, RFC3339MicroNoColon, RFC3339Millis, RFC3339MillisNoColon, time.RFC3339, time.RFC3339Nano, ISO8601LocalTime, ISO8601TimeWithReducedPrecision, ISO8601TimeWithReducedPrecisionLocaltime, ISO8601TimeUniversalSortableDateTimePattern}
+ DateTimeFormats = []string{RFC3339Micro, RFC3339MicroNoColon, RFC3339Millis, RFC3339MillisNoColon, time.RFC3339, time.RFC3339Nano, ISO8601LocalTime, ISO8601TimeWithReducedPrecision, ISO8601TimeWithReducedPrecisionLocaltime, ISO8601TimeUniversalSortableDateTimePattern, ISO8601TimeUniversalSortableDateTimePatternShortForm}
// MarshalFormat sets the time resolution format used for marshaling time (set to milliseconds)
MarshalFormat = RFC3339Millis
@@ -245,7 +247,7 @@ func (t DateTime) MarshalBSONValue() (bsontype.Type, []byte, error) {
buf := make([]byte, 8)
binary.LittleEndian.PutUint64(buf, uint64(i64))
- return bsontype.DateTime, buf, nil
+ return bson.TypeDateTime, buf, nil
}
// UnmarshalBSONValue is an interface implemented by types that can unmarshal a
@@ -253,7 +255,7 @@ func (t DateTime) MarshalBSONValue() (bsontype.Type, []byte, error) {
// assumed to be valid. UnmarshalBSONValue must copy the BSON value bytes if it
// wishes to retain the data after returning.
func (t *DateTime) UnmarshalBSONValue(tpe bsontype.Type, data []byte) error {
- if tpe == bsontype.Null {
+ if tpe == bson.TypeNull {
*t = DateTime{}
return nil
}
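With `ISO8601TimeUniversalSortableDateTimePatternShortForm` appended to `DateTimeFormats` above, `ParseDateTime` now also accepts bare dates, with the time-of-day defaulting to midnight. A stdlib-only illustration of the first-match-wins loop that `ParseDateTime` performs (simplified; not the vendored implementation):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Ordered like DateTimeFormats: more specific layouts first,
	// the new date-only short form last.
	layouts := []string{time.RFC3339, "2006-01-02 15:04:05", "2006-01-02"}
	for _, layout := range layouts {
		if t, err := time.Parse(layout, "2024-05-01"); err == nil {
			fmt.Printf("matched %q -> %s\n", layout, t.UTC())
			return
		}
	}
	fmt.Println("no layout matched")
}
```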
diff --git a/vendor/github.com/go-openapi/swag/.gitignore b/vendor/github.com/go-openapi/swag/.gitignore
index d69b53ac..c4b1b64f 100644
--- a/vendor/github.com/go-openapi/swag/.gitignore
+++ b/vendor/github.com/go-openapi/swag/.gitignore
@@ -2,3 +2,4 @@ secrets.yml
vendor
Godeps
.idea
+*.out
diff --git a/vendor/github.com/go-openapi/swag/.golangci.yml b/vendor/github.com/go-openapi/swag/.golangci.yml
index bf503e40..d2fafb8a 100644
--- a/vendor/github.com/go-openapi/swag/.golangci.yml
+++ b/vendor/github.com/go-openapi/swag/.golangci.yml
@@ -1,54 +1,56 @@
linters-settings:
- govet:
- check-shadowing: true
- golint:
- min-confidence: 0
gocyclo:
- min-complexity: 25
- maligned:
- suggest-new: true
+ min-complexity: 45
dupl:
- threshold: 100
+ threshold: 200
goconst:
- min-len: 3
- min-occurrences: 2
+ min-len: 2
+ min-occurrences: 3
linters:
enable-all: true
disable:
- - maligned
+ - recvcheck
+ - unparam
- lll
- gochecknoinits
- gochecknoglobals
- - nlreturn
- - testpackage
- - wrapcheck
- - gomnd
- - exhaustive
- - exhaustivestruct
- - goerr113
- - wsl
- - whitespace
- - gofumpt
- - godot
- - nestif
- - godox
- funlen
- - gci
+ - godox
- gocognit
+ - whitespace
+ - wsl
+ - wrapcheck
+ - testpackage
+ - nlreturn
+ - errorlint
+ - nestif
+ - godot
+ - gofumpt
- paralleltest
+ - tparallel
- thelper
- - ifshort
- - gomoddirectives
- - cyclop
- - forcetypeassert
- - ireturn
- - tagliatelle
- - varnamelen
- - goimports
- - tenv
- - golint
- exhaustruct
- - nilnil
+ - varnamelen
+ - gci
+ - depguard
+ - errchkjson
+ - inamedparam
- nonamedreturns
- - nosnakecase
+ - musttag
+ - ireturn
+ - forcetypeassert
+ - cyclop
+ # deprecated linters
+ #- deadcode
+ #- interfacer
+ #- scopelint
+ #- varcheck
+ #- structcheck
+ #- golint
+ #- nosnakecase
+ #- maligned
+ #- goerr113
+ #- ifshort
+ #- gomnd
+ #- exhaustivestruct
diff --git a/vendor/github.com/go-openapi/swag/BENCHMARK.md b/vendor/github.com/go-openapi/swag/BENCHMARK.md
new file mode 100644
index 00000000..e7f28ed6
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/BENCHMARK.md
@@ -0,0 +1,52 @@
+# Benchmarks
+
+## Name mangling utilities
+
+```bash
+go test -bench XXX -run XXX -benchtime 30s
+```
+
+### Benchmarks at b3e7a5386f996177e4808f11acb2aa93a0f660df
+
+```
+goos: linux
+goarch: amd64
+pkg: github.com/go-openapi/swag
+cpu: Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz
+BenchmarkToXXXName/ToGoName-4 862623 44101 ns/op 10450 B/op 732 allocs/op
+BenchmarkToXXXName/ToVarName-4 853656 40728 ns/op 10468 B/op 734 allocs/op
+BenchmarkToXXXName/ToFileName-4 1268312 27813 ns/op 9785 B/op 617 allocs/op
+BenchmarkToXXXName/ToCommandName-4 1276322 27903 ns/op 9785 B/op 617 allocs/op
+BenchmarkToXXXName/ToHumanNameLower-4 895334 40354 ns/op 10472 B/op 731 allocs/op
+BenchmarkToXXXName/ToHumanNameTitle-4 882441 40678 ns/op 10566 B/op 749 allocs/op
+```
+
+### Benchmarks after PR #79
+
+Roughly a 10x performance improvement, and roughly 100x fewer memory allocations.
+
+```
+goos: linux
+goarch: amd64
+pkg: github.com/go-openapi/swag
+cpu: Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz
+BenchmarkToXXXName/ToGoName-4 9595830 3991 ns/op 42 B/op 5 allocs/op
+BenchmarkToXXXName/ToVarName-4 9194276 3984 ns/op 62 B/op 7 allocs/op
+BenchmarkToXXXName/ToFileName-4 17002711 2123 ns/op 147 B/op 7 allocs/op
+BenchmarkToXXXName/ToCommandName-4 16772926 2111 ns/op 147 B/op 7 allocs/op
+BenchmarkToXXXName/ToHumanNameLower-4 9788331 3749 ns/op 92 B/op 6 allocs/op
+BenchmarkToXXXName/ToHumanNameTitle-4 9188260 3941 ns/op 104 B/op 6 allocs/op
+```
+
+```
+goos: linux
+goarch: amd64
+pkg: github.com/go-openapi/swag
+cpu: AMD Ryzen 7 5800X 8-Core Processor
+BenchmarkToXXXName/ToGoName-16 18527378 1972 ns/op 42 B/op 5 allocs/op
+BenchmarkToXXXName/ToVarName-16 15552692 2093 ns/op 62 B/op 7 allocs/op
+BenchmarkToXXXName/ToFileName-16 32161176 1117 ns/op 147 B/op 7 allocs/op
+BenchmarkToXXXName/ToCommandName-16 32256634 1137 ns/op 147 B/op 7 allocs/op
+BenchmarkToXXXName/ToHumanNameLower-16 18599661 1946 ns/op 92 B/op 6 allocs/op
+BenchmarkToXXXName/ToHumanNameTitle-16 17581353 2054 ns/op 105 B/op 6 allocs/op
+```
diff --git a/vendor/github.com/go-openapi/swag/README.md b/vendor/github.com/go-openapi/swag/README.md
index 217f6fa5..a7292229 100644
--- a/vendor/github.com/go-openapi/swag/README.md
+++ b/vendor/github.com/go-openapi/swag/README.md
@@ -1,7 +1,8 @@
-# Swag [](https://travis-ci.org/go-openapi/swag) [](https://codecov.io/gh/go-openapi/swag) [](https://slackin.goswagger.io)
+# Swag [](https://github.com/go-openapi/swag/actions?query=workflow%3A"go+test") [](https://codecov.io/gh/go-openapi/swag)
+[](https://slackin.goswagger.io)
[](https://raw.githubusercontent.com/go-openapi/swag/master/LICENSE)
-[](http://godoc.org/github.com/go-openapi/swag)
+[](https://pkg.go.dev/github.com/go-openapi/swag)
[](https://goreportcard.com/report/github.com/go-openapi/swag)
Contains a bunch of helper functions for go-openapi and go-swagger projects.
@@ -18,4 +19,5 @@ You may also use it standalone for your projects.
 This repo has only a few dependencies outside of the standard library:
-* YAML utilities depend on gopkg.in/yaml.v2
+* YAML utilities depend on `gopkg.in/yaml.v3`
+* `github.com/mailru/easyjson v0.7.7`
diff --git a/vendor/github.com/go-openapi/swag/errors.go b/vendor/github.com/go-openapi/swag/errors.go
new file mode 100644
index 00000000..6c67fbf9
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/errors.go
@@ -0,0 +1,15 @@
+package swag
+
+type swagError string
+
+const (
+ // ErrYAML is an error raised by YAML utilities
+ ErrYAML swagError = "yaml error"
+
+ // ErrLoader is an error raised by the file loader utility
+ ErrLoader swagError = "loader error"
+)
+
+func (e swagError) Error() string {
+ return string(e)
+}
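+
+// Usage sketch: the loaders and YAML helpers in this package wrap these
+// sentinels with %w, so callers can discriminate failures with errors.Is:
+//
+//	if _, err := LoadFromFileOrHTTP(pth); errors.Is(err, ErrLoader) {
+//		// handle loader failure
+//	}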
diff --git a/vendor/github.com/go-openapi/swag/initialism_index.go b/vendor/github.com/go-openapi/swag/initialism_index.go
new file mode 100644
index 00000000..20a359bb
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/initialism_index.go
@@ -0,0 +1,202 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package swag
+
+import (
+ "sort"
+ "strings"
+ "sync"
+)
+
+var (
+ // commonInitialisms are common acronyms that are kept as whole uppercased words.
+ commonInitialisms *indexOfInitialisms
+
+ // initialisms is a slice of sorted initialisms
+ initialisms []string
+
+ // a copy of initialisms pre-baked as []rune
+ initialismsRunes [][]rune
+ initialismsUpperCased [][]rune
+
+ isInitialism func(string) bool
+
+ maxAllocMatches int
+)
+
+func init() {
+ // Taken from https://github.com/golang/lint/blob/3390df4df2787994aea98de825b964ac7944b817/lint.go#L732-L769
+ configuredInitialisms := map[string]bool{
+ "ACL": true,
+ "API": true,
+ "ASCII": true,
+ "CPU": true,
+ "CSS": true,
+ "DNS": true,
+ "EOF": true,
+ "GUID": true,
+ "HTML": true,
+ "HTTPS": true,
+ "HTTP": true,
+ "ID": true,
+ "IP": true,
+ "IPv4": true,
+ "IPv6": true,
+ "JSON": true,
+ "LHS": true,
+ "OAI": true,
+ "QPS": true,
+ "RAM": true,
+ "RHS": true,
+ "RPC": true,
+ "SLA": true,
+ "SMTP": true,
+ "SQL": true,
+ "SSH": true,
+ "TCP": true,
+ "TLS": true,
+ "TTL": true,
+ "UDP": true,
+ "UI": true,
+ "UID": true,
+ "UUID": true,
+ "URI": true,
+ "URL": true,
+ "UTF8": true,
+ "VM": true,
+ "XML": true,
+ "XMPP": true,
+ "XSRF": true,
+ "XSS": true,
+ }
+
+ // a thread-safe index of initialisms
+ commonInitialisms = newIndexOfInitialisms().load(configuredInitialisms)
+ initialisms = commonInitialisms.sorted()
+ initialismsRunes = asRunes(initialisms)
+ initialismsUpperCased = asUpperCased(initialisms)
+ maxAllocMatches = maxAllocHeuristic(initialismsRunes)
+
+ // a test function
+ isInitialism = commonInitialisms.isInitialism
+}
+
+func asRunes(in []string) [][]rune {
+ out := make([][]rune, len(in))
+ for i, initialism := range in {
+ out[i] = []rune(initialism)
+ }
+
+ return out
+}
+
+func asUpperCased(in []string) [][]rune {
+ out := make([][]rune, len(in))
+
+ for i, initialism := range in {
+ out[i] = []rune(upper(trim(initialism)))
+ }
+
+ return out
+}
+
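+// maxAllocHeuristic computes the largest number of initialisms that share a
+// first rune. This bounds how many new matches can start at any given rune
+// position, and is used as a heuristic capacity when pre-sizing pooled slices.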
+func maxAllocHeuristic(in [][]rune) int {
+ heuristic := make(map[rune]int)
+ for _, initialism := range in {
+ heuristic[initialism[0]]++
+ }
+
+ var maxAlloc int
+ for _, val := range heuristic {
+ if val > maxAlloc {
+ maxAlloc = val
+ }
+ }
+
+ return maxAlloc
+}
+
+// AddInitialisms adds additional initialisms
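+//
+// Example (hypothetical initialism):
+//
+//	AddInitialisms("GARM")
+//	ToGoName("garm_pool") // should now yield "GARMPool"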
+func AddInitialisms(words ...string) {
+ for _, word := range words {
+ // commonInitialisms[upper(word)] = true
+ commonInitialisms.add(upper(word))
+ }
+ // sort again
+ initialisms = commonInitialisms.sorted()
+ initialismsRunes = asRunes(initialisms)
+ initialismsUpperCased = asUpperCased(initialisms)
+}
+
+// indexOfInitialisms is a thread-safe implementation of the sorted index of initialisms.
+// Since go1.9, this may be implemented with sync.Map.
+type indexOfInitialisms struct {
+ sortMutex *sync.Mutex
+ index *sync.Map
+}
+
+func newIndexOfInitialisms() *indexOfInitialisms {
+ return &indexOfInitialisms{
+ sortMutex: new(sync.Mutex),
+ index: new(sync.Map),
+ }
+}
+
+func (m *indexOfInitialisms) load(initial map[string]bool) *indexOfInitialisms {
+ m.sortMutex.Lock()
+ defer m.sortMutex.Unlock()
+ for k, v := range initial {
+ m.index.Store(k, v)
+ }
+ return m
+}
+
+func (m *indexOfInitialisms) isInitialism(key string) bool {
+ _, ok := m.index.Load(key)
+ return ok
+}
+
+func (m *indexOfInitialisms) add(key string) *indexOfInitialisms {
+ m.index.Store(key, true)
+ return m
+}
+
+func (m *indexOfInitialisms) sorted() (result []string) {
+ m.sortMutex.Lock()
+ defer m.sortMutex.Unlock()
+ m.index.Range(func(key, _ interface{}) bool {
+ k := key.(string)
+ result = append(result, k)
+ return true
+ })
+ sort.Sort(sort.Reverse(byInitialism(result)))
+ return
+}
+
+type byInitialism []string
+
+func (s byInitialism) Len() int {
+ return len(s)
+}
+func (s byInitialism) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+func (s byInitialism) Less(i, j int) bool {
+ if len(s[i]) != len(s[j]) {
+ return len(s[i]) < len(s[j])
+ }
+
+ return strings.Compare(s[i], s[j]) > 0
+}
diff --git a/vendor/github.com/go-openapi/swag/json.go b/vendor/github.com/go-openapi/swag/json.go
index 7e9902ca..c7caa990 100644
--- a/vendor/github.com/go-openapi/swag/json.go
+++ b/vendor/github.com/go-openapi/swag/json.go
@@ -126,7 +126,8 @@ func ConcatJSON(blobs ...[]byte) []byte {
continue // don't know how to concatenate non container objects
}
- if len(b) < 3 { // yep empty but also the last one, so closing this thing
+ const minLengthIfNotEmpty = 3
+ if len(b) < minLengthIfNotEmpty { // yep empty but also the last one, so closing this thing
if i == last && a > 0 {
if err := buf.WriteByte(closing); err != nil {
log.Println(err)
diff --git a/vendor/github.com/go-openapi/swag/loading.go b/vendor/github.com/go-openapi/swag/loading.go
index 00038c37..658a24b7 100644
--- a/vendor/github.com/go-openapi/swag/loading.go
+++ b/vendor/github.com/go-openapi/swag/loading.go
@@ -21,6 +21,7 @@ import (
"net/http"
"net/url"
"os"
+ "path"
"path/filepath"
"runtime"
"strings"
@@ -40,43 +41,97 @@ var LoadHTTPBasicAuthPassword = ""
var LoadHTTPCustomHeaders = map[string]string{}
// LoadFromFileOrHTTP loads the bytes from a file or a remote http server based on the path passed in
-func LoadFromFileOrHTTP(path string) ([]byte, error) {
- return LoadStrategy(path, os.ReadFile, loadHTTPBytes(LoadHTTPTimeout))(path)
+func LoadFromFileOrHTTP(pth string) ([]byte, error) {
+ return LoadStrategy(pth, os.ReadFile, loadHTTPBytes(LoadHTTPTimeout))(pth)
}
// LoadFromFileOrHTTPWithTimeout loads the bytes from a file or a remote http server based on the path passed in
// timeout arg allows for per request overriding of the request timeout
-func LoadFromFileOrHTTPWithTimeout(path string, timeout time.Duration) ([]byte, error) {
- return LoadStrategy(path, os.ReadFile, loadHTTPBytes(timeout))(path)
+func LoadFromFileOrHTTPWithTimeout(pth string, timeout time.Duration) ([]byte, error) {
+ return LoadStrategy(pth, os.ReadFile, loadHTTPBytes(timeout))(pth)
}
-// LoadStrategy returns a loader function for a given path or uri
-func LoadStrategy(path string, local, remote func(string) ([]byte, error)) func(string) ([]byte, error) {
- if strings.HasPrefix(path, "http") {
+// LoadStrategy returns a loader function for a given path or URI.
+//
+// The load strategy returns the remote load for any path starting with `http`.
+// So this works for any URI with a scheme `http` or `https`.
+//
+// The fallback strategy is to call the local loader.
+//
+// The local loader takes a local file system path (absolute or relative) as argument,
+// or alternatively a `file://...` URI, **without host** (see also below for windows).
+//
+// There are a few liberties taken, initially intended to be tolerant regarding the URI syntax,
+// especially on windows.
+//
+// Before the local loader is called, the given path is transformed:
+// - percent-encoded characters are unescaped
+// - simple paths (e.g. `./folder/file`) are passed as-is
+// - on windows, occurrences of `/` are replaced by `\`, so providing a relative path such as `folder/file` works too.
+//
+// For paths provided as URIs with the "file" scheme, please note that:
+// - `file://` is simply stripped.
+// This means that the host part of the URI is not parsed at all.
+// For example, `file:///folder/file" becomes "/folder/file`,
+// but `file://localhost/folder/file` becomes `localhost/folder/file` on unix systems.
+// Similarly, `file://./folder/file` yields `./folder/file`.
+// - on windows, `file://...` can take a host so as to specify a UNC share location.
+//
+// Reminder about windows-specifics:
+// - `file://host/folder/file` becomes a UNC path like `\\host\folder\file` (no port specification is supported)
+// - `file:///c:/folder/file` becomes `C:\folder\file`
+// - `file://c:/folder/file` is tolerated (without leading `/`) and becomes `c:\folder\file`
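+//
+// A minimal usage sketch (remoteLoader stands for any func(string) ([]byte, error)):
+//
+//	loader := LoadStrategy(pth, os.ReadFile, remoteLoader)
+//	buf, err := loader(pth)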
+func LoadStrategy(pth string, local, remote func(string) ([]byte, error)) func(string) ([]byte, error) {
+ if strings.HasPrefix(pth, "http") {
return remote
}
- return func(pth string) ([]byte, error) {
- upth, err := pathUnescape(pth)
+
+ return func(p string) ([]byte, error) {
+ upth, err := url.PathUnescape(p)
if err != nil {
return nil, err
}
- if strings.HasPrefix(pth, `file://`) {
- if runtime.GOOS == "windows" {
- // support for canonical file URIs on windows.
- // Zero tolerance here for dodgy URIs.
- u, _ := url.Parse(upth)
- if u.Host != "" {
- // assume UNC name (volume share)
- // file://host/share/folder\... ==> \\host\share\path\folder
- // NOTE: UNC port not yet supported
- upth = strings.Join([]string{`\`, u.Host, u.Path}, `\`)
- } else {
- // file:///c:/folder/... ==> just remove the leading slash
- upth = strings.TrimPrefix(upth, `file:///`)
- }
- } else {
- upth = strings.TrimPrefix(upth, `file://`)
+ if !strings.HasPrefix(p, `file://`) {
+ // regular file path provided: just normalize slashes
+ return local(filepath.FromSlash(upth))
+ }
+
+ if runtime.GOOS != "windows" {
+ // crude processing: this leaves full URIs with a host with a (mostly) unexpected result
+ upth = strings.TrimPrefix(upth, `file://`)
+
+ return local(filepath.FromSlash(upth))
+ }
+
+ // windows-only pre-processing of file://... URIs
+
+ // support for canonical file URIs on windows.
+ u, err := url.Parse(filepath.ToSlash(upth))
+ if err != nil {
+ return nil, err
+ }
+
+ if u.Host != "" {
+ // assume UNC name (volume share)
+ // NOTE: UNC port not yet supported
+
+ // when the "host" segment is a drive letter:
+ // file://C:/folder/... => C:\folder
+ upth = path.Clean(strings.Join([]string{u.Host, u.Path}, `/`))
+ if !strings.HasSuffix(u.Host, ":") && u.Host[0] != '.' {
+ // tolerance: if we have a leading dot, this can't be a host
+ // file://host/share/folder\... ==> \\host\share\path\folder
+ upth = "//" + upth
+ }
+ } else {
+ // no host, let's figure out if this is a drive letter
+ upth = strings.TrimPrefix(upth, `file://`)
+ first, _, _ := strings.Cut(strings.TrimPrefix(u.Path, "/"), "/")
+ if strings.HasSuffix(first, ":") {
+ // drive letter in the first segment:
+ // file:///c:/folder/... ==> strip the leading slash
+ upth = strings.TrimPrefix(upth, `/`)
}
}
@@ -113,7 +168,7 @@ func loadHTTPBytes(timeout time.Duration) func(path string) ([]byte, error) {
}
if resp.StatusCode != http.StatusOK {
- return nil, fmt.Errorf("could not access document at %q [%s] ", path, resp.Status)
+ return nil, fmt.Errorf("could not access document at %q [%s]: %w", path, resp.Status, ErrLoader)
}
return io.ReadAll(resp.Body)
diff --git a/vendor/github.com/go-openapi/swag/name_lexem.go b/vendor/github.com/go-openapi/swag/name_lexem.go
index aa7f6a9b..8bb64ac3 100644
--- a/vendor/github.com/go-openapi/swag/name_lexem.go
+++ b/vendor/github.com/go-openapi/swag/name_lexem.go
@@ -14,74 +14,80 @@
package swag
-import "unicode"
+import (
+ "unicode"
+ "unicode/utf8"
+)
type (
- nameLexem interface {
- GetUnsafeGoName() string
- GetOriginal() string
- IsInitialism() bool
- }
+ lexemKind uint8
- initialismNameLexem struct {
+ nameLexem struct {
original string
matchedInitialism string
- }
-
- casualNameLexem struct {
- original string
+ kind lexemKind
}
)
-func newInitialismNameLexem(original, matchedInitialism string) *initialismNameLexem {
- return &initialismNameLexem{
+const (
+ lexemKindCasualName lexemKind = iota
+ lexemKindInitialismName
+)
+
+func newInitialismNameLexem(original, matchedInitialism string) nameLexem {
+ return nameLexem{
+ kind: lexemKindInitialismName,
original: original,
matchedInitialism: matchedInitialism,
}
}
-func newCasualNameLexem(original string) *casualNameLexem {
- return &casualNameLexem{
+func newCasualNameLexem(original string) nameLexem {
+ return nameLexem{
+ kind: lexemKindCasualName,
original: original,
}
}
-func (l *initialismNameLexem) GetUnsafeGoName() string {
- return l.matchedInitialism
-}
+func (l nameLexem) GetUnsafeGoName() string {
+ if l.kind == lexemKindInitialismName {
+ return l.matchedInitialism
+ }
+
+ var (
+ first rune
+ rest string
+ )
-func (l *casualNameLexem) GetUnsafeGoName() string {
- var first rune
- var rest string
for i, orig := range l.original {
if i == 0 {
first = orig
continue
}
+
if i > 0 {
rest = l.original[i:]
break
}
}
+
if len(l.original) > 1 {
- return string(unicode.ToUpper(first)) + lower(rest)
+ b := poolOfBuffers.BorrowBuffer(utf8.UTFMax + len(rest))
+ defer func() {
+ poolOfBuffers.RedeemBuffer(b)
+ }()
+ b.WriteRune(unicode.ToUpper(first))
+ b.WriteString(lower(rest))
+ return b.String()
}
return l.original
}
-func (l *initialismNameLexem) GetOriginal() string {
+func (l nameLexem) GetOriginal() string {
return l.original
}
-func (l *casualNameLexem) GetOriginal() string {
- return l.original
-}
-
-func (l *initialismNameLexem) IsInitialism() bool {
- return true
-}
-
-func (l *casualNameLexem) IsInitialism() bool {
- return false
+func (l nameLexem) IsInitialism() bool {
+ return l.kind == lexemKindInitialismName
}
diff --git a/vendor/github.com/go-openapi/swag/post_go19.go b/vendor/github.com/go-openapi/swag/post_go19.go
deleted file mode 100644
index 7c7da9c0..00000000
--- a/vendor/github.com/go-openapi/swag/post_go19.go
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2015 go-swagger maintainers
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build go1.9
-// +build go1.9
-
-package swag
-
-import (
- "sort"
- "sync"
-)
-
-// indexOfInitialisms is a thread-safe implementation of the sorted index of initialisms.
-// Since go1.9, this may be implemented with sync.Map.
-type indexOfInitialisms struct {
- sortMutex *sync.Mutex
- index *sync.Map
-}
-
-func newIndexOfInitialisms() *indexOfInitialisms {
- return &indexOfInitialisms{
- sortMutex: new(sync.Mutex),
- index: new(sync.Map),
- }
-}
-
-func (m *indexOfInitialisms) load(initial map[string]bool) *indexOfInitialisms {
- m.sortMutex.Lock()
- defer m.sortMutex.Unlock()
- for k, v := range initial {
- m.index.Store(k, v)
- }
- return m
-}
-
-func (m *indexOfInitialisms) isInitialism(key string) bool {
- _, ok := m.index.Load(key)
- return ok
-}
-
-func (m *indexOfInitialisms) add(key string) *indexOfInitialisms {
- m.index.Store(key, true)
- return m
-}
-
-func (m *indexOfInitialisms) sorted() (result []string) {
- m.sortMutex.Lock()
- defer m.sortMutex.Unlock()
- m.index.Range(func(key, value interface{}) bool {
- k := key.(string)
- result = append(result, k)
- return true
- })
- sort.Sort(sort.Reverse(byInitialism(result)))
- return
-}
diff --git a/vendor/github.com/go-openapi/swag/pre_go19.go b/vendor/github.com/go-openapi/swag/pre_go19.go
deleted file mode 100644
index 0565db37..00000000
--- a/vendor/github.com/go-openapi/swag/pre_go19.go
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright 2015 go-swagger maintainers
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build !go1.9
-// +build !go1.9
-
-package swag
-
-import (
- "sort"
- "sync"
-)
-
-// indexOfInitialisms is a thread-safe implementation of the sorted index of initialisms.
-// Before go1.9, this may be implemented with a mutex on the map.
-type indexOfInitialisms struct {
- getMutex *sync.Mutex
- index map[string]bool
-}
-
-func newIndexOfInitialisms() *indexOfInitialisms {
- return &indexOfInitialisms{
- getMutex: new(sync.Mutex),
- index: make(map[string]bool, 50),
- }
-}
-
-func (m *indexOfInitialisms) load(initial map[string]bool) *indexOfInitialisms {
- m.getMutex.Lock()
- defer m.getMutex.Unlock()
- for k, v := range initial {
- m.index[k] = v
- }
- return m
-}
-
-func (m *indexOfInitialisms) isInitialism(key string) bool {
- m.getMutex.Lock()
- defer m.getMutex.Unlock()
- _, ok := m.index[key]
- return ok
-}
-
-func (m *indexOfInitialisms) add(key string) *indexOfInitialisms {
- m.getMutex.Lock()
- defer m.getMutex.Unlock()
- m.index[key] = true
- return m
-}
-
-func (m *indexOfInitialisms) sorted() (result []string) {
- m.getMutex.Lock()
- defer m.getMutex.Unlock()
- for k := range m.index {
- result = append(result, k)
- }
- sort.Sort(sort.Reverse(byInitialism(result)))
- return
-}
diff --git a/vendor/github.com/go-openapi/swag/split.go b/vendor/github.com/go-openapi/swag/split.go
index a1825fb7..274727a8 100644
--- a/vendor/github.com/go-openapi/swag/split.go
+++ b/vendor/github.com/go-openapi/swag/split.go
@@ -15,124 +15,269 @@
package swag
import (
+ "bytes"
+ "sync"
"unicode"
+ "unicode/utf8"
)
-var nameReplaceTable = map[rune]string{
- '@': "At ",
- '&': "And ",
- '|': "Pipe ",
- '$': "Dollar ",
- '!': "Bang ",
- '-': "",
- '_': "",
-}
-
type (
splitter struct {
- postSplitInitialismCheck bool
initialisms []string
+ initialismsRunes [][]rune
+ initialismsUpperCased [][]rune // initialisms cached in their trimmed, upper-cased version
+ postSplitInitialismCheck bool
}
- splitterOption func(*splitter) *splitter
+ splitterOption func(*splitter)
+
+ initialismMatch struct {
+ body []rune
+ start, end int
+ complete bool
+ }
+ initialismMatches []initialismMatch
)
-// split calls the splitter; splitter provides more control and post options
-func split(str string) []string {
- lexems := newSplitter().split(str)
- result := make([]string, 0, len(lexems))
+type (
+ // memory pools of temporary objects.
+ //
+ // These are used to recycle temporarily allocated objects
+ // and relieve the GC from undue pressure.
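+ //
+ // The usage pattern is always Borrow / defer Redeem, e.g.:
+ //
+ //	b := poolOfBuffers.BorrowBuffer(size)
+ //	defer poolOfBuffers.RedeemBuffer(b)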
- for _, lexem := range lexems {
+ matchesPool struct {
+ *sync.Pool
+ }
+
+ buffersPool struct {
+ *sync.Pool
+ }
+
+ lexemsPool struct {
+ *sync.Pool
+ }
+
+ splittersPool struct {
+ *sync.Pool
+ }
+)
+
+var (
+ // poolOfMatches holds temporary slices for recycling during the initialism match process
+ poolOfMatches = matchesPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := make(initialismMatches, 0, maxAllocMatches)
+
+ return &s
+ },
+ },
+ }
+
+ poolOfBuffers = buffersPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ return new(bytes.Buffer)
+ },
+ },
+ }
+
+ poolOfLexems = lexemsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := make([]nameLexem, 0, maxAllocMatches)
+
+ return &s
+ },
+ },
+ }
+
+ poolOfSplitters = splittersPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := newSplitter()
+
+ return &s
+ },
+ },
+ }
+)
+
+// nameReplaceTable finds a word representation for special characters.
+func nameReplaceTable(r rune) (string, bool) {
+ switch r {
+ case '@':
+ return "At ", true
+ case '&':
+ return "And ", true
+ case '|':
+ return "Pipe ", true
+ case '$':
+ return "Dollar ", true
+ case '!':
+ return "Bang ", true
+ case '-':
+ return "", true
+ case '_':
+ return "", true
+ default:
+ return "", false
+ }
+}
+
+// split calls the splitter.
+//
+// Use newSplitter for more control and options
+func split(str string) []string {
+ s := poolOfSplitters.BorrowSplitter()
+ lexems := s.split(str)
+ result := make([]string, 0, len(*lexems))
+
+ for _, lexem := range *lexems {
result = append(result, lexem.GetOriginal())
}
+ poolOfLexems.RedeemLexems(lexems)
+ poolOfSplitters.RedeemSplitter(s)
return result
}
-func (s *splitter) split(str string) []nameLexem {
- return s.toNameLexems(str)
-}
-
-func newSplitter(options ...splitterOption) *splitter {
- splitter := &splitter{
+func newSplitter(options ...splitterOption) splitter {
+ s := splitter{
postSplitInitialismCheck: false,
initialisms: initialisms,
+ initialismsRunes: initialismsRunes,
+ initialismsUpperCased: initialismsUpperCased,
}
for _, option := range options {
- splitter = option(splitter)
+ option(&s)
}
- return splitter
-}
-
-// withPostSplitInitialismCheck allows to catch initialisms after main split process
-func withPostSplitInitialismCheck(s *splitter) *splitter {
- s.postSplitInitialismCheck = true
return s
}
-type (
- initialismMatch struct {
- start, end int
- body []rune
- complete bool
- }
- initialismMatches []*initialismMatch
-)
+// withPostSplitInitialismCheck allows catching initialisms after the main split process
+func withPostSplitInitialismCheck(s *splitter) {
+ s.postSplitInitialismCheck = true
+}
-func (s *splitter) toNameLexems(name string) []nameLexem {
+func (p matchesPool) BorrowMatches() *initialismMatches {
+ s := p.Get().(*initialismMatches)
+ *s = (*s)[:0] // reset slice, keep allocated capacity
+
+ return s
+}
+
+func (p buffersPool) BorrowBuffer(size int) *bytes.Buffer {
+ s := p.Get().(*bytes.Buffer)
+ s.Reset()
+
+ if s.Cap() < size {
+ s.Grow(size)
+ }
+
+ return s
+}
+
+func (p lexemsPool) BorrowLexems() *[]nameLexem {
+ s := p.Get().(*[]nameLexem)
+ *s = (*s)[:0] // reset slice, keep allocated capacity
+
+ return s
+}
+
+func (p splittersPool) BorrowSplitter(options ...splitterOption) *splitter {
+ s := p.Get().(*splitter)
+ s.postSplitInitialismCheck = false // reset options
+ for _, apply := range options {
+ apply(s)
+ }
+
+ return s
+}
+
+func (p matchesPool) RedeemMatches(s *initialismMatches) {
+ p.Put(s)
+}
+
+func (p buffersPool) RedeemBuffer(s *bytes.Buffer) {
+ p.Put(s)
+}
+
+func (p lexemsPool) RedeemLexems(s *[]nameLexem) {
+ p.Put(s)
+}
+
+func (p splittersPool) RedeemSplitter(s *splitter) {
+ p.Put(s)
+}
+
+func (m initialismMatch) isZero() bool {
+ return m.start == 0 && m.end == 0
+}
+
+func (s splitter) split(name string) *[]nameLexem {
nameRunes := []rune(name)
matches := s.gatherInitialismMatches(nameRunes)
+ if matches == nil {
+ return poolOfLexems.BorrowLexems()
+ }
+
return s.mapMatchesToNameLexems(nameRunes, matches)
}
-func (s *splitter) gatherInitialismMatches(nameRunes []rune) initialismMatches {
- matches := make(initialismMatches, 0)
+func (s splitter) gatherInitialismMatches(nameRunes []rune) *initialismMatches {
+ var matches *initialismMatches
for currentRunePosition, currentRune := range nameRunes {
- newMatches := make(initialismMatches, 0, len(matches))
+ // recycle these allocations as we loop over runes
+ // with such recycling, only 2 slices should be allocated per call
+ // instead of O(n).
+ newMatches := poolOfMatches.BorrowMatches()
// check current initialism matches
- for _, match := range matches {
- if keepCompleteMatch := match.complete; keepCompleteMatch {
- newMatches = append(newMatches, match)
- continue
- }
-
- // drop failed match
- currentMatchRune := match.body[currentRunePosition-match.start]
- if !s.initialismRuneEqual(currentMatchRune, currentRune) {
- continue
- }
-
- // try to complete ongoing match
- if currentRunePosition-match.start == len(match.body)-1 {
- // we are close; the next step is to check the symbol ahead
- // if it is a small letter, then it is not the end of match
- // but beginning of the next word
-
- if currentRunePosition < len(nameRunes)-1 {
- nextRune := nameRunes[currentRunePosition+1]
- if newWord := unicode.IsLower(nextRune); newWord {
- // oh ok, it was the start of a new word
- continue
- }
+ if matches != nil { // skip first iteration
+ for _, match := range *matches {
+ if keepCompleteMatch := match.complete; keepCompleteMatch {
+ *newMatches = append(*newMatches, match)
+ continue
}
- match.complete = true
- match.end = currentRunePosition
- }
+ // drop failed match
+ currentMatchRune := match.body[currentRunePosition-match.start]
+ if currentMatchRune != currentRune {
+ continue
+ }
- newMatches = append(newMatches, match)
+ // try to complete ongoing match
+ if currentRunePosition-match.start == len(match.body)-1 {
+ // we are close; the next step is to check the symbol ahead
+ // if it is a small letter, then it is not the end of match
+ // but beginning of the next word
+
+ if currentRunePosition < len(nameRunes)-1 {
+ nextRune := nameRunes[currentRunePosition+1]
+ if newWord := unicode.IsLower(nextRune); newWord {
+ // oh ok, it was the start of a new word
+ continue
+ }
+ }
+
+ match.complete = true
+ match.end = currentRunePosition
+ }
+
+ *newMatches = append(*newMatches, match)
+ }
}
// check for new initialism matches
- for _, initialism := range s.initialisms {
- initialismRunes := []rune(initialism)
- if s.initialismRuneEqual(initialismRunes[0], currentRune) {
- newMatches = append(newMatches, &initialismMatch{
+ for i := range s.initialisms {
+ initialismRunes := s.initialismsRunes[i]
+ if initialismRunes[0] == currentRune {
+ *newMatches = append(*newMatches, initialismMatch{
start: currentRunePosition,
body: initialismRunes,
complete: false,
@@ -140,24 +285,28 @@ func (s *splitter) gatherInitialismMatches(nameRunes []rune) initialismMatches {
}
}
+ if matches != nil {
+ poolOfMatches.RedeemMatches(matches)
+ }
matches = newMatches
}
+ // up to the caller to redeem this last slice
return matches
}
-func (s *splitter) mapMatchesToNameLexems(nameRunes []rune, matches initialismMatches) []nameLexem {
- nameLexems := make([]nameLexem, 0)
+func (s splitter) mapMatchesToNameLexems(nameRunes []rune, matches *initialismMatches) *[]nameLexem {
+ nameLexems := poolOfLexems.BorrowLexems()
- var lastAcceptedMatch *initialismMatch
- for _, match := range matches {
+ var lastAcceptedMatch initialismMatch
+ for _, match := range *matches {
if !match.complete {
continue
}
- if firstMatch := lastAcceptedMatch == nil; firstMatch {
- nameLexems = append(nameLexems, s.breakCasualString(nameRunes[:match.start])...)
- nameLexems = append(nameLexems, s.breakInitialism(string(match.body)))
+ if firstMatch := lastAcceptedMatch.isZero(); firstMatch {
+ s.appendBrokenDownCasualString(nameLexems, nameRunes[:match.start])
+ *nameLexems = append(*nameLexems, s.breakInitialism(string(match.body)))
lastAcceptedMatch = match
@@ -169,63 +318,66 @@ func (s *splitter) mapMatchesToNameLexems(nameRunes []rune, matches initialismMa
}
middle := nameRunes[lastAcceptedMatch.end+1 : match.start]
- nameLexems = append(nameLexems, s.breakCasualString(middle)...)
- nameLexems = append(nameLexems, s.breakInitialism(string(match.body)))
+ s.appendBrokenDownCasualString(nameLexems, middle)
+ *nameLexems = append(*nameLexems, s.breakInitialism(string(match.body)))
lastAcceptedMatch = match
}
// we have not found any accepted matches
- if lastAcceptedMatch == nil {
- return s.breakCasualString(nameRunes)
+ if lastAcceptedMatch.isZero() {
+ *nameLexems = (*nameLexems)[:0]
+ s.appendBrokenDownCasualString(nameLexems, nameRunes)
+ } else if lastAcceptedMatch.end+1 != len(nameRunes) {
+ rest := nameRunes[lastAcceptedMatch.end+1:]
+ s.appendBrokenDownCasualString(nameLexems, rest)
}
- if lastAcceptedMatch.end+1 != len(nameRunes) {
- rest := nameRunes[lastAcceptedMatch.end+1:]
- nameLexems = append(nameLexems, s.breakCasualString(rest)...)
- }
+ poolOfMatches.RedeemMatches(matches)
return nameLexems
}
-func (s *splitter) initialismRuneEqual(a, b rune) bool {
- return a == b
-}
-
-func (s *splitter) breakInitialism(original string) nameLexem {
+func (s splitter) breakInitialism(original string) nameLexem {
return newInitialismNameLexem(original, original)
}
-func (s *splitter) breakCasualString(str []rune) []nameLexem {
- segments := make([]nameLexem, 0)
- currentSegment := ""
+func (s splitter) appendBrokenDownCasualString(segments *[]nameLexem, str []rune) {
+ currentSegment := poolOfBuffers.BorrowBuffer(len(str)) // unlike strings.Builder, bytes.Buffer initial storage can be reused
+ defer func() {
+ poolOfBuffers.RedeemBuffer(currentSegment)
+ }()
addCasualNameLexem := func(original string) {
- segments = append(segments, newCasualNameLexem(original))
+ *segments = append(*segments, newCasualNameLexem(original))
}
addInitialismNameLexem := func(original, match string) {
- segments = append(segments, newInitialismNameLexem(original, match))
+ *segments = append(*segments, newInitialismNameLexem(original, match))
}
- addNameLexem := func(original string) {
- if s.postSplitInitialismCheck {
- for _, initialism := range s.initialisms {
- if upper(initialism) == upper(original) {
- addInitialismNameLexem(original, initialism)
+ var addNameLexem func(string)
+ if s.postSplitInitialismCheck {
+ addNameLexem = func(original string) {
+ for i := range s.initialisms {
+ if isEqualFoldIgnoreSpace(s.initialismsUpperCased[i], original) {
+ addInitialismNameLexem(original, s.initialisms[i])
+
return
}
}
- }
- addCasualNameLexem(original)
+ addCasualNameLexem(original)
+ }
+ } else {
+ addNameLexem = addCasualNameLexem
}
- for _, rn := range string(str) {
- if replace, found := nameReplaceTable[rn]; found {
- if currentSegment != "" {
- addNameLexem(currentSegment)
- currentSegment = ""
+ for _, rn := range str {
+ if replace, found := nameReplaceTable(rn); found {
+ if currentSegment.Len() > 0 {
+ addNameLexem(currentSegment.String())
+ currentSegment.Reset()
}
if replace != "" {
@@ -236,27 +388,121 @@ func (s *splitter) breakCasualString(str []rune) []nameLexem {
}
if !unicode.In(rn, unicode.L, unicode.M, unicode.N, unicode.Pc) {
- if currentSegment != "" {
- addNameLexem(currentSegment)
- currentSegment = ""
+ if currentSegment.Len() > 0 {
+ addNameLexem(currentSegment.String())
+ currentSegment.Reset()
}
continue
}
if unicode.IsUpper(rn) {
- if currentSegment != "" {
- addNameLexem(currentSegment)
+ if currentSegment.Len() > 0 {
+ addNameLexem(currentSegment.String())
}
- currentSegment = ""
+ currentSegment.Reset()
}
- currentSegment += string(rn)
+ currentSegment.WriteRune(rn)
}
- if currentSegment != "" {
- addNameLexem(currentSegment)
+ if currentSegment.Len() > 0 {
+ addNameLexem(currentSegment.String())
}
-
- return segments
+}
+
+// isEqualFoldIgnoreSpace is the same as strings.EqualFold, but
+// it ignores leading and trailing blank spaces in the compared
+// string.
+//
+// base is assumed to be composed of upper-cased runes, and be already
+// trimmed.
+//
+// This code is heavily inspired by strings.EqualFold.
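+//
+// For example, isEqualFoldIgnoreSpace([]rune("HTTP"), " http ") is true.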
+func isEqualFoldIgnoreSpace(base []rune, str string) bool {
+ var i, baseIndex int
+ // equivalent to b := []byte(str), but without data copy
+ b := hackStringBytes(str)
+
+ for i < len(b) {
+ if c := b[i]; c < utf8.RuneSelf {
+ // fast path for ASCII
+ if c != ' ' && c != '\t' {
+ break
+ }
+ i++
+
+ continue
+ }
+
+ // unicode case
+ r, size := utf8.DecodeRune(b[i:])
+ if !unicode.IsSpace(r) {
+ break
+ }
+ i += size
+ }
+
+ if i >= len(b) {
+ return len(base) == 0
+ }
+
+ for _, baseRune := range base {
+ if i >= len(b) {
+ break
+ }
+
+ if c := b[i]; c < utf8.RuneSelf {
+ // single byte rune case (ASCII)
+ if baseRune >= utf8.RuneSelf {
+ return false
+ }
+
+ baseChar := byte(baseRune)
+ if c != baseChar &&
+ !('a' <= c && c <= 'z' && c-'a'+'A' == baseChar) {
+ return false
+ }
+
+ baseIndex++
+ i++
+
+ continue
+ }
+
+ // unicode case
+ r, size := utf8.DecodeRune(b[i:])
+ if unicode.ToUpper(r) != baseRune {
+ return false
+ }
+ baseIndex++
+ i += size
+ }
+
+ if baseIndex != len(base) {
+ return false
+ }
+
+ // all passed: now we should only have blanks
+ for i < len(b) {
+ if c := b[i]; c < utf8.RuneSelf {
+ // fast path for ASCII
+ if c != ' ' && c != '\t' {
+ return false
+ }
+ i++
+
+ continue
+ }
+
+ // unicode case
+ r, size := utf8.DecodeRune(b[i:])
+ if !unicode.IsSpace(r) {
+ return false
+ }
+
+ i += size
+ }
+
+ return true
}
diff --git a/vendor/github.com/go-openapi/swag/string_bytes.go b/vendor/github.com/go-openapi/swag/string_bytes.go
new file mode 100644
index 00000000..90745d5c
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/string_bytes.go
@@ -0,0 +1,8 @@
+package swag
+
+import "unsafe"
+
+// hackStringBytes returns the (unsafe) underlying bytes slice of a string.
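+// The result aliases the string's backing memory and must never be mutated.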
+func hackStringBytes(str string) []byte {
+ return unsafe.Slice(unsafe.StringData(str), len(str))
+}
diff --git a/vendor/github.com/go-openapi/swag/util.go b/vendor/github.com/go-openapi/swag/util.go
index d971fbe3..5051401c 100644
--- a/vendor/github.com/go-openapi/swag/util.go
+++ b/vendor/github.com/go-openapi/swag/util.go
@@ -18,76 +18,25 @@ import (
"reflect"
"strings"
"unicode"
+ "unicode/utf8"
)
-// commonInitialisms are common acronyms that are kept as whole uppercased words.
-var commonInitialisms *indexOfInitialisms
-
-// initialisms is a slice of sorted initialisms
-var initialisms []string
-
-var isInitialism func(string) bool
-
// GoNamePrefixFunc sets an optional rule to prefix go names
// which do not start with a letter.
//
+// The prefix function is assumed to return a string that starts with an upper case letter.
+//
// e.g. to help convert "123" into "{prefix}123"
//
// The default is to prefix with "X"
var GoNamePrefixFunc func(string) string
-func init() {
- // Taken from https://github.com/golang/lint/blob/3390df4df2787994aea98de825b964ac7944b817/lint.go#L732-L769
- var configuredInitialisms = map[string]bool{
- "ACL": true,
- "API": true,
- "ASCII": true,
- "CPU": true,
- "CSS": true,
- "DNS": true,
- "EOF": true,
- "GUID": true,
- "HTML": true,
- "HTTPS": true,
- "HTTP": true,
- "ID": true,
- "IP": true,
- "IPv4": true,
- "IPv6": true,
- "JSON": true,
- "LHS": true,
- "OAI": true,
- "QPS": true,
- "RAM": true,
- "RHS": true,
- "RPC": true,
- "SLA": true,
- "SMTP": true,
- "SQL": true,
- "SSH": true,
- "TCP": true,
- "TLS": true,
- "TTL": true,
- "UDP": true,
- "UI": true,
- "UID": true,
- "UUID": true,
- "URI": true,
- "URL": true,
- "UTF8": true,
- "VM": true,
- "XML": true,
- "XMPP": true,
- "XSRF": true,
- "XSS": true,
+func prefixFunc(name, in string) string {
+ if GoNamePrefixFunc == nil {
+ return "X" + in
}
- // a thread-safe index of initialisms
- commonInitialisms = newIndexOfInitialisms().load(configuredInitialisms)
- initialisms = commonInitialisms.sorted()
-
- // a test function
- isInitialism = commonInitialisms.isInitialism
+ return GoNamePrefixFunc(name) + in
}
const (
@@ -156,25 +105,9 @@ func SplitByFormat(data, format string) []string {
return result
}
-type byInitialism []string
-
-func (s byInitialism) Len() int {
- return len(s)
-}
-func (s byInitialism) Swap(i, j int) {
- s[i], s[j] = s[j], s[i]
-}
-func (s byInitialism) Less(i, j int) bool {
- if len(s[i]) != len(s[j]) {
- return len(s[i]) < len(s[j])
- }
-
- return strings.Compare(s[i], s[j]) > 0
-}
-
 // Removes leading and trailing whitespace
func trim(str string) string {
- return strings.Trim(str, " ")
+ return strings.TrimSpace(str)
}
// Shortcut to strings.ToUpper()
@@ -188,15 +121,20 @@ func lower(str string) string {
}
// Camelize an uppercased word
-func Camelize(word string) (camelized string) {
+func Camelize(word string) string {
+ camelized := poolOfBuffers.BorrowBuffer(len(word))
+ defer func() {
+ poolOfBuffers.RedeemBuffer(camelized)
+ }()
+
for pos, ru := range []rune(word) {
if pos > 0 {
- camelized += string(unicode.ToLower(ru))
+ camelized.WriteRune(unicode.ToLower(ru))
} else {
- camelized += string(unicode.ToUpper(ru))
+ camelized.WriteRune(unicode.ToUpper(ru))
}
}
- return
+ return camelized.String()
}
// ToFileName lowercases and underscores a go type name
@@ -224,33 +162,40 @@ func ToCommandName(name string) string {
// ToHumanNameLower represents a code name as a human series of words
func ToHumanNameLower(name string) string {
- in := newSplitter(withPostSplitInitialismCheck).split(name)
- out := make([]string, 0, len(in))
+ s := poolOfSplitters.BorrowSplitter(withPostSplitInitialismCheck)
+ in := s.split(name)
+ poolOfSplitters.RedeemSplitter(s)
+ out := make([]string, 0, len(*in))
- for _, w := range in {
+ for _, w := range *in {
if !w.IsInitialism() {
out = append(out, lower(w.GetOriginal()))
} else {
- out = append(out, w.GetOriginal())
+ out = append(out, trim(w.GetOriginal()))
}
}
+ poolOfLexems.RedeemLexems(in)
return strings.Join(out, " ")
}
// ToHumanNameTitle represents a code name as a human series of words with the first letters titleized
func ToHumanNameTitle(name string) string {
- in := newSplitter(withPostSplitInitialismCheck).split(name)
+ s := poolOfSplitters.BorrowSplitter(withPostSplitInitialismCheck)
+ in := s.split(name)
+ poolOfSplitters.RedeemSplitter(s)
- out := make([]string, 0, len(in))
- for _, w := range in {
- original := w.GetOriginal()
+ out := make([]string, 0, len(*in))
+ for _, w := range *in {
+ original := trim(w.GetOriginal())
if !w.IsInitialism() {
out = append(out, Camelize(original))
} else {
out = append(out, original)
}
}
+ poolOfLexems.RedeemLexems(in)
+
return strings.Join(out, " ")
}
@@ -264,7 +209,7 @@ func ToJSONName(name string) string {
out = append(out, lower(w))
continue
}
- out = append(out, Camelize(w))
+ out = append(out, Camelize(trim(w)))
}
return strings.Join(out, "")
}
@@ -283,35 +228,70 @@ func ToVarName(name string) string {
// ToGoName translates a swagger name which can be underscored or camel cased to a name that golint likes
func ToGoName(name string) string {
- lexems := newSplitter(withPostSplitInitialismCheck).split(name)
+ s := poolOfSplitters.BorrowSplitter(withPostSplitInitialismCheck)
+ lexems := s.split(name)
+ poolOfSplitters.RedeemSplitter(s)
+ defer func() {
+ poolOfLexems.RedeemLexems(lexems)
+ }()
+ lexemes := *lexems
- result := ""
- for _, lexem := range lexems {
+ if len(lexemes) == 0 {
+ return ""
+ }
+
+ result := poolOfBuffers.BorrowBuffer(len(name))
+ defer func() {
+ poolOfBuffers.RedeemBuffer(result)
+ }()
+
+ // check if not starting with a letter, upper case
+ firstPart := lexemes[0].GetUnsafeGoName()
+ if lexemes[0].IsInitialism() {
+ firstPart = upper(firstPart)
+ }
+
+ if c := firstPart[0]; c < utf8.RuneSelf {
+ // ASCII
+ switch {
+ case 'A' <= c && c <= 'Z':
+ result.WriteString(firstPart)
+ case 'a' <= c && c <= 'z':
+ result.WriteByte(c - 'a' + 'A')
+ result.WriteString(firstPart[1:])
+ default:
+ result.WriteString(prefixFunc(name, firstPart))
+ // NOTE: we no longer check whether prefixFunc returns a string that starts
+ // with an upper case letter; this is assumed to always be the case
+ }
+ } else {
+ // unicode
+ firstRune, _ := utf8.DecodeRuneInString(firstPart)
+ switch {
+ case !unicode.IsLetter(firstRune):
+ result.WriteString(prefixFunc(name, firstPart))
+ case !unicode.IsUpper(firstRune):
+ result.WriteString(prefixFunc(name, firstPart))
+ /*
+ result.WriteRune(unicode.ToUpper(firstRune))
+ result.WriteString(firstPart[offset:])
+ */
+ default:
+ result.WriteString(firstPart)
+ }
+ }
+
+ for _, lexem := range lexemes[1:] {
goName := lexem.GetUnsafeGoName()
// to support old behavior
if lexem.IsInitialism() {
goName = upper(goName)
}
- result += goName
+ result.WriteString(goName)
}
- if len(result) > 0 {
- // Only prefix with X when the first character isn't an ascii letter
- first := []rune(result)[0]
- if !unicode.IsLetter(first) || (first > unicode.MaxASCII && !unicode.IsUpper(first)) {
- if GoNamePrefixFunc == nil {
- return "X" + result
- }
- result = GoNamePrefixFunc(name) + result
- }
- first = []rune(result)[0]
- if unicode.IsLetter(first) && !unicode.IsUpper(first) {
- result = string(append([]rune{unicode.ToUpper(first)}, []rune(result)[1:]...))
- }
- }
-
- return result
+ return result.String()
}
// ContainsStrings searches a slice of strings for a case-sensitive match
@@ -343,7 +323,7 @@ type zeroable interface {
func IsZero(data interface{}) bool {
v := reflect.ValueOf(data)
// check for nil data
- switch v.Kind() {
+ switch v.Kind() { //nolint:exhaustive
case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
if v.IsNil() {
return true
@@ -356,7 +336,7 @@ func IsZero(data interface{}) bool {
}
// continue with slightly more complex reflection
- switch v.Kind() {
+ switch v.Kind() { //nolint:exhaustive
case reflect.String:
return v.Len() == 0
case reflect.Bool:
@@ -376,16 +356,6 @@ func IsZero(data interface{}) bool {
}
}
-// AddInitialisms add additional initialisms
-func AddInitialisms(words ...string) {
- for _, word := range words {
- // commonInitialisms[upper(word)] = true
- commonInitialisms.add(upper(word))
- }
- // sort again
- initialisms = commonInitialisms.sorted()
-}
-
// CommandLineOptionsGroup represents a group of user-defined command line options
type CommandLineOptionsGroup struct {
ShortDescription string
diff --git a/vendor/github.com/go-openapi/swag/yaml.go b/vendor/github.com/go-openapi/swag/yaml.go
index f09ee609..57534653 100644
--- a/vendor/github.com/go-openapi/swag/yaml.go
+++ b/vendor/github.com/go-openapi/swag/yaml.go
@@ -18,6 +18,8 @@ import (
"encoding/json"
"fmt"
"path/filepath"
+ "reflect"
+ "sort"
"strconv"
"github.com/mailru/easyjson/jlexer"
@@ -48,7 +50,7 @@ func BytesToYAMLDoc(data []byte) (interface{}, error) {
return nil, err
}
if document.Kind != yaml.DocumentNode || len(document.Content) != 1 || document.Content[0].Kind != yaml.MappingNode {
- return nil, fmt.Errorf("only YAML documents that are objects are supported")
+ return nil, fmt.Errorf("only YAML documents that are objects are supported: %w", ErrYAML)
}
return &document, nil
}
@@ -66,31 +68,32 @@ func yamlNode(root *yaml.Node) (interface{}, error) {
case yaml.AliasNode:
return yamlNode(root.Alias)
default:
- return nil, fmt.Errorf("unsupported YAML node type: %v", root.Kind)
+ return nil, fmt.Errorf("unsupported YAML node type: %v: %w", root.Kind, ErrYAML)
}
}
func yamlDocument(node *yaml.Node) (interface{}, error) {
if len(node.Content) != 1 {
- return nil, fmt.Errorf("unexpected YAML Document node content length: %d", len(node.Content))
+ return nil, fmt.Errorf("unexpected YAML Document node content length: %d: %w", len(node.Content), ErrYAML)
}
return yamlNode(node.Content[0])
}
func yamlMapping(node *yaml.Node) (interface{}, error) {
- m := make(JSONMapSlice, len(node.Content)/2)
+ const sensibleAllocDivider = 2
+ m := make(JSONMapSlice, len(node.Content)/sensibleAllocDivider)
var j int
for i := 0; i < len(node.Content); i += 2 {
var nmi JSONMapItem
k, err := yamlStringScalarC(node.Content[i])
if err != nil {
- return nil, fmt.Errorf("unable to decode YAML map key: %w", err)
+ return nil, fmt.Errorf("unable to decode YAML map key: %w: %w", err, ErrYAML)
}
nmi.Key = k
v, err := yamlNode(node.Content[i+1])
if err != nil {
- return nil, fmt.Errorf("unable to process YAML map value for key %q: %w", k, err)
+ return nil, fmt.Errorf("unable to process YAML map value for key %q: %w: %w", k, err, ErrYAML)
}
nmi.Value = v
m[j] = nmi
@@ -106,7 +109,7 @@ func yamlSequence(node *yaml.Node) (interface{}, error) {
v, err := yamlNode(node.Content[i])
if err != nil {
- return nil, fmt.Errorf("unable to decode YAML sequence value: %w", err)
+ return nil, fmt.Errorf("unable to decode YAML sequence value: %w: %w", err, ErrYAML)
}
s = append(s, v)
}
@@ -129,39 +132,39 @@ func yamlScalar(node *yaml.Node) (interface{}, error) {
case yamlBoolScalar:
b, err := strconv.ParseBool(node.Value)
if err != nil {
- return nil, fmt.Errorf("unable to process scalar node. Got %q. Expecting bool content: %w", node.Value, err)
+ return nil, fmt.Errorf("unable to process scalar node. Got %q. Expecting bool content: %w: %w", node.Value, err, ErrYAML)
}
return b, nil
case yamlIntScalar:
i, err := strconv.ParseInt(node.Value, 10, 64)
if err != nil {
- return nil, fmt.Errorf("unable to process scalar node. Got %q. Expecting integer content: %w", node.Value, err)
+ return nil, fmt.Errorf("unable to process scalar node. Got %q. Expecting integer content: %w: %w", node.Value, err, ErrYAML)
}
return i, nil
case yamlFloatScalar:
f, err := strconv.ParseFloat(node.Value, 64)
if err != nil {
- return nil, fmt.Errorf("unable to process scalar node. Got %q. Expecting float content: %w", node.Value, err)
+ return nil, fmt.Errorf("unable to process scalar node. Got %q. Expecting float content: %w: %w", node.Value, err, ErrYAML)
}
return f, nil
case yamlTimestamp:
return node.Value, nil
case yamlNull:
- return nil, nil
+ return nil, nil //nolint:nilnil
default:
- return nil, fmt.Errorf("YAML tag %q is not supported", node.LongTag())
+ return nil, fmt.Errorf("YAML tag %q is not supported: %w", node.LongTag(), ErrYAML)
}
}
func yamlStringScalarC(node *yaml.Node) (string, error) {
if node.Kind != yaml.ScalarNode {
- return "", fmt.Errorf("expecting a string scalar but got %q", node.Kind)
+ return "", fmt.Errorf("expecting a string scalar but got %q: %w", node.Kind, ErrYAML)
}
switch node.LongTag() {
case yamlStringScalar, yamlIntScalar, yamlFloatScalar:
return node.Value, nil
default:
- return "", fmt.Errorf("YAML tag %q is not supported as map key", node.LongTag())
+ return "", fmt.Errorf("YAML tag %q is not supported as map key: %w", node.LongTag(), ErrYAML)
}
}
@@ -245,7 +248,27 @@ func (s JSONMapSlice) MarshalYAML() (interface{}, error) {
return yaml.Marshal(&n)
}
+func isNil(input interface{}) bool {
+ if input == nil {
+ return true
+ }
+ kind := reflect.TypeOf(input).Kind()
+ switch kind { //nolint:exhaustive
+ case reflect.Ptr, reflect.Map, reflect.Slice, reflect.Chan:
+ return reflect.ValueOf(input).IsNil()
+ default:
+ return false
+ }
+}
+
func json2yaml(item interface{}) (*yaml.Node, error) {
+ if isNil(item) {
+ return &yaml.Node{
+ Kind: yaml.ScalarNode,
+ Value: "null",
+ }, nil
+ }
+
switch val := item.(type) {
case JSONMapSlice:
var n yaml.Node
@@ -265,7 +288,14 @@ func json2yaml(item interface{}) (*yaml.Node, error) {
case map[string]interface{}:
var n yaml.Node
n.Kind = yaml.MappingNode
- for k, v := range val {
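+ // iterate over the keys in sorted order: Go map iteration order is
+ // randomized, and sorting keeps the emitted YAML mapping deterministic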
+ keys := make([]string, 0, len(val))
+ for k := range val {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+
+ for _, k := range keys {
+ v := val[k]
childNode, err := json2yaml(v)
if err != nil {
return nil, err
@@ -318,8 +348,9 @@ func json2yaml(item interface{}) (*yaml.Node, error) {
Tag: yamlBoolScalar,
Value: strconv.FormatBool(val),
}, nil
+ default:
+ return nil, fmt.Errorf("unhandled type: %T: %w", val, ErrYAML)
}
- return nil, nil
}
// JSONMapItem represents the value of a key in a JSON object held by JSONMapSlice
@@ -385,7 +416,7 @@ func transformData(input interface{}) (out interface{}, err error) {
case int64:
return strconv.FormatInt(k, 10), nil
default:
- return "", fmt.Errorf("unexpected map key type, got: %T", k)
+ return "", fmt.Errorf("unexpected map key type, got: %T: %w", k, ErrYAML)
}
}
diff --git a/vendor/github.com/go-openapi/validate/.golangci.yml b/vendor/github.com/go-openapi/validate/.golangci.yml
index 81818ca6..22f8d21c 100644
--- a/vendor/github.com/go-openapi/validate/.golangci.yml
+++ b/vendor/github.com/go-openapi/validate/.golangci.yml
@@ -1,12 +1,14 @@
linters-settings:
govet:
check-shadowing: true
+ golint:
+ min-confidence: 0
gocyclo:
- min-complexity: 50
+ min-complexity: 45
maligned:
suggest-new: true
dupl:
- threshold: 100
+ threshold: 200
goconst:
min-len: 2
min-occurrences: 3
@@ -15,36 +17,45 @@ linters:
enable-all: true
disable:
- maligned
+ - unparam
- lll
+ - gochecknoinits
+ - gochecknoglobals
+ - funlen
- godox
- gocognit
- whitespace
- wsl
- - funlen
- - gochecknoglobals
- - gochecknoinits
- - scopelint
- wrapcheck
- - exhaustivestruct
- - exhaustive
- - nlreturn
- testpackage
- - gci
- - gofumpt
- - goerr113
+ - nlreturn
- gomnd
- - tparallel
+ - exhaustivestruct
+ - goerr113
+ - errorlint
- nestif
- godot
- - tparallel
+ - gofumpt
- paralleltest
- - cyclop # because we have gocyclo already
- # TODO: review the linters below. We disabled them to make the CI pass first.
- - ireturn
- - varnamelen
- - forcetypeassert
+ - tparallel
- thelper
- # Disable deprecated linters.
- # They will be removed from golangci-lint in future.
+ - ifshort
+ - exhaustruct
+ - varnamelen
+ - gci
+ - depguard
+ - errchkjson
+ - inamedparam
+ - nonamedreturns
+ - musttag
+ - ireturn
+ - forcetypeassert
+ - cyclop
+ # deprecated linters
+ - deadcode
- interfacer
- - golint
\ No newline at end of file
+ - scopelint
+ - varcheck
+ - structcheck
+ - golint
+ - nosnakecase
diff --git a/vendor/github.com/go-openapi/validate/BENCHMARK.md b/vendor/github.com/go-openapi/validate/BENCHMARK.md
new file mode 100644
index 00000000..79cf6a07
--- /dev/null
+++ b/vendor/github.com/go-openapi/validate/BENCHMARK.md
@@ -0,0 +1,31 @@
+# Benchmark
+
+Validating the Kubernetes Swagger API
+
+## v0.22.6: 60,000,000 allocs
+```
+goos: linux
+goarch: amd64
+pkg: github.com/go-openapi/validate
+cpu: AMD Ryzen 7 5800X 8-Core Processor
+Benchmark_KubernetesSpec/validating_kubernetes_API-16 1 8549863982 ns/op 7067424936 B/op 59583275 allocs/op
+```
+
+## After the refactoring PR: minor but noticeable improvements: 25,000,000 allocs
+```
+go test -bench Spec
+goos: linux
+goarch: amd64
+pkg: github.com/go-openapi/validate
+cpu: AMD Ryzen 7 5800X 8-Core Processor
+Benchmark_KubernetesSpec/validating_kubernetes_API-16 1 4064535557 ns/op 3379715592 B/op 25320330 allocs/op
+```
+
+## After the GC-pressure reduction PR: 17,000,000 allocs
+```
+goos: linux
+goarch: amd64
+pkg: github.com/go-openapi/validate
+cpu: AMD Ryzen 7 5800X 8-Core Processor
+Benchmark_KubernetesSpec/validating_kubernetes_API-16 1 3758414145 ns/op 2593881496 B/op 17111373 allocs/op
+```
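+
+A minimal sketch of the benchmarked operation (package paths per go-openapi; the
+fixture location is an assumption):
+
+```go
+import (
+	"github.com/go-openapi/loads"
+	"github.com/go-openapi/strfmt"
+	"github.com/go-openapi/validate"
+)
+
+func validateK8sSpec() error {
+	doc, err := loads.Spec("fixtures/k8s/swagger.json") // hypothetical fixture path
+	if err != nil {
+		return err
+	}
+	return validate.Spec(doc, strfmt.Default)
+}
+```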
diff --git a/vendor/github.com/go-openapi/validate/README.md b/vendor/github.com/go-openapi/validate/README.md
index ea2d68cb..e8e1bb21 100644
--- a/vendor/github.com/go-openapi/validate/README.md
+++ b/vendor/github.com/go-openapi/validate/README.md
@@ -1,7 +1,5 @@
-# Validation helpers
-[](https://travis-ci.org/go-openapi/validate)
-[](https://ci.appveyor.com/project/fredbi/validate/branch/master)
-[](https://codecov.io/gh/go-openapi/validate)
+# Validation helpers [](https://github.com/go-openapi/validate/actions?query=workflow%3A"go+test") [](https://codecov.io/gh/go-openapi/validate)
+
[](https://slackin.goswagger.io)
[](https://raw.githubusercontent.com/go-openapi/validate/master/LICENSE)
[](https://pkg.go.dev/github.com/go-openapi/validate)
@@ -24,7 +22,7 @@ Reference can be found here: https://github.com/OAI/OpenAPI-Specification/blob/m
* Minimum, Maximum, MultipleOf
* FormatOf
-[Documentation](https://godoc.org/github.com/go-openapi/validate)
+[Documentation](https://pkg.go.dev/github.com/go-openapi/validate)
## FAQ
diff --git a/vendor/github.com/go-openapi/validate/appveyor.yml b/vendor/github.com/go-openapi/validate/appveyor.yml
deleted file mode 100644
index 89e5bccb..00000000
--- a/vendor/github.com/go-openapi/validate/appveyor.yml
+++ /dev/null
@@ -1,32 +0,0 @@
-version: "0.1.{build}"
-
-clone_folder: C:\go-openapi\validate
-shallow_clone: true # for startup speed
-pull_requests:
- do_not_increment_build_number: true
-
-#skip_tags: true
-#skip_branch_with_pr: true
-
-# appveyor.yml
-build: off
-
-environment:
- GOPATH: c:\gopath
-
-stack: go 1.15
-
-test_script:
- - go test -v -timeout 20m -args -enable-long ./...
-
-deploy: off
-
-notifications:
- - provider: Slack
- incoming_webhook: https://hooks.slack.com/services/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
- auth_token:
- secure: Sf7kZf7ZGbnwWUMpffHwMu5A0cHkLK2MYY32LNTPj4+/3qC3Ghl7+9v4TSLOqOlCwdRNjOGblAq7s+GDJed6/xgRQl1JtCi1klzZNrYX4q01pgTPvvGcwbBkIYgeMaPeIRcK9OZnud7sRXdttozgTOpytps2U6Js32ip7uj5mHSg2ub0FwoSJwlS6dbezZ8+eDhoha0F/guY99BEwx8Bd+zROrT2TFGsSGOFGN6wFc7moCqTHO/YkWib13a2QNXqOxCCVBy/lt76Wp+JkeFppjHlzs/2lP3EAk13RIUAaesdEUHvIHrzCyNJEd3/+KO2DzsWOYfpktd+KBCvgaYOsoo7ubdT3IROeAegZdCgo/6xgCEsmFc9ZcqCfN5yNx2A+BZ2Vwmpws+bQ1E1+B5HDzzaiLcYfG4X2O210QVGVDLWsv1jqD+uPYeHY2WRfh5ZsIUFvaqgUEnwHwrK44/8REAhQavt1QAj5uJpsRd7CkRVPWRNK+yIky+wgbVUFEchRNmS55E7QWf+W4+4QZkQi7vUTMc9nbTUu2Es9NfvfudOpM2wZbn98fjpb/qq/nRv6Bk+ca+7XD5/IgNLMbWp2ouDdzbiHLCOfDUiHiDJhLfFZx9Bwo7ZwfzeOlbrQX66bx7xRKYmOe4DLrXhNcpbsMa8qbfxlZRCmYbubB/Y8h4=
- channel: bots
- on_build_success: false
- on_build_failure: true
- on_build_status_changed: true
diff --git a/vendor/github.com/go-openapi/validate/default_validator.go b/vendor/github.com/go-openapi/validate/default_validator.go
index bd14c2a2..e0dd9383 100644
--- a/vendor/github.com/go-openapi/validate/default_validator.go
+++ b/vendor/github.com/go-openapi/validate/default_validator.go
@@ -25,48 +25,55 @@ import (
// According to Swagger spec, default values MUST validate their schema.
type defaultValidator struct {
SpecValidator *SpecValidator
- visitedSchemas map[string]bool
+ visitedSchemas map[string]struct{}
+ schemaOptions *SchemaValidatorOptions
}
// resetVisited resets the internal state of visited schemas
func (d *defaultValidator) resetVisited() {
- d.visitedSchemas = map[string]bool{}
+ if d.visitedSchemas == nil {
+ d.visitedSchemas = make(map[string]struct{})
+
+ return
+ }
+
+	// TODO(go1.21): clear(d.visitedSchemas)
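+	// clearing in place, rather than allocating a new map, lets the backing
+	// storage be reused by the next validation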
+ for k := range d.visitedSchemas {
+ delete(d.visitedSchemas, k)
+ }
}
-func isVisited(path string, visitedSchemas map[string]bool) bool {
- found := visitedSchemas[path]
- if !found {
- // search for overlapping paths
- frags := strings.Split(path, ".")
- if len(frags) < 2 {
- // shortcut exit on smaller paths
- return found
+func isVisited(path string, visitedSchemas map[string]struct{}) bool {
+ _, found := visitedSchemas[path]
+ if found {
+ return true
+ }
+
+ // search for overlapping paths
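+	// e.g. a (hypothetical) path "a.b.a.b" is reported as visited because its
+	// parent "a.b" ends with the suffix "a.b": this overlap detection is what
+	// stops the depth-first exploration of recursive schema definitions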
+ var (
+ parent string
+ suffix string
+ )
+ for i := len(path) - 2; i >= 0; i-- {
+ r := path[i]
+ if r != '.' {
+ continue
}
- last := len(frags) - 1
- var currentFragStr, parent string
- for i := range frags {
- if i == 0 {
- currentFragStr = frags[last]
- } else {
- currentFragStr = strings.Join([]string{frags[last-i], currentFragStr}, ".")
- }
- if i < last {
- parent = strings.Join(frags[0:last-i], ".")
- } else {
- parent = ""
- }
- if strings.HasSuffix(parent, currentFragStr) {
- found = true
- break
- }
+
+ parent = path[0:i]
+ suffix = path[i+1:]
+
+ if strings.HasSuffix(parent, suffix) {
+ return true
}
}
- return found
+
+ return false
}
// beingVisited asserts a schema is being visited
func (d *defaultValidator) beingVisited(path string) {
- d.visitedSchemas[path] = true
+ d.visitedSchemas[path] = struct{}{}
}
// isVisited tells if a path has already been visited
@@ -75,8 +82,9 @@ func (d *defaultValidator) isVisited(path string) bool {
}
// Validate validates the default values declared in the swagger spec
-func (d *defaultValidator) Validate() (errs *Result) {
- errs = new(Result)
+func (d *defaultValidator) Validate() *Result {
+ errs := pools.poolOfResults.BorrowResult() // will redeem when merged
+
if d == nil || d.SpecValidator == nil {
return errs
}
@@ -89,7 +97,7 @@ func (d *defaultValidator) validateDefaultValueValidAgainstSchema() *Result {
// every default value that is specified must validate against the schema for that property
// headers, items, parameters, schema
- res := new(Result)
+ res := pools.poolOfResults.BorrowResult() // will redeem when merged
s := d.SpecValidator
for method, pathItem := range s.expandedAnalyzer().Operations() {
@@ -107,10 +115,12 @@ func (d *defaultValidator) validateDefaultValueValidAgainstSchema() *Result {
// default values provided must validate against their inline definition (no explicit schema)
if param.Default != nil && param.Schema == nil {
// check param default value is valid
- red := NewParamValidator(¶m, s.KnownFormats).Validate(param.Default) //#nosec
+ red := newParamValidator(¶m, s.KnownFormats, d.schemaOptions).Validate(param.Default) //#nosec
if red.HasErrorsOrWarnings() {
res.AddErrors(defaultValueDoesNotValidateMsg(param.Name, param.In))
res.Merge(red)
+ } else if red.wantsRedeemOnMerge {
+ pools.poolOfResults.RedeemResult(red)
}
}
@@ -120,6 +130,8 @@ func (d *defaultValidator) validateDefaultValueValidAgainstSchema() *Result {
if red.HasErrorsOrWarnings() {
res.AddErrors(defaultValueItemsDoesNotValidateMsg(param.Name, param.In))
res.Merge(red)
+ } else if red.wantsRedeemOnMerge {
+ pools.poolOfResults.RedeemResult(red)
}
}
@@ -129,6 +141,8 @@ func (d *defaultValidator) validateDefaultValueValidAgainstSchema() *Result {
if red.HasErrorsOrWarnings() {
res.AddErrors(defaultValueDoesNotValidateMsg(param.Name, param.In))
res.Merge(red)
+ } else if red.wantsRedeemOnMerge {
+ pools.poolOfResults.RedeemResult(red)
}
}
}
@@ -154,7 +168,7 @@ func (d *defaultValidator) validateDefaultValueValidAgainstSchema() *Result {
// reset explored schemas to get depth-first recursive-proof exploration
d.resetVisited()
for nm, sch := range s.spec.Spec().Definitions {
- res.Merge(d.validateDefaultValueSchemaAgainstSchema(fmt.Sprintf("definitions.%s", nm), "body", &sch)) //#nosec
+ res.Merge(d.validateDefaultValueSchemaAgainstSchema("definitions."+nm, "body", &sch)) //#nosec
}
}
return res
@@ -170,17 +184,18 @@ func (d *defaultValidator) validateDefaultInResponse(resp *spec.Response, respon
responseName, responseCodeAsStr := responseHelp.responseMsgVariants(responseType, responseCode)
- // nolint: dupl
if response.Headers != nil { // Safeguard
for nm, h := range response.Headers {
// reset explored schemas to get depth-first recursive-proof exploration
d.resetVisited()
if h.Default != nil {
- red := NewHeaderValidator(nm, &h, s.KnownFormats).Validate(h.Default) //#nosec
+ red := newHeaderValidator(nm, &h, s.KnownFormats, d.schemaOptions).Validate(h.Default) //#nosec
if red.HasErrorsOrWarnings() {
res.AddErrors(defaultValueHeaderDoesNotValidateMsg(operationID, nm, responseName))
res.Merge(red)
+ } else if red.wantsRedeemOnMerge {
+ pools.poolOfResults.RedeemResult(red)
}
}
@@ -190,6 +205,8 @@ func (d *defaultValidator) validateDefaultInResponse(resp *spec.Response, respon
if red.HasErrorsOrWarnings() {
res.AddErrors(defaultValueHeaderItemsDoesNotValidateMsg(operationID, nm, responseName))
res.Merge(red)
+ } else if red.wantsRedeemOnMerge {
+ pools.poolOfResults.RedeemResult(red)
}
}
@@ -209,6 +226,8 @@ func (d *defaultValidator) validateDefaultInResponse(resp *spec.Response, respon
// Additional message to make sure the context of the error is not lost
res.AddErrors(defaultValueInDoesNotValidateMsg(operationID, responseName))
res.Merge(red)
+ } else if red.wantsRedeemOnMerge {
+ pools.poolOfResults.RedeemResult(red)
}
}
return res
@@ -220,11 +239,13 @@ func (d *defaultValidator) validateDefaultValueSchemaAgainstSchema(path, in stri
return nil
}
d.beingVisited(path)
- res := new(Result)
+ res := pools.poolOfResults.BorrowResult()
s := d.SpecValidator
if schema.Default != nil {
- res.Merge(NewSchemaValidator(schema, s.spec.Spec(), path+".default", s.KnownFormats, SwaggerSchema(true)).Validate(schema.Default))
+ res.Merge(
+ newSchemaValidator(schema, s.spec.Spec(), path+".default", s.KnownFormats, d.schemaOptions).Validate(schema.Default),
+ )
}
if schema.Items != nil {
if schema.Items.Schema != nil {
@@ -242,7 +263,7 @@ func (d *defaultValidator) validateDefaultValueSchemaAgainstSchema(path, in stri
}
if schema.AdditionalItems != nil && schema.AdditionalItems.Schema != nil {
// NOTE: we keep validating values, even though additionalItems is not supported by Swagger 2.0 (and 3.0 as well)
- res.Merge(d.validateDefaultValueSchemaAgainstSchema(fmt.Sprintf("%s.additionalItems", path), in, schema.AdditionalItems.Schema))
+ res.Merge(d.validateDefaultValueSchemaAgainstSchema(path+".additionalItems", in, schema.AdditionalItems.Schema))
}
for propName, prop := range schema.Properties {
res.Merge(d.validateDefaultValueSchemaAgainstSchema(path+"."+propName, in, &prop)) //#nosec
@@ -251,7 +272,7 @@ func (d *defaultValidator) validateDefaultValueSchemaAgainstSchema(path, in stri
res.Merge(d.validateDefaultValueSchemaAgainstSchema(path+"."+propName, in, &prop)) //#nosec
}
if schema.AdditionalProperties != nil && schema.AdditionalProperties.Schema != nil {
- res.Merge(d.validateDefaultValueSchemaAgainstSchema(fmt.Sprintf("%s.additionalProperties", path), in, schema.AdditionalProperties.Schema))
+ res.Merge(d.validateDefaultValueSchemaAgainstSchema(path+".additionalProperties", in, schema.AdditionalProperties.Schema))
}
if schema.AllOf != nil {
for i, aoSch := range schema.AllOf {
@@ -262,13 +283,15 @@ func (d *defaultValidator) validateDefaultValueSchemaAgainstSchema(path, in stri
}
// TODO: Temporary duplicated code. Need to refactor with examples
-// nolint: dupl
+
func (d *defaultValidator) validateDefaultValueItemsAgainstSchema(path, in string, root interface{}, items *spec.Items) *Result {
- res := new(Result)
+ res := pools.poolOfResults.BorrowResult()
s := d.SpecValidator
if items != nil {
if items.Default != nil {
- res.Merge(newItemsValidator(path, in, items, root, s.KnownFormats).Validate(0, items.Default))
+ res.Merge(
+ newItemsValidator(path, in, items, root, s.KnownFormats, d.schemaOptions).Validate(0, items.Default),
+ )
}
if items.Items != nil {
res.Merge(d.validateDefaultValueItemsAgainstSchema(path+"[0].default", in, root, items.Items))
diff --git a/vendor/github.com/go-openapi/validate/doc.go b/vendor/github.com/go-openapi/validate/doc.go
index f5ca9a5d..d2b901ea 100644
--- a/vendor/github.com/go-openapi/validate/doc.go
+++ b/vendor/github.com/go-openapi/validate/doc.go
@@ -19,7 +19,7 @@ as well as tools to validate data against their schema.
This package follows Swagger 2.0. specification (aka OpenAPI 2.0). Reference
can be found here: https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md.
-Validating a specification
+# Validating a specification
Validates a spec document (from JSON or YAML) against the JSON schema for swagger,
then checks a number of extra rules that can't be expressed in JSON schema.
@@ -30,34 +30,36 @@ Entry points:
- SpecValidator.Validate()
Reported as errors:
- [x] definition can't declare a property that's already defined by one of its ancestors
- [x] definition's ancestor can't be a descendant of the same model
- [x] path uniqueness: each api path should be non-verbatim (account for path param names) unique per method
- [x] each security reference should contain only unique scopes
- [x] each security scope in a security definition should be unique
- [x] parameters in path must be unique
- [x] each path parameter must correspond to a parameter placeholder and vice versa
- [x] each referenceable definition must have references
- [x] each definition property listed in the required array must be defined in the properties of the model
- [x] each parameter should have a unique `name` and `type` combination
- [x] each operation should have only 1 parameter of type body
- [x] each reference must point to a valid object
- [x] every default value that is specified must validate against the schema for that property
- [x] items property is required for all schemas/definitions of type `array`
- [x] path parameters must be declared a required
- [x] headers must not contain $ref
- [x] schema and property examples provided must validate against their respective object's schema
- [x] examples provided must validate their schema
+
+ [x] definition can't declare a property that's already defined by one of its ancestors
+ [x] definition's ancestor can't be a descendant of the same model
+	[x] path uniqueness: each api path should be non-verbatim (account for path param names) unique per method. Validation can be relaxed by disabling StrictPathParamUniqueness.
+ [x] each security reference should contain only unique scopes
+ [x] each security scope in a security definition should be unique
+ [x] parameters in path must be unique
+ [x] each path parameter must correspond to a parameter placeholder and vice versa
+ [x] each referenceable definition must have references
+ [x] each definition property listed in the required array must be defined in the properties of the model
+ [x] each parameter should have a unique `name` and `type` combination
+ [x] each operation should have only 1 parameter of type body
+ [x] each reference must point to a valid object
+ [x] every default value that is specified must validate against the schema for that property
+ [x] items property is required for all schemas/definitions of type `array`
+	[x] path parameters must be declared as required
+ [x] headers must not contain $ref
+ [x] schema and property examples provided must validate against their respective object's schema
+ [x] examples provided must validate their schema
Reported as warnings:
- [x] path parameters should not contain any of [{,},\w]
- [x] empty path
- [x] unused definitions
- [x] unsupported validation of examples on non-JSON media types
- [x] examples in response without schema
- [x] readOnly properties should not be required
-Validating a schema
+ [x] path parameters should not contain any of [{,},\w]
+ [x] empty path
+ [x] unused definitions
+ [x] unsupported validation of examples on non-JSON media types
+ [x] examples in response without schema
+ [x] readOnly properties should not be required
+
+# Validating a schema
The schema validation toolkit validates data against JSON-schema-draft 04 schema.
@@ -70,16 +72,16 @@ Entry points:
- AgainstSchema()
- ...
-Known limitations
+# Known limitations
With the current version of this package, the following aspects of swagger are not yet supported:
- [ ] errors and warnings are not reported with key/line number in spec
- [ ] default values and examples on responses only support application/json producer type
- [ ] invalid numeric constraints (such as Minimum, etc..) are not checked except for default and example values
- [ ] rules for collectionFormat are not implemented
- [ ] no validation rule for polymorphism support (discriminator) [not done here]
- [ ] valid js ECMA regexp not supported by Go regexp engine are considered invalid
- [ ] arbitrary large numbers are not supported: max is math.MaxFloat64
+ [ ] errors and warnings are not reported with key/line number in spec
+ [ ] default values and examples on responses only support application/json producer type
+ [ ] invalid numeric constraints (such as Minimum, etc..) are not checked except for default and example values
+ [ ] rules for collectionFormat are not implemented
+ [ ] no validation rule for polymorphism support (discriminator) [not done here]
+ [ ] valid js ECMA regexp not supported by Go regexp engine are considered invalid
+ [ ] arbitrary large numbers are not supported: max is math.MaxFloat64
*/
package validate
diff --git a/vendor/github.com/go-openapi/validate/example_validator.go b/vendor/github.com/go-openapi/validate/example_validator.go
index c8bffd78..d0895697 100644
--- a/vendor/github.com/go-openapi/validate/example_validator.go
+++ b/vendor/github.com/go-openapi/validate/example_validator.go
@@ -23,17 +23,27 @@ import (
// ExampleValidator validates example values defined in a spec
type exampleValidator struct {
SpecValidator *SpecValidator
- visitedSchemas map[string]bool
+ visitedSchemas map[string]struct{}
+ schemaOptions *SchemaValidatorOptions
}
// resetVisited resets the internal state of visited schemas
func (ex *exampleValidator) resetVisited() {
- ex.visitedSchemas = map[string]bool{}
+ if ex.visitedSchemas == nil {
+ ex.visitedSchemas = make(map[string]struct{})
+
+ return
+ }
+
+ // TODO(go1.21): clear(ex.visitedSchemas)
+ for k := range ex.visitedSchemas {
+ delete(ex.visitedSchemas, k)
+ }
}
// beingVisited asserts a schema is being visited
func (ex *exampleValidator) beingVisited(path string) {
- ex.visitedSchemas[path] = true
+ ex.visitedSchemas[path] = struct{}{}
}
// isVisited tells if a path has already been visited
@@ -48,9 +58,9 @@ func (ex *exampleValidator) isVisited(path string) bool {
// - schemas
// - individual property
// - responses
-//
-func (ex *exampleValidator) Validate() (errs *Result) {
- errs = new(Result)
+func (ex *exampleValidator) Validate() *Result {
+ errs := pools.poolOfResults.BorrowResult()
+
if ex == nil || ex.SpecValidator == nil {
return errs
}
@@ -65,7 +75,7 @@ func (ex *exampleValidator) validateExampleValueValidAgainstSchema() *Result {
// in: schemas, properties, object, items
// not in: headers, parameters without schema
- res := new(Result)
+ res := pools.poolOfResults.BorrowResult()
s := ex.SpecValidator
for method, pathItem := range s.expandedAnalyzer().Operations() {
@@ -83,10 +93,12 @@ func (ex *exampleValidator) validateExampleValueValidAgainstSchema() *Result {
// default values provided must validate against their inline definition (no explicit schema)
if param.Example != nil && param.Schema == nil {
// check param default value is valid
- red := NewParamValidator(¶m, s.KnownFormats).Validate(param.Example) //#nosec
+ red := newParamValidator(¶m, s.KnownFormats, ex.schemaOptions).Validate(param.Example) //#nosec
if red.HasErrorsOrWarnings() {
res.AddWarnings(exampleValueDoesNotValidateMsg(param.Name, param.In))
res.MergeAsWarnings(red)
+ } else if red.wantsRedeemOnMerge {
+ pools.poolOfResults.RedeemResult(red)
}
}
@@ -96,6 +108,8 @@ func (ex *exampleValidator) validateExampleValueValidAgainstSchema() *Result {
if red.HasErrorsOrWarnings() {
res.AddWarnings(exampleValueItemsDoesNotValidateMsg(param.Name, param.In))
res.Merge(red)
+ } else if red.wantsRedeemOnMerge {
+ pools.poolOfResults.RedeemResult(red)
}
}
@@ -105,6 +119,8 @@ func (ex *exampleValidator) validateExampleValueValidAgainstSchema() *Result {
if red.HasErrorsOrWarnings() {
res.AddWarnings(exampleValueDoesNotValidateMsg(param.Name, param.In))
res.Merge(red)
+ } else if red.wantsRedeemOnMerge {
+ pools.poolOfResults.RedeemResult(red)
}
}
}
@@ -130,7 +146,7 @@ func (ex *exampleValidator) validateExampleValueValidAgainstSchema() *Result {
// reset explored schemas to get depth-first recursive-proof exploration
ex.resetVisited()
for nm, sch := range s.spec.Spec().Definitions {
- res.Merge(ex.validateExampleValueSchemaAgainstSchema(fmt.Sprintf("definitions.%s", nm), "body", &sch)) //#nosec
+ res.Merge(ex.validateExampleValueSchemaAgainstSchema("definitions."+nm, "body", &sch)) //#nosec
}
}
return res
@@ -146,17 +162,18 @@ func (ex *exampleValidator) validateExampleInResponse(resp *spec.Response, respo
responseName, responseCodeAsStr := responseHelp.responseMsgVariants(responseType, responseCode)
- // nolint: dupl
if response.Headers != nil { // Safeguard
for nm, h := range response.Headers {
// reset explored schemas to get depth-first recursive-proof exploration
ex.resetVisited()
if h.Example != nil {
- red := NewHeaderValidator(nm, &h, s.KnownFormats).Validate(h.Example) //#nosec
+ red := newHeaderValidator(nm, &h, s.KnownFormats, ex.schemaOptions).Validate(h.Example) //#nosec
if red.HasErrorsOrWarnings() {
res.AddWarnings(exampleValueHeaderDoesNotValidateMsg(operationID, nm, responseName))
res.MergeAsWarnings(red)
+ } else if red.wantsRedeemOnMerge {
+ pools.poolOfResults.RedeemResult(red)
}
}
@@ -166,6 +183,8 @@ func (ex *exampleValidator) validateExampleInResponse(resp *spec.Response, respo
if red.HasErrorsOrWarnings() {
res.AddWarnings(exampleValueHeaderItemsDoesNotValidateMsg(operationID, nm, responseName))
res.MergeAsWarnings(red)
+ } else if red.wantsRedeemOnMerge {
+ pools.poolOfResults.RedeemResult(red)
}
}
@@ -185,13 +204,17 @@ func (ex *exampleValidator) validateExampleInResponse(resp *spec.Response, respo
// Additional message to make sure the context of the error is not lost
res.AddWarnings(exampleValueInDoesNotValidateMsg(operationID, responseName))
res.Merge(red)
+ } else if red.wantsRedeemOnMerge {
+ pools.poolOfResults.RedeemResult(red)
}
}
if response.Examples != nil {
if response.Schema != nil {
if example, ok := response.Examples["application/json"]; ok {
- res.MergeAsWarnings(NewSchemaValidator(response.Schema, s.spec.Spec(), path+".examples", s.KnownFormats, SwaggerSchema(true)).Validate(example))
+ res.MergeAsWarnings(
+ newSchemaValidator(response.Schema, s.spec.Spec(), path+".examples", s.KnownFormats, s.schemaOptions).Validate(example),
+ )
} else {
// TODO: validate other media types too
res.AddWarnings(examplesMimeNotSupportedMsg(operationID, responseName))
@@ -210,10 +233,12 @@ func (ex *exampleValidator) validateExampleValueSchemaAgainstSchema(path, in str
}
ex.beingVisited(path)
s := ex.SpecValidator
- res := new(Result)
+ res := pools.poolOfResults.BorrowResult()
if schema.Example != nil {
- res.MergeAsWarnings(NewSchemaValidator(schema, s.spec.Spec(), path+".example", s.KnownFormats, SwaggerSchema(true)).Validate(schema.Example))
+ res.MergeAsWarnings(
+ newSchemaValidator(schema, s.spec.Spec(), path+".example", s.KnownFormats, ex.schemaOptions).Validate(schema.Example),
+ )
}
if schema.Items != nil {
if schema.Items.Schema != nil {
@@ -231,7 +256,7 @@ func (ex *exampleValidator) validateExampleValueSchemaAgainstSchema(path, in str
}
if schema.AdditionalItems != nil && schema.AdditionalItems.Schema != nil {
// NOTE: we keep validating values, even though additionalItems is unsupported in Swagger 2.0 (and 3.0 as well)
- res.Merge(ex.validateExampleValueSchemaAgainstSchema(fmt.Sprintf("%s.additionalItems", path), in, schema.AdditionalItems.Schema))
+ res.Merge(ex.validateExampleValueSchemaAgainstSchema(path+".additionalItems", in, schema.AdditionalItems.Schema))
}
for propName, prop := range schema.Properties {
res.Merge(ex.validateExampleValueSchemaAgainstSchema(path+"."+propName, in, &prop)) //#nosec
@@ -240,7 +265,7 @@ func (ex *exampleValidator) validateExampleValueSchemaAgainstSchema(path, in str
res.Merge(ex.validateExampleValueSchemaAgainstSchema(path+"."+propName, in, &prop)) //#nosec
}
if schema.AdditionalProperties != nil && schema.AdditionalProperties.Schema != nil {
- res.Merge(ex.validateExampleValueSchemaAgainstSchema(fmt.Sprintf("%s.additionalProperties", path), in, schema.AdditionalProperties.Schema))
+ res.Merge(ex.validateExampleValueSchemaAgainstSchema(path+".additionalProperties", in, schema.AdditionalProperties.Schema))
}
if schema.AllOf != nil {
for i, aoSch := range schema.AllOf {
@@ -251,13 +276,16 @@ func (ex *exampleValidator) validateExampleValueSchemaAgainstSchema(path, in str
}
// TODO: Temporary duplicated code. Need to refactor with examples
-// nolint: dupl
+
func (ex *exampleValidator) validateExampleValueItemsAgainstSchema(path, in string, root interface{}, items *spec.Items) *Result {
- res := new(Result)
+ res := pools.poolOfResults.BorrowResult()
s := ex.SpecValidator
if items != nil {
if items.Example != nil {
- res.MergeAsWarnings(newItemsValidator(path, in, items, root, s.KnownFormats).Validate(0, items.Example))
+ res.MergeAsWarnings(
+ newItemsValidator(path, in, items, root, s.KnownFormats, ex.schemaOptions).Validate(0, items.Example),
+ )
}
if items.Items != nil {
res.Merge(ex.validateExampleValueItemsAgainstSchema(path+"[0].example", in, root, items.Items))
@@ -266,5 +294,6 @@ func (ex *exampleValidator) validateExampleValueItemsAgainstSchema(path, in stri
res.AddErrors(invalidPatternInMsg(path, in, items.Pattern))
}
}
+
return res
}
diff --git a/vendor/github.com/go-openapi/validate/formats.go b/vendor/github.com/go-openapi/validate/formats.go
index 0ad996cb..f4e35521 100644
--- a/vendor/github.com/go-openapi/validate/formats.go
+++ b/vendor/github.com/go-openapi/validate/formats.go
@@ -22,10 +22,32 @@ import (
)
type formatValidator struct {
- Format string
Path string
In string
+ Format string
KnownFormats strfmt.Registry
+ Options *SchemaValidatorOptions
+}
+
+func newFormatValidator(path, in, format string, formats strfmt.Registry, opts *SchemaValidatorOptions) *formatValidator {
+ if opts == nil {
+ opts = new(SchemaValidatorOptions)
+ }
+
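+	// depending on the options, either borrow a recycled validator from the
+	// shared pool or allocate a fresh one; every field is assigned below, so a
+	// recycled instance carries no state over from its previous use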
+ var f *formatValidator
+ if opts.recycleValidators {
+ f = pools.poolOfFormatValidators.BorrowValidator()
+ } else {
+ f = new(formatValidator)
+ }
+
+ f.Path = path
+ f.In = in
+ f.Format = format
+ f.KnownFormats = formats
+ f.Options = opts
+
+ return f
}
func (f *formatValidator) SetPath(path string) {
@@ -33,37 +55,45 @@ func (f *formatValidator) SetPath(path string) {
}
func (f *formatValidator) Applies(source interface{}, kind reflect.Kind) bool {
- doit := func() bool {
- if source == nil {
- return false
- }
- switch source := source.(type) {
- case *spec.Items:
- return kind == reflect.String && f.KnownFormats.ContainsName(source.Format)
- case *spec.Parameter:
- return kind == reflect.String && f.KnownFormats.ContainsName(source.Format)
- case *spec.Schema:
- return kind == reflect.String && f.KnownFormats.ContainsName(source.Format)
- case *spec.Header:
- return kind == reflect.String && f.KnownFormats.ContainsName(source.Format)
- }
+ if source == nil || f.KnownFormats == nil {
+ return false
+ }
+
+ switch source := source.(type) {
+ case *spec.Items:
+ return kind == reflect.String && f.KnownFormats.ContainsName(source.Format)
+ case *spec.Parameter:
+ return kind == reflect.String && f.KnownFormats.ContainsName(source.Format)
+ case *spec.Schema:
+ return kind == reflect.String && f.KnownFormats.ContainsName(source.Format)
+ case *spec.Header:
+ return kind == reflect.String && f.KnownFormats.ContainsName(source.Format)
+ default:
return false
}
- r := doit()
- debugLog("format validator for %q applies %t for %T (kind: %v)\n", f.Path, r, source, kind)
- return r
}
func (f *formatValidator) Validate(val interface{}) *Result {
- result := new(Result)
- debugLog("validating \"%v\" against format: %s", val, f.Format)
+ if f.Options.recycleValidators {
+ defer func() {
+ f.redeem()
+ }()
+ }
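+	// NOTE: with recycling enabled the validator is single-use: it is redeemed
+	// to the pool as soon as Validate returns and must not be retained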
+
+ var result *Result
+ if f.Options.recycleResult {
+ result = pools.poolOfResults.BorrowResult()
+ } else {
+ result = new(Result)
+ }
if err := FormatOf(f.Path, f.In, f.Format, val.(string), f.KnownFormats); err != nil {
result.AddErrors(err)
}
- if result.HasErrors() {
- return result
- }
- return nil
+ return result
+}
+
+func (f *formatValidator) redeem() {
+ pools.poolOfFormatValidators.RedeemValidator(f)
}
diff --git a/vendor/github.com/go-openapi/validate/helpers.go b/vendor/github.com/go-openapi/validate/helpers.go
index 48ebfab5..757e403d 100644
--- a/vendor/github.com/go-openapi/validate/helpers.go
+++ b/vendor/github.com/go-openapi/validate/helpers.go
@@ -101,9 +101,17 @@ type errorHelper struct {
// A collection of unexported helpers for error construction
}
-func (h *errorHelper) sErr(err errors.Error) *Result {
+func (h *errorHelper) sErr(err errors.Error, recycle bool) *Result {
// Builds a Result from standard errors.Error
- return &Result{Errors: []error{err}}
+ var result *Result
+ if recycle {
+ result = pools.poolOfResults.BorrowResult()
+ } else {
+ result = new(Result)
+ }
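+	// a recycled Result is expected to be redeemed to the pool by the caller,
+	// typically once merged; a plain one is left to the garbage collector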
+ result.Errors = []error{err}
+
+ return result
}
func (h *errorHelper) addPointerError(res *Result, err error, ref string, fromPath string) *Result {
@@ -157,7 +165,7 @@ func (h *valueHelper) asInt64(val interface{}) int64 {
// Number conversion function for int64, without error checking
// (implements an implicit type upgrade).
v := reflect.ValueOf(val)
- switch v.Kind() {
+ switch v.Kind() { //nolint:exhaustive
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return v.Int()
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
@@ -174,7 +182,7 @@ func (h *valueHelper) asUint64(val interface{}) uint64 {
// Number conversion function for uint64, without error checking
// (implements an implicit type upgrade).
v := reflect.ValueOf(val)
- switch v.Kind() {
+ switch v.Kind() { //nolint:exhaustive
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return uint64(v.Int())
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
@@ -192,7 +200,7 @@ func (h *valueHelper) asFloat64(val interface{}) float64 {
// Number conversion function for float64, without error checking
// (implements an implicit type upgrade).
v := reflect.ValueOf(val)
- switch v.Kind() {
+ switch v.Kind() { //nolint:exhaustive
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return float64(v.Int())
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
@@ -225,7 +233,7 @@ func (h *paramHelper) safeExpandedParamsFor(path, method, operationID string, re
operation.Parameters = resolvedParams
for _, ppr := range s.expandedAnalyzer().SafeParamsFor(method, path,
- func(p spec.Parameter, err error) bool {
+ func(_ spec.Parameter, err error) bool {
// since params have already been expanded, there are few causes for error
res.AddErrors(someParametersBrokenMsg(path, method, operationID))
// original error from analyzer
@@ -250,7 +258,7 @@ func (h *paramHelper) resolveParam(path, method, operationID string, param *spec
}
if err != nil { // Safeguard
- // NOTE: we may enter enter here when the whole parameter is an unresolved $ref
+ // NOTE: we may enter here when the whole parameter is an unresolved $ref
refPath := strings.Join([]string{"\"" + path + "\"", method}, ".")
errorHelp.addPointerError(res, err, param.Ref.String(), refPath)
return nil, res
@@ -306,6 +314,7 @@ func (r *responseHelper) expandResponseRef(
errorHelp.addPointerError(res, err, response.Ref.String(), path)
return nil, res
}
+
return response, res
}
diff --git a/vendor/github.com/go-openapi/validate/object_validator.go b/vendor/github.com/go-openapi/validate/object_validator.go
index 7bb12615..dff73fa9 100644
--- a/vendor/github.com/go-openapi/validate/object_validator.go
+++ b/vendor/github.com/go-openapi/validate/object_validator.go
@@ -15,8 +15,8 @@
package validate
import (
+ "fmt"
"reflect"
- "regexp"
"strings"
"github.com/go-openapi/errors"
@@ -35,62 +35,116 @@ type objectValidator struct {
PatternProperties map[string]spec.Schema
Root interface{}
KnownFormats strfmt.Registry
- Options SchemaValidatorOptions
+ Options *SchemaValidatorOptions
+ splitPath []string
+}
+
+func newObjectValidator(path, in string,
+ maxProperties, minProperties *int64, required []string, properties spec.SchemaProperties,
+ additionalProperties *spec.SchemaOrBool, patternProperties spec.SchemaProperties,
+ root interface{}, formats strfmt.Registry, opts *SchemaValidatorOptions) *objectValidator {
+ if opts == nil {
+ opts = new(SchemaValidatorOptions)
+ }
+
+ var v *objectValidator
+ if opts.recycleValidators {
+ v = pools.poolOfObjectValidators.BorrowValidator()
+ } else {
+ v = new(objectValidator)
+ }
+
+ v.Path = path
+ v.In = in
+ v.MaxProperties = maxProperties
+ v.MinProperties = minProperties
+ v.Required = required
+ v.Properties = properties
+ v.AdditionalProperties = additionalProperties
+ v.PatternProperties = patternProperties
+ v.Root = root
+ v.KnownFormats = formats
+ v.Options = opts
+ v.splitPath = strings.Split(v.Path, ".")
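+	// the path is split once here (and again in SetPath) so that the
+	// isProperties/isDefault/isExample probes no longer re-split it on each call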
+
+ return v
}
func (o *objectValidator) SetPath(path string) {
o.Path = path
+ o.splitPath = strings.Split(path, ".")
}
func (o *objectValidator) Applies(source interface{}, kind reflect.Kind) bool {
// TODO: this should also work for structs
// there is a problem in the type validator where it will be unhappy about null values
// so that requires more testing
- r := reflect.TypeOf(source) == specSchemaType && (kind == reflect.Map || kind == reflect.Struct)
- debugLog("object validator for %q applies %t for %T (kind: %v)\n", o.Path, r, source, kind)
- return r
+ _, isSchema := source.(*spec.Schema)
+ return isSchema && (kind == reflect.Map || kind == reflect.Struct)
}
func (o *objectValidator) isProperties() bool {
- p := strings.Split(o.Path, ".")
+ p := o.splitPath
return len(p) > 1 && p[len(p)-1] == jsonProperties && p[len(p)-2] != jsonProperties
}
func (o *objectValidator) isDefault() bool {
- p := strings.Split(o.Path, ".")
+ p := o.splitPath
return len(p) > 1 && p[len(p)-1] == jsonDefault && p[len(p)-2] != jsonDefault
}
func (o *objectValidator) isExample() bool {
- p := strings.Split(o.Path, ".")
+ p := o.splitPath
return len(p) > 1 && (p[len(p)-1] == swaggerExample || p[len(p)-1] == swaggerExamples) && p[len(p)-2] != swaggerExample
}
func (o *objectValidator) checkArrayMustHaveItems(res *Result, val map[string]interface{}) {
// for swagger 2.0 schemas, there is an additional constraint to have array items defined explicitly.
// with pure jsonschema draft 4, one may have arrays with undefined items (i.e. any type).
- if t, typeFound := val[jsonType]; typeFound {
- if tpe, ok := t.(string); ok && tpe == arrayType {
- if item, itemsKeyFound := val[jsonItems]; !itemsKeyFound {
- res.AddErrors(errors.Required(jsonItems, o.Path, item))
- }
- }
+ if val == nil {
+ return
}
+
+ t, typeFound := val[jsonType]
+ if !typeFound {
+ return
+ }
+
+ tpe, isString := t.(string)
+ if !isString || tpe != arrayType {
+ return
+ }
+
+ item, itemsKeyFound := val[jsonItems]
+ if itemsKeyFound {
+ return
+ }
+
+ res.AddErrors(errors.Required(jsonItems, o.Path, item))
}
func (o *objectValidator) checkItemsMustBeTypeArray(res *Result, val map[string]interface{}) {
- if !o.isProperties() && !o.isDefault() && !o.isExample() {
- if _, itemsKeyFound := val[jsonItems]; itemsKeyFound {
- t, typeFound := val[jsonType]
- if typeFound {
- if tpe, ok := t.(string); !ok || tpe != arrayType {
- res.AddErrors(errors.InvalidType(o.Path, o.In, arrayType, nil))
- }
- } else {
- // there is no type
- res.AddErrors(errors.Required(jsonType, o.Path, t))
- }
- }
+ if val == nil {
+ return
+ }
+
+ if o.isProperties() || o.isDefault() || o.isExample() {
+ return
+ }
+
+ _, itemsKeyFound := val[jsonItems]
+ if !itemsKeyFound {
+ return
+ }
+
+ t, typeFound := val[jsonType]
+ if !typeFound {
+ // there is no type
+ res.AddErrors(errors.Required(jsonType, o.Path, t))
+ }
+
+ if tpe, isString := t.(string); !isString || tpe != arrayType {
+ res.AddErrors(errors.InvalidType(o.Path, o.In, arrayType, nil))
}
}
@@ -104,176 +158,274 @@ func (o *objectValidator) precheck(res *Result, val map[string]interface{}) {
}
func (o *objectValidator) Validate(data interface{}) *Result {
- val := data.(map[string]interface{})
- // TODO: guard against nil data
+ if o.Options.recycleValidators {
+ defer func() {
+ o.redeem()
+ }()
+ }
+
+ var val map[string]interface{}
+ if data != nil {
+ var ok bool
+ val, ok = data.(map[string]interface{})
+ if !ok {
+ return errorHelp.sErr(invalidObjectMsg(o.Path, o.In), o.Options.recycleResult)
+ }
+ }
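+	// nil data is tolerated: val then remains a nil map, which the checks
+	// below (min/max properties, prechecks, required) handle gracefully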
numKeys := int64(len(val))
if o.MinProperties != nil && numKeys < *o.MinProperties {
- return errorHelp.sErr(errors.TooFewProperties(o.Path, o.In, *o.MinProperties))
+ return errorHelp.sErr(errors.TooFewProperties(o.Path, o.In, *o.MinProperties), o.Options.recycleResult)
}
if o.MaxProperties != nil && numKeys > *o.MaxProperties {
- return errorHelp.sErr(errors.TooManyProperties(o.Path, o.In, *o.MaxProperties))
+ return errorHelp.sErr(errors.TooManyProperties(o.Path, o.In, *o.MaxProperties), o.Options.recycleResult)
}
- res := new(Result)
+ var res *Result
+ if o.Options.recycleResult {
+ res = pools.poolOfResults.BorrowResult()
+ } else {
+ res = new(Result)
+ }
o.precheck(res, val)
// check validity of field names
if o.AdditionalProperties != nil && !o.AdditionalProperties.Allows {
// Case: additionalProperties: false
- for k := range val {
- _, regularProperty := o.Properties[k]
- matched := false
-
- for pk := range o.PatternProperties {
- if matches, _ := regexp.MatchString(pk, k); matches {
- matched = true
- break
- }
- }
-
- if !regularProperty && k != "$schema" && k != "id" && !matched {
- // Special properties "$schema" and "id" are ignored
- res.AddErrors(errors.PropertyNotAllowed(o.Path, o.In, k))
-
- // BUG(fredbi): This section should move to a part dedicated to spec validation as
- // it will conflict with regular schemas where a property "headers" is defined.
-
- //
- // Croaks a more explicit message on top of the standard one
- // on some recognized cases.
- //
- // NOTE: edge cases with invalid type assertion are simply ignored here.
- // NOTE: prefix your messages here by "IMPORTANT!" so there are not filtered
- // by higher level callers (the IMPORTANT! tag will be eventually
- // removed).
- if k == "headers" && val[k] != nil {
- // $ref is forbidden in header
- if headers, mapOk := val[k].(map[string]interface{}); mapOk {
- for headerKey, headerBody := range headers {
- if headerBody != nil {
- if headerSchema, mapOfMapOk := headerBody.(map[string]interface{}); mapOfMapOk {
- if _, found := headerSchema["$ref"]; found {
- var msg string
- if refString, stringOk := headerSchema["$ref"].(string); stringOk {
- msg = strings.Join([]string{", one may not use $ref=\":", refString, "\""}, "")
- }
- res.AddErrors(refNotAllowedInHeaderMsg(o.Path, headerKey, msg))
- }
- }
- }
- }
- }
- /*
- case "$ref":
- if val[k] != nil {
- // TODO: check context of that ref: warn about siblings, check against invalid context
- }
- */
- }
- }
- }
+ o.validateNoAdditionalProperties(val, res)
} else {
- // Cases: no additionalProperties (implying: true), or additionalProperties: true, or additionalProperties: { <> }
- for key, value := range val {
- _, regularProperty := o.Properties[key]
-
- // Validates property against "patternProperties" if applicable
- // BUG(fredbi): succeededOnce is always false
-
- // NOTE: how about regular properties which do not match patternProperties?
- matched, succeededOnce, _ := o.validatePatternProperty(key, value, res)
-
- if !(regularProperty || matched || succeededOnce) {
-
- // Cases: properties which are not regular properties and have not been matched by the PatternProperties validator
- if o.AdditionalProperties != nil && o.AdditionalProperties.Schema != nil {
- // AdditionalProperties as Schema
- r := NewSchemaValidator(o.AdditionalProperties.Schema, o.Root, o.Path+"."+key, o.KnownFormats, o.Options.Options()...).Validate(value)
- res.mergeForField(data.(map[string]interface{}), key, r)
- } else if regularProperty && !(matched || succeededOnce) {
- // TODO: this is dead code since regularProperty=false here
- res.AddErrors(errors.FailedAllPatternProperties(o.Path, o.In, key))
- }
- }
- }
- // Valid cases: additionalProperties: true or undefined
+ // Cases: empty additionalProperties (implying: true), or additionalProperties: true, or additionalProperties: { <> }
+ o.validateAdditionalProperties(val, res)
}
- createdFromDefaults := map[string]bool{}
-
- // Property types:
- // - regular Property
- for pName := range o.Properties {
- pSchema := o.Properties[pName] // one instance per iteration
- rName := pName
- if o.Path != "" {
- rName = o.Path + "." + pName
- }
-
- // Recursively validates each property against its schema
- if v, ok := val[pName]; ok {
- r := NewSchemaValidator(&pSchema, o.Root, rName, o.KnownFormats, o.Options.Options()...).Validate(v)
- res.mergeForField(data.(map[string]interface{}), pName, r)
- } else if pSchema.Default != nil {
- // If a default value is defined, creates the property from defaults
- // NOTE: JSON schema does not enforce default values to be valid against schema. Swagger does.
- createdFromDefaults[pName] = true
- res.addPropertySchemata(data.(map[string]interface{}), pName, &pSchema)
- }
- }
-
- // Check required properties
- if len(o.Required) > 0 {
- for _, k := range o.Required {
- if v, ok := val[k]; !ok && !createdFromDefaults[k] {
- res.AddErrors(errors.Required(o.Path+"."+k, o.In, v))
- continue
- }
- }
- }
+ o.validatePropertiesSchema(val, res)
// Check patternProperties
// TODO: it looks like we have done that twice in many cases
for key, value := range val {
_, regularProperty := o.Properties[key]
- matched, _ /*succeededOnce*/, patterns := o.validatePatternProperty(key, value, res)
- if !regularProperty && (matched /*|| succeededOnce*/) {
- for _, pName := range patterns {
- if v, ok := o.PatternProperties[pName]; ok {
- r := NewSchemaValidator(&v, o.Root, o.Path+"."+key, o.KnownFormats, o.Options.Options()...).Validate(value)
- res.mergeForField(data.(map[string]interface{}), key, r)
- }
+ matched, _, patterns := o.validatePatternProperty(key, value, res) // applies to regular properties as well
+ if regularProperty || !matched {
+ continue
+ }
+
+ for _, pName := range patterns {
+ if v, ok := o.PatternProperties[pName]; ok {
+ r := newSchemaValidator(&v, o.Root, o.Path+"."+key, o.KnownFormats, o.Options).Validate(value)
+ res.mergeForField(data.(map[string]interface{}), key, r)
}
}
}
+
return res
}
+func (o *objectValidator) validateNoAdditionalProperties(val map[string]interface{}, res *Result) {
+ for k := range val {
+ if k == "$schema" || k == "id" {
+ // special properties "$schema" and "id" are ignored
+ continue
+ }
+
+ _, regularProperty := o.Properties[k]
+ if regularProperty {
+ continue
+ }
+
+ matched := false
+ for pk := range o.PatternProperties {
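+			// compileRegexp stands in for the former regexp.MatchString
+			// call; a pattern that fails to compile is skipped, i.e. it
+			// never matches, as before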
+ re, err := compileRegexp(pk)
+ if err != nil {
+ continue
+ }
+ if matches := re.MatchString(k); matches {
+ matched = true
+ break
+ }
+ }
+ if matched {
+ continue
+ }
+
+ res.AddErrors(errors.PropertyNotAllowed(o.Path, o.In, k))
+
+ // BUG(fredbi): This section should move to a part dedicated to spec validation as
+ // it will conflict with regular schemas where a property "headers" is defined.
+
+ //
+ // Croaks a more explicit message on top of the standard one
+ // on some recognized cases.
+ //
+ // NOTE: edge cases with invalid type assertion are simply ignored here.
+ // NOTE: prefix your messages here by "IMPORTANT!" so there are not filtered
+ // by higher level callers (the IMPORTANT! tag will be eventually
+ // removed).
+ if k != "headers" || val[k] == nil {
+ continue
+ }
+
+ // $ref is forbidden in header
+ headers, mapOk := val[k].(map[string]interface{})
+ if !mapOk {
+ continue
+ }
+
+ for headerKey, headerBody := range headers {
+ if headerBody == nil {
+ continue
+ }
+
+ headerSchema, mapOfMapOk := headerBody.(map[string]interface{})
+ if !mapOfMapOk {
+ continue
+ }
+
+ _, found := headerSchema["$ref"]
+ if !found {
+ continue
+ }
+
+ refString, stringOk := headerSchema["$ref"].(string)
+ if !stringOk {
+ continue
+ }
+
+ msg := strings.Join([]string{", one may not use $ref=\":", refString, "\""}, "")
+ res.AddErrors(refNotAllowedInHeaderMsg(o.Path, headerKey, msg))
+ /*
+ case "$ref":
+ if val[k] != nil {
+ // TODO: check context of that ref: warn about siblings, check against invalid context
+ }
+ */
+ }
+ }
+}
+
+func (o *objectValidator) validateAdditionalProperties(val map[string]interface{}, res *Result) {
+ for key, value := range val {
+ _, regularProperty := o.Properties[key]
+ if regularProperty {
+ continue
+ }
+
+ // Validates property against "patternProperties" if applicable
+ // BUG(fredbi): succeededOnce is always false
+
+ // NOTE: how about regular properties which do not match patternProperties?
+ matched, succeededOnce, _ := o.validatePatternProperty(key, value, res)
+ if matched || succeededOnce {
+ continue
+ }
+
+ if o.AdditionalProperties == nil || o.AdditionalProperties.Schema == nil {
+ continue
+ }
+
+ // Cases: properties which are not regular properties and have not been matched by the PatternProperties validator
+ // AdditionalProperties as Schema
+ r := newSchemaValidator(o.AdditionalProperties.Schema, o.Root, o.Path+"."+key, o.KnownFormats, o.Options).Validate(value)
+ res.mergeForField(val, key, r)
+ }
+ // Valid cases: additionalProperties: true or undefined
+}
+
+func (o *objectValidator) validatePropertiesSchema(val map[string]interface{}, res *Result) {
+ createdFromDefaults := map[string]struct{}{}
+
+ // Property types:
+ // - regular Property
+ pSchema := pools.poolOfSchemas.BorrowSchema() // recycle a spec.Schema object which lifespan extends only to the validation of properties
+ defer func() {
+ pools.poolOfSchemas.RedeemSchema(pSchema)
+ }()
+
+ for pName := range o.Properties {
+ *pSchema = o.Properties[pName]
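+		// the property schema is copied by value into the single pooled
+		// instance: map entries are not addressable, and reusing one pointer
+		// avoids a fresh allocation per property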
+ var rName string
+ if o.Path == "" {
+ rName = pName
+ } else {
+ rName = o.Path + "." + pName
+ }
+
+ // Recursively validates each property against its schema
+ v, ok := val[pName]
+ if ok {
+ r := newSchemaValidator(pSchema, o.Root, rName, o.KnownFormats, o.Options).Validate(v)
+ res.mergeForField(val, pName, r)
+
+ continue
+ }
+
+ if pSchema.Default != nil {
+ // if a default value is defined, creates the property from defaults
+ // NOTE: JSON schema does not enforce default values to be valid against schema. Swagger does.
+ createdFromDefaults[pName] = struct{}{}
+ if !o.Options.skipSchemataResult {
+ res.addPropertySchemata(val, pName, pSchema) // this shallow-clones the content of the pSchema pointer
+ }
+ }
+ }
+
+ if len(o.Required) == 0 {
+ return
+ }
+
+ // Check required properties
+ for _, k := range o.Required {
+ v, ok := val[k]
+ if ok {
+ continue
+ }
+ _, isCreatedFromDefaults := createdFromDefaults[k]
+ if isCreatedFromDefaults {
+ continue
+ }
+
+ res.AddErrors(errors.Required(fmt.Sprintf("%s.%s", o.Path, k), o.In, v))
+ }
+}
+
// TODO: succeededOnce is not used anywhere
func (o *objectValidator) validatePatternProperty(key string, value interface{}, result *Result) (bool, bool, []string) {
- matched := false
- succeededOnce := false
- var patterns []string
-
- for k, schema := range o.PatternProperties {
- sch := schema
- if match, _ := regexp.MatchString(k, key); match {
- patterns = append(patterns, k)
- matched = true
- validator := NewSchemaValidator(&sch, o.Root, o.Path+"."+key, o.KnownFormats, o.Options.Options()...)
-
- res := validator.Validate(value)
- result.Merge(res)
- }
+ if len(o.PatternProperties) == 0 {
+ return false, false, nil
}
- // BUG(fredbi): can't get to here. Should remove dead code (commented out).
+ matched := false
+ succeededOnce := false
+ patterns := make([]string, 0, len(o.PatternProperties))
- // if succeededOnce {
- // result.Inc()
- // }
+ schema := pools.poolOfSchemas.BorrowSchema()
+ defer func() {
+ pools.poolOfSchemas.RedeemSchema(schema)
+ }()
+
+ for k := range o.PatternProperties {
+ re, err := compileRegexp(k)
+ if err != nil {
+ continue
+ }
+
+ match := re.MatchString(key)
+ if !match {
+ continue
+ }
+
+ *schema = o.PatternProperties[k]
+ patterns = append(patterns, k)
+ matched = true
+ validator := newSchemaValidator(schema, o.Root, fmt.Sprintf("%s.%s", o.Path, key), o.KnownFormats, o.Options)
+
+ res := validator.Validate(value)
+ result.Merge(res)
+ }
return matched, succeededOnce, patterns
}
+
+func (o *objectValidator) redeem() {
+ pools.poolOfObjectValidators.RedeemValidator(o)
+}
diff --git a/vendor/github.com/go-openapi/validate/options.go b/vendor/github.com/go-openapi/validate/options.go
index deeec2f2..cfe9b066 100644
--- a/vendor/github.com/go-openapi/validate/options.go
+++ b/vendor/github.com/go-openapi/validate/options.go
@@ -21,10 +21,29 @@ import "sync"
// NOTE: other options might be needed, for example a go-swagger specific mode.
type Opts struct {
ContinueOnErrors bool // true: continue reporting errors, even if spec is invalid
+
+ // StrictPathParamUniqueness enables a strict validation of paths that include
+ // path parameters. When true, it will enforce that for each method, the path
+	// is unique, regardless of path parameter names, so that GET:/petstore/{id} and
+	// GET:/petstore/{pet} are considered duplicate paths.
+ //
+ // Consider disabling if path parameters can include slashes such as
+ // GET:/v1/{shelve} and GET:/v1/{book}, where the IDs are "shelve/*" and
+	// "shelve/*/book/*" respectively.
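+	//
+	// A minimal sketch of overriding this default (assuming the exported
+	// SpecValidator API of this package, with doc a loaded spec document):
+	//
+	//	v := NewSpecValidator(spec.MustLoadSwagger20Schema(), strfmt.Default)
+	//	v.Options = Opts{StrictPathParamUniqueness: false}
+	//	errs, _ := v.Validate(doc)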
+ StrictPathParamUniqueness bool
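+
+	// SkipSchemataResult skips the aggregation of validated schemata for
+	// object properties into the Result (see addPropertySchemata), trading
+	// that information for fewer allocations.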
+ SkipSchemataResult bool
}
var (
- defaultOpts = Opts{ContinueOnErrors: false} // default is to stop validation on errors
+ defaultOpts = Opts{
+ // default is to stop validation on errors
+ ContinueOnErrors: false,
+
+ // StrictPathParamUniqueness is defaulted to true. This maintains existing
+ // behavior.
+ StrictPathParamUniqueness: true,
+ }
+
defaultOptsMutex = &sync.Mutex{}
)
diff --git a/vendor/github.com/go-openapi/validate/pools.go b/vendor/github.com/go-openapi/validate/pools.go
new file mode 100644
index 00000000..3ddce4dc
--- /dev/null
+++ b/vendor/github.com/go-openapi/validate/pools.go
@@ -0,0 +1,366 @@
+//go:build !validatedebug
+
+package validate
+
+import (
+ "sync"
+
+ "github.com/go-openapi/spec"
+)
+
+var pools allPools
+
+func init() {
+ resetPools()
+}
+
+func resetPools() {
+ // NOTE: for testing purpose, we might want to reset pools after calling Validate twice.
+ // The pool is corrupted in that case: calling Put twice inserts a duplicate in the pool
+ // and further calls to Get are mishandled.
+
+ pools = allPools{
+ poolOfSchemaValidators: schemaValidatorsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &SchemaValidator{}
+
+ return s
+ },
+ },
+ },
+ poolOfObjectValidators: objectValidatorsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &objectValidator{}
+
+ return s
+ },
+ },
+ },
+ poolOfSliceValidators: sliceValidatorsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &schemaSliceValidator{}
+
+ return s
+ },
+ },
+ },
+ poolOfItemsValidators: itemsValidatorsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &itemsValidator{}
+
+ return s
+ },
+ },
+ },
+ poolOfBasicCommonValidators: basicCommonValidatorsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &basicCommonValidator{}
+
+ return s
+ },
+ },
+ },
+ poolOfHeaderValidators: headerValidatorsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &HeaderValidator{}
+
+ return s
+ },
+ },
+ },
+ poolOfParamValidators: paramValidatorsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &ParamValidator{}
+
+ return s
+ },
+ },
+ },
+ poolOfBasicSliceValidators: basicSliceValidatorsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &basicSliceValidator{}
+
+ return s
+ },
+ },
+ },
+ poolOfNumberValidators: numberValidatorsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &numberValidator{}
+
+ return s
+ },
+ },
+ },
+ poolOfStringValidators: stringValidatorsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &stringValidator{}
+
+ return s
+ },
+ },
+ },
+ poolOfSchemaPropsValidators: schemaPropsValidatorsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &schemaPropsValidator{}
+
+ return s
+ },
+ },
+ },
+ poolOfFormatValidators: formatValidatorsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &formatValidator{}
+
+ return s
+ },
+ },
+ },
+ poolOfTypeValidators: typeValidatorsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &typeValidator{}
+
+ return s
+ },
+ },
+ },
+ poolOfSchemas: schemasPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &spec.Schema{}
+
+ return s
+ },
+ },
+ },
+ poolOfResults: resultsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &Result{}
+
+ return s
+ },
+ },
+ },
+ }
+}
+
+type (
+ allPools struct {
+ // memory pools for all validator objects.
+ //
+ // Each pool can be borrowed from and redeemed to.
+ poolOfSchemaValidators schemaValidatorsPool
+ poolOfObjectValidators objectValidatorsPool
+ poolOfSliceValidators sliceValidatorsPool
+ poolOfItemsValidators itemsValidatorsPool
+ poolOfBasicCommonValidators basicCommonValidatorsPool
+ poolOfHeaderValidators headerValidatorsPool
+ poolOfParamValidators paramValidatorsPool
+ poolOfBasicSliceValidators basicSliceValidatorsPool
+ poolOfNumberValidators numberValidatorsPool
+ poolOfStringValidators stringValidatorsPool
+ poolOfSchemaPropsValidators schemaPropsValidatorsPool
+ poolOfFormatValidators formatValidatorsPool
+ poolOfTypeValidators typeValidatorsPool
+ poolOfSchemas schemasPool
+ poolOfResults resultsPool
+ }
+
+ schemaValidatorsPool struct {
+ *sync.Pool
+ }
+
+ objectValidatorsPool struct {
+ *sync.Pool
+ }
+
+ sliceValidatorsPool struct {
+ *sync.Pool
+ }
+
+ itemsValidatorsPool struct {
+ *sync.Pool
+ }
+
+ basicCommonValidatorsPool struct {
+ *sync.Pool
+ }
+
+ headerValidatorsPool struct {
+ *sync.Pool
+ }
+
+ paramValidatorsPool struct {
+ *sync.Pool
+ }
+
+ basicSliceValidatorsPool struct {
+ *sync.Pool
+ }
+
+ numberValidatorsPool struct {
+ *sync.Pool
+ }
+
+ stringValidatorsPool struct {
+ *sync.Pool
+ }
+
+ schemaPropsValidatorsPool struct {
+ *sync.Pool
+ }
+
+ formatValidatorsPool struct {
+ *sync.Pool
+ }
+
+ typeValidatorsPool struct {
+ *sync.Pool
+ }
+
+ schemasPool struct {
+ *sync.Pool
+ }
+
+ resultsPool struct {
+ *sync.Pool
+ }
+)
+
+func (p schemaValidatorsPool) BorrowValidator() *SchemaValidator {
+ return p.Get().(*SchemaValidator)
+}
+
+func (p schemaValidatorsPool) RedeemValidator(s *SchemaValidator) {
+ // NOTE: s might be nil. In that case, Put is a noop.
+ p.Put(s)
+}
+
+func (p objectValidatorsPool) BorrowValidator() *objectValidator {
+ return p.Get().(*objectValidator)
+}
+
+func (p objectValidatorsPool) RedeemValidator(s *objectValidator) {
+ p.Put(s)
+}
+
+func (p sliceValidatorsPool) BorrowValidator() *schemaSliceValidator {
+ return p.Get().(*schemaSliceValidator)
+}
+
+func (p sliceValidatorsPool) RedeemValidator(s *schemaSliceValidator) {
+ p.Put(s)
+}
+
+func (p itemsValidatorsPool) BorrowValidator() *itemsValidator {
+ return p.Get().(*itemsValidator)
+}
+
+func (p itemsValidatorsPool) RedeemValidator(s *itemsValidator) {
+ p.Put(s)
+}
+
+func (p basicCommonValidatorsPool) BorrowValidator() *basicCommonValidator {
+ return p.Get().(*basicCommonValidator)
+}
+
+func (p basicCommonValidatorsPool) RedeemValidator(s *basicCommonValidator) {
+ p.Put(s)
+}
+
+func (p headerValidatorsPool) BorrowValidator() *HeaderValidator {
+ return p.Get().(*HeaderValidator)
+}
+
+func (p headerValidatorsPool) RedeemValidator(s *HeaderValidator) {
+ p.Put(s)
+}
+
+func (p paramValidatorsPool) BorrowValidator() *ParamValidator {
+ return p.Get().(*ParamValidator)
+}
+
+func (p paramValidatorsPool) RedeemValidator(s *ParamValidator) {
+ p.Put(s)
+}
+
+func (p basicSliceValidatorsPool) BorrowValidator() *basicSliceValidator {
+ return p.Get().(*basicSliceValidator)
+}
+
+func (p basicSliceValidatorsPool) RedeemValidator(s *basicSliceValidator) {
+ p.Put(s)
+}
+
+func (p numberValidatorsPool) BorrowValidator() *numberValidator {
+ return p.Get().(*numberValidator)
+}
+
+func (p numberValidatorsPool) RedeemValidator(s *numberValidator) {
+ p.Put(s)
+}
+
+func (p stringValidatorsPool) BorrowValidator() *stringValidator {
+ return p.Get().(*stringValidator)
+}
+
+func (p stringValidatorsPool) RedeemValidator(s *stringValidator) {
+ p.Put(s)
+}
+
+func (p schemaPropsValidatorsPool) BorrowValidator() *schemaPropsValidator {
+ return p.Get().(*schemaPropsValidator)
+}
+
+func (p schemaPropsValidatorsPool) RedeemValidator(s *schemaPropsValidator) {
+ p.Put(s)
+}
+
+func (p formatValidatorsPool) BorrowValidator() *formatValidator {
+ return p.Get().(*formatValidator)
+}
+
+func (p formatValidatorsPool) RedeemValidator(s *formatValidator) {
+ p.Put(s)
+}
+
+func (p typeValidatorsPool) BorrowValidator() *typeValidator {
+ return p.Get().(*typeValidator)
+}
+
+func (p typeValidatorsPool) RedeemValidator(s *typeValidator) {
+ p.Put(s)
+}
+
+func (p schemasPool) BorrowSchema() *spec.Schema {
+ return p.Get().(*spec.Schema)
+}
+
+func (p schemasPool) RedeemSchema(s *spec.Schema) {
+ p.Put(s)
+}
+
+func (p resultsPool) BorrowResult() *Result {
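+	// results are handed out pre-cleared: cleared() wipes whatever state the
+	// previous borrower may have left behind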
+ return p.Get().(*Result).cleared()
+}
+
+func (p resultsPool) RedeemResult(s *Result) {
+ if s == emptyResult {
+ return
+ }
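+	// the shared emptyResult sentinel must never enter the pool, as a future
+	// borrower would end up mutating it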
+ p.Put(s)
+}
diff --git a/vendor/github.com/go-openapi/validate/pools_debug.go b/vendor/github.com/go-openapi/validate/pools_debug.go
new file mode 100644
index 00000000..12949f02
--- /dev/null
+++ b/vendor/github.com/go-openapi/validate/pools_debug.go
@@ -0,0 +1,1012 @@
+//go:build validatedebug
+
+package validate
+
+import (
+ "fmt"
+ "runtime"
+ "sync"
+ "testing"
+
+ "github.com/go-openapi/spec"
+)
+
+// This version of the pools is to be used for debugging and testing, with build tag "validatedebug".
+//
+// In this mode, the pools are tracked for allocation and redemption of borrowed objects, so we can
+// verify a few behaviors of the validators. The debug pools panic when an invalid usage pattern is detected.
+
+var pools allPools
+
+func init() {
+ resetPools()
+}
+
+func resetPools() {
+ // NOTE: for testing purpose, we might want to reset pools after calling Validate twice.
+ // The pool is corrupted in that case: calling Put twice inserts a duplicate in the pool
+ // and further calls to Get are mishandled.
+
+ pools = allPools{
+ poolOfSchemaValidators: schemaValidatorsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &SchemaValidator{}
+
+ return s
+ },
+ },
+ debugMap: make(map[*SchemaValidator]status),
+ allocMap: make(map[*SchemaValidator]string),
+ redeemMap: make(map[*SchemaValidator]string),
+ },
+ poolOfObjectValidators: objectValidatorsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &objectValidator{}
+
+ return s
+ },
+ },
+ debugMap: make(map[*objectValidator]status),
+ allocMap: make(map[*objectValidator]string),
+ redeemMap: make(map[*objectValidator]string),
+ },
+ poolOfSliceValidators: sliceValidatorsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &schemaSliceValidator{}
+
+ return s
+ },
+ },
+ debugMap: make(map[*schemaSliceValidator]status),
+ allocMap: make(map[*schemaSliceValidator]string),
+ redeemMap: make(map[*schemaSliceValidator]string),
+ },
+ poolOfItemsValidators: itemsValidatorsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &itemsValidator{}
+
+ return s
+ },
+ },
+ debugMap: make(map[*itemsValidator]status),
+ allocMap: make(map[*itemsValidator]string),
+ redeemMap: make(map[*itemsValidator]string),
+ },
+ poolOfBasicCommonValidators: basicCommonValidatorsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &basicCommonValidator{}
+
+ return s
+ },
+ },
+ debugMap: make(map[*basicCommonValidator]status),
+ allocMap: make(map[*basicCommonValidator]string),
+ redeemMap: make(map[*basicCommonValidator]string),
+ },
+ poolOfHeaderValidators: headerValidatorsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &HeaderValidator{}
+
+ return s
+ },
+ },
+ debugMap: make(map[*HeaderValidator]status),
+ allocMap: make(map[*HeaderValidator]string),
+ redeemMap: make(map[*HeaderValidator]string),
+ },
+ poolOfParamValidators: paramValidatorsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &ParamValidator{}
+
+ return s
+ },
+ },
+ debugMap: make(map[*ParamValidator]status),
+ allocMap: make(map[*ParamValidator]string),
+ redeemMap: make(map[*ParamValidator]string),
+ },
+ poolOfBasicSliceValidators: basicSliceValidatorsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &basicSliceValidator{}
+
+ return s
+ },
+ },
+ debugMap: make(map[*basicSliceValidator]status),
+ allocMap: make(map[*basicSliceValidator]string),
+ redeemMap: make(map[*basicSliceValidator]string),
+ },
+ poolOfNumberValidators: numberValidatorsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &numberValidator{}
+
+ return s
+ },
+ },
+ debugMap: make(map[*numberValidator]status),
+ allocMap: make(map[*numberValidator]string),
+ redeemMap: make(map[*numberValidator]string),
+ },
+ poolOfStringValidators: stringValidatorsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &stringValidator{}
+
+ return s
+ },
+ },
+ debugMap: make(map[*stringValidator]status),
+ allocMap: make(map[*stringValidator]string),
+ redeemMap: make(map[*stringValidator]string),
+ },
+ poolOfSchemaPropsValidators: schemaPropsValidatorsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &schemaPropsValidator{}
+
+ return s
+ },
+ },
+ debugMap: make(map[*schemaPropsValidator]status),
+ allocMap: make(map[*schemaPropsValidator]string),
+ redeemMap: make(map[*schemaPropsValidator]string),
+ },
+ poolOfFormatValidators: formatValidatorsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &formatValidator{}
+
+ return s
+ },
+ },
+ debugMap: make(map[*formatValidator]status),
+ allocMap: make(map[*formatValidator]string),
+ redeemMap: make(map[*formatValidator]string),
+ },
+ poolOfTypeValidators: typeValidatorsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &typeValidator{}
+
+ return s
+ },
+ },
+ debugMap: make(map[*typeValidator]status),
+ allocMap: make(map[*typeValidator]string),
+ redeemMap: make(map[*typeValidator]string),
+ },
+ poolOfSchemas: schemasPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &spec.Schema{}
+
+ return s
+ },
+ },
+ debugMap: make(map[*spec.Schema]status),
+ allocMap: make(map[*spec.Schema]string),
+ redeemMap: make(map[*spec.Schema]string),
+ },
+ poolOfResults: resultsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &Result{}
+
+ return s
+ },
+ },
+ debugMap: make(map[*Result]status),
+ allocMap: make(map[*Result]string),
+ redeemMap: make(map[*Result]string),
+ },
+ }
+}
+
+const (
+ statusFresh status = iota + 1
+ statusRecycled
+ statusRedeemed
+)
+
+func (s status) String() string {
+ switch s {
+ case statusFresh:
+ return "fresh"
+ case statusRecycled:
+ return "recycled"
+ case statusRedeemed:
+ return "redeemed"
+ default:
+ panic(fmt.Errorf("invalid status: %d", s))
+ }
+}
+
+type (
+ // Debug
+ status uint8
+
+ allPools struct {
+ // memory pools for all validator objects.
+ //
+ // Each pool can be borrowed from and redeemed to.
+ poolOfSchemaValidators schemaValidatorsPool
+ poolOfObjectValidators objectValidatorsPool
+ poolOfSliceValidators sliceValidatorsPool
+ poolOfItemsValidators itemsValidatorsPool
+ poolOfBasicCommonValidators basicCommonValidatorsPool
+ poolOfHeaderValidators headerValidatorsPool
+ poolOfParamValidators paramValidatorsPool
+ poolOfBasicSliceValidators basicSliceValidatorsPool
+ poolOfNumberValidators numberValidatorsPool
+ poolOfStringValidators stringValidatorsPool
+ poolOfSchemaPropsValidators schemaPropsValidatorsPool
+ poolOfFormatValidators formatValidatorsPool
+ poolOfTypeValidators typeValidatorsPool
+ poolOfSchemas schemasPool
+ poolOfResults resultsPool
+ }
+
+ schemaValidatorsPool struct {
+ *sync.Pool
+ debugMap map[*SchemaValidator]status
+ allocMap map[*SchemaValidator]string
+ redeemMap map[*SchemaValidator]string
+ mx sync.Mutex
+ }
+
+ objectValidatorsPool struct {
+ *sync.Pool
+ debugMap map[*objectValidator]status
+ allocMap map[*objectValidator]string
+ redeemMap map[*objectValidator]string
+ mx sync.Mutex
+ }
+
+ sliceValidatorsPool struct {
+ *sync.Pool
+ debugMap map[*schemaSliceValidator]status
+ allocMap map[*schemaSliceValidator]string
+ redeemMap map[*schemaSliceValidator]string
+ mx sync.Mutex
+ }
+
+ itemsValidatorsPool struct {
+ *sync.Pool
+ debugMap map[*itemsValidator]status
+ allocMap map[*itemsValidator]string
+ redeemMap map[*itemsValidator]string
+ mx sync.Mutex
+ }
+
+ basicCommonValidatorsPool struct {
+ *sync.Pool
+ debugMap map[*basicCommonValidator]status
+ allocMap map[*basicCommonValidator]string
+ redeemMap map[*basicCommonValidator]string
+ mx sync.Mutex
+ }
+
+ headerValidatorsPool struct {
+ *sync.Pool
+ debugMap map[*HeaderValidator]status
+ allocMap map[*HeaderValidator]string
+ redeemMap map[*HeaderValidator]string
+ mx sync.Mutex
+ }
+
+ paramValidatorsPool struct {
+ *sync.Pool
+ debugMap map[*ParamValidator]status
+ allocMap map[*ParamValidator]string
+ redeemMap map[*ParamValidator]string
+ mx sync.Mutex
+ }
+
+ basicSliceValidatorsPool struct {
+ *sync.Pool
+ debugMap map[*basicSliceValidator]status
+ allocMap map[*basicSliceValidator]string
+ redeemMap map[*basicSliceValidator]string
+ mx sync.Mutex
+ }
+
+ numberValidatorsPool struct {
+ *sync.Pool
+ debugMap map[*numberValidator]status
+ allocMap map[*numberValidator]string
+ redeemMap map[*numberValidator]string
+ mx sync.Mutex
+ }
+
+ stringValidatorsPool struct {
+ *sync.Pool
+ debugMap map[*stringValidator]status
+ allocMap map[*stringValidator]string
+ redeemMap map[*stringValidator]string
+ mx sync.Mutex
+ }
+
+ schemaPropsValidatorsPool struct {
+ *sync.Pool
+ debugMap map[*schemaPropsValidator]status
+ allocMap map[*schemaPropsValidator]string
+ redeemMap map[*schemaPropsValidator]string
+ mx sync.Mutex
+ }
+
+ formatValidatorsPool struct {
+ *sync.Pool
+ debugMap map[*formatValidator]status
+ allocMap map[*formatValidator]string
+ redeemMap map[*formatValidator]string
+ mx sync.Mutex
+ }
+
+ typeValidatorsPool struct {
+ *sync.Pool
+ debugMap map[*typeValidator]status
+ allocMap map[*typeValidator]string
+ redeemMap map[*typeValidator]string
+ mx sync.Mutex
+ }
+
+ schemasPool struct {
+ *sync.Pool
+ debugMap map[*spec.Schema]status
+ allocMap map[*spec.Schema]string
+ redeemMap map[*spec.Schema]string
+ mx sync.Mutex
+ }
+
+ resultsPool struct {
+ *sync.Pool
+ debugMap map[*Result]status
+ allocMap map[*Result]string
+ redeemMap map[*Result]string
+ mx sync.Mutex
+ }
+)
+
+func (p *schemaValidatorsPool) BorrowValidator() *SchemaValidator {
+ s := p.Get().(*SchemaValidator)
+
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ p.debugMap[s] = statusFresh
+ } else {
+ if x != statusRedeemed {
+ panic("recycled schema should have been redeemed")
+ }
+ p.debugMap[s] = statusRecycled
+ }
+ p.allocMap[s] = caller()
+
+ return s
+}
+
+func (p *schemaValidatorsPool) RedeemValidator(s *SchemaValidator) {
+ // NOTE: s might be nil. In that case, Put is a noop.
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ panic("redeemed schema should have been allocated")
+ }
+ if x != statusRecycled && x != statusFresh {
+ panic("redeemed schema should have been allocated from a fresh or recycled pointer")
+ }
+ p.debugMap[s] = statusRedeemed
+ p.redeemMap[s] = caller()
+ p.Put(s)
+}
+
+func (p *objectValidatorsPool) BorrowValidator() *objectValidator {
+ s := p.Get().(*objectValidator)
+
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ p.debugMap[s] = statusFresh
+ } else {
+ if x != statusRedeemed {
+ panic("recycled object should have been redeemed")
+ }
+ p.debugMap[s] = statusRecycled
+ }
+ p.allocMap[s] = caller()
+
+ return s
+}
+
+func (p *objectValidatorsPool) RedeemValidator(s *objectValidator) {
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ panic("redeemed object should have been allocated")
+ }
+ if x != statusRecycled && x != statusFresh {
+ panic("redeemed object should have been allocated from a fresh or recycled pointer")
+ }
+ p.debugMap[s] = statusRedeemed
+ p.redeemMap[s] = caller()
+ p.Put(s)
+}
+
+func (p *sliceValidatorsPool) BorrowValidator() *schemaSliceValidator {
+ s := p.Get().(*schemaSliceValidator)
+
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ p.debugMap[s] = statusFresh
+ } else {
+ if x != statusRedeemed {
+ panic("recycled schemaSliceValidator should have been redeemed")
+ }
+ p.debugMap[s] = statusRecycled
+ }
+ p.allocMap[s] = caller()
+
+ return s
+}
+
+func (p *sliceValidatorsPool) RedeemValidator(s *schemaSliceValidator) {
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ panic("redeemed schemaSliceValidator should have been allocated")
+ }
+ if x != statusRecycled && x != statusFresh {
+ panic("redeemed schemaSliceValidator should have been allocated from a fresh or recycled pointer")
+ }
+ p.debugMap[s] = statusRedeemed
+ p.redeemMap[s] = caller()
+ p.Put(s)
+}
+
+func (p *itemsValidatorsPool) BorrowValidator() *itemsValidator {
+ s := p.Get().(*itemsValidator)
+
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ p.debugMap[s] = statusFresh
+ } else {
+ if x != statusRedeemed {
+ panic("recycled itemsValidator should have been redeemed")
+ }
+ p.debugMap[s] = statusRecycled
+ }
+ p.allocMap[s] = caller()
+
+ return s
+}
+
+func (p *itemsValidatorsPool) RedeemValidator(s *itemsValidator) {
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ panic("redeemed itemsValidator should have been allocated")
+ }
+ if x != statusRecycled && x != statusFresh {
+ panic("redeemed itemsValidator should have been allocated from a fresh or recycled pointer")
+ }
+ p.debugMap[s] = statusRedeemed
+ p.redeemMap[s] = caller()
+ p.Put(s)
+}
+
+func (p *basicCommonValidatorsPool) BorrowValidator() *basicCommonValidator {
+ s := p.Get().(*basicCommonValidator)
+
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ p.debugMap[s] = statusFresh
+ } else {
+ if x != statusRedeemed {
+ panic("recycled basicCommonValidator should have been redeemed")
+ }
+ p.debugMap[s] = statusRecycled
+ }
+ p.allocMap[s] = caller()
+
+ return s
+}
+
+func (p *basicCommonValidatorsPool) RedeemValidator(s *basicCommonValidator) {
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ panic("redeemed basicCommonValidator should have been allocated")
+ }
+ if x != statusRecycled && x != statusFresh {
+ panic("redeemed basicCommonValidator should have been allocated from a fresh or recycled pointer")
+ }
+ p.debugMap[s] = statusRedeemed
+ p.redeemMap[s] = caller()
+ p.Put(s)
+}
+
+func (p *headerValidatorsPool) BorrowValidator() *HeaderValidator {
+ s := p.Get().(*HeaderValidator)
+
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ p.debugMap[s] = statusFresh
+ } else {
+ if x != statusRedeemed {
+ panic("recycled HeaderValidator should have been redeemed")
+ }
+ p.debugMap[s] = statusRecycled
+ }
+ p.allocMap[s] = caller()
+
+ return s
+}
+
+func (p *headerValidatorsPool) RedeemValidator(s *HeaderValidator) {
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ panic("redeemed header should have been allocated")
+ }
+ if x != statusRecycled && x != statusFresh {
+ panic("redeemed header should have been allocated from a fresh or recycled pointer")
+ }
+ p.debugMap[s] = statusRedeemed
+ p.redeemMap[s] = caller()
+ p.Put(s)
+}
+
+func (p *paramValidatorsPool) BorrowValidator() *ParamValidator {
+ s := p.Get().(*ParamValidator)
+
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ p.debugMap[s] = statusFresh
+ } else {
+ if x != statusRedeemed {
+ panic("recycled param should have been redeemed")
+ }
+ p.debugMap[s] = statusRecycled
+ }
+ p.allocMap[s] = caller()
+
+ return s
+}
+
+func (p *paramValidatorsPool) RedeemValidator(s *ParamValidator) {
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ panic("redeemed param should have been allocated")
+ }
+ if x != statusRecycled && x != statusFresh {
+ panic("redeemed param should have been allocated from a fresh or recycled pointer")
+ }
+ p.debugMap[s] = statusRedeemed
+ p.redeemMap[s] = caller()
+ p.Put(s)
+}
+
+func (p *basicSliceValidatorsPool) BorrowValidator() *basicSliceValidator {
+ s := p.Get().(*basicSliceValidator)
+
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ p.debugMap[s] = statusFresh
+ } else {
+ if x != statusRedeemed {
+ panic("recycled basicSliceValidator should have been redeemed")
+ }
+ p.debugMap[s] = statusRecycled
+ }
+ p.allocMap[s] = caller()
+
+ return s
+}
+
+func (p *basicSliceValidatorsPool) RedeemValidator(s *basicSliceValidator) {
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ panic("redeemed basicSliceValidator should have been allocated")
+ }
+ if x != statusRecycled && x != statusFresh {
+ panic("redeemed basicSliceValidator should have been allocated from a fresh or recycled pointer")
+ }
+ p.debugMap[s] = statusRedeemed
+ p.redeemMap[s] = caller()
+ p.Put(s)
+}
+
+func (p *numberValidatorsPool) BorrowValidator() *numberValidator {
+ s := p.Get().(*numberValidator)
+
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ p.debugMap[s] = statusFresh
+ } else {
+ if x != statusRedeemed {
+ panic("recycled number should have been redeemed")
+ }
+ p.debugMap[s] = statusRecycled
+ }
+ p.allocMap[s] = caller()
+
+ return s
+}
+
+func (p *numberValidatorsPool) RedeemValidator(s *numberValidator) {
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ panic("redeemed number should have been allocated")
+ }
+ if x != statusRecycled && x != statusFresh {
+ panic("redeemed number should have been allocated from a fresh or recycled pointer")
+ }
+ p.debugMap[s] = statusRedeemed
+ p.redeemMap[s] = caller()
+ p.Put(s)
+}
+
+func (p *stringValidatorsPool) BorrowValidator() *stringValidator {
+ s := p.Get().(*stringValidator)
+
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ p.debugMap[s] = statusFresh
+ } else {
+ if x != statusRedeemed {
+ panic("recycled string should have been redeemed")
+ }
+ p.debugMap[s] = statusRecycled
+ }
+ p.allocMap[s] = caller()
+
+ return s
+}
+
+func (p *stringValidatorsPool) RedeemValidator(s *stringValidator) {
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ panic("redeemed string should have been allocated")
+ }
+ if x != statusRecycled && x != statusFresh {
+ panic("redeemed string should have been allocated from a fresh or recycled pointer")
+ }
+ p.debugMap[s] = statusRedeemed
+ p.redeemMap[s] = caller()
+ p.Put(s)
+}
+
+func (p *schemaPropsValidatorsPool) BorrowValidator() *schemaPropsValidator {
+ s := p.Get().(*schemaPropsValidator)
+
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ p.debugMap[s] = statusFresh
+ } else {
+ if x != statusRedeemed {
+ panic("recycled param should have been redeemed")
+ }
+ p.debugMap[s] = statusRecycled
+ }
+ p.allocMap[s] = caller()
+
+ return s
+}
+
+func (p *schemaPropsValidatorsPool) RedeemValidator(s *schemaPropsValidator) {
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ panic("redeemed schemaProps should have been allocated")
+ }
+ if x != statusRecycled && x != statusFresh {
+ panic("redeemed schemaProps should have been allocated from a fresh or recycled pointer")
+ }
+ p.debugMap[s] = statusRedeemed
+ p.redeemMap[s] = caller()
+ p.Put(s)
+}
+
+func (p *formatValidatorsPool) BorrowValidator() *formatValidator {
+ s := p.Get().(*formatValidator)
+
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ p.debugMap[s] = statusFresh
+ } else {
+ if x != statusRedeemed {
+ panic("recycled format should have been redeemed")
+ }
+ p.debugMap[s] = statusRecycled
+ }
+ p.allocMap[s] = caller()
+
+ return s
+}
+
+func (p *formatValidatorsPool) RedeemValidator(s *formatValidator) {
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ panic("redeemed format should have been allocated")
+ }
+ if x != statusRecycled && x != statusFresh {
+ panic("redeemed format should have been allocated from a fresh or recycled pointer")
+ }
+ p.debugMap[s] = statusRedeemed
+ p.redeemMap[s] = caller()
+ p.Put(s)
+}
+
+func (p *typeValidatorsPool) BorrowValidator() *typeValidator {
+ s := p.Get().(*typeValidator)
+
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ p.debugMap[s] = statusFresh
+ } else {
+ if x != statusRedeemed {
+ panic("recycled type should have been redeemed")
+ }
+ p.debugMap[s] = statusRecycled
+ }
+ p.allocMap[s] = caller()
+
+ return s
+}
+
+func (p *typeValidatorsPool) RedeemValidator(s *typeValidator) {
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ panic("redeemed type should have been allocated")
+ }
+ if x != statusRecycled && x != statusFresh {
+ panic(fmt.Errorf("redeemed type should have been allocated from a fresh or recycled pointer. Got status %s, already redeamed at: %s", x, p.redeemMap[s]))
+ }
+ p.debugMap[s] = statusRedeemed
+ p.redeemMap[s] = caller()
+ p.Put(s)
+}
+
+func (p *schemasPool) BorrowSchema() *spec.Schema {
+ s := p.Get().(*spec.Schema)
+
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ p.debugMap[s] = statusFresh
+ } else {
+ if x != statusRedeemed {
+ panic("recycled spec.Schema should have been redeemed")
+ }
+ p.debugMap[s] = statusRecycled
+ }
+ p.allocMap[s] = caller()
+
+ return s
+}
+
+func (p *schemasPool) RedeemSchema(s *spec.Schema) {
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ panic("redeemed spec.Schema should have been allocated")
+ }
+ if x != statusRecycled && x != statusFresh {
+ panic("redeemed spec.Schema should have been allocated from a fresh or recycled pointer")
+ }
+ p.debugMap[s] = statusRedeemed
+ p.redeemMap[s] = caller()
+ p.Put(s)
+}
+
+func (p *resultsPool) BorrowResult() *Result {
+ s := p.Get().(*Result).cleared()
+
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ p.debugMap[s] = statusFresh
+ } else {
+ if x != statusRedeemed {
+ panic("recycled result should have been redeemed")
+ }
+ p.debugMap[s] = statusRecycled
+ }
+ p.allocMap[s] = caller()
+
+ return s
+}
+
+func (p *resultsPool) RedeemResult(s *Result) {
+ if s == emptyResult {
+ if len(s.Errors) > 0 || len(s.Warnings) > 0 {
+ panic("empty result should not mutate")
+ }
+ return
+ }
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ panic("redeemed Result should have been allocated")
+ }
+ if x != statusRecycled && x != statusFresh {
+ panic("redeemed Result should have been allocated from a fresh or recycled pointer")
+ }
+ p.debugMap[s] = statusRedeemed
+ p.redeemMap[s] = caller()
+ p.Put(s)
+}
+
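+// allIsRedeemed reports whether every object borrowed from the pools has been
+// redeemed, logging the allocation site of any object that leaked.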
+func (p *allPools) allIsRedeemed(t testing.TB) bool {
+ outcome := true
+ for k, v := range p.poolOfSchemaValidators.debugMap {
+ if v == statusRedeemed {
+ continue
+ }
+ t.Logf("schemaValidator should be redeemed. Allocated by: %s", p.poolOfSchemaValidators.allocMap[k])
+ outcome = false
+ }
+ for k, v := range p.poolOfObjectValidators.debugMap {
+ if v == statusRedeemed {
+ continue
+ }
+ t.Logf("objectValidator should be redeemed. Allocated by: %s", p.poolOfObjectValidators.allocMap[k])
+ outcome = false
+ }
+ for k, v := range p.poolOfSliceValidators.debugMap {
+ if v == statusRedeemed {
+ continue
+ }
+ t.Logf("sliceValidator should be redeemed. Allocated by: %s", p.poolOfSliceValidators.allocMap[k])
+ outcome = false
+ }
+ for k, v := range p.poolOfItemsValidators.debugMap {
+ if v == statusRedeemed {
+ continue
+ }
+ t.Logf("itemsValidator should be redeemed. Allocated by: %s", p.poolOfItemsValidators.allocMap[k])
+ outcome = false
+ }
+ for k, v := range p.poolOfBasicCommonValidators.debugMap {
+ if v == statusRedeemed {
+ continue
+ }
+ t.Logf("basicCommonValidator should be redeemed. Allocated by: %s", p.poolOfBasicCommonValidators.allocMap[k])
+ outcome = false
+ }
+ for k, v := range p.poolOfHeaderValidators.debugMap {
+ if v == statusRedeemed {
+ continue
+ }
+ t.Logf("headerValidator should be redeemed. Allocated by: %s", p.poolOfHeaderValidators.allocMap[k])
+ outcome = false
+ }
+ for k, v := range p.poolOfParamValidators.debugMap {
+ if v == statusRedeemed {
+ continue
+ }
+ t.Logf("paramValidator should be redeemed. Allocated by: %s", p.poolOfParamValidators.allocMap[k])
+ outcome = false
+ }
+ for k, v := range p.poolOfBasicSliceValidators.debugMap {
+ if v == statusRedeemed {
+ continue
+ }
+ t.Logf("basicSliceValidator should be redeemed. Allocated by: %s", p.poolOfBasicSliceValidators.allocMap[k])
+ outcome = false
+ }
+ for k, v := range p.poolOfNumberValidators.debugMap {
+ if v == statusRedeemed {
+ continue
+ }
+ t.Logf("numberValidator should be redeemed. Allocated by: %s", p.poolOfNumberValidators.allocMap[k])
+ outcome = false
+ }
+ for k, v := range p.poolOfStringValidators.debugMap {
+ if v == statusRedeemed {
+ continue
+ }
+ t.Logf("stringValidator should be redeemed. Allocated by: %s", p.poolOfStringValidators.allocMap[k])
+ outcome = false
+ }
+ for k, v := range p.poolOfSchemaPropsValidators.debugMap {
+ if v == statusRedeemed {
+ continue
+ }
+ t.Logf("schemaPropsValidator should be redeemed. Allocated by: %s", p.poolOfSchemaPropsValidators.allocMap[k])
+ outcome = false
+ }
+ for k, v := range p.poolOfFormatValidators.debugMap {
+ if v == statusRedeemed {
+ continue
+ }
+ t.Logf("formatValidator should be redeemed. Allocated by: %s", p.poolOfFormatValidators.allocMap[k])
+ outcome = false
+ }
+ for k, v := range p.poolOfTypeValidators.debugMap {
+ if v == statusRedeemed {
+ continue
+ }
+ t.Logf("typeValidator should be redeemed. Allocated by: %s", p.poolOfTypeValidators.allocMap[k])
+ outcome = false
+ }
+ for k, v := range p.poolOfSchemas.debugMap {
+ if v == statusRedeemed {
+ continue
+ }
+ t.Logf("schemas should be redeemed. Allocated by: %s", p.poolOfSchemas.allocMap[k])
+ outcome = false
+ }
+ for k, v := range p.poolOfResults.debugMap {
+ if v == statusRedeemed {
+ continue
+ }
+ t.Logf("result should be redeemed. Allocated by: %s", p.poolOfResults.allocMap[k])
+ outcome = false
+ }
+
+ return outcome
+}
+
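+// caller reports the file and line of the call site that borrowed or redeemed
+// an object. The skip depth of 3 is chosen to step over the pool helpers so
+// that the recorded location points at user code.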
+func caller() string {
+ pc, _, _, _ := runtime.Caller(3) //nolint:dogsled
+ from, line := runtime.FuncForPC(pc).FileLine(pc)
+
+ return fmt.Sprintf("%s:%d", from, line)
+}
diff --git a/vendor/github.com/go-openapi/validate/result.go b/vendor/github.com/go-openapi/validate/result.go
index 8f5f935e..c80804a9 100644
--- a/vendor/github.com/go-openapi/validate/result.go
+++ b/vendor/github.com/go-openapi/validate/result.go
@@ -15,7 +15,7 @@
package validate
import (
- "fmt"
+ stderrors "errors"
"reflect"
"strings"
@@ -23,6 +23,8 @@ import (
"github.com/go-openapi/spec"
)
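+// emptyResult is a shared, read-only Result returned when there is nothing to
+// validate (e.g. from a nil validator). It must never be mutated;
+// RedeemResult deliberately skips it.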
+var emptyResult = &Result{MatchCount: 1}
+
// Result represents a validation result set, composed of
// errors and warnings.
//
@@ -50,8 +52,10 @@ type Result struct {
// Schemata for slice items
itemSchemata []itemSchemata
- cachedFieldSchemta map[FieldKey][]*spec.Schema
- cachedItemSchemata map[ItemKey][]*spec.Schema
+ cachedFieldSchemata map[FieldKey][]*spec.Schema
+ cachedItemSchemata map[ItemKey][]*spec.Schema
+
+ wantsRedeemOnMerge bool
}
// FieldKey is a pair of an object and a field, usable as a key for a map.
@@ -116,6 +120,9 @@ func (r *Result) Merge(others ...*Result) *Result {
}
r.mergeWithoutRootSchemata(other)
r.rootObjectSchemata.Append(other.rootObjectSchemata)
+ if other.wantsRedeemOnMerge {
+ pools.poolOfResults.RedeemResult(other)
+ }
}
return r
}
@@ -132,10 +139,9 @@ func (r *Result) RootObjectSchemata() []*spec.Schema {
}
// FieldSchemata returns the schemata which apply to fields in objects.
-// nolint: dupl
func (r *Result) FieldSchemata() map[FieldKey][]*spec.Schema {
- if r.cachedFieldSchemta != nil {
- return r.cachedFieldSchemta
+ if r.cachedFieldSchemata != nil {
+ return r.cachedFieldSchemata
}
ret := make(map[FieldKey][]*spec.Schema, len(r.fieldSchemata))
@@ -147,12 +153,12 @@ func (r *Result) FieldSchemata() map[FieldKey][]*spec.Schema {
ret[key] = append(ret[key], fs.schemata.multiple...)
}
}
- r.cachedFieldSchemta = ret
+ r.cachedFieldSchemata = ret
+
return ret
}
// ItemSchemata returns the schemata which apply to items in slices.
-// nolint: dupl
func (r *Result) ItemSchemata() map[ItemKey][]*spec.Schema {
if r.cachedItemSchemata != nil {
return r.cachedItemSchemata
@@ -172,12 +178,13 @@ func (r *Result) ItemSchemata() map[ItemKey][]*spec.Schema {
}
func (r *Result) resetCaches() {
- r.cachedFieldSchemta = nil
+ r.cachedFieldSchemata = nil
r.cachedItemSchemata = nil
}
// mergeForField merges other into r, assigning other's root schemata to the given Object and field name.
-// nolint: unparam
+//
+//nolint:unparam
func (r *Result) mergeForField(obj map[string]interface{}, field string, other *Result) *Result {
if other == nil {
return r
@@ -188,18 +195,23 @@ func (r *Result) mergeForField(obj map[string]interface{}, field string, other *
if r.fieldSchemata == nil {
r.fieldSchemata = make([]fieldSchemata, len(obj))
}
+ // clone other schemata, as other is about to be redeemed to the pool
r.fieldSchemata = append(r.fieldSchemata, fieldSchemata{
obj: obj,
field: field,
- schemata: other.rootObjectSchemata,
+ schemata: other.rootObjectSchemata.Clone(),
})
}
+ if other.wantsRedeemOnMerge {
+ pools.poolOfResults.RedeemResult(other)
+ }
return r
}
// mergeForSlice merges other into r, assigning other's root schemata to the given slice and index.
-// nolint: unparam
+//
+//nolint:unparam
func (r *Result) mergeForSlice(slice reflect.Value, i int, other *Result) *Result {
if other == nil {
return r
@@ -210,29 +222,38 @@ func (r *Result) mergeForSlice(slice reflect.Value, i int, other *Result) *Resul
if r.itemSchemata == nil {
r.itemSchemata = make([]itemSchemata, slice.Len())
}
+ // clone other schemata, as other is about to be redeemed to the pool
r.itemSchemata = append(r.itemSchemata, itemSchemata{
slice: slice,
index: i,
- schemata: other.rootObjectSchemata,
+ schemata: other.rootObjectSchemata.Clone(),
})
}
+ if other.wantsRedeemOnMerge {
+ pools.poolOfResults.RedeemResult(other)
+ }
+
return r
}
// addRootObjectSchemata adds the given schemata for the root object of the result.
-// The slice schemata might be reused. I.e. do not modify it after being added to a result.
+//
+// Since the slice schemata might be reused, it is shallow-cloned before being saved into the result.
func (r *Result) addRootObjectSchemata(s *spec.Schema) {
- r.rootObjectSchemata.Append(schemata{one: s})
+ clone := *s
+ r.rootObjectSchemata.Append(schemata{one: &clone})
}
// addPropertySchemata adds the given schemata for the object and field.
-// The slice schemata might be reused. I.e. do not modify it after being added to a result.
+//
+// Since the slice schemata might be reused, it is shallow-cloned before being saved into the result.
func (r *Result) addPropertySchemata(obj map[string]interface{}, fld string, schema *spec.Schema) {
if r.fieldSchemata == nil {
r.fieldSchemata = make([]fieldSchemata, 0, len(obj))
}
- r.fieldSchemata = append(r.fieldSchemata, fieldSchemata{obj: obj, field: fld, schemata: schemata{one: schema}})
+ clone := *schema
+ r.fieldSchemata = append(r.fieldSchemata, fieldSchemata{obj: obj, field: fld, schemata: schemata{one: &clone}})
}
/*
@@ -255,17 +276,21 @@ func (r *Result) mergeWithoutRootSchemata(other *Result) {
if other.fieldSchemata != nil {
if r.fieldSchemata == nil {
- r.fieldSchemata = other.fieldSchemata
- } else {
- r.fieldSchemata = append(r.fieldSchemata, other.fieldSchemata...)
+ r.fieldSchemata = make([]fieldSchemata, 0, len(other.fieldSchemata))
+ }
+ for _, field := range other.fieldSchemata {
+ field.schemata = field.schemata.Clone()
+ r.fieldSchemata = append(r.fieldSchemata, field)
}
}
if other.itemSchemata != nil {
if r.itemSchemata == nil {
- r.itemSchemata = other.itemSchemata
- } else {
- r.itemSchemata = append(r.itemSchemata, other.itemSchemata...)
+ r.itemSchemata = make([]itemSchemata, 0, len(other.itemSchemata))
+ }
+ for _, field := range other.itemSchemata {
+ field.schemata = field.schemata.Clone()
+ r.itemSchemata = append(r.itemSchemata, field)
}
}
}
@@ -280,6 +305,9 @@ func (r *Result) MergeAsErrors(others ...*Result) *Result {
r.AddErrors(other.Errors...)
r.AddErrors(other.Warnings...)
r.MatchCount += other.MatchCount
+ if other.wantsRedeemOnMerge {
+ pools.poolOfResults.RedeemResult(other)
+ }
}
}
return r
@@ -295,6 +323,9 @@ func (r *Result) MergeAsWarnings(others ...*Result) *Result {
r.AddWarnings(other.Errors...)
r.AddWarnings(other.Warnings...)
r.MatchCount += other.MatchCount
+ if other.wantsRedeemOnMerge {
+ pools.poolOfResults.RedeemResult(other)
+ }
}
}
return r
@@ -356,16 +387,21 @@ func (r *Result) keepRelevantErrors() *Result {
strippedErrors := []error{}
for _, e := range r.Errors {
if strings.HasPrefix(e.Error(), "IMPORTANT!") {
- strippedErrors = append(strippedErrors, fmt.Errorf(strings.TrimPrefix(e.Error(), "IMPORTANT!")))
+ strippedErrors = append(strippedErrors, stderrors.New(strings.TrimPrefix(e.Error(), "IMPORTANT!")))
}
}
strippedWarnings := []error{}
for _, e := range r.Warnings {
if strings.HasPrefix(e.Error(), "IMPORTANT!") {
- strippedWarnings = append(strippedWarnings, fmt.Errorf(strings.TrimPrefix(e.Error(), "IMPORTANT!")))
+ strippedWarnings = append(strippedWarnings, stderrors.New(strings.TrimPrefix(e.Error(), "IMPORTANT!")))
}
}
- strippedResult := new(Result)
+ var strippedResult *Result
+ if r.wantsRedeemOnMerge {
+ strippedResult = pools.poolOfResults.BorrowResult()
+ } else {
+ strippedResult = new(Result)
+ }
strippedResult.Errors = strippedErrors
strippedResult.Warnings = strippedWarnings
return strippedResult
@@ -427,6 +463,27 @@ func (r *Result) AsError() error {
return errors.CompositeValidationError(r.Errors...)
}
+func (r *Result) cleared() *Result {
+ // clear the Result to be reusable. Keep allocated capacity.
+ r.Errors = r.Errors[:0]
+ r.Warnings = r.Warnings[:0]
+ r.MatchCount = 0
+ r.data = nil
+ r.rootObjectSchemata.one = nil
+ r.rootObjectSchemata.multiple = r.rootObjectSchemata.multiple[:0]
+ r.fieldSchemata = r.fieldSchemata[:0]
+ r.itemSchemata = r.itemSchemata[:0]
+ for k := range r.cachedFieldSchemata {
+ delete(r.cachedFieldSchemata, k)
+ }
+ for k := range r.cachedItemSchemata {
+ delete(r.cachedItemSchemata, k)
+ }
+ r.wantsRedeemOnMerge = true // mark this result as eligible for redemption when merged into another
+
+ return r
+}
+
// schemata is an arbitrary number of schemata. It distinguishes between zero,
// one and many schemata to avoid slice allocations.
type schemata struct {
@@ -453,7 +510,7 @@ func (s *schemata) Slice() []*spec.Schema {
return s.multiple
}
-// appendSchemata appends the schemata in other to s. It mutated s in-place.
+// Append appends the schemata in other to s. It mutates s in-place.
func (s *schemata) Append(other schemata) {
if other.one == nil && len(other.multiple) == 0 {
return
@@ -484,3 +541,23 @@ func (s *schemata) Append(other schemata) {
}
}
}
+
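+// Clone returns a copy of s in which every *spec.Schema pointer is replaced
+// by a pointer to a shallow copy, so the clone remains valid after the
+// original schemata are redeemed to the pool.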
+func (s schemata) Clone() schemata {
+ var clone schemata
+
+ if s.one != nil {
+ clone.one = new(spec.Schema)
+ *clone.one = *s.one
+ }
+
+ if len(s.multiple) > 0 {
+ clone.multiple = make([]*spec.Schema, len(s.multiple))
+ for idx := 0; idx < len(s.multiple); idx++ {
+ sp := new(spec.Schema)
+ *sp = *s.multiple[idx]
+ clone.multiple[idx] = sp
+ }
+ }
+
+ return clone
+}
diff --git a/vendor/github.com/go-openapi/validate/schema.go b/vendor/github.com/go-openapi/validate/schema.go
index b817eb0e..db65264f 100644
--- a/vendor/github.com/go-openapi/validate/schema.go
+++ b/vendor/github.com/go-openapi/validate/schema.go
@@ -24,32 +24,32 @@ import (
"github.com/go-openapi/swag"
)
-var (
- specSchemaType = reflect.TypeOf(&spec.Schema{})
- specParameterType = reflect.TypeOf(&spec.Parameter{})
- specHeaderType = reflect.TypeOf(&spec.Header{})
- // specItemsType = reflect.TypeOf(&spec.Items{})
-)
-
// SchemaValidator validates data against a JSON schema
type SchemaValidator struct {
Path string
in string
Schema *spec.Schema
- validators []valueValidator
+ validators [8]valueValidator
Root interface{}
KnownFormats strfmt.Registry
- Options SchemaValidatorOptions
+ Options *SchemaValidatorOptions
}
// AgainstSchema validates the specified data against the provided schema, using a registry of supported formats.
//
// When no pre-parsed *spec.Schema structure is provided, it uses a JSON schema as default. See example.
func AgainstSchema(schema *spec.Schema, data interface{}, formats strfmt.Registry, options ...Option) error {
- res := NewSchemaValidator(schema, nil, "", formats, options...).Validate(data)
+ res := NewSchemaValidator(schema, nil, "", formats,
+ append(options, WithRecycleValidators(true), withRecycleResults(true))...,
+ ).Validate(data)
+ defer func() {
+ pools.poolOfResults.RedeemResult(res)
+ }()
+
if res.HasErrors() {
return errors.CompositeValidationError(res.Errors...)
}
+
return nil
}
@@ -57,6 +57,15 @@ func AgainstSchema(schema *spec.Schema, data interface{}, formats strfmt.Registr
//
// Panics if the provided schema is invalid.
func NewSchemaValidator(schema *spec.Schema, rootSchema interface{}, root string, formats strfmt.Registry, options ...Option) *SchemaValidator {
+ opts := new(SchemaValidatorOptions)
+ for _, o := range options {
+ o(opts)
+ }
+
+ return newSchemaValidator(schema, rootSchema, root, formats, opts)
+}
+
+func newSchemaValidator(schema *spec.Schema, rootSchema interface{}, root string, formats strfmt.Registry, opts *SchemaValidatorOptions) *SchemaValidator {
if schema == nil {
return nil
}
@@ -72,17 +81,26 @@ func NewSchemaValidator(schema *spec.Schema, rootSchema interface{}, root string
panic(msg)
}
}
- s := SchemaValidator{
- Path: root,
- in: "body",
- Schema: schema,
- Root: rootSchema,
- KnownFormats: formats,
- Options: SchemaValidatorOptions{}}
- for _, o := range options {
- o(&s.Options)
+
+ if opts == nil {
+ opts = new(SchemaValidatorOptions)
}
- s.validators = []valueValidator{
+
+ var s *SchemaValidator
+ if opts.recycleValidators {
+ s = pools.poolOfSchemaValidators.BorrowValidator()
+ } else {
+ s = new(SchemaValidator)
+ }
+
+ s.Path = root
+ s.in = "body"
+ s.Schema = schema
+ s.Root = rootSchema
+ s.Options = opts
+ s.KnownFormats = formats
+
+ s.validators = [8]valueValidator{
s.typeValidator(),
s.schemaPropsValidator(),
s.stringValidator(),
@@ -92,7 +110,8 @@ func NewSchemaValidator(schema *spec.Schema, rootSchema interface{}, root string
s.commonValidator(),
s.objectValidator(),
}
- return &s
+
+ return s
}
// SetPath sets the path for this schema validator
@@ -101,24 +120,46 @@ func (s *SchemaValidator) SetPath(path string) {
}
// Applies returns true when this schema validator applies
-func (s *SchemaValidator) Applies(source interface{}, kind reflect.Kind) bool {
+func (s *SchemaValidator) Applies(source interface{}, _ reflect.Kind) bool {
_, ok := source.(*spec.Schema)
return ok
}
// Validate validates the data against the schema
func (s *SchemaValidator) Validate(data interface{}) *Result {
- result := &Result{data: data}
if s == nil {
- return result
+ return emptyResult
}
- if s.Schema != nil {
+
+ if s.Options.recycleValidators {
+ defer func() {
+ s.redeemChildren()
+ s.redeem() // one-time use validator
+ }()
+ }
+
+ var result *Result
+ if s.Options.recycleResult {
+ result = pools.poolOfResults.BorrowResult()
+ result.data = data
+ } else {
+ result = &Result{data: data}
+ }
+
+ if s.Schema != nil && !s.Options.skipSchemataResult {
result.addRootObjectSchemata(s.Schema)
}
if data == nil {
+ // early exit with minimal validation
result.Merge(s.validators[0].Validate(data)) // type validator
result.Merge(s.validators[6].Validate(data)) // common validator
+
+ if s.Options.recycleValidators {
+ s.validators[0] = nil
+ s.validators[6] = nil
+ }
+
return result
}
@@ -147,6 +188,7 @@ func (s *SchemaValidator) Validate(data interface{}) *Result {
if erri != nil {
result.AddErrors(invalidTypeConversionMsg(s.Path, erri))
result.Inc()
+
return result
}
d = in
@@ -155,6 +197,7 @@ func (s *SchemaValidator) Validate(data interface{}) *Result {
if errf != nil {
result.AddErrors(invalidTypeConversionMsg(s.Path, errf))
result.Inc()
+
return result
}
d = nf
@@ -164,14 +207,26 @@ func (s *SchemaValidator) Validate(data interface{}) *Result {
kind = tpe.Kind()
}
- for _, v := range s.validators {
+ for idx, v := range s.validators {
if !v.Applies(s.Schema, kind) {
- debugLog("%T does not apply for %v", v, kind)
+ if s.Options.recycleValidators {
+ // Validate won't be called, so relinquish this validator
+ if redeemableChildren, ok := v.(interface{ redeemChildren() }); ok {
+ redeemableChildren.redeemChildren()
+ }
+ if redeemable, ok := v.(interface{ redeem() }); ok {
+ redeemable.redeem()
+ }
+ s.validators[idx] = nil // prevents further (unsafe) usage
+ }
+
continue
}
- err := v.Validate(d)
- result.Merge(err)
+ result.Merge(v.Validate(d))
+ if s.Options.recycleValidators {
+ s.validators[idx] = nil // prevents further (unsafe) usage
+ }
result.Inc()
}
result.Inc()
@@ -180,81 +235,120 @@ func (s *SchemaValidator) Validate(data interface{}) *Result {
}
func (s *SchemaValidator) typeValidator() valueValidator {
- return &typeValidator{Type: s.Schema.Type, Nullable: s.Schema.Nullable, Format: s.Schema.Format, In: s.in, Path: s.Path}
+ return newTypeValidator(
+ s.Path,
+ s.in,
+ s.Schema.Type,
+ s.Schema.Nullable,
+ s.Schema.Format,
+ s.Options,
+ )
}
func (s *SchemaValidator) commonValidator() valueValidator {
- return &basicCommonValidator{
- Path: s.Path,
- In: s.in,
- Enum: s.Schema.Enum,
- }
+ return newBasicCommonValidator(
+ s.Path,
+ s.in,
+ s.Schema.Default,
+ s.Schema.Enum,
+ s.Options,
+ )
}
func (s *SchemaValidator) sliceValidator() valueValidator {
- return &schemaSliceValidator{
- Path: s.Path,
- In: s.in,
- MaxItems: s.Schema.MaxItems,
- MinItems: s.Schema.MinItems,
- UniqueItems: s.Schema.UniqueItems,
- AdditionalItems: s.Schema.AdditionalItems,
- Items: s.Schema.Items,
- Root: s.Root,
- KnownFormats: s.KnownFormats,
- Options: s.Options,
- }
+ return newSliceValidator(
+ s.Path,
+ s.in,
+ s.Schema.MaxItems,
+ s.Schema.MinItems,
+ s.Schema.UniqueItems,
+ s.Schema.AdditionalItems,
+ s.Schema.Items,
+ s.Root,
+ s.KnownFormats,
+ s.Options,
+ )
}
func (s *SchemaValidator) numberValidator() valueValidator {
- return &numberValidator{
- Path: s.Path,
- In: s.in,
- Default: s.Schema.Default,
- MultipleOf: s.Schema.MultipleOf,
- Maximum: s.Schema.Maximum,
- ExclusiveMaximum: s.Schema.ExclusiveMaximum,
- Minimum: s.Schema.Minimum,
- ExclusiveMinimum: s.Schema.ExclusiveMinimum,
- }
+ return newNumberValidator(
+ s.Path,
+ s.in,
+ s.Schema.Default,
+ s.Schema.MultipleOf,
+ s.Schema.Maximum,
+ s.Schema.ExclusiveMaximum,
+ s.Schema.Minimum,
+ s.Schema.ExclusiveMinimum,
+ "",
+ "",
+ s.Options,
+ )
}
func (s *SchemaValidator) stringValidator() valueValidator {
- return &stringValidator{
- Path: s.Path,
- In: s.in,
- MaxLength: s.Schema.MaxLength,
- MinLength: s.Schema.MinLength,
- Pattern: s.Schema.Pattern,
- }
+ return newStringValidator(
+ s.Path,
+ s.in,
+ nil,
+ false,
+ false,
+ s.Schema.MaxLength,
+ s.Schema.MinLength,
+ s.Schema.Pattern,
+ s.Options,
+ )
}
func (s *SchemaValidator) formatValidator() valueValidator {
- return &formatValidator{
- Path: s.Path,
- In: s.in,
- Format: s.Schema.Format,
- KnownFormats: s.KnownFormats,
- }
+ return newFormatValidator(
+ s.Path,
+ s.in,
+ s.Schema.Format,
+ s.KnownFormats,
+ s.Options,
+ )
}
func (s *SchemaValidator) schemaPropsValidator() valueValidator {
sch := s.Schema
- return newSchemaPropsValidator(s.Path, s.in, sch.AllOf, sch.OneOf, sch.AnyOf, sch.Not, sch.Dependencies, s.Root, s.KnownFormats, s.Options.Options()...)
+ return newSchemaPropsValidator(
+ s.Path, s.in, sch.AllOf, sch.OneOf, sch.AnyOf, sch.Not, sch.Dependencies, s.Root, s.KnownFormats,
+ s.Options,
+ )
}
func (s *SchemaValidator) objectValidator() valueValidator {
- return &objectValidator{
- Path: s.Path,
- In: s.in,
- MaxProperties: s.Schema.MaxProperties,
- MinProperties: s.Schema.MinProperties,
- Required: s.Schema.Required,
- Properties: s.Schema.Properties,
- AdditionalProperties: s.Schema.AdditionalProperties,
- PatternProperties: s.Schema.PatternProperties,
- Root: s.Root,
- KnownFormats: s.KnownFormats,
- Options: s.Options,
+ return newObjectValidator(
+ s.Path,
+ s.in,
+ s.Schema.MaxProperties,
+ s.Schema.MinProperties,
+ s.Schema.Required,
+ s.Schema.Properties,
+ s.Schema.AdditionalProperties,
+ s.Schema.PatternProperties,
+ s.Root,
+ s.KnownFormats,
+ s.Options,
+ )
+}
+
+func (s *SchemaValidator) redeem() {
+ pools.poolOfSchemaValidators.RedeemValidator(s)
+}
+
+func (s *SchemaValidator) redeemChildren() {
+ for i, validator := range s.validators {
+ if validator == nil {
+ continue
+ }
+ if redeemableChildren, ok := validator.(interface{ redeemChildren() }); ok {
+ redeemableChildren.redeemChildren()
+ }
+ if redeemable, ok := validator.(interface{ redeem() }); ok {
+ redeemable.redeem()
+ }
+ s.validators[i] = nil // free up allocated children if not in pool
}
}
diff --git a/vendor/github.com/go-openapi/validate/schema_option.go b/vendor/github.com/go-openapi/validate/schema_option.go
index 4b4879de..65eeebea 100644
--- a/vendor/github.com/go-openapi/validate/schema_option.go
+++ b/vendor/github.com/go-openapi/validate/schema_option.go
@@ -18,6 +18,9 @@ package validate
type SchemaValidatorOptions struct {
EnableObjectArrayTypeCheck bool
EnableArrayMustHaveItemsCheck bool
+ recycleValidators bool
+ recycleResult bool
+ skipSchemataResult bool
}
// Option sets optional rules for schema validation
@@ -45,10 +48,36 @@ func SwaggerSchema(enable bool) Option {
}
}
-// Options returns current options
+// WithRecycleValidators reduces memory allocations by recycling validators
+// from a memory pool, making each validator available for a single use of
+// Validate() only.
+//
+// When a validator is recycled, the caller MUST NOT call Validate() more than once.
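+//
+// A minimal usage sketch (sch is an assumed, pre-parsed *spec.Schema and data
+// an arbitrary payload to validate):
+//
+//	v := NewSchemaValidator(sch, nil, "", strfmt.Default, WithRecycleValidators(true))
+//	res := v.Validate(data) // v is redeemed during this call and must not be reused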
+func WithRecycleValidators(enable bool) Option {
+ return func(svo *SchemaValidatorOptions) {
+ svo.recycleValidators = enable
+ }
+}
+
+func withRecycleResults(enable bool) Option {
+ return func(svo *SchemaValidatorOptions) {
+ svo.recycleResult = enable
+ }
+}
+
+// WithSkipSchemataResult skips the deep audit payload stored in validation Result
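+//
+// This is useful when only the validation outcome matters and retaining
+// per-field and per-item schema references in the Result is unnecessary.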
+func WithSkipSchemataResult(enable bool) Option {
+ return func(svo *SchemaValidatorOptions) {
+ svo.skipSchemataResult = enable
+ }
+}
+
+// Options returns the current set of options
func (svo SchemaValidatorOptions) Options() []Option {
return []Option{
EnableObjectArrayTypeCheck(svo.EnableObjectArrayTypeCheck),
EnableArrayMustHaveItemsCheck(svo.EnableArrayMustHaveItemsCheck),
+ WithRecycleValidators(svo.recycleValidators),
+ withRecycleResults(svo.recycleResult),
+ WithSkipSchemataResult(svo.skipSchemataResult),
}
}
diff --git a/vendor/github.com/go-openapi/validate/schema_props.go b/vendor/github.com/go-openapi/validate/schema_props.go
index 9bac3d29..1ca37924 100644
--- a/vendor/github.com/go-openapi/validate/schema_props.go
+++ b/vendor/github.com/go-openapi/validate/schema_props.go
@@ -30,211 +30,327 @@ type schemaPropsValidator struct {
AnyOf []spec.Schema
Not *spec.Schema
Dependencies spec.Dependencies
- anyOfValidators []SchemaValidator
- allOfValidators []SchemaValidator
- oneOfValidators []SchemaValidator
+ anyOfValidators []*SchemaValidator
+ allOfValidators []*SchemaValidator
+ oneOfValidators []*SchemaValidator
notValidator *SchemaValidator
Root interface{}
KnownFormats strfmt.Registry
- Options SchemaValidatorOptions
+ Options *SchemaValidatorOptions
}
func (s *schemaPropsValidator) SetPath(path string) {
s.Path = path
}
-func newSchemaPropsValidator(path string, in string, allOf, oneOf, anyOf []spec.Schema, not *spec.Schema, deps spec.Dependencies, root interface{}, formats strfmt.Registry, options ...Option) *schemaPropsValidator {
- anyValidators := make([]SchemaValidator, 0, len(anyOf))
- for _, v := range anyOf {
- v := v
- anyValidators = append(anyValidators, *NewSchemaValidator(&v, root, path, formats, options...))
+func newSchemaPropsValidator(
+ path string, in string, allOf, oneOf, anyOf []spec.Schema, not *spec.Schema, deps spec.Dependencies, root interface{}, formats strfmt.Registry,
+ opts *SchemaValidatorOptions) *schemaPropsValidator {
+ if opts == nil {
+ opts = new(SchemaValidatorOptions)
}
- allValidators := make([]SchemaValidator, 0, len(allOf))
- for _, v := range allOf {
- v := v
- allValidators = append(allValidators, *NewSchemaValidator(&v, root, path, formats, options...))
+
+ anyValidators := make([]*SchemaValidator, 0, len(anyOf))
+ for i := range anyOf {
+ anyValidators = append(anyValidators, newSchemaValidator(&anyOf[i], root, path, formats, opts))
}
- oneValidators := make([]SchemaValidator, 0, len(oneOf))
- for _, v := range oneOf {
- v := v
- oneValidators = append(oneValidators, *NewSchemaValidator(&v, root, path, formats, options...))
+ allValidators := make([]*SchemaValidator, 0, len(allOf))
+ for i := range allOf {
+ allValidators = append(allValidators, newSchemaValidator(&allOf[i], root, path, formats, opts))
+ }
+ oneValidators := make([]*SchemaValidator, 0, len(oneOf))
+ for i := range oneOf {
+ oneValidators = append(oneValidators, newSchemaValidator(&oneOf[i], root, path, formats, opts))
}
var notValidator *SchemaValidator
if not != nil {
- notValidator = NewSchemaValidator(not, root, path, formats, options...)
+ notValidator = newSchemaValidator(not, root, path, formats, opts)
}
- schOptions := &SchemaValidatorOptions{}
- for _, o := range options {
- o(schOptions)
- }
- return &schemaPropsValidator{
- Path: path,
- In: in,
- AllOf: allOf,
- OneOf: oneOf,
- AnyOf: anyOf,
- Not: not,
- Dependencies: deps,
- anyOfValidators: anyValidators,
- allOfValidators: allValidators,
- oneOfValidators: oneValidators,
- notValidator: notValidator,
- Root: root,
- KnownFormats: formats,
- Options: *schOptions,
+ var s *schemaPropsValidator
+ if opts.recycleValidators {
+ s = pools.poolOfSchemaPropsValidators.BorrowValidator()
+ } else {
+ s = new(schemaPropsValidator)
}
+
+ s.Path = path
+ s.In = in
+ s.AllOf = allOf
+ s.OneOf = oneOf
+ s.AnyOf = anyOf
+ s.Not = not
+ s.Dependencies = deps
+ s.anyOfValidators = anyValidators
+ s.allOfValidators = allValidators
+ s.oneOfValidators = oneValidators
+ s.notValidator = notValidator
+ s.Root = root
+ s.KnownFormats = formats
+ s.Options = opts
+
+ return s
}
-func (s *schemaPropsValidator) Applies(source interface{}, kind reflect.Kind) bool {
- r := reflect.TypeOf(source) == specSchemaType
- debugLog("schema props validator for %q applies %t for %T (kind: %v)\n", s.Path, r, source, kind)
- return r
+func (s *schemaPropsValidator) Applies(source interface{}, _ reflect.Kind) bool {
+ _, isSchema := source.(*spec.Schema)
+ return isSchema
}
func (s *schemaPropsValidator) Validate(data interface{}) *Result {
- mainResult := new(Result)
+ var mainResult *Result
+ if s.Options.recycleResult {
+ mainResult = pools.poolOfResults.BorrowResult()
+ } else {
+ mainResult = new(Result)
+ }
// Intermediary error results
// IMPORTANT! messages from underlying validators
- keepResultAnyOf := new(Result)
- keepResultOneOf := new(Result)
- keepResultAllOf := new(Result)
+ var keepResultAnyOf, keepResultOneOf, keepResultAllOf *Result
+
+ if s.Options.recycleValidators {
+ defer func() {
+ s.redeemChildren()
+ s.redeem()
+
+ // results are redeemed when merged
+ }()
+ }
- // Validates at least one in anyOf schemas
- var firstSuccess *Result
if len(s.anyOfValidators) > 0 {
- var bestFailures *Result
- succeededOnce := false
- for _, anyOfSchema := range s.anyOfValidators {
- result := anyOfSchema.Validate(data)
- // We keep inner IMPORTANT! errors no matter what MatchCount tells us
- keepResultAnyOf.Merge(result.keepRelevantErrors())
- if result.IsValid() {
- bestFailures = nil
- succeededOnce = true
- if firstSuccess == nil {
- firstSuccess = result
- }
- keepResultAnyOf = new(Result)
- break
- }
- // MatchCount is used to select errors from the schema with most positive checks
- if bestFailures == nil || result.MatchCount > bestFailures.MatchCount {
- bestFailures = result
- }
- }
-
- if !succeededOnce {
- mainResult.AddErrors(mustValidateAtLeastOneSchemaMsg(s.Path))
- }
- if bestFailures != nil {
- mainResult.Merge(bestFailures)
- } else if firstSuccess != nil {
- mainResult.Merge(firstSuccess)
- }
+ keepResultAnyOf = pools.poolOfResults.BorrowResult()
+ s.validateAnyOf(data, mainResult, keepResultAnyOf)
}
- // Validates exactly one in oneOf schemas
if len(s.oneOfValidators) > 0 {
- var bestFailures *Result
- var firstSuccess *Result
- validated := 0
-
- for _, oneOfSchema := range s.oneOfValidators {
- result := oneOfSchema.Validate(data)
- // We keep inner IMPORTANT! errors no matter what MatchCount tells us
- keepResultOneOf.Merge(result.keepRelevantErrors())
- if result.IsValid() {
- validated++
- bestFailures = nil
- if firstSuccess == nil {
- firstSuccess = result
- }
- keepResultOneOf = new(Result)
- continue
- }
- // MatchCount is used to select errors from the schema with most positive checks
- if validated == 0 && (bestFailures == nil || result.MatchCount > bestFailures.MatchCount) {
- bestFailures = result
- }
- }
-
- if validated != 1 {
- var additionalMsg string
- if validated == 0 {
- additionalMsg = "Found none valid"
- } else {
- additionalMsg = fmt.Sprintf("Found %d valid alternatives", validated)
- }
-
- mainResult.AddErrors(mustValidateOnlyOneSchemaMsg(s.Path, additionalMsg))
- if bestFailures != nil {
- mainResult.Merge(bestFailures)
- }
- } else if firstSuccess != nil {
- mainResult.Merge(firstSuccess)
- }
+ keepResultOneOf = pools.poolOfResults.BorrowResult()
+ s.validateOneOf(data, mainResult, keepResultOneOf)
}
- // Validates all of allOf schemas
if len(s.allOfValidators) > 0 {
- validated := 0
-
- for _, allOfSchema := range s.allOfValidators {
- result := allOfSchema.Validate(data)
- // We keep inner IMPORTANT! errors no matter what MatchCount tells us
- keepResultAllOf.Merge(result.keepRelevantErrors())
- // keepResultAllOf.Merge(result)
- if result.IsValid() {
- validated++
- }
- mainResult.Merge(result)
- }
-
- if validated != len(s.allOfValidators) {
- additionalMsg := ""
- if validated == 0 {
- additionalMsg = ". None validated"
- }
-
- mainResult.AddErrors(mustValidateAllSchemasMsg(s.Path, additionalMsg))
- }
+ keepResultAllOf = pools.poolOfResults.BorrowResult()
+ s.validateAllOf(data, mainResult, keepResultAllOf)
}
if s.notValidator != nil {
- result := s.notValidator.Validate(data)
- // We keep inner IMPORTANT! errors no matter what MatchCount tells us
- if result.IsValid() {
- mainResult.AddErrors(mustNotValidatechemaMsg(s.Path))
- }
+ s.validateNot(data, mainResult)
}
if s.Dependencies != nil && len(s.Dependencies) > 0 && reflect.TypeOf(data).Kind() == reflect.Map {
- val := data.(map[string]interface{})
- for key := range val {
- if dep, ok := s.Dependencies[key]; ok {
-
- if dep.Schema != nil {
- mainResult.Merge(NewSchemaValidator(dep.Schema, s.Root, s.Path+"."+key, s.KnownFormats, s.Options.Options()...).Validate(data))
- continue
- }
-
- if len(dep.Property) > 0 {
- for _, depKey := range dep.Property {
- if _, ok := val[depKey]; !ok {
- mainResult.AddErrors(hasADependencyMsg(s.Path, depKey))
- }
- }
- }
- }
- }
+ s.validateDependencies(data, mainResult)
}
mainResult.Inc()
+
// In the end we retain best failures for schema validation
// plus, if any, composite errors which may explain special cases (tagged as IMPORTANT!).
return mainResult.Merge(keepResultAllOf, keepResultOneOf, keepResultAnyOf)
}
+
+func (s *schemaPropsValidator) validateAnyOf(data interface{}, mainResult, keepResultAnyOf *Result) {
+ // Validates at least one in anyOf schemas
+ var bestFailures *Result
+
+ for i, anyOfSchema := range s.anyOfValidators {
+ result := anyOfSchema.Validate(data)
+ if s.Options.recycleValidators {
+ s.anyOfValidators[i] = nil
+ }
+ // We keep inner IMPORTANT! errors no matter what MatchCount tells us
+ keepResultAnyOf.Merge(result.keepRelevantErrors()) // merges (and redeems) a new instance of Result
+
+ if result.IsValid() {
+ if bestFailures != nil && bestFailures.wantsRedeemOnMerge {
+ pools.poolOfResults.RedeemResult(bestFailures)
+ }
+
+ _ = keepResultAnyOf.cleared()
+ mainResult.Merge(result)
+
+ return
+ }
+
+ // MatchCount is used to select errors from the schema with most positive checks
+ if bestFailures == nil || result.MatchCount > bestFailures.MatchCount {
+ if bestFailures != nil && bestFailures.wantsRedeemOnMerge {
+ pools.poolOfResults.RedeemResult(bestFailures)
+ }
+ bestFailures = result
+
+ continue
+ }
+
+ if result.wantsRedeemOnMerge {
+ pools.poolOfResults.RedeemResult(result) // this result is ditched
+ }
+ }
+
+ mainResult.AddErrors(mustValidateAtLeastOneSchemaMsg(s.Path))
+ mainResult.Merge(bestFailures)
+}
+
+func (s *schemaPropsValidator) validateOneOf(data interface{}, mainResult, keepResultOneOf *Result) {
+ // Validates exactly one in oneOf schemas
+ var (
+ firstSuccess, bestFailures *Result
+ validated int
+ )
+
+ for i, oneOfSchema := range s.oneOfValidators {
+ result := oneOfSchema.Validate(data)
+ if s.Options.recycleValidators {
+ s.oneOfValidators[i] = nil
+ }
+
+ // We keep inner IMPORTANT! errors no matter what MatchCount tells us
+ keepResultOneOf.Merge(result.keepRelevantErrors()) // merges (and redeems) a new instance of Result
+
+ if result.IsValid() {
+ validated++
+ _ = keepResultOneOf.cleared()
+
+ if firstSuccess == nil {
+ firstSuccess = result
+ } else if result.wantsRedeemOnMerge {
+ pools.poolOfResults.RedeemResult(result) // this result is ditched
+ }
+
+ continue
+ }
+
+ // MatchCount is used to select errors from the schema with most positive checks
+ if validated == 0 && (bestFailures == nil || result.MatchCount > bestFailures.MatchCount) {
+ if bestFailures != nil && bestFailures.wantsRedeemOnMerge {
+ pools.poolOfResults.RedeemResult(bestFailures)
+ }
+ bestFailures = result
+ } else if result.wantsRedeemOnMerge {
+ pools.poolOfResults.RedeemResult(result) // this result is ditched
+ }
+ }
+
+ switch validated {
+ case 0:
+ mainResult.AddErrors(mustValidateOnlyOneSchemaMsg(s.Path, "Found none valid"))
+ mainResult.Merge(bestFailures)
+ // firstSuccess is necessarily nil here
+ case 1:
+ mainResult.Merge(firstSuccess)
+ if bestFailures != nil && bestFailures.wantsRedeemOnMerge {
+ pools.poolOfResults.RedeemResult(bestFailures)
+ }
+ default:
+ mainResult.AddErrors(mustValidateOnlyOneSchemaMsg(s.Path, fmt.Sprintf("Found %d valid alternatives", validated)))
+ mainResult.Merge(bestFailures)
+ if firstSuccess != nil && firstSuccess.wantsRedeemOnMerge {
+ pools.poolOfResults.RedeemResult(firstSuccess)
+ }
+ }
+}
+
+func (s *schemaPropsValidator) validateAllOf(data interface{}, mainResult, keepResultAllOf *Result) {
+ // Validates all of allOf schemas
+ var validated int
+
+ for i, allOfSchema := range s.allOfValidators {
+ result := allOfSchema.Validate(data)
+ if s.Options.recycleValidators {
+ s.allOfValidators[i] = nil
+ }
+ // We keep inner IMPORTANT! errors no matter what MatchCount tells us
+ keepResultAllOf.Merge(result.keepRelevantErrors())
+ if result.IsValid() {
+ validated++
+ }
+ mainResult.Merge(result)
+ }
+
+ switch validated {
+ case 0:
+ mainResult.AddErrors(mustValidateAllSchemasMsg(s.Path, ". None validated"))
+ case len(s.allOfValidators):
+ default:
+ mainResult.AddErrors(mustValidateAllSchemasMsg(s.Path, ""))
+ }
+}
+
+func (s *schemaPropsValidator) validateNot(data interface{}, mainResult *Result) {
+ result := s.notValidator.Validate(data)
+ if s.Options.recycleValidators {
+ s.notValidator = nil
+ }
+ // We keep inner IMPORTANT! errors no matter what MatchCount tells us
+ if result.IsValid() {
+ mainResult.AddErrors(mustNotValidatechemaMsg(s.Path))
+ }
+ if result.wantsRedeemOnMerge {
+ pools.poolOfResults.RedeemResult(result) // this result is ditched
+ }
+}
+
+func (s *schemaPropsValidator) validateDependencies(data interface{}, mainResult *Result) {
+ val := data.(map[string]interface{})
+ for key := range val {
+ dep, ok := s.Dependencies[key]
+ if !ok {
+ continue
+ }
+
+ if dep.Schema != nil {
+ mainResult.Merge(
+ newSchemaValidator(dep.Schema, s.Root, s.Path+"."+key, s.KnownFormats, s.Options).Validate(data),
+ )
+ continue
+ }
+
+ if len(dep.Property) > 0 {
+ for _, depKey := range dep.Property {
+ if _, ok := val[depKey]; !ok {
+ mainResult.AddErrors(hasADependencyMsg(s.Path, depKey))
+ }
+ }
+ }
+ }
+}
+
+func (s *schemaPropsValidator) redeem() {
+ pools.poolOfSchemaPropsValidators.RedeemValidator(s)
+}
+
+func (s *schemaPropsValidator) redeemChildren() {
+ for _, v := range s.anyOfValidators {
+ if v == nil {
+ continue
+ }
+ v.redeemChildren()
+ v.redeem()
+ }
+ s.anyOfValidators = nil
+
+ for _, v := range s.allOfValidators {
+ if v == nil {
+ continue
+ }
+ v.redeemChildren()
+ v.redeem()
+ }
+ s.allOfValidators = nil
+
+ for _, v := range s.oneOfValidators {
+ if v == nil {
+ continue
+ }
+ v.redeemChildren()
+ v.redeem()
+ }
+ s.oneOfValidators = nil
+
+ if s.notValidator != nil {
+ s.notValidator.redeemChildren()
+ s.notValidator.redeem()
+ s.notValidator = nil
+ }
+}
diff --git a/vendor/github.com/go-openapi/validate/slice_validator.go b/vendor/github.com/go-openapi/validate/slice_validator.go
index aa429f51..13bb0208 100644
--- a/vendor/github.com/go-openapi/validate/slice_validator.go
+++ b/vendor/github.com/go-openapi/validate/slice_validator.go
@@ -32,7 +32,36 @@ type schemaSliceValidator struct {
Items *spec.SchemaOrArray
Root interface{}
KnownFormats strfmt.Registry
- Options SchemaValidatorOptions
+ Options *SchemaValidatorOptions
+}
+
+func newSliceValidator(path, in string,
+ maxItems, minItems *int64, uniqueItems bool,
+ additionalItems *spec.SchemaOrBool, items *spec.SchemaOrArray,
+ root interface{}, formats strfmt.Registry, opts *SchemaValidatorOptions) *schemaSliceValidator {
+ if opts == nil {
+ opts = new(SchemaValidatorOptions)
+ }
+
+ var v *schemaSliceValidator
+ if opts.recycleValidators {
+ v = pools.poolOfSliceValidators.BorrowValidator()
+ } else {
+ v = new(schemaSliceValidator)
+ }
+
+ v.Path = path
+ v.In = in
+ v.MaxItems = maxItems
+ v.MinItems = minItems
+ v.UniqueItems = uniqueItems
+ v.AdditionalItems = additionalItems
+ v.Items = items
+ v.Root = root
+ v.KnownFormats = formats
+ v.Options = opts
+
+ return v
}
func (s *schemaSliceValidator) SetPath(path string) {
@@ -46,7 +75,18 @@ func (s *schemaSliceValidator) Applies(source interface{}, kind reflect.Kind) bo
}
func (s *schemaSliceValidator) Validate(data interface{}) *Result {
- result := new(Result)
+ if s.Options.recycleValidators {
+ defer func() {
+ s.redeem()
+ }()
+ }
+
+ var result *Result
+ if s.Options.recycleResult {
+ result = pools.poolOfResults.BorrowResult()
+ } else {
+ result = new(Result)
+ }
if data == nil {
return result
}
@@ -54,8 +94,8 @@ func (s *schemaSliceValidator) Validate(data interface{}) *Result {
size := val.Len()
if s.Items != nil && s.Items.Schema != nil {
- validator := NewSchemaValidator(s.Items.Schema, s.Root, s.Path, s.KnownFormats, s.Options.Options()...)
for i := 0; i < size; i++ {
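+ // a fresh validator per item: when recycling is enabled, a validator is redeemed after a single Validate call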
+ validator := newSchemaValidator(s.Items.Schema, s.Root, s.Path, s.KnownFormats, s.Options)
validator.SetPath(fmt.Sprintf("%s.%d", s.Path, i))
value := val.Index(i)
result.mergeForSlice(val, i, validator.Validate(value.Interface()))
@@ -66,10 +106,11 @@ func (s *schemaSliceValidator) Validate(data interface{}) *Result {
if s.Items != nil && len(s.Items.Schemas) > 0 {
itemsSize = len(s.Items.Schemas)
for i := 0; i < itemsSize; i++ {
- validator := NewSchemaValidator(&s.Items.Schemas[i], s.Root, fmt.Sprintf("%s.%d", s.Path, i), s.KnownFormats, s.Options.Options()...)
- if val.Len() <= i {
+ if size <= i {
break
}
+
+ validator := newSchemaValidator(&s.Items.Schemas[i], s.Root, fmt.Sprintf("%s.%d", s.Path, i), s.KnownFormats, s.Options)
result.mergeForSlice(val, i, validator.Validate(val.Index(i).Interface()))
}
}
@@ -79,7 +120,7 @@ func (s *schemaSliceValidator) Validate(data interface{}) *Result {
}
if s.AdditionalItems.Schema != nil {
for i := itemsSize; i < size-itemsSize+1; i++ {
- validator := NewSchemaValidator(s.AdditionalItems.Schema, s.Root, fmt.Sprintf("%s.%d", s.Path, i), s.KnownFormats, s.Options.Options()...)
+ validator := newSchemaValidator(s.AdditionalItems.Schema, s.Root, fmt.Sprintf("%s.%d", s.Path, i), s.KnownFormats, s.Options)
result.mergeForSlice(val, i, validator.Validate(val.Index(i).Interface()))
}
}
@@ -103,3 +144,7 @@ func (s *schemaSliceValidator) Validate(data interface{}) *Result {
result.Inc()
return result
}
+
+func (s *schemaSliceValidator) redeem() {
+ pools.poolOfSliceValidators.RedeemValidator(s)
+}
diff --git a/vendor/github.com/go-openapi/validate/spec.go b/vendor/github.com/go-openapi/validate/spec.go
index dff01f00..96545256 100644
--- a/vendor/github.com/go-openapi/validate/spec.go
+++ b/vendor/github.com/go-openapi/validate/spec.go
@@ -15,6 +15,8 @@
package validate
import (
+ "bytes"
+ "encoding/gob"
"encoding/json"
"fmt"
"sort"
@@ -26,23 +28,23 @@ import (
"github.com/go-openapi/loads"
"github.com/go-openapi/spec"
"github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
)
// Spec validates an OpenAPI 2.0 specification document.
//
// Returns an error flattening in a single standard error, all validation messages.
//
-// - TODO: $ref should not have siblings
-// - TODO: make sure documentation reflects all checks and warnings
-// - TODO: check on discriminators
-// - TODO: explicit message on unsupported keywords (better than "forbidden property"...)
-// - TODO: full list of unresolved refs
-// - TODO: validate numeric constraints (issue#581): this should be handled like defaults and examples
-// - TODO: option to determine if we validate for go-swagger or in a more general context
-// - TODO: check on required properties to support anyOf, allOf, oneOf
+// - TODO: $ref should not have siblings
+// - TODO: make sure documentation reflects all checks and warnings
+// - TODO: check on discriminators
+// - TODO: explicit message on unsupported keywords (better than "forbidden property"...)
+// - TODO: full list of unresolved refs
+// - TODO: validate numeric constraints (issue#581): this should be handled like defaults and examples
+// - TODO: option to determine if we validate for go-swagger or in a more general context
+// - TODO: check on required properties to support anyOf, allOf, oneOf
//
// NOTE: SecurityScopes are maps: no need to check uniqueness
-//
func Spec(doc *loads.Document, formats strfmt.Registry) error {
errs, _ /*warns*/ := NewSpecValidator(doc.Schema(), formats).Validate(doc)
if errs.HasErrors() {
@@ -53,25 +55,38 @@ func Spec(doc *loads.Document, formats strfmt.Registry) error {
// SpecValidator validates a swagger 2.0 spec
type SpecValidator struct {
- schema *spec.Schema // swagger 2.0 schema
- spec *loads.Document
- analyzer *analysis.Spec
- expanded *loads.Document
- KnownFormats strfmt.Registry
- Options Opts // validation options
+ schema *spec.Schema // swagger 2.0 schema
+ spec *loads.Document
+ analyzer *analysis.Spec
+ expanded *loads.Document
+ KnownFormats strfmt.Registry
+ Options Opts // validation options
+ schemaOptions *SchemaValidatorOptions
}
// NewSpecValidator creates a new swagger spec validator instance
func NewSpecValidator(schema *spec.Schema, formats strfmt.Registry) *SpecValidator {
+ // schema options that apply to all called validators
+ schemaOptions := new(SchemaValidatorOptions)
+ for _, o := range []Option{
+ SwaggerSchema(true),
+ WithRecycleValidators(true),
+ // withRecycleResults(true),
+ } {
+ o(schemaOptions)
+ }
+
return &SpecValidator{
- schema: schema,
- KnownFormats: formats,
- Options: defaultOpts,
+ schema: schema,
+ KnownFormats: formats,
+ Options: defaultOpts,
+ schemaOptions: schemaOptions,
}
}
// Validate validates the swagger spec
func (s *SpecValidator) Validate(data interface{}) (*Result, *Result) {
+ s.schemaOptions.skipSchemataResult = s.Options.SkipSchemataResult
var sd *loads.Document
errs, warnings := new(Result), new(Result)
@@ -85,11 +100,8 @@ func (s *SpecValidator) Validate(data interface{}) (*Result, *Result) {
s.spec = sd
s.analyzer = analysis.New(sd.Spec())
- // Swagger schema validator
- schv := NewSchemaValidator(s.schema, nil, "", s.KnownFormats, SwaggerSchema(true))
- var obj interface{}
-
// Raw spec unmarshalling errors
+ var obj interface{}
if err := json.Unmarshal(sd.Raw(), &obj); err != nil {
// NOTE: under normal conditions, the *load.Document has been already unmarshalled
// So this one is just a paranoid check on the behavior of the spec package
@@ -103,6 +115,8 @@ func (s *SpecValidator) Validate(data interface{}) (*Result, *Result) {
warnings.AddErrors(errs.Warnings...)
}()
+ // Swagger schema validator
+ schv := newSchemaValidator(s.schema, nil, "", s.KnownFormats, s.schemaOptions)
errs.Merge(schv.Validate(obj)) // error -
// There may be a point in continuing to try and determine more accurate errors
if !s.Options.ContinueOnErrors && errs.HasErrors() {
@@ -130,13 +144,13 @@ func (s *SpecValidator) Validate(data interface{}) (*Result, *Result) {
}
// Values provided as default MUST validate their schema
- df := &defaultValidator{SpecValidator: s}
+ df := &defaultValidator{SpecValidator: s, schemaOptions: s.schemaOptions}
errs.Merge(df.Validate())
// Values provided as examples MUST validate their schema
// Value provided as examples in a response without schema generate a warning
// Known limitations: examples in responses for mime type not application/json are ignored (warning)
- ex := &exampleValidator{SpecValidator: s}
+ ex := &exampleValidator{SpecValidator: s, schemaOptions: s.schemaOptions}
errs.Merge(ex.Validate())
errs.Merge(s.validateNonEmptyPathParamNames())
@@ -148,22 +162,27 @@ func (s *SpecValidator) Validate(data interface{}) (*Result, *Result) {
}
func (s *SpecValidator) validateNonEmptyPathParamNames() *Result {
- res := new(Result)
+ res := pools.poolOfResults.BorrowResult()
if s.spec.Spec().Paths == nil {
// There is no Paths object: error
res.AddErrors(noValidPathMsg())
- } else {
- if s.spec.Spec().Paths.Paths == nil {
- // Paths may be empty: warning
- res.AddWarnings(noValidPathMsg())
- } else {
- for k := range s.spec.Spec().Paths.Paths {
- if strings.Contains(k, "{}") {
- res.AddErrors(emptyPathParameterMsg(k))
- }
- }
+
+ return res
+ }
+
+ if s.spec.Spec().Paths.Paths == nil {
+ // Paths may be empty: warning
+ res.AddWarnings(noValidPathMsg())
+
+ return res
+ }
+
+ for k := range s.spec.Spec().Paths.Paths {
+ if strings.Contains(k, "{}") {
+ res.AddErrors(emptyPathParameterMsg(k))
}
}
+
return res
}
@@ -177,7 +196,7 @@ func (s *SpecValidator) validateDuplicateOperationIDs() *Result {
// fallback on possible incomplete picture because of previous errors
analyzer = s.analyzer
}
- res := new(Result)
+ res := pools.poolOfResults.BorrowResult()
known := make(map[string]int)
for _, v := range analyzer.OperationIDs() {
if v != "" {
@@ -199,7 +218,7 @@ type dupProp struct {
func (s *SpecValidator) validateDuplicatePropertyNames() *Result {
// definition can't declare a property that's already defined by one of its ancestors
- res := new(Result)
+ res := pools.poolOfResults.BorrowResult()
for k, sch := range s.spec.Spec().Definitions {
if len(sch.AllOf) == 0 {
continue
@@ -248,7 +267,7 @@ func (s *SpecValidator) validateSchemaPropertyNames(nm string, sch spec.Schema,
schn := nm
schc := &sch
- res := new(Result)
+ res := pools.poolOfResults.BorrowResult()
for schc.Ref.String() != "" {
// gather property names
@@ -285,7 +304,7 @@ func (s *SpecValidator) validateSchemaPropertyNames(nm string, sch spec.Schema,
}
func (s *SpecValidator) validateCircularAncestry(nm string, sch spec.Schema, knowns map[string]struct{}) ([]string, *Result) {
- res := new(Result)
+ res := pools.poolOfResults.BorrowResult()
if sch.Ref.String() == "" && len(sch.AllOf) == 0 { // Safeguard. We should not be able to actually get there
return nil, res
@@ -335,7 +354,7 @@ func (s *SpecValidator) validateCircularAncestry(nm string, sch spec.Schema, kno
func (s *SpecValidator) validateItems() *Result {
// validate parameter, items, schema and response objects for presence of item if type is array
- res := new(Result)
+ res := pools.poolOfResults.BorrowResult()
for method, pi := range s.analyzer.Operations() {
for path, op := range pi {
@@ -394,7 +413,7 @@ func (s *SpecValidator) validateItems() *Result {
// Verifies constraints on array type
func (s *SpecValidator) validateSchemaItems(schema spec.Schema, prefix, opID string) *Result {
- res := new(Result)
+ res := pools.poolOfResults.BorrowResult()
if !schema.Type.Contains(arrayType) {
return res
}
@@ -418,7 +437,7 @@ func (s *SpecValidator) validateSchemaItems(schema spec.Schema, prefix, opID str
func (s *SpecValidator) validatePathParamPresence(path string, fromPath, fromOperation []string) *Result {
// Each defined operation path parameters must correspond to a named element in the API's path pattern.
// (For example, you cannot have a path parameter named id for the following path /pets/{petId} but you must have a path parameter named petId.)
- res := new(Result)
+ res := pools.poolOfResults.BorrowResult()
for _, l := range fromPath {
var matched bool
for _, r := range fromOperation {
@@ -456,7 +475,6 @@ func (s *SpecValidator) validateReferenced() *Result {
return &res
}
-// nolint: dupl
func (s *SpecValidator) validateReferencedParameters() *Result {
// Each referenceable definition should have references.
params := s.spec.Spec().Parameters
@@ -475,14 +493,13 @@ func (s *SpecValidator) validateReferencedParameters() *Result {
if len(expected) == 0 {
return nil
}
- result := new(Result)
+ result := pools.poolOfResults.BorrowResult()
for k := range expected {
result.AddWarnings(unusedParamMsg(k))
}
return result
}
-// nolint: dupl
func (s *SpecValidator) validateReferencedResponses() *Result {
// Each referenceable definition should have references.
responses := s.spec.Spec().Responses
@@ -501,14 +518,13 @@ func (s *SpecValidator) validateReferencedResponses() *Result {
if len(expected) == 0 {
return nil
}
- result := new(Result)
+ result := pools.poolOfResults.BorrowResult()
for k := range expected {
result.AddWarnings(unusedResponseMsg(k))
}
return result
}
-// nolint: dupl
func (s *SpecValidator) validateReferencedDefinitions() *Result {
// Each referenceable definition must have references.
defs := s.spec.Spec().Definitions
@@ -537,7 +553,7 @@ func (s *SpecValidator) validateReferencedDefinitions() *Result {
func (s *SpecValidator) validateRequiredDefinitions() *Result {
// Each property listed in the required array must be defined in the properties of the model
- res := new(Result)
+ res := pools.poolOfResults.BorrowResult()
DEFINITIONS:
for d, schema := range s.spec.Spec().Definitions {
@@ -556,7 +572,7 @@ DEFINITIONS:
func (s *SpecValidator) validateRequiredProperties(path, in string, v *spec.Schema) *Result {
// Takes care of recursive property definitions, which may be nested in additionalProperties schemas
- res := new(Result)
+ res := pools.poolOfResults.BorrowResult()
propertyMatch := false
patternMatch := false
additionalPropertiesMatch := false
@@ -615,40 +631,42 @@ func (s *SpecValidator) validateRequiredProperties(path, in string, v *spec.Sche
func (s *SpecValidator) validateParameters() *Result {
// - for each method, path is unique, regardless of path parameters
// e.g. GET:/petstore/{id}, GET:/petstore/{pet}, GET:/petstore are
- // considered duplicate paths
+ // considered duplicate paths if StrictPathParamUniqueness is enabled.
// - each parameter should have a unique `name` and `type` combination
// - each operation should have only 1 parameter of type body
// - there must be at most 1 parameter in body
// - parameters with pattern property must specify valid patterns
// - $ref in parameters must resolve
// - path param must be required
- res := new(Result)
+ res := pools.poolOfResults.BorrowResult()
rexGarbledPathSegment := mustCompileRegexp(`.*[{}\s]+.*`)
for method, pi := range s.expandedAnalyzer().Operations() {
methodPaths := make(map[string]map[string]string)
for path, op := range pi {
- pathToAdd := pathHelp.stripParametersInPath(path)
+ if s.Options.StrictPathParamUniqueness {
+ pathToAdd := pathHelp.stripParametersInPath(path)
- // Warn on garbled path afer param stripping
- if rexGarbledPathSegment.MatchString(pathToAdd) {
- res.AddWarnings(pathStrippedParamGarbledMsg(pathToAdd))
- }
+ // Warn on garbled path after param stripping
+ if rexGarbledPathSegment.MatchString(pathToAdd) {
+ res.AddWarnings(pathStrippedParamGarbledMsg(pathToAdd))
+ }
- // Check uniqueness of stripped paths
- if _, found := methodPaths[method][pathToAdd]; found {
+ // Check uniqueness of stripped paths
+ if _, found := methodPaths[method][pathToAdd]; found {
- // Sort names for stable, testable output
- if strings.Compare(path, methodPaths[method][pathToAdd]) < 0 {
- res.AddErrors(pathOverlapMsg(path, methodPaths[method][pathToAdd]))
+ // Sort names for stable, testable output
+ if strings.Compare(path, methodPaths[method][pathToAdd]) < 0 {
+ res.AddErrors(pathOverlapMsg(path, methodPaths[method][pathToAdd]))
+ } else {
+ res.AddErrors(pathOverlapMsg(methodPaths[method][pathToAdd], path))
+ }
} else {
- res.AddErrors(pathOverlapMsg(methodPaths[method][pathToAdd], path))
- }
- } else {
- if _, found := methodPaths[method]; !found {
- methodPaths[method] = map[string]string{}
- }
- methodPaths[method][pathToAdd] = path // Original non stripped path
+ if _, found := methodPaths[method]; !found {
+ methodPaths[method] = map[string]string{}
+ }
+ methodPaths[method][pathToAdd] = path // Original non stripped path
+ }
}
var bodyParams []string
@@ -659,7 +677,23 @@ func (s *SpecValidator) validateParameters() *Result {
// TODO: should be done after param expansion
res.Merge(s.checkUniqueParams(path, method, op))
+ // pick the root schema from the swagger specification which describes a parameter
+ origSchema, ok := s.schema.Definitions["parameter"]
+ if !ok {
+ panic("unexpected swagger schema: missing #/definitions/parameter")
+ }
+ // clone it once to avoid expanding a global schema (e.g. swagger spec)
+ paramSchema, err := deepCloneSchema(origSchema)
+ if err != nil {
+ panic(fmt.Errorf("can't clone schema: %v", err))
+ }
+
for _, pr := range paramHelp.safeExpandedParamsFor(path, method, op.ID, res, s) {
+ // An expanded parameter must validate the Parameter schema (an unexpanded $ref always passes high-level schema validation)
+ schv := newSchemaValidator(¶mSchema, s.schema, fmt.Sprintf("%s.%s.parameters.%s", path, method, pr.Name), s.KnownFormats, s.schemaOptions)
+ obj := swag.ToDynamicJSON(pr)
+ res.Merge(schv.Validate(obj))
+
// Validate pattern regexp for parameters with a Pattern property
if _, err := compileRegexp(pr.Pattern); err != nil {
res.AddErrors(invalidPatternInParamMsg(op.ID, pr.Name, pr.Pattern))
@@ -741,7 +775,7 @@ func (s *SpecValidator) validateParameters() *Result {
func (s *SpecValidator) validateReferencesValid() *Result {
// each reference must point to a valid object
- res := new(Result)
+ res := pools.poolOfResults.BorrowResult()
for _, r := range s.analyzer.AllRefs() {
if !r.IsValidURI(s.spec.SpecFilePath()) { // Safeguard - spec should always yield a valid URI
res.AddErrors(invalidRefMsg(r.String()))
@@ -767,7 +801,7 @@ func (s *SpecValidator) checkUniqueParams(path, method string, op *spec.Operatio
// However, there are some issues with such a factorization:
// - analysis does not seem to fully expand params
// - param keys may be altered by x-go-name
- res := new(Result)
+ res := pools.poolOfResults.BorrowResult()
pnames := make(map[string]struct{})
if op.Parameters != nil { // Safeguard
@@ -802,3 +836,17 @@ func (s *SpecValidator) expandedAnalyzer() *analysis.Spec {
}
return s.analyzer
}
+
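+// deepCloneSchema copies a spec.Schema through a gob encode/decode round-trip,
+// so the clone can be expanded without mutating the shared swagger schema.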
+func deepCloneSchema(src spec.Schema) (spec.Schema, error) {
+ var b bytes.Buffer
+ if err := gob.NewEncoder(&b).Encode(src); err != nil {
+ return spec.Schema{}, err
+ }
+
+ var dst spec.Schema
+ if err := gob.NewDecoder(&b).Decode(&dst); err != nil {
+ return spec.Schema{}, err
+ }
+
+ return dst, nil
+}
diff --git a/vendor/github.com/go-openapi/validate/spec_messages.go b/vendor/github.com/go-openapi/validate/spec_messages.go
index b3757add..6d1f0f81 100644
--- a/vendor/github.com/go-openapi/validate/spec_messages.go
+++ b/vendor/github.com/go-openapi/validate/spec_messages.go
@@ -187,6 +187,8 @@ const (
// UnusedResponseWarning ...
UnusedResponseWarning = "response %q is not used anywhere"
+
+ InvalidObject = "expected an object in %q.%s"
)
// Additional error codes
@@ -347,11 +349,15 @@ func invalidParameterDefinitionAsSchemaMsg(path, method, operationID string) err
func parameterValidationTypeMismatchMsg(param, path, typ string) errors.Error {
return errors.New(errors.CompositeErrorCode, ParamValidationTypeMismatch, param, path, typ)
}
+func invalidObjectMsg(path, in string) errors.Error {
+ return errors.New(errors.CompositeErrorCode, InvalidObject, path, in)
+}
// disabled
-// func invalidResponseDefinitionAsSchemaMsg(path, method string) errors.Error {
-// return errors.New(errors.CompositeErrorCode, InvalidResponseDefinitionAsSchemaError, path, method)
-// }
+//
+// func invalidResponseDefinitionAsSchemaMsg(path, method string) errors.Error {
+// return errors.New(errors.CompositeErrorCode, InvalidResponseDefinitionAsSchemaError, path, method)
+// }
func someParametersBrokenMsg(path, method, operationID string) errors.Error {
return errors.New(errors.CompositeErrorCode, SomeParametersBrokenError, path, method, operationID)
}
diff --git a/vendor/github.com/go-openapi/validate/type.go b/vendor/github.com/go-openapi/validate/type.go
index 87646758..f87abb3d 100644
--- a/vendor/github.com/go-openapi/validate/type.go
+++ b/vendor/github.com/go-openapi/validate/type.go
@@ -25,11 +25,34 @@ import (
)
type typeValidator struct {
+ Path string
+ In string
Type spec.StringOrArray
Nullable bool
Format string
- In string
- Path string
+ Options *SchemaValidatorOptions
+}
+
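+// newTypeValidator returns a pooled validator when recycling is enabled, or a freshly allocated one otherwise.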
+func newTypeValidator(path, in string, typ spec.StringOrArray, nullable bool, format string, opts *SchemaValidatorOptions) *typeValidator {
+ if opts == nil {
+ opts = new(SchemaValidatorOptions)
+ }
+
+ var t *typeValidator
+ if opts.recycleValidators {
+ t = pools.poolOfTypeValidators.BorrowValidator()
+ } else {
+ t = new(typeValidator)
+ }
+
+ t.Path = path
+ t.In = in
+ t.Type = typ
+ t.Nullable = nullable
+ t.Format = format
+ t.Options = opts
+
+ return t
}
func (t *typeValidator) schemaInfoForType(data interface{}) (string, string) {
@@ -90,7 +113,7 @@ func (t *typeValidator) schemaInfoForType(data interface{}) (string, string) {
default:
val := reflect.ValueOf(data)
tpe := val.Type()
- switch tpe.Kind() {
+ switch tpe.Kind() { //nolint:exhaustive
case reflect.Bool:
return booleanType, ""
case reflect.String:
@@ -125,23 +148,33 @@ func (t *typeValidator) SetPath(path string) {
t.Path = path
}
-func (t *typeValidator) Applies(source interface{}, kind reflect.Kind) bool {
+func (t *typeValidator) Applies(source interface{}, _ reflect.Kind) bool {
// typeValidator applies to Schema, Parameter and Header objects
- stpe := reflect.TypeOf(source)
- r := (len(t.Type) > 0 || t.Format != "") && (stpe == specSchemaType || stpe == specParameterType || stpe == specHeaderType)
- debugLog("type validator for %q applies %t for %T (kind: %v)\n", t.Path, r, source, kind)
- return r
+ switch source.(type) {
+ case *spec.Schema:
+ case *spec.Parameter:
+ case *spec.Header:
+ default:
+ return false
+ }
+
+ return (len(t.Type) > 0 || t.Format != "")
}
func (t *typeValidator) Validate(data interface{}) *Result {
- result := new(Result)
- result.Inc()
+ if t.Options.recycleValidators {
+ defer func() {
+ t.redeem()
+ }()
+ }
+
if data == nil {
// nil or zero value for the passed structure require Type: null
if len(t.Type) > 0 && !t.Type.Contains(nullType) && !t.Nullable { // TODO: if a property is not required it also passes this
- return errorHelp.sErr(errors.InvalidType(t.Path, t.In, strings.Join(t.Type, ","), nullType))
+ return errorHelp.sErr(errors.InvalidType(t.Path, t.In, strings.Join(t.Type, ","), nullType), t.Options.recycleResult)
}
- return result
+
+ return emptyResult
}
// check if the type matches, should be used in every validator chain as first item
@@ -151,8 +184,6 @@ func (t *typeValidator) Validate(data interface{}) *Result {
// infer schema type (JSON) and format from passed data type
schType, format := t.schemaInfoForType(data)
- debugLog("path: %s, schType: %s, format: %s, expType: %s, expFmt: %s, kind: %s", t.Path, schType, format, t.Type, t.Format, val.Kind().String())
-
// check numerical types
// TODO: check unsigned ints
// TODO: check json.Number (see schema.go)
@@ -163,15 +194,20 @@ func (t *typeValidator) Validate(data interface{}) *Result {
if kind != reflect.String && kind != reflect.Slice && t.Format != "" && !(t.Type.Contains(schType) || format == t.Format || isFloatInt || isIntFloat || isLowerInt || isLowerFloat) {
// TODO: test case
- return errorHelp.sErr(errors.InvalidType(t.Path, t.In, t.Format, format))
+ return errorHelp.sErr(errors.InvalidType(t.Path, t.In, t.Format, format), t.Options.recycleResult)
}
if !(t.Type.Contains(numberType) || t.Type.Contains(integerType)) && t.Format != "" && (kind == reflect.String || kind == reflect.Slice) {
- return result
+ return emptyResult
}
if !(t.Type.Contains(schType) || isFloatInt || isIntFloat) {
- return errorHelp.sErr(errors.InvalidType(t.Path, t.In, strings.Join(t.Type, ","), schType))
+ return errorHelp.sErr(errors.InvalidType(t.Path, t.In, strings.Join(t.Type, ","), schType), t.Options.recycleResult)
}
- return result
+
+ return emptyResult
+}
+
+func (t *typeValidator) redeem() {
+ pools.poolOfTypeValidators.RedeemValidator(t)
}
diff --git a/vendor/github.com/go-openapi/validate/validator.go b/vendor/github.com/go-openapi/validate/validator.go
index 38cdb9bb..c083aecc 100644
--- a/vendor/github.com/go-openapi/validate/validator.go
+++ b/vendor/github.com/go-openapi/validate/validator.go
@@ -39,20 +39,31 @@ type itemsValidator struct {
root interface{}
path string
in string
- validators []valueValidator
+ validators [6]valueValidator // one fixed slot per validator kind; avoids allocating a slice
KnownFormats strfmt.Registry
+ Options *SchemaValidatorOptions
}
-func newItemsValidator(path, in string, items *spec.Items, root interface{}, formats strfmt.Registry) *itemsValidator {
- iv := &itemsValidator{path: path, in: in, items: items, root: root, KnownFormats: formats}
- iv.validators = []valueValidator{
- &typeValidator{
- Type: spec.StringOrArray([]string{items.Type}),
- Nullable: items.Nullable,
- Format: items.Format,
- In: in,
- Path: path,
- },
+func newItemsValidator(path, in string, items *spec.Items, root interface{}, formats strfmt.Registry, opts *SchemaValidatorOptions) *itemsValidator {
+ if opts == nil {
+ opts = new(SchemaValidatorOptions)
+ }
+
+ var iv *itemsValidator
+ if opts.recycleValidators {
+ iv = pools.poolOfItemsValidators.BorrowValidator()
+ } else {
+ iv = new(itemsValidator)
+ }
+
+ iv.path = path
+ iv.in = in
+ iv.items = items
+ iv.root = root
+ iv.KnownFormats = formats
+ iv.Options = opts
+ iv.validators = [6]valueValidator{
+ iv.typeValidator(),
iv.stringValidator(),
iv.formatValidator(),
iv.numberValidator(),
@@ -63,77 +74,152 @@ func newItemsValidator(path, in string, items *spec.Items, root interface{}, for
}
func (i *itemsValidator) Validate(index int, data interface{}) *Result {
+ if i.Options.recycleValidators {
+ defer func() {
+ i.redeemChildren()
+ i.redeem()
+ }()
+ }
+
tpe := reflect.TypeOf(data)
kind := tpe.Kind()
- mainResult := new(Result)
+ var result *Result
+ if i.Options.recycleResult {
+ result = pools.poolOfResults.BorrowResult()
+ } else {
+ result = new(Result)
+ }
+
path := fmt.Sprintf("%s.%d", i.path, index)
- for _, validator := range i.validators {
- validator.SetPath(path)
- if validator.Applies(i.root, kind) {
- result := validator.Validate(data)
- mainResult.Merge(result)
- mainResult.Inc()
- if result != nil && result.HasErrors() {
- return mainResult
+ for idx, validator := range i.validators {
+ if !validator.Applies(i.root, kind) {
+ if i.Options.recycleValidators {
+ // Validate won't be called, so relinquish this validator
+ if redeemableChildren, ok := validator.(interface{ redeemChildren() }); ok {
+ redeemableChildren.redeemChildren()
+ }
+ if redeemable, ok := validator.(interface{ redeem() }); ok {
+ redeemable.redeem()
+ }
+ i.validators[idx] = nil // prevents further (unsafe) usage
}
+
+ continue
+ }
+
+ validator.SetPath(path)
+ err := validator.Validate(data)
+ if i.Options.recycleValidators {
+ i.validators[idx] = nil // prevents further (unsafe) usage
+ }
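+ // errors from any validator abort the chain; valid results are merged and counted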
+ if err != nil {
+ result.Inc()
+ if err.HasErrors() {
+ result.Merge(err)
+
+ break
+ }
+
+ result.Merge(err)
}
}
- return mainResult
+
+ return result
+}
+
+func (i *itemsValidator) typeValidator() valueValidator {
+ return newTypeValidator(
+ i.path,
+ i.in,
+ spec.StringOrArray([]string{i.items.Type}),
+ i.items.Nullable,
+ i.items.Format,
+ i.Options,
+ )
}
func (i *itemsValidator) commonValidator() valueValidator {
- return &basicCommonValidator{
- In: i.in,
- Default: i.items.Default,
- Enum: i.items.Enum,
- }
+ return newBasicCommonValidator(
+ "",
+ i.in,
+ i.items.Default,
+ i.items.Enum,
+ i.Options,
+ )
}
func (i *itemsValidator) sliceValidator() valueValidator {
- return &basicSliceValidator{
- In: i.in,
- Default: i.items.Default,
- MaxItems: i.items.MaxItems,
- MinItems: i.items.MinItems,
- UniqueItems: i.items.UniqueItems,
- Source: i.root,
- Items: i.items.Items,
- KnownFormats: i.KnownFormats,
- }
+ return newBasicSliceValidator(
+ "",
+ i.in,
+ i.items.Default,
+ i.items.MaxItems,
+ i.items.MinItems,
+ i.items.UniqueItems,
+ i.items.Items,
+ i.root,
+ i.KnownFormats,
+ i.Options,
+ )
}
func (i *itemsValidator) numberValidator() valueValidator {
- return &numberValidator{
- In: i.in,
- Default: i.items.Default,
- MultipleOf: i.items.MultipleOf,
- Maximum: i.items.Maximum,
- ExclusiveMaximum: i.items.ExclusiveMaximum,
- Minimum: i.items.Minimum,
- ExclusiveMinimum: i.items.ExclusiveMinimum,
- Type: i.items.Type,
- Format: i.items.Format,
- }
+ return newNumberValidator(
+ "",
+ i.in,
+ i.items.Default,
+ i.items.MultipleOf,
+ i.items.Maximum,
+ i.items.ExclusiveMaximum,
+ i.items.Minimum,
+ i.items.ExclusiveMinimum,
+ i.items.Type,
+ i.items.Format,
+ i.Options,
+ )
}
func (i *itemsValidator) stringValidator() valueValidator {
- return &stringValidator{
- In: i.in,
- Default: i.items.Default,
- MaxLength: i.items.MaxLength,
- MinLength: i.items.MinLength,
- Pattern: i.items.Pattern,
- AllowEmptyValue: false,
- }
+ return newStringValidator(
+ "",
+ i.in,
+ i.items.Default,
+ false, // Required
+ false, // AllowEmpty
+ i.items.MaxLength,
+ i.items.MinLength,
+ i.items.Pattern,
+ i.Options,
+ )
}
func (i *itemsValidator) formatValidator() valueValidator {
- return &formatValidator{
- In: i.in,
- //Default: i.items.Default,
- Format: i.items.Format,
- KnownFormats: i.KnownFormats,
+ return newFormatValidator(
+ "",
+ i.in,
+ i.items.Format,
+ i.KnownFormats,
+ i.Options,
+ )
+}
+
+func (i *itemsValidator) redeem() {
+ pools.poolOfItemsValidators.RedeemValidator(i)
+}
+
+func (i *itemsValidator) redeemChildren() {
+ for idx, validator := range i.validators {
+ if validator == nil {
+ continue
+ }
+ if redeemableChildren, ok := validator.(interface{ redeemChildren() }); ok {
+ redeemableChildren.redeemChildren()
+ }
+ if redeemable, ok := validator.(interface{ redeem() }); ok {
+ redeemable.redeem()
+ }
+ i.validators[idx] = nil // free up allocated children if not in pool
}
}
@@ -142,265 +228,501 @@ type basicCommonValidator struct {
In string
Default interface{}
Enum []interface{}
+ Options *SchemaValidatorOptions
+}
+
+func newBasicCommonValidator(path, in string, def interface{}, enum []interface{}, opts *SchemaValidatorOptions) *basicCommonValidator {
+ if opts == nil {
+ opts = new(SchemaValidatorOptions)
+ }
+
+ var b *basicCommonValidator
+ if opts.recycleValidators {
+ b = pools.poolOfBasicCommonValidators.BorrowValidator()
+ } else {
+ b = new(basicCommonValidator)
+ }
+
+ b.Path = path
+ b.In = in
+ b.Default = def
+ b.Enum = enum
+ b.Options = opts
+
+ return b
}
func (b *basicCommonValidator) SetPath(path string) {
b.Path = path
}
-func (b *basicCommonValidator) Applies(source interface{}, kind reflect.Kind) bool {
+func (b *basicCommonValidator) Applies(source interface{}, _ reflect.Kind) bool {
switch source.(type) {
case *spec.Parameter, *spec.Schema, *spec.Header:
return true
+ default:
+ return false
}
- return false
}
func (b *basicCommonValidator) Validate(data interface{}) (res *Result) {
- if len(b.Enum) > 0 {
- for _, enumValue := range b.Enum {
- actualType := reflect.TypeOf(enumValue)
- if actualType != nil { // Safeguard
- expectedValue := reflect.ValueOf(data)
- if expectedValue.IsValid() && expectedValue.Type().ConvertibleTo(actualType) {
- if reflect.DeepEqual(expectedValue.Convert(actualType).Interface(), enumValue) {
- return nil
- }
- }
- }
- }
- return errorHelp.sErr(errors.EnumFail(b.Path, b.In, data, b.Enum))
+ if b.Options.recycleValidators {
+ defer func() {
+ b.redeem()
+ }()
}
- return nil
+
+ if len(b.Enum) == 0 {
+ return nil
+ }
+
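+ // the value is accepted as soon as it is convertible and deep-equal to one of the enum values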
+ for _, enumValue := range b.Enum {
+ actualType := reflect.TypeOf(enumValue)
+ if actualType == nil { // Safeguard
+ continue
+ }
+
+ expectedValue := reflect.ValueOf(data)
+ if expectedValue.IsValid() &&
+ expectedValue.Type().ConvertibleTo(actualType) &&
+ reflect.DeepEqual(expectedValue.Convert(actualType).Interface(), enumValue) {
+ return nil
+ }
+ }
+
+ return errorHelp.sErr(errors.EnumFail(b.Path, b.In, data, b.Enum), b.Options.recycleResult)
+}
+
+func (b *basicCommonValidator) redeem() {
+ pools.poolOfBasicCommonValidators.RedeemValidator(b)
}
// A HeaderValidator has very limited subset of validations to apply
type HeaderValidator struct {
name string
header *spec.Header
- validators []valueValidator
+ validators [6]valueValidator
KnownFormats strfmt.Registry
+ Options *SchemaValidatorOptions
}
// NewHeaderValidator creates a new header validator object
-func NewHeaderValidator(name string, header *spec.Header, formats strfmt.Registry) *HeaderValidator {
- p := &HeaderValidator{name: name, header: header, KnownFormats: formats}
- p.validators = []valueValidator{
- &typeValidator{
- Type: spec.StringOrArray([]string{header.Type}),
- Nullable: header.Nullable,
- Format: header.Format,
- In: "header",
- Path: name,
- },
+func NewHeaderValidator(name string, header *spec.Header, formats strfmt.Registry, options ...Option) *HeaderValidator {
+ opts := new(SchemaValidatorOptions)
+ for _, o := range options {
+ o(opts)
+ }
+
+ return newHeaderValidator(name, header, formats, opts)
+}
+
+func newHeaderValidator(name string, header *spec.Header, formats strfmt.Registry, opts *SchemaValidatorOptions) *HeaderValidator {
+ if opts == nil {
+ opts = new(SchemaValidatorOptions)
+ }
+
+ var p *HeaderValidator
+ if opts.recycleValidators {
+ p = pools.poolOfHeaderValidators.BorrowValidator()
+ } else {
+ p = new(HeaderValidator)
+ }
+
+ p.name = name
+ p.header = header
+ p.KnownFormats = formats
+ p.Options = opts
+ p.validators = [6]valueValidator{
+ newTypeValidator(
+ name,
+ "header",
+ spec.StringOrArray([]string{header.Type}),
+ header.Nullable,
+ header.Format,
+ p.Options,
+ ),
p.stringValidator(),
p.formatValidator(),
p.numberValidator(),
p.sliceValidator(),
p.commonValidator(),
}
+
return p
}
// Validate the value of the header against its schema
func (p *HeaderValidator) Validate(data interface{}) *Result {
- result := new(Result)
+ if p.Options.recycleValidators {
+ defer func() {
+ p.redeemChildren()
+ p.redeem()
+ }()
+ }
+
+ if data == nil {
+ return nil
+ }
+
+ var result *Result
+ if p.Options.recycleResult {
+ result = pools.poolOfResults.BorrowResult()
+ } else {
+ result = new(Result)
+ }
+
tpe := reflect.TypeOf(data)
kind := tpe.Kind()
- for _, validator := range p.validators {
- if validator.Applies(p.header, kind) {
- if err := validator.Validate(data); err != nil {
- result.Merge(err)
- if err.HasErrors() {
- return result
+ for idx, validator := range p.validators {
+ if !validator.Applies(p.header, kind) {
+ if p.Options.recycleValidators {
+ // Validate won't be called, so relinquish this validator
+ if redeemableChildren, ok := validator.(interface{ redeemChildren() }); ok {
+ redeemableChildren.redeemChildren()
}
+ if redeemable, ok := validator.(interface{ redeem() }); ok {
+ redeemable.redeem()
+ }
+ p.validators[idx] = nil // prevents further (unsafe) usage
}
+
+ continue
+ }
+
+ err := validator.Validate(data)
+ if p.Options.recycleValidators {
+ p.validators[idx] = nil // prevents further (unsafe) usage
+ }
+ if err != nil {
+ if err.HasErrors() {
+ result.Merge(err)
+ break
+ }
+ result.Merge(err)
}
}
- return nil
+
+ return result
}
func (p *HeaderValidator) commonValidator() valueValidator {
- return &basicCommonValidator{
- Path: p.name,
- In: "response",
- Default: p.header.Default,
- Enum: p.header.Enum,
- }
+ return newBasicCommonValidator(
+ p.name,
+ "response",
+ p.header.Default,
+ p.header.Enum,
+ p.Options,
+ )
}
func (p *HeaderValidator) sliceValidator() valueValidator {
- return &basicSliceValidator{
- Path: p.name,
- In: "response",
- Default: p.header.Default,
- MaxItems: p.header.MaxItems,
- MinItems: p.header.MinItems,
- UniqueItems: p.header.UniqueItems,
- Items: p.header.Items,
- Source: p.header,
- KnownFormats: p.KnownFormats,
- }
+ return newBasicSliceValidator(
+ p.name,
+ "response",
+ p.header.Default,
+ p.header.MaxItems,
+ p.header.MinItems,
+ p.header.UniqueItems,
+ p.header.Items,
+ p.header,
+ p.KnownFormats,
+ p.Options,
+ )
}
func (p *HeaderValidator) numberValidator() valueValidator {
- return &numberValidator{
- Path: p.name,
- In: "response",
- Default: p.header.Default,
- MultipleOf: p.header.MultipleOf,
- Maximum: p.header.Maximum,
- ExclusiveMaximum: p.header.ExclusiveMaximum,
- Minimum: p.header.Minimum,
- ExclusiveMinimum: p.header.ExclusiveMinimum,
- Type: p.header.Type,
- Format: p.header.Format,
- }
+ return newNumberValidator(
+ p.name,
+ "response",
+ p.header.Default,
+ p.header.MultipleOf,
+ p.header.Maximum,
+ p.header.ExclusiveMaximum,
+ p.header.Minimum,
+ p.header.ExclusiveMinimum,
+ p.header.Type,
+ p.header.Format,
+ p.Options,
+ )
}
func (p *HeaderValidator) stringValidator() valueValidator {
- return &stringValidator{
- Path: p.name,
- In: "response",
- Default: p.header.Default,
- Required: true,
- MaxLength: p.header.MaxLength,
- MinLength: p.header.MinLength,
- Pattern: p.header.Pattern,
- AllowEmptyValue: false,
- }
+ return newStringValidator(
+ p.name,
+ "response",
+ p.header.Default,
+ true, // Required
+ false, // AllowEmpty
+ p.header.MaxLength,
+ p.header.MinLength,
+ p.header.Pattern,
+ p.Options,
+ )
}
func (p *HeaderValidator) formatValidator() valueValidator {
- return &formatValidator{
- Path: p.name,
- In: "response",
- //Default: p.header.Default,
- Format: p.header.Format,
- KnownFormats: p.KnownFormats,
+ return newFormatValidator(
+ p.name,
+ "response",
+ p.header.Format,
+ p.KnownFormats,
+ p.Options,
+ )
+}
+
+func (p *HeaderValidator) redeem() {
+ pools.poolOfHeaderValidators.RedeemValidator(p)
+}
+
+func (p *HeaderValidator) redeemChildren() {
+ for idx, validator := range p.validators {
+ if validator == nil {
+ continue
+ }
+ if redeemableChildren, ok := validator.(interface{ redeemChildren() }); ok {
+ redeemableChildren.redeemChildren()
+ }
+ if redeemable, ok := validator.(interface{ redeem() }); ok {
+ redeemable.redeem()
+ }
+ p.validators[idx] = nil // free up allocated children if not in pool
}
}
// A ParamValidator has very limited subset of validations to apply
type ParamValidator struct {
param *spec.Parameter
- validators []valueValidator
+ validators [6]valueValidator
KnownFormats strfmt.Registry
+ Options *SchemaValidatorOptions
}
// NewParamValidator creates a new param validator object
-func NewParamValidator(param *spec.Parameter, formats strfmt.Registry) *ParamValidator {
- p := &ParamValidator{param: param, KnownFormats: formats}
- p.validators = []valueValidator{
- &typeValidator{
- Type: spec.StringOrArray([]string{param.Type}),
- Nullable: param.Nullable,
- Format: param.Format,
- In: param.In,
- Path: param.Name,
- },
+func NewParamValidator(param *spec.Parameter, formats strfmt.Registry, options ...Option) *ParamValidator {
+ opts := new(SchemaValidatorOptions)
+ for _, o := range options {
+ o(opts)
+ }
+
+ return newParamValidator(param, formats, opts)
+}
+
+func newParamValidator(param *spec.Parameter, formats strfmt.Registry, opts *SchemaValidatorOptions) *ParamValidator {
+ if opts == nil {
+ opts = new(SchemaValidatorOptions)
+ }
+
+ var p *ParamValidator
+ if opts.recycleValidators {
+ p = pools.poolOfParamValidators.BorrowValidator()
+ } else {
+ p = new(ParamValidator)
+ }
+
+ p.param = param
+ p.KnownFormats = formats
+ p.Options = opts
+ p.validators = [6]valueValidator{
+ newTypeValidator(
+ param.Name,
+ param.In,
+ spec.StringOrArray([]string{param.Type}),
+ param.Nullable,
+ param.Format,
+ p.Options,
+ ),
p.stringValidator(),
p.formatValidator(),
p.numberValidator(),
p.sliceValidator(),
p.commonValidator(),
}
+
return p
}
// Validate the data against the description of the parameter
func (p *ParamValidator) Validate(data interface{}) *Result {
- result := new(Result)
+ if data == nil {
+ return nil
+ }
+
+ var result *Result
+ if p.Options.recycleResult {
+ result = pools.poolOfResults.BorrowResult()
+ } else {
+ result = new(Result)
+ }
+
tpe := reflect.TypeOf(data)
kind := tpe.Kind()
+ if p.Options.recycleValidators {
+ defer func() {
+ p.redeemChildren()
+ p.redeem()
+ }()
+ }
+
// TODO: validate type
- for _, validator := range p.validators {
- if validator.Applies(p.param, kind) {
- if err := validator.Validate(data); err != nil {
- result.Merge(err)
- if err.HasErrors() {
- return result
+ for idx, validator := range p.validators {
+ if !validator.Applies(p.param, kind) {
+ if p.Options.recycleValidators {
+ // Validate won't be called, so relinquish this validator
+ if redeemableChildren, ok := validator.(interface{ redeemChildren() }); ok {
+ redeemableChildren.redeemChildren()
}
+ if redeemable, ok := validator.(interface{ redeem() }); ok {
+ redeemable.redeem()
+ }
+ p.validators[idx] = nil // prevents further (unsafe) usage
}
+
+ continue
+ }
+
+ err := validator.Validate(data)
+ if p.Options.recycleValidators {
+ p.validators[idx] = nil // prevents further (unsafe) usage
+ }
+ if err != nil {
+ if err.HasErrors() {
+ result.Merge(err)
+ break
+ }
+ result.Merge(err)
}
}
- return nil
+
+ return result
}
func (p *ParamValidator) commonValidator() valueValidator {
- return &basicCommonValidator{
- Path: p.param.Name,
- In: p.param.In,
- Default: p.param.Default,
- Enum: p.param.Enum,
- }
+ return newBasicCommonValidator(
+ p.param.Name,
+ p.param.In,
+ p.param.Default,
+ p.param.Enum,
+ p.Options,
+ )
}
func (p *ParamValidator) sliceValidator() valueValidator {
- return &basicSliceValidator{
- Path: p.param.Name,
- In: p.param.In,
- Default: p.param.Default,
- MaxItems: p.param.MaxItems,
- MinItems: p.param.MinItems,
- UniqueItems: p.param.UniqueItems,
- Items: p.param.Items,
- Source: p.param,
- KnownFormats: p.KnownFormats,
- }
+ return newBasicSliceValidator(
+ p.param.Name,
+ p.param.In,
+ p.param.Default,
+ p.param.MaxItems,
+ p.param.MinItems,
+ p.param.UniqueItems,
+ p.param.Items,
+ p.param,
+ p.KnownFormats,
+ p.Options,
+ )
}
func (p *ParamValidator) numberValidator() valueValidator {
- return &numberValidator{
- Path: p.param.Name,
- In: p.param.In,
- Default: p.param.Default,
- MultipleOf: p.param.MultipleOf,
- Maximum: p.param.Maximum,
- ExclusiveMaximum: p.param.ExclusiveMaximum,
- Minimum: p.param.Minimum,
- ExclusiveMinimum: p.param.ExclusiveMinimum,
- Type: p.param.Type,
- Format: p.param.Format,
- }
+ return newNumberValidator(
+ p.param.Name,
+ p.param.In,
+ p.param.Default,
+ p.param.MultipleOf,
+ p.param.Maximum,
+ p.param.ExclusiveMaximum,
+ p.param.Minimum,
+ p.param.ExclusiveMinimum,
+ p.param.Type,
+ p.param.Format,
+ p.Options,
+ )
}
func (p *ParamValidator) stringValidator() valueValidator {
- return &stringValidator{
- Path: p.param.Name,
- In: p.param.In,
- Default: p.param.Default,
- AllowEmptyValue: p.param.AllowEmptyValue,
- Required: p.param.Required,
- MaxLength: p.param.MaxLength,
- MinLength: p.param.MinLength,
- Pattern: p.param.Pattern,
- }
+ return newStringValidator(
+ p.param.Name,
+ p.param.In,
+ p.param.Default,
+ p.param.Required,
+ p.param.AllowEmptyValue,
+ p.param.MaxLength,
+ p.param.MinLength,
+ p.param.Pattern,
+ p.Options,
+ )
}
func (p *ParamValidator) formatValidator() valueValidator {
- return &formatValidator{
- Path: p.param.Name,
- In: p.param.In,
- //Default: p.param.Default,
- Format: p.param.Format,
- KnownFormats: p.KnownFormats,
+ return newFormatValidator(
+ p.param.Name,
+ p.param.In,
+ p.param.Format,
+ p.KnownFormats,
+ p.Options,
+ )
+}
+
+func (p *ParamValidator) redeem() {
+ pools.poolOfParamValidators.RedeemValidator(p)
+}
+
+func (p *ParamValidator) redeemChildren() {
+ for idx, validator := range p.validators {
+ if validator == nil {
+ continue
+ }
+ if redeemableChildren, ok := validator.(interface{ redeemChildren() }); ok {
+ redeemableChildren.redeemChildren()
+ }
+ if redeemable, ok := validator.(interface{ redeem() }); ok {
+ redeemable.redeem()
+ }
+ p.validators[idx] = nil // free up allocated children if not in pool
}
}
type basicSliceValidator struct {
- Path string
- In string
- Default interface{}
- MaxItems *int64
- MinItems *int64
- UniqueItems bool
- Items *spec.Items
- Source interface{}
- itemsValidator *itemsValidator
- KnownFormats strfmt.Registry
+ Path string
+ In string
+ Default interface{}
+ MaxItems *int64
+ MinItems *int64
+ UniqueItems bool
+ Items *spec.Items
+ Source interface{}
+ KnownFormats strfmt.Registry
+ Options *SchemaValidatorOptions
+}
+
+func newBasicSliceValidator(
+ path, in string,
+ def interface{}, maxItems, minItems *int64, uniqueItems bool, items *spec.Items,
+ source interface{}, formats strfmt.Registry,
+ opts *SchemaValidatorOptions) *basicSliceValidator {
+ if opts == nil {
+ opts = new(SchemaValidatorOptions)
+ }
+
+ var s *basicSliceValidator
+ if opts.recycleValidators {
+ s = pools.poolOfBasicSliceValidators.BorrowValidator()
+ } else {
+ s = new(basicSliceValidator)
+ }
+
+ s.Path = path
+ s.In = in
+ s.Default = def
+ s.MaxItems = maxItems
+ s.MinItems = minItems
+ s.UniqueItems = uniqueItems
+ s.Items = items
+ s.Source = source
+ s.KnownFormats = formats
+ s.Options = opts
+
+ return s
}
func (s *basicSliceValidator) SetPath(path string) {
@@ -411,60 +733,61 @@ func (s *basicSliceValidator) Applies(source interface{}, kind reflect.Kind) boo
switch source.(type) {
case *spec.Parameter, *spec.Items, *spec.Header:
return kind == reflect.Slice
+ default:
+ return false
}
- return false
}
func (s *basicSliceValidator) Validate(data interface{}) *Result {
+ if s.Options.recycleValidators {
+ defer func() {
+ s.redeem()
+ }()
+ }
val := reflect.ValueOf(data)
size := int64(val.Len())
if s.MinItems != nil {
if err := MinItems(s.Path, s.In, size, *s.MinItems); err != nil {
- return errorHelp.sErr(err)
+ return errorHelp.sErr(err, s.Options.recycleResult)
}
}
if s.MaxItems != nil {
if err := MaxItems(s.Path, s.In, size, *s.MaxItems); err != nil {
- return errorHelp.sErr(err)
+ return errorHelp.sErr(err, s.Options.recycleResult)
}
}
if s.UniqueItems {
if err := UniqueItems(s.Path, s.In, data); err != nil {
- return errorHelp.sErr(err)
+ return errorHelp.sErr(err, s.Options.recycleResult)
}
}
- if s.itemsValidator == nil && s.Items != nil {
- s.itemsValidator = newItemsValidator(s.Path, s.In, s.Items, s.Source, s.KnownFormats)
+ if s.Items == nil {
+ return nil
}
- if s.itemsValidator != nil {
- for i := 0; i < int(size); i++ {
- ele := val.Index(i)
- if err := s.itemsValidator.Validate(i, ele.Interface()); err != nil && err.HasErrors() {
+ for i := 0; i < int(size); i++ {
+ itemsValidator := newItemsValidator(s.Path, s.In, s.Items, s.Source, s.KnownFormats, s.Options)
+ ele := val.Index(i)
+ if err := itemsValidator.Validate(i, ele.Interface()); err != nil {
+ if err.HasErrors() {
return err
}
+ if err.wantsRedeemOnMerge {
+ pools.poolOfResults.RedeemResult(err)
+ }
}
}
+
return nil
}
-/* unused
-func (s *basicSliceValidator) hasDuplicates(value reflect.Value, size int) bool {
- dict := make(map[interface{}]struct{})
- for i := 0; i < size; i++ {
- ele := value.Index(i)
- if _, ok := dict[ele.Interface()]; ok {
- return true
- }
- dict[ele.Interface()] = struct{}{}
- }
- return false
+func (s *basicSliceValidator) redeem() {
+ pools.poolOfBasicSliceValidators.RedeemValidator(s)
}
-*/
type numberValidator struct {
Path string
@@ -476,8 +799,40 @@ type numberValidator struct {
Minimum *float64
ExclusiveMinimum bool
// Allows for more accurate behavior regarding integers
- Type string
- Format string
+ Type string
+ Format string
+ Options *SchemaValidatorOptions
+}
+
+func newNumberValidator(
+ path, in string, def interface{},
+ multipleOf, maximum *float64, exclusiveMaximum bool, minimum *float64, exclusiveMinimum bool,
+ typ, format string,
+ opts *SchemaValidatorOptions) *numberValidator {
+ if opts == nil {
+ opts = new(SchemaValidatorOptions)
+ }
+
+ var n *numberValidator
+ if opts.recycleValidators {
+ n = pools.poolOfNumberValidators.BorrowValidator()
+ } else {
+ n = new(numberValidator)
+ }
+
+ n.Path = path
+ n.In = in
+ n.Default = def
+ n.MultipleOf = multipleOf
+ n.Maximum = maximum
+ n.ExclusiveMaximum = exclusiveMaximum
+ n.Minimum = minimum
+ n.ExclusiveMinimum = exclusiveMinimum
+ n.Type = typ
+ n.Format = format
+ n.Options = opts
+
+ return n
}
func (n *numberValidator) SetPath(path string) {
@@ -489,12 +844,10 @@ func (n *numberValidator) Applies(source interface{}, kind reflect.Kind) bool {
case *spec.Parameter, *spec.Schema, *spec.Items, *spec.Header:
isInt := kind >= reflect.Int && kind <= reflect.Uint64
isFloat := kind == reflect.Float32 || kind == reflect.Float64
- r := isInt || isFloat
- debugLog("schema props validator for %q applies %t for %T (kind: %v) isInt=%t, isFloat=%t\n", n.Path, r, source, kind, isInt, isFloat)
- return r
+ return isInt || isFloat
+ default:
+ return false
}
- debugLog("schema props validator for %q applies %t for %T (kind: %v)\n", n.Path, false, source, kind)
- return false
}
// Validate provides a validator for generic JSON numbers,
@@ -519,11 +872,18 @@ func (n *numberValidator) Applies(source interface{}, kind reflect.Kind) bool {
//
// TODO: default boundaries with MAX_SAFE_INTEGER are not checked (specific to json.Number?)
func (n *numberValidator) Validate(val interface{}) *Result {
- res := new(Result)
+ if n.Options.recycleValidators {
+ defer func() {
+ n.redeem()
+ }()
+ }
- resMultiple := new(Result)
- resMinimum := new(Result)
- resMaximum := new(Result)
+ var res, resMultiple, resMinimum, resMaximum *Result
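+ // the constraint-specific results are borrowed lazily, only when the constraint is set: Merge skips nil results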
+ if n.Options.recycleResult {
+ res = pools.poolOfResults.BorrowResult()
+ } else {
+ res = new(Result)
+ }
// Used only to attempt to validate constraint on value,
// even though value or constraint specified do not match type and format
@@ -533,68 +893,106 @@ func (n *numberValidator) Validate(val interface{}) *Result {
res.AddErrors(IsValueValidAgainstRange(val, n.Type, n.Format, "Checked", n.Path))
if n.MultipleOf != nil {
+ resMultiple = pools.poolOfResults.BorrowResult()
+
// Is the constraint specifier within the range of the specific numeric type and format?
resMultiple.AddErrors(IsValueValidAgainstRange(*n.MultipleOf, n.Type, n.Format, "MultipleOf", n.Path))
if resMultiple.IsValid() {
// Constraint validated with compatible types
if err := MultipleOfNativeType(n.Path, n.In, val, *n.MultipleOf); err != nil {
- resMultiple.Merge(errorHelp.sErr(err))
+ resMultiple.Merge(errorHelp.sErr(err, n.Options.recycleResult))
}
} else {
// Constraint nevertheless validated, converted as general number
if err := MultipleOf(n.Path, n.In, data, *n.MultipleOf); err != nil {
- resMultiple.Merge(errorHelp.sErr(err))
+ resMultiple.Merge(errorHelp.sErr(err, n.Options.recycleResult))
}
}
}
- // nolint: dupl
if n.Maximum != nil {
+ resMaximum = pools.poolOfResults.BorrowResult()
+
// Is the constraint specifier within the range of the specific numeric type and format?
resMaximum.AddErrors(IsValueValidAgainstRange(*n.Maximum, n.Type, n.Format, "Maximum boundary", n.Path))
if resMaximum.IsValid() {
// Constraint validated with compatible types
if err := MaximumNativeType(n.Path, n.In, val, *n.Maximum, n.ExclusiveMaximum); err != nil {
- resMaximum.Merge(errorHelp.sErr(err))
+ resMaximum.Merge(errorHelp.sErr(err, n.Options.recycleResult))
}
} else {
// Constraint nevertheless validated, converted as general number
if err := Maximum(n.Path, n.In, data, *n.Maximum, n.ExclusiveMaximum); err != nil {
- resMaximum.Merge(errorHelp.sErr(err))
+ resMaximum.Merge(errorHelp.sErr(err, n.Options.recycleResult))
}
}
}
- // nolint: dupl
if n.Minimum != nil {
+ resMinimum = pools.poolOfResults.BorrowResult()
+
// Is the constraint specifier within the range of the specific numeric type and format?
resMinimum.AddErrors(IsValueValidAgainstRange(*n.Minimum, n.Type, n.Format, "Minimum boundary", n.Path))
if resMinimum.IsValid() {
// Constraint validated with compatible types
if err := MinimumNativeType(n.Path, n.In, val, *n.Minimum, n.ExclusiveMinimum); err != nil {
- resMinimum.Merge(errorHelp.sErr(err))
+ resMinimum.Merge(errorHelp.sErr(err, n.Options.recycleResult))
}
} else {
// Constraint nevertheless validated, converted as general number
if err := Minimum(n.Path, n.In, data, *n.Minimum, n.ExclusiveMinimum); err != nil {
- resMinimum.Merge(errorHelp.sErr(err))
+ resMinimum.Merge(errorHelp.sErr(err, n.Options.recycleResult))
}
}
}
res.Merge(resMultiple, resMinimum, resMaximum)
res.Inc()
+
return res
}
+func (n *numberValidator) redeem() {
+ pools.poolOfNumberValidators.RedeemValidator(n)
+}
+
type stringValidator struct {
+ Path string
+ In string
Default interface{}
Required bool
AllowEmptyValue bool
MaxLength *int64
MinLength *int64
Pattern string
- Path string
- In string
+ Options *SchemaValidatorOptions
+}
+
+func newStringValidator(
+ path, in string,
+ def interface{}, required, allowEmpty bool, maxLength, minLength *int64, pattern string,
+ opts *SchemaValidatorOptions) *stringValidator {
+ if opts == nil {
+ opts = new(SchemaValidatorOptions)
+ }
+
+ var s *stringValidator
+ if opts.recycleValidators {
+ s = pools.poolOfStringValidators.BorrowValidator()
+ } else {
+ s = new(stringValidator)
+ }
+
+ s.Path = path
+ s.In = in
+ s.Default = def
+ s.Required = required
+ s.AllowEmptyValue = allowEmpty
+ s.MaxLength = maxLength
+ s.MinLength = minLength
+ s.Pattern = pattern
+ s.Options = opts
+
+ return s
}
func (s *stringValidator) SetPath(path string) {
@@ -604,42 +1002,50 @@ func (s *stringValidator) SetPath(path string) {
func (s *stringValidator) Applies(source interface{}, kind reflect.Kind) bool {
switch source.(type) {
case *spec.Parameter, *spec.Schema, *spec.Items, *spec.Header:
- r := kind == reflect.String
- debugLog("string validator for %q applies %t for %T (kind: %v)\n", s.Path, r, source, kind)
- return r
+ return kind == reflect.String
+ default:
+ return false
}
- debugLog("string validator for %q applies %t for %T (kind: %v)\n", s.Path, false, source, kind)
- return false
}
func (s *stringValidator) Validate(val interface{}) *Result {
+ if s.Options.recycleValidators {
+ defer func() {
+ s.redeem()
+ }()
+ }
+
data, ok := val.(string)
if !ok {
- return errorHelp.sErr(errors.InvalidType(s.Path, s.In, stringType, val))
+ return errorHelp.sErr(errors.InvalidType(s.Path, s.In, stringType, val), s.Options.recycleResult)
}
if s.Required && !s.AllowEmptyValue && (s.Default == nil || s.Default == "") {
if err := RequiredString(s.Path, s.In, data); err != nil {
- return errorHelp.sErr(err)
+ return errorHelp.sErr(err, s.Options.recycleResult)
}
}
if s.MaxLength != nil {
if err := MaxLength(s.Path, s.In, data, *s.MaxLength); err != nil {
- return errorHelp.sErr(err)
+ return errorHelp.sErr(err, s.Options.recycleResult)
}
}
if s.MinLength != nil {
if err := MinLength(s.Path, s.In, data, *s.MinLength); err != nil {
- return errorHelp.sErr(err)
+ return errorHelp.sErr(err, s.Options.recycleResult)
}
}
if s.Pattern != "" {
if err := Pattern(s.Path, s.In, data, s.Pattern); err != nil {
- return errorHelp.sErr(err)
+ return errorHelp.sErr(err, s.Options.recycleResult)
}
}
return nil
}
+
+func (s *stringValidator) redeem() {
+ pools.poolOfStringValidators.RedeemValidator(s)
+}
diff --git a/vendor/github.com/go-openapi/validate/values.go b/vendor/github.com/go-openapi/validate/values.go
index e7ad8c10..5f6f5ee6 100644
--- a/vendor/github.com/go-openapi/validate/values.go
+++ b/vendor/github.com/go-openapi/validate/values.go
@@ -120,7 +120,7 @@ func UniqueItems(path, in string, data interface{}) *errors.Validation {
// MinLength validates a string for minimum length
func MinLength(path, in, data string, minLength int64) *errors.Validation {
- strLen := int64(utf8.RuneCount([]byte(data)))
+ strLen := int64(utf8.RuneCountInString(data))
if strLen < minLength {
return errors.TooShort(path, in, minLength, data)
}
@@ -129,7 +129,7 @@ func MinLength(path, in, data string, minLength int64) *errors.Validation {
// MaxLength validates a string for maximum length
func MaxLength(path, in, data string, maxLength int64) *errors.Validation {
- strLen := int64(utf8.RuneCount([]byte(data)))
+ strLen := int64(utf8.RuneCountInString(data))
if strLen > maxLength {
return errors.TooLong(path, in, maxLength, data)
}
@@ -315,7 +315,7 @@ func FormatOf(path, in, format, data string, registry strfmt.Registry) *errors.V
// TODO: Normally, a JSON MAX_SAFE_INTEGER check would ensure conversion remains loss-free
func MaximumNativeType(path, in string, val interface{}, max float64, exclusive bool) *errors.Validation {
kind := reflect.ValueOf(val).Type().Kind()
- switch kind {
+ switch kind { //nolint:exhaustive
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
value := valueHelp.asInt64(val)
return MaximumInt(path, in, value, int64(max), exclusive)
@@ -345,7 +345,7 @@ func MaximumNativeType(path, in string, val interface{}, max float64, exclusive
// TODO: Normally, a JSON MAX_SAFE_INTEGER check would ensure conversion remains loss-free
func MinimumNativeType(path, in string, val interface{}, min float64, exclusive bool) *errors.Validation {
kind := reflect.ValueOf(val).Type().Kind()
- switch kind {
+ switch kind { //nolint:exhaustive
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
value := valueHelp.asInt64(val)
return MinimumInt(path, in, value, int64(min), exclusive)
@@ -375,7 +375,7 @@ func MinimumNativeType(path, in string, val interface{}, min float64, exclusive
// TODO: Normally, a JSON MAX_SAFE_INTEGER check would ensure conversion remains loss-free
func MultipleOfNativeType(path, in string, val interface{}, multipleOf float64) *errors.Validation {
kind := reflect.ValueOf(val).Type().Kind()
- switch kind {
+ switch kind { //nolint:exhaustive
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
value := valueHelp.asInt64(val)
return MultipleOfInt(path, in, value, int64(multipleOf))
@@ -399,7 +399,7 @@ func IsValueValidAgainstRange(val interface{}, typeName, format, prefix, path st
// What is the string representation of val
var stringRep string
- switch kind {
+ switch kind { //nolint:exhaustive
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
stringRep = swag.FormatUint64(valueHelp.asUint64(val))
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
diff --git a/vendor/github.com/go-sql-driver/mysql/AUTHORS b/vendor/github.com/go-sql-driver/mysql/AUTHORS
index 05132751..ec346e20 100644
--- a/vendor/github.com/go-sql-driver/mysql/AUTHORS
+++ b/vendor/github.com/go-sql-driver/mysql/AUTHORS
@@ -13,29 +13,41 @@
Aaron Hopkins
Achille Roussel
+Aidan
Alex Snast
Alexey Palazhchenko
Andrew Reid
Animesh Ray
Arne Hormann
Ariel Mashraki
+Artur Melanchyk
Asta Xie
+B Lamarche
+Bes Dollma
+Bogdan Constantinescu
+Brad Higgins
+Brian Hendriks
Bulat Gaifullin
Caine Jette
Carlos Nieto
Chris Kirkland
Chris Moos
Craig Wilson
+Daemonxiao <735462752 at qq.com>
Daniel Montoya
Daniel Nichter
Daniël van Eeden
Dave Protasowski
+Diego Dupin
+Dirkjan Bussink
DisposaBoy
Egor Smolyakov
Erwan Martin
+Evan Elias
Evan Shaw
Frederick Mayle
Gustavo Kristic
+Gusted
Hajime Nakagami
Hanno Braun
Henri Yandell
@@ -45,13 +57,18 @@ ICHINOSE Shogo
Ilia Cimpoes
INADA Naoki
Jacek Szwec
+Jakub Adamus
James Harr
Janek Vedock
+Jason Ng
+Jean-Yves Pellé
Jeff Hodges
Jeffrey Charles
+Jennifer Purevsuren
Jerome Meyer
Jiajia Zhong
Jian Zhen
+Joe Mann
Joshua Prunier
Julien Lefevre
Julien Schmidt
@@ -72,17 +89,23 @@ Lunny Xiao
Luke Scott
Maciej Zimnoch
Michael Woolnough
+Nao Yokotsuka
Nathanial Murphy
Nicola Peduzzi
+Oliver Bone
Olivier Mengué
oscarzhao
Paul Bonser
+Paulius Lozys
Peter Schultz
+Phil Porada
+Minh Quang
Rebecca Chin
Reed Allman
Richard Wilkes
Robert Russell
Runrioter Wung
+Samantha Frank
Santhosh Kumar Tekuri
Sho Iizuka
Sho Ikeda
@@ -93,6 +116,7 @@ Stan Putrya
Stanley Gunawan
Steven Hartland
Tan Jinhua <312841925 at qq.com>
+Tetsuro Aoki
Thomas Wodarek
Tim Ruffles
Tom Jenkinson
@@ -102,6 +126,7 @@ Xiangyu Hu
Xiaobing Jiang
Xiuming Chen
Xuehong Chan
+Zhang Xiang
Zhenye Xie
Zhixin Wen
Ziheng Lyu
@@ -110,15 +135,21 @@ Ziheng Lyu
Barracuda Networks, Inc.
Counting Ltd.
+Defined Networking Inc.
DigitalOcean Inc.
+Dolthub Inc.
dyves labs AG
Facebook Inc.
GitHub Inc.
Google Inc.
InfoSum Ltd.
Keybase Inc.
+Microsoft Corp.
Multiplay Ltd.
Percona LLC
+PingCAP Inc.
Pivotal Inc.
+Shattered Silicon Ltd.
Stripe Inc.
+ThousandEyes
Zendesk Inc.
diff --git a/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md b/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md
index 77024a82..75674b60 100644
--- a/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md
+++ b/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md
@@ -1,3 +1,110 @@
+# Changelog
+
+## v1.9.3 (2025-06-13)
+
+* `tx.Commit()` and `tx.Rollback()` always returned `ErrInvalidConn`.
+  Now they return the cached real error if present. (#1690)
+
+* Optimize reading small resultsets to fix a performance regression
+  introduced by compression protocol support. (#1707)
+
+* Fix `db.Ping()` on compressed connection. (#1723)
+
+
+## v1.9.2 (2025-04-07)
+
+v1.9.2 is a re-release of v1.9.1 due to a release process issue; no changes were made to the content.
+
+
+## v1.9.1 (2025-03-21)
+
+### Major Changes
+
+* Add Charset() option. (#1679)
+
+### Bugfixes
+
+* go.mod: fix go version format (#1682)
+* Fix FormatDSN missing ConnectionAttributes (#1619)
+
+## v1.9.0 (2025-02-18)
+
+### Major Changes
+
+- Implement zlib compression. (#1487)
+- Supported Go version is updated to Go 1.21+. (#1639)
+- Add support for VECTOR type introduced in MySQL 9.0. (#1609)
+- Config object can have custom dial function. (#1527)
+
+### Bugfixes
+
+- Fix auth errors when username/password are too long. (#1625)
+- Check if MySQL supports CLIENT_CONNECT_ATTRS before sending client attributes. (#1640)
+- Fix auth switch request handling. (#1666)
+
+### Other changes
+
+- Add "filename:line" prefix to log in go-mysql. Custom loggers now show it. (#1589)
+- Improve error handling. It reduces the "busy buffer" errors. (#1595, #1601, #1641)
+- Use `strconv.Atoi` to parse max_allowed_packet. (#1661)
+- `rejectReadOnly` option now handles ER_READ_ONLY_MODE (1290) error too. (#1660)
+
+
+## Version 1.8.1 (2024-03-26)
+
+Bugfixes:
+
+- fix race condition when context is canceled in [#1562](https://github.com/go-sql-driver/mysql/pull/1562) and [#1570](https://github.com/go-sql-driver/mysql/pull/1570)
+
+## Version 1.8.0 (2024-03-09)
+
+Major Changes:
+
+- Use `SET NAMES charset COLLATE collation`. by @methane in [#1437](https://github.com/go-sql-driver/mysql/pull/1437)
+ - Older go-mysql-driver used `collation_id` in the handshake packet. But it caused collation mismatches in some situations.
+ - If you specify neither charset nor collation, go-mysql-driver sends `SET NAMES utf8mb4` for new connections. This uses the server's default collation for utf8mb4.
+ - If you specify charset, go-mysql-driver sends `SET NAMES <charset>`. This uses the server's default collation for `<charset>`.
+ - If you specify collation and/or charset, go-mysql-driver sends `SET NAMES charset COLLATE collation`.
+- PathEscape dbname in DSN. by @methane in [#1432](https://github.com/go-sql-driver/mysql/pull/1432)
+ - This is backward incompatible in rare case. Check your DSN.
+- Drop Go 1.13-17 support by @methane in [#1420](https://github.com/go-sql-driver/mysql/pull/1420)
+ - Use Go 1.18+
+- Parse numbers on text protocol too by @methane in [#1452](https://github.com/go-sql-driver/mysql/pull/1452)
+ - When the text protocol is used, go-mysql-driver passed bare `[]byte` to database/sql to avoid unnecessary allocation and conversion.
+ - If the user specified `*any` to `Scan()`, database/sql passed the `[]byte` into the target variable.
+ - This confused users because most users don't know when the text or binary protocol is used.
+ - go-mysql-driver 1.8 converts integer/float values into int64/double even in text protocol. This doesn't increase allocation compared to `[]byte` and the conversion cost is negligible.
+- New options start using the Functional Option Pattern to avoid increasing technical debt in the Config object. Future versions may introduce Functional Options for existing options, but not for now.
+ - Make TimeTruncate functional option by @methane in [1552](https://github.com/go-sql-driver/mysql/pull/1552)
+ - Add BeforeConnect callback to configuration object by @ItalyPaleAle in [#1469](https://github.com/go-sql-driver/mysql/pull/1469)
+
+
+Other changes:
+
+- Adding DeregisterDialContext to prevent memory leaks with dialers we don't need anymore by @jypelle in https://github.com/go-sql-driver/mysql/pull/1422
+- Make logger configurable per connection by @frozenbonito in https://github.com/go-sql-driver/mysql/pull/1408
+- Fix ColumnType.DatabaseTypeName for mediumint unsigned by @evanelias in https://github.com/go-sql-driver/mysql/pull/1428
+- Add connection attributes by @Daemonxiao in https://github.com/go-sql-driver/mysql/pull/1389
+- Stop `ColumnTypeScanType()` from returning `sql.RawBytes` by @methane in https://github.com/go-sql-driver/mysql/pull/1424
+- Exec() now provides access to status of multiple statements. by @mherr-google in https://github.com/go-sql-driver/mysql/pull/1309
+- Allow to change (or disable) the default driver name for registration by @dolmen in https://github.com/go-sql-driver/mysql/pull/1499
+- Add default connection attribute '_server_host' by @oblitorum in https://github.com/go-sql-driver/mysql/pull/1506
+- QueryUnescape DSN ConnectionAttribute value by @zhangyangyu in https://github.com/go-sql-driver/mysql/pull/1470
+- Add client_ed25519 authentication by @Gusted in https://github.com/go-sql-driver/mysql/pull/1518
+
+## Version 1.7.1 (2023-04-25)
+
+Changes:
+
+ - bump actions/checkout@v3 and actions/setup-go@v3 (#1375)
+ - Add go1.20 and mariadb10.11 to the testing matrix (#1403)
+ - Increase default maxAllowedPacket size. (#1411)
+
+Bugfixes:
+
+ - Use SET syntax as specified in the MySQL documentation (#1402)
+
+
## Version 1.7 (2022-11-29)
Changes:
@@ -149,7 +256,7 @@ New Features:
- Enable microsecond resolution on TIME, DATETIME and TIMESTAMP (#249)
- Support for returning table alias on Columns() (#289, #359, #382)
- - Placeholder interpolation, can be actived with the DSN parameter `interpolateParams=true` (#309, #318, #490)
+ - Placeholder interpolation, can be activated with the DSN parameter `interpolateParams=true` (#309, #318, #490)
- Support for uint64 parameters with high bit set (#332, #345)
- Cleartext authentication plugin support (#327)
- Exported ParseDSN function and the Config struct (#403, #419, #429)
@@ -193,7 +300,7 @@ Changes:
- Also exported the MySQLWarning type
- mysqlConn.Close returns the first error encountered instead of ignoring all errors
- writePacket() automatically writes the packet size to the header
- - readPacket() uses an iterative approach instead of the recursive approach to merge splitted packets
+ - readPacket() uses an iterative approach instead of the recursive approach to merge split packets
New Features:
@@ -241,7 +348,7 @@ Bugfixes:
- Fixed MySQL 4.1 support: MySQL 4.1 sends packets with lengths which differ from the specification
- Convert to DB timezone when inserting `time.Time`
- - Splitted packets (more than 16MB) are now merged correctly
+ - Split packets (more than 16MB) are now merged correctly
- Fixed false positive `io.EOF` errors when the data was fully read
- Avoid panics on reuse of closed connections
- Fixed empty string producing false nil values
diff --git a/vendor/github.com/go-sql-driver/mysql/README.md b/vendor/github.com/go-sql-driver/mysql/README.md
index 25de2e5a..da4593cc 100644
--- a/vendor/github.com/go-sql-driver/mysql/README.md
+++ b/vendor/github.com/go-sql-driver/mysql/README.md
@@ -38,17 +38,26 @@ A MySQL-Driver for Go's [database/sql](https://golang.org/pkg/database/sql/) pac
* Secure `LOAD DATA LOCAL INFILE` support with file allowlisting and `io.Reader` support
* Optional `time.Time` parsing
* Optional placeholder interpolation
+ * Optional zlib compression
## Requirements
- * Go 1.13 or higher. We aim to support the 3 latest versions of Go.
- * MySQL (4.1+), MariaDB, Percona Server, Google CloudSQL or Sphinx (2.2.3+)
+
+* Go 1.21 or higher. We aim to support the 3 latest versions of Go.
+* MySQL (5.7+) and MariaDB (10.5+) are supported.
+* [TiDB](https://github.com/pingcap/tidb) is supported by PingCAP.
+ * Do not ask questions about TiDB in our issue tracker or forum.
+ * [Document](https://docs.pingcap.com/tidb/v6.1/dev-guide-sample-application-golang)
+ * [Forum](https://ask.pingcap.com/)
+* go-mysql would work with Percona Server, Google CloudSQL or Sphinx (2.2.3+).
+ * Maintainers won't support them. Do not expect issues to be investigated and resolved by maintainers.
+ * Investigate issues yourself and please send a pull request to fix them.
---------------------------------------
## Installation
Simple install the package to your [$GOPATH](https://github.com/golang/go/wiki/GOPATH "GOPATH") with the [go tool](https://golang.org/cmd/go/ "go command") from shell:
```bash
-$ go get -u github.com/go-sql-driver/mysql
+go get -u github.com/go-sql-driver/mysql
```
Make sure [Git is installed](https://git-scm.com/downloads) on your machine and in your system's `PATH`.
@@ -114,6 +123,12 @@ This has the same effect as an empty DSN string:
```
+`dbname` is escaped by [PathEscape()](https://pkg.go.dev/net/url#PathEscape) since v1.8.0. If your database name is `dbname/withslash`, it becomes:
+
+```
+/dbname%2Fwithslash
+```
+
Alternatively, [Config.FormatDSN](https://godoc.org/github.com/go-sql-driver/mysql#Config.FormatDSN) can be used to create a DSN string by filling a struct.
#### Password
@@ -121,7 +136,7 @@ Passwords can consist of any character. Escaping is **not** necessary.
#### Protocol
See [net.Dial](https://golang.org/pkg/net/#Dial) for more information which networks are available.
-In general you should use an Unix domain socket if available and TCP otherwise for best performance.
+In general you should use a Unix domain socket if available and TCP otherwise for best performance.
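+
+For example, a DSN using a Unix domain socket might look like this (socket path and credentials are illustrative):
+
+```
+user:password@unix(/var/run/mysqld/mysqld.sock)/dbname
+```
+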
#### Address
For TCP and UDP networks, addresses have the form `host[:port]`.
@@ -145,7 +160,7 @@ Default: false
```
`allowAllFiles=true` disables the file allowlist for `LOAD DATA LOCAL INFILE` and allows *all* files.
-[*Might be insecure!*](http://dev.mysql.com/doc/refman/5.7/en/load-data-local.html)
+[*Might be insecure!*](https://dev.mysql.com/doc/refman/8.0/en/load-data.html#load-data-local)
##### `allowCleartextPasswords`
@@ -194,10 +209,9 @@ Valid Values:
Default: none
```
-Sets the charset used for client-server interaction (`"SET NAMES <charset>"`). If multiple charsets are set (separated by a comma), the following charset is used if setting the charset failes. This enables for example support for `utf8mb4` ([introduced in MySQL 5.5.3](http://dev.mysql.com/doc/refman/5.5/en/charset-unicode-utf8mb4.html)) with fallback to `utf8` for older servers (`charset=utf8mb4,utf8`).
+Sets the charset used for client-server interaction (`"SET NAMES <charset>"`). If multiple charsets are set (separated by a comma), the following charset is used if setting the charset fails. This enables for example support for `utf8mb4` ([introduced in MySQL 5.5.3](http://dev.mysql.com/doc/refman/5.5/en/charset-unicode-utf8mb4.html)) with fallback to `utf8` for older servers (`charset=utf8mb4,utf8`).
-Usage of the `charset` parameter is discouraged because it issues additional queries to the server.
-Unless you need the fallback behavior, please use `collation` instead.
+See also [Unicode Support](#unicode-support).
##### `checkConnLiveness`
@@ -226,6 +240,7 @@ The default collation (`utf8mb4_general_ci`) is supported from MySQL 5.5. You s
Collations for charset "ucs2", "utf16", "utf16le", and "utf32" can not be used ([ref](https://dev.mysql.com/doc/refman/5.7/en/charset-connection.html#charset-connection-impermissible-client-charset)).
+See also [Unicode Support](#unicode-support).
##### `clientFoundRows`
@@ -253,6 +268,16 @@ SELECT u.id FROM users as u
will return `u.id` instead of just `id` if `columnsWithAlias=true`.
+##### `compress`
+
+```
+Type: bool
+Valid Values: true, false
+Default: false
+```
+
+Toggles zlib compression. false by default.
+
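+For example (host and credentials are illustrative):
+
+```
+user:password@tcp(127.0.0.1:3306)/dbname?compress=true
+```
+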
##### `interpolateParams`
```
@@ -279,13 +304,22 @@ Note that this sets the location for time.Time values but does not change MySQL'
Please keep in mind, that param values must be [url.QueryEscape](https://golang.org/pkg/net/url/#QueryEscape)'ed. Alternatively you can manually replace the `/` with `%2F`. For example `US/Pacific` would be `loc=US%2FPacific`.
+##### `timeTruncate`
+
+```
+Type: duration
+Default: 0
+```
+
+[Truncate time values](https://pkg.go.dev/time#Duration.Truncate) to the specified duration. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*.
+
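+For example, to truncate all `time.Time` parameter values to millisecond precision (DSN is illustrative):
+
+```
+user:password@tcp(127.0.0.1:3306)/dbname?timeTruncate=1ms
+```
+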
##### `maxAllowedPacket`
```
Type: decimal number
-Default: 4194304
+Default: 64*1024*1024
```
-Max packet size allowed in bytes. The default value is 4 MiB and should be adjusted to match the server settings. `maxAllowedPacket=0` can be used to automatically fetch the `max_allowed_packet` variable from server *on every connection*.
+Max packet size allowed in bytes. The default value is 64 MiB and should be adjusted to match the server settings. `maxAllowedPacket=0` can be used to automatically fetch the `max_allowed_packet` variable from server *on every connection*.
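+
+For example, to fetch the server's `max_allowed_packet` on every connection (DSN is illustrative):
+
+```
+user:password@tcp(127.0.0.1:3306)/dbname?maxAllowedPacket=0
+```
+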
##### `multiStatements`
@@ -295,9 +329,25 @@ Valid Values: true, false
Default: false
```
-Allow multiple statements in one query. While this allows batch queries, it also greatly increases the risk of SQL injections. Only the result of the first query is returned, all other results are silently discarded.
+Allow multiple statements in one query. This can be used to batch multiple queries. Use [Rows.NextResultSet()](https://pkg.go.dev/database/sql#Rows.NextResultSet) to get the result of the second and subsequent queries.
-When `multiStatements` is used, `?` parameters must only be used in the first statement.
+When `multiStatements` is used, `?` parameters must only be used in the first statement. [interpolateParams](#interpolateparams) can be used to avoid this limitation unless a prepared statement is used explicitly.
+
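+A minimal sketch of reading both result sets, assuming `multiStatements=true` is set in the DSN (queries are illustrative):
+
+```go
+rows, _ := db.Query("SELECT 1; SELECT 2")
+defer rows.Close()
+for rows.Next() { /* scan the first result set */ }
+if rows.NextResultSet() {
+ for rows.Next() { /* scan the second result set */ }
+}
+```
+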
+It's possible to access the last inserted ID and number of affected rows for multiple statements by using `sql.Conn.Raw()` and the `mysql.Result`. For example:
+
+```go
+conn, _ := db.Conn(ctx)
+conn.Raw(func(conn any) error {
+ ex := conn.(driver.Execer)
+ res, err := ex.Exec(`
+ UPDATE point SET x = 1 WHERE y = 2;
+ UPDATE point SET x = 2 WHERE y = 3;
+ `, nil)
+ if err != nil {
+  return err
+ }
+ // Both slices have 2 elements.
+ log.Print(res.(mysql.Result).AllRowsAffected())
+ log.Print(res.(mysql.Result).AllLastInsertIds())
+ return nil
+})
+```
##### `parseTime`
@@ -393,6 +443,15 @@ Default: 0
I/O write timeout. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*.
+##### `connectionAttributes`
+
+```
+Type: comma-delimited string of user-defined "key:value" pairs
+Valid Values: (<name1>:<value1>,<name2>:<value2>,...)
+Default: none
+```
+
+[Connection attributes](https://dev.mysql.com/doc/refman/8.0/en/performance-schema-connection-attribute-tables.html) are key-value pairs that application programs can pass to the server at connect time.
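+
+For example (attribute names and values are illustrative):
+
+```
+user:password@tcp(127.0.0.1:3306)/dbname?connectionAttributes=program_name:myapp,owner:backend
+```
+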
##### System Variables
@@ -465,12 +524,15 @@ user:password@/
The connection pool is managed by Go's database/sql package. For details on how to configure the size of the pool and how long connections stay in the pool see `*DB.SetMaxOpenConns`, `*DB.SetMaxIdleConns`, and `*DB.SetConnMaxLifetime` in the [database/sql documentation](https://golang.org/pkg/database/sql/). The read, write, and dial timeouts for each individual connection are configured with the DSN parameters [`readTimeout`](#readtimeout), [`writeTimeout`](#writetimeout), and [`timeout`](#timeout), respectively.
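+
+A minimal sketch of tuning the pool through `database/sql` (the values are illustrative, not recommendations):
+
+```go
+db.SetMaxOpenConns(10)
+db.SetMaxIdleConns(5)
+db.SetConnMaxLifetime(3 * time.Minute)
+```
+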
## `ColumnType` Support
-This driver supports the [`ColumnType` interface](https://golang.org/pkg/database/sql/#ColumnType) introduced in Go 1.8, with the exception of [`ColumnType.Length()`](https://golang.org/pkg/database/sql/#ColumnType.Length), which is currently not supported. All Unsigned database type names will be returned `UNSIGNED ` with `INT`, `TINYINT`, `SMALLINT`, `BIGINT`.
+This driver supports the [`ColumnType` interface](https://golang.org/pkg/database/sql/#ColumnType) introduced in Go 1.8, with the exception of [`ColumnType.Length()`](https://golang.org/pkg/database/sql/#ColumnType.Length), which is currently not supported. All Unsigned database type names will be returned `UNSIGNED ` with `INT`, `TINYINT`, `SMALLINT`, `MEDIUMINT`, `BIGINT`.
## `context.Context` Support
Go 1.8 added `database/sql` support for `context.Context`. This driver supports query timeouts and cancellation via contexts.
See [context support in the database/sql package](https://golang.org/doc/go1.8#database_sql) for more details.
+> [!IMPORTANT]
+> The `QueryContext`, `ExecContext`, etc. variants provided by `database/sql` will cause the connection to be closed if the provided context is cancelled or timed out before the result is received by the driver.
+
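+A minimal sketch (timeout and query are illustrative):
+
+```go
+ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
+defer cancel()
+// If the timeout fires before the driver receives the result,
+// the connection is closed.
+rows, err := db.QueryContext(ctx, "SELECT SLEEP(10)")
+// handle rows and err as usual
+```
+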
### `LOAD DATA LOCAL INFILE` support
For this feature you need direct access to the package. Therefore you must change the import path (no `_`):
@@ -478,7 +540,7 @@ For this feature you need direct access to the package. Therefore you must chang
import "github.com/go-sql-driver/mysql"
```
-Files must be explicitly allowed by registering them with `mysql.RegisterLocalFile(filepath)` (recommended) or the allowlist check must be deactivated by using the DSN parameter `allowAllFiles=true` ([*Might be insecure!*](http://dev.mysql.com/doc/refman/5.7/en/load-data-local.html)).
+Files must be explicitly allowed by registering them with `mysql.RegisterLocalFile(filepath)` (recommended) or the allowlist check must be deactivated by using the DSN parameter `allowAllFiles=true` ([*Might be insecure!*](https://dev.mysql.com/doc/refman/8.0/en/load-data.html#load-data-local)).
To use a `io.Reader` a handler function must be registered with `mysql.RegisterReaderHandler(name, handler)` which returns a `io.Reader` or `io.ReadCloser`. The Reader is available with the filepath `Reader::<name>` then. Choose different names for different handlers and `DeregisterReaderHandler` when you don't need it anymore.
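+
+A minimal sketch of both registration styles (file path, handler name, and data are illustrative):
+
+```go
+// Allowlist a concrete file for LOAD DATA LOCAL INFILE:
+mysql.RegisterLocalFile("/var/data/users.csv")
+
+// Or serve the data from memory through a named reader handler,
+// addressed as the filepath "Reader::data" in the LOAD DATA statement:
+mysql.RegisterReaderHandler("data", func() io.Reader {
+ return strings.NewReader("1,alice\n2,bob\n")
+})
+```
+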
@@ -496,9 +558,11 @@ However, many want to scan MySQL `DATE` and `DATETIME` values into `time.Time` v
### Unicode support
Since version 1.5 Go-MySQL-Driver automatically uses the collation ` utf8mb4_general_ci` by default.
-Other collations / charsets can be set using the [`collation`](#collation) DSN parameter.
+Other charsets / collations can be set using the [`charset`](#charset) or [`collation`](#collation) DSN parameter.
-Version 1.0 of the driver recommended adding `&charset=utf8` (alias for `SET NAMES utf8`) to the DSN to enable proper UTF-8 support. This is not necessary anymore. The [`collation`](#collation) parameter should be preferred to set another collation / charset than the default.
+- When only the `charset` is specified, the `SET NAMES <charset>` query is sent and the server's default collation is used.
+- When both the `charset` and `collation` are specified, the `SET NAMES <charset> COLLATE <collation>` query is sent.
+- When only the `collation` is specified, the collation is specified in the protocol handshake and the `SET NAMES` query is not sent. This can save one roundtrip, but note that the server may ignore the specified collation silently and use the server's default charset/collation instead.
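+
+For example, the three cases above correspond to these illustrative DSNs, in order:
+
+```
+/dbname?charset=utf8mb4
+/dbname?charset=utf8mb4&collation=utf8mb4_unicode_ci
+/dbname?collation=utf8mb4_unicode_ci
+```
+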
See http://dev.mysql.com/doc/refman/8.0/en/charset-unicode.html for more details on MySQL's Unicode support.
diff --git a/vendor/github.com/go-sql-driver/mysql/atomic_bool.go b/vendor/github.com/go-sql-driver/mysql/atomic_bool.go
deleted file mode 100644
index 1b7e19f3..00000000
--- a/vendor/github.com/go-sql-driver/mysql/atomic_bool.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// Go MySQL Driver - A MySQL-Driver for Go's database/sql package.
-//
-// Copyright 2022 The Go-MySQL-Driver Authors. All rights reserved.
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-// You can obtain one at http://mozilla.org/MPL/2.0/.
-//go:build go1.19
-// +build go1.19
-
-package mysql
-
-import "sync/atomic"
-
-/******************************************************************************
-* Sync utils *
-******************************************************************************/
-
-type atomicBool = atomic.Bool
diff --git a/vendor/github.com/go-sql-driver/mysql/atomic_bool_go118.go b/vendor/github.com/go-sql-driver/mysql/atomic_bool_go118.go
deleted file mode 100644
index 2e9a7f0b..00000000
--- a/vendor/github.com/go-sql-driver/mysql/atomic_bool_go118.go
+++ /dev/null
@@ -1,47 +0,0 @@
-// Go MySQL Driver - A MySQL-Driver for Go's database/sql package.
-//
-// Copyright 2022 The Go-MySQL-Driver Authors. All rights reserved.
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-// You can obtain one at http://mozilla.org/MPL/2.0/.
-//go:build !go1.19
-// +build !go1.19
-
-package mysql
-
-import "sync/atomic"
-
-/******************************************************************************
-* Sync utils *
-******************************************************************************/
-
-// atomicBool is an implementation of atomic.Bool for older version of Go.
-// it is a wrapper around uint32 for usage as a boolean value with
-// atomic access.
-type atomicBool struct {
- _ noCopy
- value uint32
-}
-
-// Load returns whether the current boolean value is true
-func (ab *atomicBool) Load() bool {
- return atomic.LoadUint32(&ab.value) > 0
-}
-
-// Store sets the value of the bool regardless of the previous value
-func (ab *atomicBool) Store(value bool) {
- if value {
- atomic.StoreUint32(&ab.value, 1)
- } else {
- atomic.StoreUint32(&ab.value, 0)
- }
-}
-
-// Swap sets the value of the bool and returns the old value.
-func (ab *atomicBool) Swap(value bool) bool {
- if value {
- return atomic.SwapUint32(&ab.value, 1) > 0
- }
- return atomic.SwapUint32(&ab.value, 0) > 0
-}
diff --git a/vendor/github.com/go-sql-driver/mysql/auth.go b/vendor/github.com/go-sql-driver/mysql/auth.go
index 1ff203e5..74e1bd03 100644
--- a/vendor/github.com/go-sql-driver/mysql/auth.go
+++ b/vendor/github.com/go-sql-driver/mysql/auth.go
@@ -13,10 +13,13 @@ import (
"crypto/rsa"
"crypto/sha1"
"crypto/sha256"
+ "crypto/sha512"
"crypto/x509"
"encoding/pem"
"fmt"
"sync"
+
+ "filippo.io/edwards25519"
)
// server pub keys registry
@@ -33,7 +36,7 @@ var (
// Note: The provided rsa.PublicKey instance is exclusively owned by the driver
// after registering it and may not be modified.
//
-// data, err := ioutil.ReadFile("mykey.pem")
+// data, err := os.ReadFile("mykey.pem")
// if err != nil {
// log.Fatal(err)
// }
@@ -225,6 +228,44 @@ func encryptPassword(password string, seed []byte, pub *rsa.PublicKey) ([]byte,
return rsa.EncryptOAEP(sha1, rand.Reader, pub, plain, nil)
}
+// authEd25519 does ed25519 authentication used by MariaDB.
+func authEd25519(scramble []byte, password string) ([]byte, error) {
+ // Derived from https://github.com/MariaDB/server/blob/d8e6bb00888b1f82c031938f4c8ac5d97f6874c3/plugin/auth_ed25519/ref10/sign.c
+ // Code style is from https://cs.opensource.google/go/go/+/refs/tags/go1.21.5:src/crypto/ed25519/ed25519.go;l=207
+ h := sha512.Sum512([]byte(password))
+
+ s, err := edwards25519.NewScalar().SetBytesWithClamping(h[:32])
+ if err != nil {
+ return nil, err
+ }
+ A := (&edwards25519.Point{}).ScalarBaseMult(s)
+
+ mh := sha512.New()
+ mh.Write(h[32:])
+ mh.Write(scramble)
+ messageDigest := mh.Sum(nil)
+ r, err := edwards25519.NewScalar().SetUniformBytes(messageDigest)
+ if err != nil {
+ return nil, err
+ }
+
+ R := (&edwards25519.Point{}).ScalarBaseMult(r)
+
+ kh := sha512.New()
+ kh.Write(R.Bytes())
+ kh.Write(A.Bytes())
+ kh.Write(scramble)
+ hramDigest := kh.Sum(nil)
+ k, err := edwards25519.NewScalar().SetUniformBytes(hramDigest)
+ if err != nil {
+ return nil, err
+ }
+
+ S := k.MultiplyAdd(k, s, r)
+
+ return append(R.Bytes(), S.Bytes()...), nil
+}
+
func (mc *mysqlConn) sendEncryptedPassword(seed []byte, pub *rsa.PublicKey) error {
enc, err := encryptPassword(mc.cfg.Passwd, seed, pub)
if err != nil {
@@ -290,8 +331,14 @@ func (mc *mysqlConn) auth(authData []byte, plugin string) ([]byte, error) {
enc, err := encryptPassword(mc.cfg.Passwd, authData, pubKey)
return enc, err
+ case "client_ed25519":
+ if len(authData) != 32 {
+ return nil, ErrMalformPkt
+ }
+ return authEd25519(authData, mc.cfg.Passwd)
+
default:
- errLog.Print("unknown auth plugin:", plugin)
+ mc.log("unknown auth plugin:", plugin)
return nil, ErrUnknownPlugin
}
}
@@ -338,7 +385,7 @@ func (mc *mysqlConn) handleAuthResult(oldAuthData []byte, plugin string) error {
switch plugin {
- // https://insidemysql.com/preparing-your-community-connector-for-mysql-8-part-2-sha256/
+ // https://dev.mysql.com/blog-archive/preparing-your-community-connector-for-mysql-8-part-2-sha256/
case "caching_sha2_password":
switch len(authData) {
case 0:
@@ -346,7 +393,7 @@ func (mc *mysqlConn) handleAuthResult(oldAuthData []byte, plugin string) error {
case 1:
switch authData[0] {
case cachingSha2PasswordFastAuthSuccess:
- if err = mc.readResultOK(); err == nil {
+ if err = mc.resultUnchanged().readResultOK(); err == nil {
return nil // auth successful
}
@@ -376,13 +423,13 @@ func (mc *mysqlConn) handleAuthResult(oldAuthData []byte, plugin string) error {
}
if data[0] != iAuthMoreData {
- return fmt.Errorf("unexpect resp from server for caching_sha2_password perform full authentication")
+ return fmt.Errorf("unexpected resp from server for caching_sha2_password, perform full authentication")
}
// parse public key
block, rest := pem.Decode(data[1:])
if block == nil {
- return fmt.Errorf("No Pem data found, data: %s", rest)
+ return fmt.Errorf("no pem data found, data: %s", rest)
}
pkix, err := x509.ParsePKIXPublicKey(block.Bytes)
if err != nil {
@@ -397,7 +444,7 @@ func (mc *mysqlConn) handleAuthResult(oldAuthData []byte, plugin string) error {
return err
}
}
- return mc.readResultOK()
+ return mc.resultUnchanged().readResultOK()
default:
return ErrMalformPkt
@@ -426,7 +473,7 @@ func (mc *mysqlConn) handleAuthResult(oldAuthData []byte, plugin string) error {
if err != nil {
return err
}
- return mc.readResultOK()
+ return mc.resultUnchanged().readResultOK()
}
default:
diff --git a/vendor/github.com/go-sql-driver/mysql/buffer.go b/vendor/github.com/go-sql-driver/mysql/buffer.go
index 0774c5c8..f895e87b 100644
--- a/vendor/github.com/go-sql-driver/mysql/buffer.go
+++ b/vendor/github.com/go-sql-driver/mysql/buffer.go
@@ -10,54 +10,47 @@ package mysql
import (
"io"
- "net"
- "time"
)
const defaultBufSize = 4096
const maxCachedBufSize = 256 * 1024
+// readerFunc is a function that is compatible with io.Reader.
+// We use this function type instead of io.Reader because we want to
+// just pass mc.readWithTimeout.
+type readerFunc func([]byte) (int, error)
+
// A buffer which is used for both reading and writing.
// This is possible since communication on each connection is synchronous.
// In other words, we can't write and read simultaneously on the same connection.
// The buffer is similar to bufio.Reader / Writer but zero-copy-ish
// Also highly optimized for this particular use case.
-// This buffer is backed by two byte slices in a double-buffering scheme
type buffer struct {
- buf []byte // buf is a byte buffer who's length and capacity are equal.
- nc net.Conn
- idx int
- length int
- timeout time.Duration
- dbuf [2][]byte // dbuf is an array with the two byte slices that back this buffer
- flipcnt uint // flipccnt is the current buffer counter for double-buffering
+ buf []byte // read buffer.
+ cachedBuf []byte // buffer that will be reused. len(cachedBuf) <= maxCachedBufSize.
}
// newBuffer allocates and returns a new buffer.
-func newBuffer(nc net.Conn) buffer {
- fg := make([]byte, defaultBufSize)
+func newBuffer() buffer {
return buffer{
- buf: fg,
- nc: nc,
- dbuf: [2][]byte{fg, nil},
+ cachedBuf: make([]byte, defaultBufSize),
}
}
-// flip replaces the active buffer with the background buffer
-// this is a delayed flip that simply increases the buffer counter;
-// the actual flip will be performed the next time we call `buffer.fill`
-func (b *buffer) flip() {
- b.flipcnt += 1
+// busy returns true if the read buffer is not empty.
+func (b *buffer) busy() bool {
+ return len(b.buf) > 0
}
-// fill reads into the buffer until at least _need_ bytes are in it
-func (b *buffer) fill(need int) error {
- n := b.length
- // fill data into its double-buffering target: if we've called
- // flip on this buffer, we'll be copying to the background buffer,
- // and then filling it with network data; otherwise we'll just move
- // the contents of the current buffer to the front before filling it
- dest := b.dbuf[b.flipcnt&1]
+// len returns how many bytes are in the read buffer.
+func (b *buffer) len() int {
+ return len(b.buf)
+}
+
+// fill reads into the read buffer until at least _need_ bytes are in it.
+func (b *buffer) fill(need int, r readerFunc) error {
+ // we'll move the contents of the current buffer to dest before filling it.
+ dest := b.cachedBuf
// grow buffer if necessary to fit the whole packet.
if need > len(dest) {
@@ -67,64 +60,41 @@ func (b *buffer) fill(need int) error {
// if the allocated buffer is not too large, move it to backing storage
// to prevent extra allocations on applications that perform large reads
if len(dest) <= maxCachedBufSize {
- b.dbuf[b.flipcnt&1] = dest
+ b.cachedBuf = dest
}
}
- // if we're filling the fg buffer, move the existing data to the start of it.
- // if we're filling the bg buffer, copy over the data
- if n > 0 {
- copy(dest[:n], b.buf[b.idx:])
- }
-
- b.buf = dest
- b.idx = 0
+ // move the existing data to the start of the buffer.
+ n := len(b.buf)
+ copy(dest[:n], b.buf)
for {
- if b.timeout > 0 {
- if err := b.nc.SetReadDeadline(time.Now().Add(b.timeout)); err != nil {
- return err
- }
- }
-
- nn, err := b.nc.Read(b.buf[n:])
+ nn, err := r(dest[n:])
n += nn
- switch err {
- case nil:
- if n < need {
- continue
- }
- b.length = n
- return nil
-
- case io.EOF:
- if n >= need {
- b.length = n
- return nil
- }
- return io.ErrUnexpectedEOF
-
- default:
- return err
+ if err == nil && n < need {
+ continue
}
+
+ b.buf = dest[:n]
+
+ if err == io.EOF {
+ if n < need {
+ err = io.ErrUnexpectedEOF
+ } else {
+ err = nil
+ }
+ }
+ return err
}
}
// returns next N bytes from buffer.
// The returned slice is only guaranteed to be valid until the next read
-func (b *buffer) readNext(need int) ([]byte, error) {
- if b.length < need {
- // refill
- if err := b.fill(need); err != nil {
- return nil, err
- }
- }
-
- offset := b.idx
- b.idx += need
- b.length -= need
- return b.buf[offset:b.idx], nil
+func (b *buffer) readNext(need int) []byte {
+ data := b.buf[:need:need]
+ b.buf = b.buf[need:]
+ return data
}
// takeBuffer returns a buffer with the requested size.
@@ -132,18 +102,18 @@ func (b *buffer) readNext(need int) ([]byte, error) {
// Otherwise a bigger buffer is made.
// Only one buffer (total) can be used at a time.
func (b *buffer) takeBuffer(length int) ([]byte, error) {
- if b.length > 0 {
+ if b.busy() {
return nil, ErrBusyBuffer
}
// test (cheap) general case first
- if length <= cap(b.buf) {
- return b.buf[:length], nil
+ if length <= len(b.cachedBuf) {
+ return b.cachedBuf[:length], nil
}
- if length < maxPacketSize {
- b.buf = make([]byte, length)
- return b.buf, nil
+ if length < maxCachedBufSize {
+ b.cachedBuf = make([]byte, length)
+ return b.cachedBuf, nil
}
// buffer is larger than we want to store.
@@ -154,10 +124,10 @@ func (b *buffer) takeBuffer(length int) ([]byte, error) {
// known to be smaller than defaultBufSize.
// Only one buffer (total) can be used at a time.
func (b *buffer) takeSmallBuffer(length int) ([]byte, error) {
- if b.length > 0 {
+ if b.busy() {
return nil, ErrBusyBuffer
}
- return b.buf[:length], nil
+ return b.cachedBuf[:length], nil
}
// takeCompleteBuffer returns the complete existing buffer.
@@ -165,18 +135,15 @@ func (b *buffer) takeSmallBuffer(length int) ([]byte, error) {
// cap and len of the returned buffer will be equal.
// Only one buffer (total) can be used at a time.
func (b *buffer) takeCompleteBuffer() ([]byte, error) {
- if b.length > 0 {
+ if b.busy() {
return nil, ErrBusyBuffer
}
- return b.buf, nil
+ return b.cachedBuf, nil
}
// store stores buf, an updated buffer, if its suitable to do so.
-func (b *buffer) store(buf []byte) error {
- if b.length > 0 {
- return ErrBusyBuffer
- } else if cap(buf) <= maxPacketSize && cap(buf) > cap(b.buf) {
- b.buf = buf[:cap(buf)]
+func (b *buffer) store(buf []byte) {
+ if cap(buf) <= maxCachedBufSize && cap(buf) > cap(b.cachedBuf) {
+ b.cachedBuf = buf[:cap(buf)]
}
- return nil
}
diff --git a/vendor/github.com/go-sql-driver/mysql/collations.go b/vendor/github.com/go-sql-driver/mysql/collations.go
index 295bfbe5..29b1aa43 100644
--- a/vendor/github.com/go-sql-driver/mysql/collations.go
+++ b/vendor/github.com/go-sql-driver/mysql/collations.go
@@ -8,8 +8,8 @@
package mysql
-const defaultCollation = "utf8mb4_general_ci"
-const binaryCollation = "binary"
+const defaultCollationID = 45 // utf8mb4_general_ci
+const binaryCollationID = 63
// A list of available collations mapped to the internal ID.
// To update this map use the following MySQL query:
diff --git a/vendor/github.com/go-sql-driver/mysql/compress.go b/vendor/github.com/go-sql-driver/mysql/compress.go
new file mode 100644
index 00000000..38bfa000
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/compress.go
@@ -0,0 +1,213 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2024 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "bytes"
+ "compress/zlib"
+ "fmt"
+ "io"
+ "sync"
+)
+
+var (
+ zrPool *sync.Pool // Do not use directly. Use zDecompress() instead.
+ zwPool *sync.Pool // Do not use directly. Use zCompress() instead.
+)
+
+func init() {
+ zrPool = &sync.Pool{
+ New: func() any { return nil },
+ }
+ zwPool = &sync.Pool{
+ New: func() any {
+ zw, err := zlib.NewWriterLevel(new(bytes.Buffer), 2)
+ if err != nil {
+ panic(err) // compress/zlib returns a non-nil error only if the level is invalid
+ }
+ return zw
+ },
+ }
+}
+
+func zDecompress(src []byte, dst *bytes.Buffer) (int, error) {
+ br := bytes.NewReader(src)
+ var zr io.ReadCloser
+ var err error
+
+ if a := zrPool.Get(); a == nil {
+ if zr, err = zlib.NewReader(br); err != nil {
+ return 0, err
+ }
+ } else {
+ zr = a.(io.ReadCloser)
+ if err := zr.(zlib.Resetter).Reset(br, nil); err != nil {
+ return 0, err
+ }
+ }
+
+ n, _ := dst.ReadFrom(zr) // ignore err because zr.Close() will return it again.
+ err = zr.Close() // zr.Close() may return a checksum error.
+ zrPool.Put(zr)
+ return int(n), err
+}
+
+func zCompress(src []byte, dst io.Writer) error {
+ zw := zwPool.Get().(*zlib.Writer)
+ zw.Reset(dst)
+ if _, err := zw.Write(src); err != nil {
+ return err
+ }
+ err := zw.Close()
+ zwPool.Put(zw)
+ return err
+}
+
+type compIO struct {
+ mc *mysqlConn
+ buff bytes.Buffer
+}
+
+func newCompIO(mc *mysqlConn) *compIO {
+ return &compIO{
+ mc: mc,
+ }
+}
+
+func (c *compIO) reset() {
+ c.buff.Reset()
+}
+
+func (c *compIO) readNext(need int) ([]byte, error) {
+ for c.buff.Len() < need {
+ if err := c.readCompressedPacket(); err != nil {
+ return nil, err
+ }
+ }
+ data := c.buff.Next(need)
+ return data[:need:need], nil // prevent caller writes into c.buff
+}
+
+func (c *compIO) readCompressedPacket() error {
+ header, err := c.mc.readNext(7)
+ if err != nil {
+ return err
+ }
+ _ = header[6] // bounds check hint to compiler; guaranteed by readNext
+
+ // compressed header structure
+ comprLength := getUint24(header[0:3])
+ compressionSequence := header[3]
+ uncompressedLength := getUint24(header[4:7])
+ if debug {
+ fmt.Printf("uncompress cmplen=%v uncomplen=%v pkt_cmp_seq=%v expected_cmp_seq=%v\n",
+ comprLength, uncompressedLength, compressionSequence, c.mc.sequence)
+ }
+ // Do not return ErrPktSync here.
+ // Server may return error packet (e.g. 1153 Got a packet bigger than 'max_allowed_packet' bytes)
+ // before receiving all packets from the client. In this case, seqnr is smaller than expected.
+ // NOTE: Neither mariadbclient nor mysqlclient checks seqnr. Only the server checks it.
+ if debug && compressionSequence != c.mc.compressSequence {
+ fmt.Printf("WARN: unexpected cmpress seq nr: expected %v, got %v",
+ c.mc.compressSequence, compressionSequence)
+ }
+ c.mc.compressSequence = compressionSequence + 1
+
+ comprData, err := c.mc.readNext(comprLength)
+ if err != nil {
+ return err
+ }
+
+ // if payload is uncompressed, its length will be specified as zero, and its
+ // true length is contained in comprLength
+ if uncompressedLength == 0 {
+ c.buff.Write(comprData)
+ return nil
+ }
+
+ // use existing capacity in c.buff if possible
+ c.buff.Grow(uncompressedLength)
+ nread, err := zDecompress(comprData, &c.buff)
+ if err != nil {
+ return err
+ }
+ if nread != uncompressedLength {
+ return fmt.Errorf("invalid compressed packet: uncompressed length in header is %d, actual %d",
+ uncompressedLength, nread)
+ }
+ return nil
+}
+
+const minCompressLength = 150
+const maxPayloadLen = maxPacketSize - 4
+
+// writePackets sends one or more packets with compression.
+// Use this instead of mc.netConn.Write() when mc.compress is true.
+func (c *compIO) writePackets(packets []byte) (int, error) {
+ totalBytes := len(packets)
+ blankHeader := make([]byte, 7)
+ buf := &c.buff
+
+ for len(packets) > 0 {
+ payloadLen := min(maxPayloadLen, len(packets))
+ payload := packets[:payloadLen]
+ uncompressedLen := payloadLen
+
+ buf.Reset()
+ buf.Write(blankHeader) // Buffer.Write() never returns error
+
+ // If payload is less than minCompressLength, don't compress.
+ if uncompressedLen < minCompressLength {
+ buf.Write(payload)
+ uncompressedLen = 0
+ } else {
+ err := zCompress(payload, buf)
+ if debug && err != nil {
+ fmt.Printf("zCompress error: %v", err)
+ }
+ // do not compress if compressed data is larger than uncompressed data
+ // The 7-byte header in buf is intentionally counted in this comparison: compression is only used when it saves more than 7 bytes.
+ if err != nil || buf.Len() >= uncompressedLen {
+ buf.Reset()
+ buf.Write(blankHeader)
+ buf.Write(payload)
+ uncompressedLen = 0
+ }
+ }
+
+ if n, err := c.writeCompressedPacket(buf.Bytes(), uncompressedLen); err != nil {
+ // To allow returning ErrBadConn only when really zero bytes were sent, we sum
+ // up the compressed bytes returned by the underlying Write().
+ return totalBytes - len(packets) + n, err
+ }
+ packets = packets[payloadLen:]
+ }
+
+ return totalBytes, nil
+}
+
+// writeCompressedPacket writes a compressed packet with header.
+// data should start with 7 bytes of space for the header, followed by the payload.
+func (c *compIO) writeCompressedPacket(data []byte, uncompressedLen int) (int, error) {
+ mc := c.mc
+ comprLength := len(data) - 7
+ if debug {
+ fmt.Printf(
+ "writeCompressedPacket: comprLength=%v, uncompressedLen=%v, seq=%v\n",
+ comprLength, uncompressedLen, mc.compressSequence)
+ }
+
+ // compression header
+ putUint24(data[0:3], comprLength)
+ data[3] = mc.compressSequence
+ putUint24(data[4:7], uncompressedLen)
+
+ mc.compressSequence++
+ return mc.writeWithTimeout(data)
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/connection.go b/vendor/github.com/go-sql-driver/mysql/connection.go
index 9539077c..3e455a3f 100644
--- a/vendor/github.com/go-sql-driver/mysql/connection.go
+++ b/vendor/github.com/go-sql-driver/mysql/connection.go
@@ -13,28 +13,32 @@ import (
"database/sql"
"database/sql/driver"
"encoding/json"
+ "fmt"
"io"
"net"
+ "runtime"
"strconv"
"strings"
+ "sync/atomic"
"time"
)
type mysqlConn struct {
buf buffer
netConn net.Conn
- rawConn net.Conn // underlying connection when netConn is TLS connection.
- affectedRows uint64
- insertId uint64
+ rawConn net.Conn // underlying connection when netConn is TLS connection.
+ result mysqlResult // managed by clearResult() and handleOkPacket().
+ compIO *compIO
cfg *Config
+ connector *connector
maxAllowedPacket int
maxWriteSize int
- writeTimeout time.Duration
flags clientFlag
status statusFlag
sequence uint8
+ compressSequence uint8
parseTime bool
- reset bool // set when the Go SQL package calls ResetSession
+ compress bool
// for context support (Go 1.8+)
watching bool
@@ -42,61 +46,92 @@ type mysqlConn struct {
closech chan struct{}
finished chan<- struct{}
canceled atomicError // set non-nil if conn is canceled
- closed atomicBool // set when conn is closed, before closech is closed
+ closed atomic.Bool // set when conn is closed, before closech is closed
+}
+
+// Helper function to call the per-connection logger.
+func (mc *mysqlConn) log(v ...any) {
+ _, filename, lineno, ok := runtime.Caller(1)
+ if ok {
+ pos := strings.LastIndexByte(filename, '/')
+ if pos != -1 {
+ filename = filename[pos+1:]
+ }
+ prefix := fmt.Sprintf("%s:%d ", filename, lineno)
+ v = append([]any{prefix}, v...)
+ }
+
+ mc.cfg.Logger.Print(v...)
+}
+
+func (mc *mysqlConn) readWithTimeout(b []byte) (int, error) {
+ to := mc.cfg.ReadTimeout
+ if to > 0 {
+ if err := mc.netConn.SetReadDeadline(time.Now().Add(to)); err != nil {
+ return 0, err
+ }
+ }
+ return mc.netConn.Read(b)
+}
+
+func (mc *mysqlConn) writeWithTimeout(b []byte) (int, error) {
+ to := mc.cfg.WriteTimeout
+ if to > 0 {
+ if err := mc.netConn.SetWriteDeadline(time.Now().Add(to)); err != nil {
+ return 0, err
+ }
+ }
+ return mc.netConn.Write(b)
+}
+
+func (mc *mysqlConn) resetSequence() {
+ mc.sequence = 0
+ mc.compressSequence = 0
+}
+
+// syncSequence must be called when finished writing a packet and before starting to read.
+func (mc *mysqlConn) syncSequence() {
+ // Syncs compressionSequence to sequence.
+ // This is not documented but done in `net_flush()` in MySQL and MariaDB.
+ // https://github.com/mariadb-corporation/mariadb-connector-c/blob/8228164f850b12353da24df1b93a1e53cc5e85e9/libmariadb/ma_net.c#L170-L171
+ // https://github.com/mysql/mysql-server/blob/824e2b4064053f7daf17d7f3f84b7a3ed92e5fb4/sql-common/net_serv.cc#L293
+ if mc.compress {
+ mc.sequence = mc.compressSequence
+ mc.compIO.reset()
+ }
}
// Handles parameters set in DSN after the connection is established
func (mc *mysqlConn) handleParams() (err error) {
var cmdSet strings.Builder
- for param, val := range mc.cfg.Params {
- switch param {
- // Charset: character_set_connection, character_set_client, character_set_results
- case "charset":
- charsets := strings.Split(val, ",")
- for i := range charsets {
- // ignore errors here - a charset may not exist
- err = mc.exec("SET NAMES " + charsets[i])
- if err == nil {
- break
- }
- }
- if err != nil {
- return
- }
- // Other system vars accumulated in a single SET command
- default:
- if cmdSet.Len() == 0 {
- // Heuristic: 29 chars for each other key=value to reduce reallocations
- cmdSet.Grow(4 + len(param) + 1 + len(val) + 30*(len(mc.cfg.Params)-1))
- cmdSet.WriteString("SET ")
- } else {
- cmdSet.WriteByte(',')
- }
- cmdSet.WriteString(param)
- cmdSet.WriteByte('=')
- cmdSet.WriteString(val)
+ for param, val := range mc.cfg.Params {
+ if cmdSet.Len() == 0 {
+ // Heuristic: 29 chars for each other key=value to reduce reallocations
+ cmdSet.Grow(4 + len(param) + 3 + len(val) + 30*(len(mc.cfg.Params)-1))
+ cmdSet.WriteString("SET ")
+ } else {
+ cmdSet.WriteString(", ")
}
+ cmdSet.WriteString(param)
+ cmdSet.WriteString(" = ")
+ cmdSet.WriteString(val)
}
if cmdSet.Len() > 0 {
err = mc.exec(cmdSet.String())
- if err != nil {
- return
- }
}
return
}
+// markBadConn replaces errBadConnNoWrite with driver.ErrBadConn.
+// This function is used to return driver.ErrBadConn only when safe to retry.
func (mc *mysqlConn) markBadConn(err error) error {
- if mc == nil {
- return err
+ if err == errBadConnNoWrite {
+ return driver.ErrBadConn
}
- if err != errBadConnNoWrite {
- return err
- }
- return driver.ErrBadConn
+ return err
}
func (mc *mysqlConn) Begin() (driver.Tx, error) {
@@ -105,7 +140,6 @@ func (mc *mysqlConn) Begin() (driver.Tx, error) {
func (mc *mysqlConn) begin(readOnly bool) (driver.Tx, error) {
if mc.closed.Load() {
- errLog.Print(ErrInvalidConn)
return nil, driver.ErrBadConn
}
var q string
@@ -126,12 +160,16 @@ func (mc *mysqlConn) Close() (err error) {
if !mc.closed.Load() {
err = mc.writeCommandPacket(comQuit)
}
-
- mc.cleanup()
-
+ mc.close()
return
}
+// close closes the network connection and clears results without sending COM_QUIT.
+func (mc *mysqlConn) close() {
+ mc.cleanup()
+ mc.clearResult()
+}
+
// Closes the network connection and unsets internal variables. Do not call this
// function after successfully authentication, call Close instead. This function
// is called before auth or on auth failure because MySQL will have already
@@ -143,12 +181,16 @@ func (mc *mysqlConn) cleanup() {
// Makes cleanup idempotent
close(mc.closech)
- if mc.netConn == nil {
+ conn := mc.rawConn
+ if conn == nil {
return
}
- if err := mc.netConn.Close(); err != nil {
- errLog.Print(err)
+ if err := conn.Close(); err != nil {
+ mc.log("closing connection:", err)
}
+ // This function can be called from multiple goroutines.
+ // So we can not call mc.clearResult() here.
+ // Callers should do it if they are in a safe goroutine.
}
func (mc *mysqlConn) error() error {
@@ -163,14 +205,13 @@ func (mc *mysqlConn) error() error {
func (mc *mysqlConn) Prepare(query string) (driver.Stmt, error) {
if mc.closed.Load() {
- errLog.Print(ErrInvalidConn)
return nil, driver.ErrBadConn
}
// Send command
err := mc.writeCommandPacketStr(comStmtPrepare, query)
if err != nil {
// STMT_PREPARE is safe to retry. So we can return ErrBadConn here.
- errLog.Print(err)
+ mc.log(err)
return nil, driver.ErrBadConn
}
@@ -204,8 +245,10 @@ func (mc *mysqlConn) interpolateParams(query string, args []driver.Value) (strin
buf, err := mc.buf.takeCompleteBuffer()
if err != nil {
// can not take the buffer. Something must be wrong with the connection
- errLog.Print(err)
- return "", ErrInvalidConn
+ mc.cleanup()
+ // interpolateParams would be called before sending any query.
+ // So it's safe to retry.
+ return "", driver.ErrBadConn
}
buf = buf[:0]
argPos := 0
@@ -246,7 +289,7 @@ func (mc *mysqlConn) interpolateParams(query string, args []driver.Value) (strin
buf = append(buf, "'0000-00-00'"...)
} else {
buf = append(buf, '\'')
- buf, err = appendDateTime(buf, v.In(mc.cfg.Loc))
+ buf, err = appendDateTime(buf, v.In(mc.cfg.Loc), mc.cfg.timeTruncate)
if err != nil {
return "", err
}
@@ -296,7 +339,6 @@ func (mc *mysqlConn) interpolateParams(query string, args []driver.Value) (strin
func (mc *mysqlConn) Exec(query string, args []driver.Value) (driver.Result, error) {
if mc.closed.Load() {
- errLog.Print(ErrInvalidConn)
return nil, driver.ErrBadConn
}
if len(args) != 0 {
@@ -310,28 +352,25 @@ func (mc *mysqlConn) Exec(query string, args []driver.Value) (driver.Result, err
}
query = prepared
}
- mc.affectedRows = 0
- mc.insertId = 0
err := mc.exec(query)
if err == nil {
- return &mysqlResult{
- affectedRows: int64(mc.affectedRows),
- insertId: int64(mc.insertId),
- }, err
+ copied := mc.result
+ return &copied, err
}
return nil, mc.markBadConn(err)
}
// Internal function to execute commands
func (mc *mysqlConn) exec(query string) error {
+ handleOk := mc.clearResult()
// Send command
if err := mc.writeCommandPacketStr(comQuery, query); err != nil {
return mc.markBadConn(err)
}
// Read Result
- resLen, err := mc.readResultSetHeaderPacket()
+ resLen, err := handleOk.readResultSetHeaderPacket()
if err != nil {
return err
}
@@ -348,7 +387,7 @@ func (mc *mysqlConn) exec(query string) error {
}
}
- return mc.discardResults()
+ return handleOk.discardResults()
}
func (mc *mysqlConn) Query(query string, args []driver.Value) (driver.Rows, error) {
@@ -356,8 +395,9 @@ func (mc *mysqlConn) Query(query string, args []driver.Value) (driver.Rows, erro
}
func (mc *mysqlConn) query(query string, args []driver.Value) (*textRows, error) {
+ handleOk := mc.clearResult()
+
if mc.closed.Load() {
- errLog.Print(ErrInvalidConn)
return nil, driver.ErrBadConn
}
if len(args) != 0 {
@@ -373,43 +413,47 @@ func (mc *mysqlConn) query(query string, args []driver.Value) (*textRows, error)
}
// Send command
err := mc.writeCommandPacketStr(comQuery, query)
- if err == nil {
- // Read Result
- var resLen int
- resLen, err = mc.readResultSetHeaderPacket()
- if err == nil {
- rows := new(textRows)
- rows.mc = mc
+ if err != nil {
+ return nil, mc.markBadConn(err)
+ }
- if resLen == 0 {
- rows.rs.done = true
+ // Read Result
+ var resLen int
+ resLen, err = handleOk.readResultSetHeaderPacket()
+ if err != nil {
+ return nil, err
+ }
- switch err := rows.NextResultSet(); err {
- case nil, io.EOF:
- return rows, nil
- default:
- return nil, err
- }
- }
+ rows := new(textRows)
+ rows.mc = mc
- // Columns
- rows.rs.columns, err = mc.readColumns(resLen)
- return rows, err
+ if resLen == 0 {
+ rows.rs.done = true
+
+ switch err := rows.NextResultSet(); err {
+ case nil, io.EOF:
+ return rows, nil
+ default:
+ return nil, err
}
}
- return nil, mc.markBadConn(err)
+
+ // Columns
+ rows.rs.columns, err = mc.readColumns(resLen)
+ return rows, err
}
// Gets the value of the given MySQL System Variable
// The returned byte slice is only valid until the next read
func (mc *mysqlConn) getSystemVar(name string) ([]byte, error) {
// Send command
+ handleOk := mc.clearResult()
if err := mc.writeCommandPacketStr(comQuery, "SELECT @@"+name); err != nil {
return nil, err
}
// Read Result
- resLen, err := mc.readResultSetHeaderPacket()
+ resLen, err := handleOk.readResultSetHeaderPacket()
if err == nil {
rows := new(textRows)
rows.mc = mc
@@ -430,7 +474,7 @@ func (mc *mysqlConn) getSystemVar(name string) ([]byte, error) {
return nil, err
}
-// finish is called when the query has canceled.
+// cancel is called when the query has been canceled.
func (mc *mysqlConn) cancel(err error) {
mc.canceled.Set(err)
mc.cleanup()
@@ -451,7 +495,6 @@ func (mc *mysqlConn) finish() {
// Ping implements driver.Pinger interface
func (mc *mysqlConn) Ping(ctx context.Context) (err error) {
if mc.closed.Load() {
- errLog.Print(ErrInvalidConn)
return driver.ErrBadConn
}
@@ -460,11 +503,12 @@ func (mc *mysqlConn) Ping(ctx context.Context) (err error) {
}
defer mc.finish()
+ handleOk := mc.clearResult()
if err = mc.writeCommandPacket(comPing); err != nil {
return mc.markBadConn(err)
}
- return mc.readResultOK()
+ return handleOk.readResultOK()
}
// BeginTx implements driver.ConnBeginTx interface
@@ -636,15 +680,42 @@ func (mc *mysqlConn) CheckNamedValue(nv *driver.NamedValue) (err error) {
// ResetSession implements driver.SessionResetter.
// (From Go 1.10)
func (mc *mysqlConn) ResetSession(ctx context.Context) error {
- if mc.closed.Load() {
+ if mc.closed.Load() || mc.buf.busy() {
return driver.ErrBadConn
}
- mc.reset = true
+
+ // Perform a stale connection check. We only perform this check for
+ // the first query on a connection that has been checked out of the
+ // connection pool: a fresh connection from the pool is more likely
+ // to be stale, and it has not performed any previous writes that
+ // could cause data corruption, so it's safe to return ErrBadConn
+ // if the check fails.
+ if mc.cfg.CheckConnLiveness {
+ conn := mc.netConn
+ if mc.rawConn != nil {
+ conn = mc.rawConn
+ }
+ var err error
+ if mc.cfg.ReadTimeout != 0 {
+ err = conn.SetReadDeadline(time.Now().Add(mc.cfg.ReadTimeout))
+ }
+ if err == nil {
+ err = connCheck(conn)
+ }
+ if err != nil {
+ mc.log("closing bad idle connection: ", err)
+ return driver.ErrBadConn
+ }
+ }
+
return nil
}
// IsValid implements driver.Validator interface
// (From Go 1.15)
func (mc *mysqlConn) IsValid() bool {
- return !mc.closed.Load()
+ return !mc.closed.Load() && !mc.buf.busy()
}
+
+var _ driver.SessionResetter = &mysqlConn{}
+var _ driver.Validator = &mysqlConn{}
diff --git a/vendor/github.com/go-sql-driver/mysql/connector.go b/vendor/github.com/go-sql-driver/mysql/connector.go
index d567b4e4..bc1d46af 100644
--- a/vendor/github.com/go-sql-driver/mysql/connector.go
+++ b/vendor/github.com/go-sql-driver/mysql/connector.go
@@ -11,11 +11,55 @@ package mysql
import (
"context"
"database/sql/driver"
+ "fmt"
"net"
+ "os"
+ "strconv"
+ "strings"
)
type connector struct {
- cfg *Config // immutable private copy.
+ cfg *Config // immutable private copy.
+ encodedAttributes string // Encoded connection attributes.
+}
+
+func encodeConnectionAttributes(cfg *Config) string {
+ connAttrsBuf := make([]byte, 0)
+
+ // default connection attributes
+ connAttrsBuf = appendLengthEncodedString(connAttrsBuf, connAttrClientName)
+ connAttrsBuf = appendLengthEncodedString(connAttrsBuf, connAttrClientNameValue)
+ connAttrsBuf = appendLengthEncodedString(connAttrsBuf, connAttrOS)
+ connAttrsBuf = appendLengthEncodedString(connAttrsBuf, connAttrOSValue)
+ connAttrsBuf = appendLengthEncodedString(connAttrsBuf, connAttrPlatform)
+ connAttrsBuf = appendLengthEncodedString(connAttrsBuf, connAttrPlatformValue)
+ connAttrsBuf = appendLengthEncodedString(connAttrsBuf, connAttrPid)
+ connAttrsBuf = appendLengthEncodedString(connAttrsBuf, strconv.Itoa(os.Getpid()))
+ serverHost, _, _ := net.SplitHostPort(cfg.Addr)
+ if serverHost != "" {
+ connAttrsBuf = appendLengthEncodedString(connAttrsBuf, connAttrServerHost)
+ connAttrsBuf = appendLengthEncodedString(connAttrsBuf, serverHost)
+ }
+
+ // user-defined connection attributes
+ for _, connAttr := range strings.Split(cfg.ConnectionAttributes, ",") {
+ k, v, found := strings.Cut(connAttr, ":")
+ if !found {
+ continue
+ }
+ connAttrsBuf = appendLengthEncodedString(connAttrsBuf, k)
+ connAttrsBuf = appendLengthEncodedString(connAttrsBuf, v)
+ }
+
+ return string(connAttrsBuf)
+}
+
+func newConnector(cfg *Config) *connector {
+ encodedAttributes := encodeConnectionAttributes(cfg)
+ return &connector{
+ cfg: cfg,
+ encodedAttributes: encodedAttributes,
+ }
}
// Connect implements driver.Connector interface.
@@ -23,43 +67,56 @@ type connector struct {
func (c *connector) Connect(ctx context.Context) (driver.Conn, error) {
var err error
+ // Invoke beforeConnect if present, with a copy of the configuration
+ cfg := c.cfg
+ if c.cfg.beforeConnect != nil {
+ cfg = c.cfg.Clone()
+ err = c.cfg.beforeConnect(ctx, cfg)
+ if err != nil {
+ return nil, err
+ }
+ }
+
// New mysqlConn
mc := &mysqlConn{
maxAllowedPacket: maxPacketSize,
maxWriteSize: maxPacketSize - 1,
closech: make(chan struct{}),
- cfg: c.cfg,
+ cfg: cfg,
+ connector: c,
}
mc.parseTime = mc.cfg.ParseTime
// Connect to Server
- dialsLock.RLock()
- dial, ok := dials[mc.cfg.Net]
- dialsLock.RUnlock()
- if ok {
- dctx := ctx
- if mc.cfg.Timeout > 0 {
- var cancel context.CancelFunc
- dctx, cancel = context.WithTimeout(ctx, c.cfg.Timeout)
- defer cancel()
- }
- mc.netConn, err = dial(dctx, mc.cfg.Addr)
- } else {
- nd := net.Dialer{Timeout: mc.cfg.Timeout}
- mc.netConn, err = nd.DialContext(ctx, mc.cfg.Net, mc.cfg.Addr)
+ dctx := ctx
+ if mc.cfg.Timeout > 0 {
+ var cancel context.CancelFunc
+ dctx, cancel = context.WithTimeout(ctx, c.cfg.Timeout)
+ defer cancel()
}
+ if c.cfg.DialFunc != nil {
+ mc.netConn, err = c.cfg.DialFunc(dctx, mc.cfg.Net, mc.cfg.Addr)
+ } else {
+ dialsLock.RLock()
+ dial, ok := dials[mc.cfg.Net]
+ dialsLock.RUnlock()
+ if ok {
+ mc.netConn, err = dial(dctx, mc.cfg.Addr)
+ } else {
+ nd := net.Dialer{}
+ mc.netConn, err = nd.DialContext(dctx, mc.cfg.Net, mc.cfg.Addr)
+ }
+ }
if err != nil {
return nil, err
}
+ mc.rawConn = mc.netConn
// Enable TCP Keepalives on TCP connections
if tc, ok := mc.netConn.(*net.TCPConn); ok {
if err := tc.SetKeepAlive(true); err != nil {
- // Don't send COM_QUIT before handshake.
- mc.netConn.Close()
- mc.netConn = nil
- return nil, err
+ c.cfg.Logger.Print(err)
}
}
@@ -71,11 +128,7 @@ func (c *connector) Connect(ctx context.Context) (driver.Conn, error) {
}
defer mc.finish()
- mc.buf = newBuffer(mc.netConn)
-
- // Set I/O timeouts
- mc.buf.timeout = mc.cfg.ReadTimeout
- mc.writeTimeout = mc.cfg.WriteTimeout
+ mc.buf = newBuffer()
// Reading Handshake Initialization Packet
authData, plugin, err := mc.readHandshakePacket()
@@ -92,7 +145,7 @@ func (c *connector) Connect(ctx context.Context) (driver.Conn, error) {
authResp, err := mc.auth(authData, plugin)
if err != nil {
// try the default auth plugin, if using the requested plugin failed
- errLog.Print("could not use requested auth plugin '"+plugin+"': ", err.Error())
+ c.cfg.Logger.Print("could not use requested auth plugin '"+plugin+"': ", err.Error())
plugin = defaultAuthPlugin
authResp, err = mc.auth(authData, plugin)
if err != nil {
@@ -114,6 +167,10 @@ func (c *connector) Connect(ctx context.Context) (driver.Conn, error) {
return nil, err
}
+ if mc.cfg.compress && mc.flags&clientCompress == clientCompress {
+ mc.compress = true
+ mc.compIO = newCompIO(mc)
+ }
if mc.cfg.MaxAllowedPacket > 0 {
mc.maxAllowedPacket = mc.cfg.MaxAllowedPacket
} else {
@@ -123,12 +180,36 @@ func (c *connector) Connect(ctx context.Context) (driver.Conn, error) {
mc.Close()
return nil, err
}
- mc.maxAllowedPacket = stringToInt(maxap) - 1
+ n, err := strconv.Atoi(string(maxap))
+ if err != nil {
+ mc.Close()
+ return nil, fmt.Errorf("invalid max_allowed_packet value (%q): %w", maxap, err)
+ }
+ mc.maxAllowedPacket = n - 1
}
if mc.maxAllowedPacket < maxPacketSize {
mc.maxWriteSize = mc.maxAllowedPacket
}
+ // Charset: character_set_connection, character_set_client, character_set_results
+ if len(mc.cfg.charsets) > 0 {
+ for _, cs := range mc.cfg.charsets {
+ // ignore errors here - a charset may not exist
+ if mc.cfg.Collation != "" {
+ err = mc.exec("SET NAMES " + cs + " COLLATE " + mc.cfg.Collation)
+ } else {
+ err = mc.exec("SET NAMES " + cs)
+ }
+ if err == nil {
+ break
+ }
+ }
+ if err != nil {
+ mc.Close()
+ return nil, err
+ }
+ }
+
// Handle DSN Params
err = mc.handleParams()
if err != nil {
diff --git a/vendor/github.com/go-sql-driver/mysql/const.go b/vendor/github.com/go-sql-driver/mysql/const.go
index b1e6b85e..4aadcd64 100644
--- a/vendor/github.com/go-sql-driver/mysql/const.go
+++ b/vendor/github.com/go-sql-driver/mysql/const.go
@@ -8,12 +8,27 @@
package mysql
+import "runtime"
+
const (
+ debug = false // for debugging. Set true only in development.
+
defaultAuthPlugin = "mysql_native_password"
- defaultMaxAllowedPacket = 4 << 20 // 4 MiB
+ defaultMaxAllowedPacket = 64 << 20 // 64 MiB. See https://github.com/go-sql-driver/mysql/issues/1355
minProtocolVersion = 10
maxPacketSize = 1<<24 - 1
timeFormat = "2006-01-02 15:04:05.999999"
+
+ // Connection attributes
+ // See https://dev.mysql.com/doc/refman/8.0/en/performance-schema-connection-attribute-tables.html#performance-schema-connection-attributes-available
+ connAttrClientName = "_client_name"
+ connAttrClientNameValue = "Go-MySQL-Driver"
+ connAttrOS = "_os"
+ connAttrOSValue = runtime.GOOS
+ connAttrPlatform = "_platform"
+ connAttrPlatformValue = runtime.GOARCH
+ connAttrPid = "_pid"
+ connAttrServerHost = "_server_host"
)
// MySQL constants documentation:
@@ -112,7 +127,10 @@ const (
fieldTypeBit
)
const (
- fieldTypeJSON fieldType = iota + 0xf5
+ fieldTypeVector fieldType = iota + 0xf2
+ fieldTypeInvalid
+ fieldTypeBool
+ fieldTypeJSON
fieldTypeNewDecimal
fieldTypeEnum
fieldTypeSet
diff --git a/vendor/github.com/go-sql-driver/mysql/driver.go b/vendor/github.com/go-sql-driver/mysql/driver.go
index ad7aec21..105316b8 100644
--- a/vendor/github.com/go-sql-driver/mysql/driver.go
+++ b/vendor/github.com/go-sql-driver/mysql/driver.go
@@ -55,6 +55,15 @@ func RegisterDialContext(net string, dial DialContextFunc) {
dials[net] = dial
}
+// DeregisterDialContext removes the custom dial function registered with the given net.
+func DeregisterDialContext(net string) {
+ dialsLock.Lock()
+ defer dialsLock.Unlock()
+ if dials != nil {
+ delete(dials, net)
+ }
+}
+
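// Editor's note: a sketch of a custom network registered via
// RegisterDialContext and later removed with the new DeregisterDialContext;
// "mynet" and the DSN host are hypothetical.
package main

import (
	"context"
	"net"

	"github.com/go-sql-driver/mysql"
)

func main() {
	mysql.RegisterDialContext("mynet", func(ctx context.Context, addr string) (net.Conn, error) {
		// e.g. add instrumentation or proxying here, then dial plain TCP.
		var d net.Dialer
		return d.DialContext(ctx, "tcp", addr)
	})
	// A DSN would then use: user:pass@mynet(db.internal:3306)/appdb
	mysql.DeregisterDialContext("mynet")
}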
// RegisterDial registers a custom dial function. It can then be used by the
// network address mynet(addr), where mynet is the registered new network.
// addr is passed as a parameter to the dial function.
@@ -74,14 +83,18 @@ func (d MySQLDriver) Open(dsn string) (driver.Conn, error) {
if err != nil {
return nil, err
}
- c := &connector{
- cfg: cfg,
- }
+ c := newConnector(cfg)
return c.Connect(context.Background())
}
+// This variable can be replaced with -ldflags like below:
+// go build "-ldflags=-X github.com/go-sql-driver/mysql.driverName=custom"
+var driverName = "mysql"
+
func init() {
- sql.Register("mysql", &MySQLDriver{})
+ if driverName != "" {
+ sql.Register(driverName, &MySQLDriver{})
+ }
}
// NewConnector returns new driver.Connector.
@@ -92,7 +105,7 @@ func NewConnector(cfg *Config) (driver.Connector, error) {
if err := cfg.normalize(); err != nil {
return nil, err
}
- return &connector{cfg: cfg}, nil
+ return newConnector(cfg), nil
}
// OpenConnector implements driver.DriverContext.
@@ -101,7 +114,5 @@ func (d MySQLDriver) OpenConnector(dsn string) (driver.Connector, error) {
if err != nil {
return nil, err
}
- return &connector{
- cfg: cfg,
- }, nil
+ return newConnector(cfg), nil
}
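// Editor's note: with the driverName ldflags override above, a binary built as
//
//	go build "-ldflags=-X github.com/go-sql-driver/mysql.driverName=custom"
//
// registers the driver under "custom" instead of "mysql". A sketch of the
// caller side; the DSN is a placeholder:
package main

import (
	"database/sql"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	// Works only when built with the ldflags override; otherwise the driver
	// is registered under its default name "mysql".
	db, err := sql.Open("custom", "user:pass@tcp(127.0.0.1:3306)/appdb")
	if err != nil {
		panic(err)
	}
	defer db.Close()
}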
diff --git a/vendor/github.com/go-sql-driver/mysql/dsn.go b/vendor/github.com/go-sql-driver/mysql/dsn.go
index 4b71aaab..ecf62567 100644
--- a/vendor/github.com/go-sql-driver/mysql/dsn.go
+++ b/vendor/github.com/go-sql-driver/mysql/dsn.go
@@ -10,6 +10,7 @@ package mysql
import (
"bytes"
+ "context"
"crypto/rsa"
"crypto/tls"
"errors"
@@ -34,22 +35,29 @@ var (
// If a new Config is created instead of being parsed from a DSN string,
// the NewConfig function should be used, which sets default values.
type Config struct {
- User string // Username
- Passwd string // Password (requires User)
- Net string // Network type
- Addr string // Network address (requires Net)
- DBName string // Database name
- Params map[string]string // Connection parameters
- Collation string // Connection collation
- Loc *time.Location // Location for time.Time values
- MaxAllowedPacket int // Max packet size allowed
- ServerPubKey string // Server public key name
- pubKey *rsa.PublicKey // Server public key
- TLSConfig string // TLS configuration name
- TLS *tls.Config // TLS configuration, its priority is higher than TLSConfig
- Timeout time.Duration // Dial timeout
- ReadTimeout time.Duration // I/O read timeout
- WriteTimeout time.Duration // I/O write timeout
+ // non boolean fields
+
+ User string // Username
+ Passwd string // Password (requires User)
+ Net string // Network (e.g. "tcp", "tcp6", "unix". default: "tcp")
+ Addr string // Address (default: "127.0.0.1:3306" for "tcp" and "/tmp/mysql.sock" for "unix")
+ DBName string // Database name
+ Params map[string]string // Connection parameters
+ ConnectionAttributes string // Connection Attributes, comma-delimited string of user-defined "key:value" pairs
+ Collation string // Connection collation. When set, this will be set in SET NAMES <charset> COLLATE <collation> query
+ Loc *time.Location // Location for time.Time values
+ MaxAllowedPacket int // Max packet size allowed
+ ServerPubKey string // Server public key name
+ TLSConfig string // TLS configuration name
+ TLS *tls.Config // TLS configuration, its priority is higher than TLSConfig
+ Timeout time.Duration // Dial timeout
+ ReadTimeout time.Duration // I/O read timeout
+ WriteTimeout time.Duration // I/O write timeout
+ Logger Logger // Logger
+ // DialFunc specifies the dial function for creating connections
+ DialFunc func(ctx context.Context, network, addr string) (net.Conn, error)
+
+ // boolean fields
AllowAllFiles bool // Allow all files to be used with LOAD DATA LOCAL INFILE
AllowCleartextPasswords bool // Allows the cleartext client side plugin
@@ -63,17 +71,83 @@ type Config struct {
MultiStatements bool // Allow multiple statements in one query
ParseTime bool // Parse time values to time.Time
RejectReadOnly bool // Reject read-only connections
+
+ // unexported fields. new options should go here.
+ // boolean fields first, in alphabetical order.
+
+ compress bool // Enable zlib compression
+
+ beforeConnect func(context.Context, *Config) error // Invoked before a connection is established
+ pubKey *rsa.PublicKey // Server public key
+ timeTruncate time.Duration // Truncate time.Time values to the specified duration
+ charsets []string // Connection charset. When set, this will be set in SET NAMES query
}
+// Functional Options Pattern
+// https://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis
+type Option func(*Config) error
+
// NewConfig creates a new Config and sets default values.
func NewConfig() *Config {
- return &Config{
- Collation: defaultCollation,
+ cfg := &Config{
Loc: time.UTC,
MaxAllowedPacket: defaultMaxAllowedPacket,
+ Logger: defaultLogger,
AllowNativePasswords: true,
CheckConnLiveness: true,
}
+ return cfg
+}
+
+// Apply applies the given options to the Config object.
+func (c *Config) Apply(opts ...Option) error {
+ for _, opt := range opts {
+ err := opt(c)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// TimeTruncate sets the time duration to truncate time.Time values in
+// query parameters.
+func TimeTruncate(d time.Duration) Option {
+ return func(cfg *Config) error {
+ cfg.timeTruncate = d
+ return nil
+ }
+}
+
+// BeforeConnect sets the function to be invoked before a connection is established.
+func BeforeConnect(fn func(context.Context, *Config) error) Option {
+ return func(cfg *Config) error {
+ cfg.beforeConnect = fn
+ return nil
+ }
+}
+
+// EnableCompression enables or disables zlib compression.
+func EnableCompression(yes bool) Option {
+ return func(cfg *Config) error {
+ cfg.compress = yes
+ return nil
+ }
+}
+
+// Charset sets the connection charset and collation.
+//
+// charset is the connection charset.
+// collation is the connection collation. It can be an empty string.
+//
+// When collation is not specified, a `SET NAMES <charset>` command is sent when the connection is established.
+// When collation is specified, a `SET NAMES <charset> COLLATE <collation>` command is sent when the connection is established.
+func Charset(charset, collation string) Option {
+ return func(cfg *Config) error {
+ cfg.charsets = []string{charset}
+ cfg.Collation = collation
+ return nil
+ }
}
func (cfg *Config) Clone() *Config {
@@ -97,7 +171,7 @@ func (cfg *Config) Clone() *Config {
}
func (cfg *Config) normalize() error {
- if cfg.InterpolateParams && unsafeCollations[cfg.Collation] {
+ if cfg.InterpolateParams && cfg.Collation != "" && unsafeCollations[cfg.Collation] {
return errInvalidDSNUnsafeCollation
}
@@ -153,6 +227,10 @@ func (cfg *Config) normalize() error {
}
}
+ if cfg.Logger == nil {
+ cfg.Logger = defaultLogger
+ }
+
return nil
}
@@ -171,6 +249,8 @@ func writeDSNParam(buf *bytes.Buffer, hasParam *bool, name, value string) {
// FormatDSN formats the given Config into a DSN string which can be passed to
// the driver.
+//
+// Note: use [NewConnector] and [database/sql.OpenDB] to open a connection from a [*Config].
func (cfg *Config) FormatDSN() string {
var buf bytes.Buffer
@@ -196,7 +276,7 @@ func (cfg *Config) FormatDSN() string {
// /dbname
buf.WriteByte('/')
- buf.WriteString(cfg.DBName)
+ buf.WriteString(url.PathEscape(cfg.DBName))
// [?param1=value1&...¶mN=valueN]
hasParam := false
@@ -230,7 +310,11 @@ func (cfg *Config) FormatDSN() string {
writeDSNParam(&buf, &hasParam, "clientFoundRows", "true")
}
- if col := cfg.Collation; col != defaultCollation && len(col) > 0 {
+ if charsets := cfg.charsets; len(charsets) > 0 {
+ writeDSNParam(&buf, &hasParam, "charset", strings.Join(charsets, ","))
+ }
+
+ if col := cfg.Collation; col != "" {
writeDSNParam(&buf, &hasParam, "collation", col)
}
@@ -238,6 +322,14 @@ func (cfg *Config) FormatDSN() string {
writeDSNParam(&buf, &hasParam, "columnsWithAlias", "true")
}
+ if cfg.ConnectionAttributes != "" {
+ writeDSNParam(&buf, &hasParam, "connectionAttributes", url.QueryEscape(cfg.ConnectionAttributes))
+ }
+
+ if cfg.compress {
+ writeDSNParam(&buf, &hasParam, "compress", "true")
+ }
+
if cfg.InterpolateParams {
writeDSNParam(&buf, &hasParam, "interpolateParams", "true")
}
@@ -254,6 +346,10 @@ func (cfg *Config) FormatDSN() string {
writeDSNParam(&buf, &hasParam, "parseTime", "true")
}
+ if cfg.timeTruncate > 0 {
+ writeDSNParam(&buf, &hasParam, "timeTruncate", cfg.timeTruncate.String())
+ }
+
if cfg.ReadTimeout > 0 {
writeDSNParam(&buf, &hasParam, "readTimeout", cfg.ReadTimeout.String())
}
@@ -358,7 +454,11 @@ func ParseDSN(dsn string) (cfg *Config, err error) {
break
}
}
- cfg.DBName = dsn[i+1 : j]
+
+ dbname := dsn[i+1 : j]
+ if cfg.DBName, err = url.PathUnescape(dbname); err != nil {
+ return nil, fmt.Errorf("invalid dbname %q: %w", dbname, err)
+ }
break
}
@@ -378,13 +478,13 @@ func ParseDSN(dsn string) (cfg *Config, err error) {
// Values must be url.QueryEscape'ed
func parseDSNParams(cfg *Config, params string) (err error) {
for _, v := range strings.Split(params, "&") {
- param := strings.SplitN(v, "=", 2)
- if len(param) != 2 {
+ key, value, found := strings.Cut(v, "=")
+ if !found {
continue
}
// cfg params
- switch value := param[1]; param[0] {
+ switch key {
// Disable INFILE allowlist / enable all files
case "allowAllFiles":
var isBool bool
@@ -441,6 +541,10 @@ func parseDSNParams(cfg *Config, params string) (err error) {
return errors.New("invalid bool value: " + value)
}
+ // charset
+ case "charset":
+ cfg.charsets = strings.Split(value, ",")
+
// Collation
case "collation":
cfg.Collation = value
@@ -454,7 +558,11 @@ func parseDSNParams(cfg *Config, params string) (err error) {
// Compression
case "compress":
- return errors.New("compression not implemented yet")
+ var isBool bool
+ cfg.compress, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
// Enable client side placeholder substitution
case "interpolateParams":
@@ -490,6 +598,13 @@ func parseDSNParams(cfg *Config, params string) (err error) {
return errors.New("invalid bool value: " + value)
}
+ // time.Time truncation
+ case "timeTruncate":
+ cfg.timeTruncate, err = time.ParseDuration(value)
+ if err != nil {
+ return fmt.Errorf("invalid timeTruncate value: %v, error: %w", value, err)
+ }
+
// I/O read Timeout
case "readTimeout":
cfg.ReadTimeout, err = time.ParseDuration(value)
@@ -554,13 +669,22 @@ func parseDSNParams(cfg *Config, params string) (err error) {
if err != nil {
return
}
+
+ // Connection attributes
+ case "connectionAttributes":
+ connectionAttributes, err := url.QueryUnescape(value)
+ if err != nil {
+ return fmt.Errorf("invalid connectionAttributes value: %v", err)
+ }
+ cfg.ConnectionAttributes = connectionAttributes
+
default:
// lazy init
if cfg.Params == nil {
cfg.Params = make(map[string]string)
}
- if cfg.Params[param[0]], err = url.QueryUnescape(value); err != nil {
+ if cfg.Params[key], err = url.QueryUnescape(value); err != nil {
return
}
}
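// Editor's note: the same settings expressed as DSN parameters handled by
// parseDSNParams above; host and credentials are placeholders. Note that
// connectionAttributes values must be query-escaped ("%3A" is ":").
package main

import (
	"fmt"

	"github.com/go-sql-driver/mysql"
)

func main() {
	dsn := "user:pass@tcp(127.0.0.1:3306)/appdb" +
		"?charset=utf8mb4&compress=true&timeTruncate=1ms" +
		"&connectionAttributes=program_name%3Agarm"
	cfg, err := mysql.ParseDSN(dsn)
	if err != nil {
		panic(err)
	}
	fmt.Println(cfg.FormatDSN()) // round-trips through the writers above
}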
diff --git a/vendor/github.com/go-sql-driver/mysql/errors.go b/vendor/github.com/go-sql-driver/mysql/errors.go
index 7c037e7d..584617b1 100644
--- a/vendor/github.com/go-sql-driver/mysql/errors.go
+++ b/vendor/github.com/go-sql-driver/mysql/errors.go
@@ -21,36 +21,42 @@ var (
ErrMalformPkt = errors.New("malformed packet")
ErrNoTLS = errors.New("TLS requested but server does not support TLS")
ErrCleartextPassword = errors.New("this user requires clear text authentication. If you still want to use it, please add 'allowCleartextPasswords=1' to your DSN")
- ErrNativePassword = errors.New("this user requires mysql native password authentication.")
+ ErrNativePassword = errors.New("this user requires mysql native password authentication")
ErrOldPassword = errors.New("this user requires old password authentication. If you still want to use it, please add 'allowOldPasswords=1' to your DSN. See also https://github.com/go-sql-driver/mysql/wiki/old_passwords")
ErrUnknownPlugin = errors.New("this authentication plugin is not supported")
ErrOldProtocol = errors.New("MySQL server does not support required protocol 41+")
ErrPktSync = errors.New("commands out of sync. You can't run this command now")
ErrPktSyncMul = errors.New("commands out of sync. Did you run multiple statements at once?")
- ErrPktTooLarge = errors.New("packet for query is too large. Try adjusting the 'max_allowed_packet' variable on the server")
+ ErrPktTooLarge = errors.New("packet for query is too large. Try adjusting the `Config.MaxAllowedPacket`")
ErrBusyBuffer = errors.New("busy buffer")
// errBadConnNoWrite is used for connection errors where nothing was sent to the database yet.
// If this happens first in a function starting a database interaction, it should be replaced by driver.ErrBadConn
- // to trigger a resend.
+ // to trigger a resend. Use mc.markBadConn(err) to do this.
// See https://github.com/go-sql-driver/mysql/pull/302
errBadConnNoWrite = errors.New("bad connection")
)
-var errLog = Logger(log.New(os.Stderr, "[mysql] ", log.Ldate|log.Ltime|log.Lshortfile))
+var defaultLogger = Logger(log.New(os.Stderr, "[mysql] ", log.Ldate|log.Ltime))
// Logger is used to log critical error messages.
type Logger interface {
- Print(v ...interface{})
+ Print(v ...any)
}
-// SetLogger is used to set the logger for critical errors.
+// NopLogger is a nop implementation of the Logger interface.
+type NopLogger struct{}
+
+// Print implements Logger interface.
+func (nl *NopLogger) Print(_ ...any) {}
+
+// SetLogger is used to set the default logger for critical errors.
// The initial logger is os.Stderr.
func SetLogger(logger Logger) error {
if logger == nil {
return errors.New("logger is nil")
}
- errLog = logger
+ defaultLogger = logger
return nil
}
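// Editor's note: a minimal sketch of silencing driver logs, either globally
// with the new NopLogger or per connection via the new Config.Logger field.
package main

import "github.com/go-sql-driver/mysql"

func main() {
	// Global default logger used by connections without their own Logger.
	_ = mysql.SetLogger(&mysql.NopLogger{})

	// Per-connection logger.
	cfg := mysql.NewConfig()
	cfg.Logger = &mysql.NopLogger{}
	_ = cfg
}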
diff --git a/vendor/github.com/go-sql-driver/mysql/fields.go b/vendor/github.com/go-sql-driver/mysql/fields.go
index e0654a83..be5cd809 100644
--- a/vendor/github.com/go-sql-driver/mysql/fields.go
+++ b/vendor/github.com/go-sql-driver/mysql/fields.go
@@ -18,7 +18,7 @@ func (mf *mysqlField) typeDatabaseName() string {
case fieldTypeBit:
return "BIT"
case fieldTypeBLOB:
- if mf.charSet != collations[binaryCollation] {
+ if mf.charSet != binaryCollationID {
return "TEXT"
}
return "BLOB"
@@ -37,6 +37,9 @@ func (mf *mysqlField) typeDatabaseName() string {
case fieldTypeGeometry:
return "GEOMETRY"
case fieldTypeInt24:
+ if mf.flags&flagUnsigned != 0 {
+ return "UNSIGNED MEDIUMINT"
+ }
return "MEDIUMINT"
case fieldTypeJSON:
return "JSON"
@@ -46,7 +49,7 @@ func (mf *mysqlField) typeDatabaseName() string {
}
return "INT"
case fieldTypeLongBLOB:
- if mf.charSet != collations[binaryCollation] {
+ if mf.charSet != binaryCollationID {
return "LONGTEXT"
}
return "LONGBLOB"
@@ -56,7 +59,7 @@ func (mf *mysqlField) typeDatabaseName() string {
}
return "BIGINT"
case fieldTypeMediumBLOB:
- if mf.charSet != collations[binaryCollation] {
+ if mf.charSet != binaryCollationID {
return "MEDIUMTEXT"
}
return "MEDIUMBLOB"
@@ -74,7 +77,12 @@ func (mf *mysqlField) typeDatabaseName() string {
}
return "SMALLINT"
case fieldTypeString:
- if mf.charSet == collations[binaryCollation] {
+ if mf.flags&flagEnum != 0 {
+ return "ENUM"
+ } else if mf.flags&flagSet != 0 {
+ return "SET"
+ }
+ if mf.charSet == binaryCollationID {
return "BINARY"
}
return "CHAR"
@@ -88,43 +96,47 @@ func (mf *mysqlField) typeDatabaseName() string {
}
return "TINYINT"
case fieldTypeTinyBLOB:
- if mf.charSet != collations[binaryCollation] {
+ if mf.charSet != binaryCollationID {
return "TINYTEXT"
}
return "TINYBLOB"
case fieldTypeVarChar:
- if mf.charSet == collations[binaryCollation] {
+ if mf.charSet == binaryCollationID {
return "VARBINARY"
}
return "VARCHAR"
case fieldTypeVarString:
- if mf.charSet == collations[binaryCollation] {
+ if mf.charSet == binaryCollationID {
return "VARBINARY"
}
return "VARCHAR"
case fieldTypeYear:
return "YEAR"
+ case fieldTypeVector:
+ return "VECTOR"
default:
return ""
}
}
var (
- scanTypeFloat32 = reflect.TypeOf(float32(0))
- scanTypeFloat64 = reflect.TypeOf(float64(0))
- scanTypeInt8 = reflect.TypeOf(int8(0))
- scanTypeInt16 = reflect.TypeOf(int16(0))
- scanTypeInt32 = reflect.TypeOf(int32(0))
- scanTypeInt64 = reflect.TypeOf(int64(0))
- scanTypeNullFloat = reflect.TypeOf(sql.NullFloat64{})
- scanTypeNullInt = reflect.TypeOf(sql.NullInt64{})
- scanTypeNullTime = reflect.TypeOf(sql.NullTime{})
- scanTypeUint8 = reflect.TypeOf(uint8(0))
- scanTypeUint16 = reflect.TypeOf(uint16(0))
- scanTypeUint32 = reflect.TypeOf(uint32(0))
- scanTypeUint64 = reflect.TypeOf(uint64(0))
- scanTypeRawBytes = reflect.TypeOf(sql.RawBytes{})
- scanTypeUnknown = reflect.TypeOf(new(interface{}))
+ scanTypeFloat32 = reflect.TypeOf(float32(0))
+ scanTypeFloat64 = reflect.TypeOf(float64(0))
+ scanTypeInt8 = reflect.TypeOf(int8(0))
+ scanTypeInt16 = reflect.TypeOf(int16(0))
+ scanTypeInt32 = reflect.TypeOf(int32(0))
+ scanTypeInt64 = reflect.TypeOf(int64(0))
+ scanTypeNullFloat = reflect.TypeOf(sql.NullFloat64{})
+ scanTypeNullInt = reflect.TypeOf(sql.NullInt64{})
+ scanTypeNullTime = reflect.TypeOf(sql.NullTime{})
+ scanTypeUint8 = reflect.TypeOf(uint8(0))
+ scanTypeUint16 = reflect.TypeOf(uint16(0))
+ scanTypeUint32 = reflect.TypeOf(uint32(0))
+ scanTypeUint64 = reflect.TypeOf(uint64(0))
+ scanTypeString = reflect.TypeOf("")
+ scanTypeNullString = reflect.TypeOf(sql.NullString{})
+ scanTypeBytes = reflect.TypeOf([]byte{})
+ scanTypeUnknown = reflect.TypeOf(new(any))
)
type mysqlField struct {
@@ -187,12 +199,18 @@ func (mf *mysqlField) scanType() reflect.Type {
}
return scanTypeNullFloat
+ case fieldTypeBit, fieldTypeTinyBLOB, fieldTypeMediumBLOB, fieldTypeLongBLOB,
+ fieldTypeBLOB, fieldTypeVarString, fieldTypeString, fieldTypeGeometry, fieldTypeVector:
+ if mf.charSet == binaryCollationID {
+ return scanTypeBytes
+ }
+ fallthrough
case fieldTypeDecimal, fieldTypeNewDecimal, fieldTypeVarChar,
- fieldTypeBit, fieldTypeEnum, fieldTypeSet, fieldTypeTinyBLOB,
- fieldTypeMediumBLOB, fieldTypeLongBLOB, fieldTypeBLOB,
- fieldTypeVarString, fieldTypeString, fieldTypeGeometry, fieldTypeJSON,
- fieldTypeTime:
- return scanTypeRawBytes
+ fieldTypeEnum, fieldTypeSet, fieldTypeJSON, fieldTypeTime:
+ if mf.flags&flagNotNULL != 0 {
+ return scanTypeString
+ }
+ return scanTypeNullString
case fieldTypeDate, fieldTypeNewDate,
fieldTypeTimestamp, fieldTypeDateTime:
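// Editor's note: the scan-type changes above (string/sql.NullString instead of
// sql.RawBytes for text columns) surface through sql.ColumnType.ScanType. A
// sketch assuming a hypothetical table with NOT NULL and nullable text columns:
package main

import (
	"database/sql"
	"fmt"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	db, _ := sql.Open("mysql", "user:pass@tcp(127.0.0.1:3306)/appdb")
	defer db.Close()

	rows, err := db.Query("SELECT name, nickname FROM users") // hypothetical schema
	if err != nil {
		panic(err)
	}
	defer rows.Close()

	cts, _ := rows.ColumnTypes()
	for _, ct := range cts {
		// e.g. string for a NOT NULL VARCHAR, sql.NullString for a nullable one
		fmt.Println(ct.Name(), ct.ScanType())
	}
}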
diff --git a/vendor/github.com/go-sql-driver/mysql/fuzz.go b/vendor/github.com/go-sql-driver/mysql/fuzz.go
deleted file mode 100644
index 3a4ec25a..00000000
--- a/vendor/github.com/go-sql-driver/mysql/fuzz.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// Go MySQL Driver - A MySQL-Driver for Go's database/sql package.
-//
-// Copyright 2020 The Go-MySQL-Driver Authors. All rights reserved.
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-// You can obtain one at http://mozilla.org/MPL/2.0/.
-
-//go:build gofuzz
-// +build gofuzz
-
-package mysql
-
-import (
- "database/sql"
-)
-
-func Fuzz(data []byte) int {
- db, err := sql.Open("mysql", string(data))
- if err != nil {
- return 0
- }
- db.Close()
- return 1
-}
diff --git a/vendor/github.com/go-sql-driver/mysql/infile.go b/vendor/github.com/go-sql-driver/mysql/infile.go
index 3279dcff..453ae091 100644
--- a/vendor/github.com/go-sql-driver/mysql/infile.go
+++ b/vendor/github.com/go-sql-driver/mysql/infile.go
@@ -17,7 +17,7 @@ import (
)
var (
- fileRegister map[string]bool
+ fileRegister map[string]struct{}
fileRegisterLock sync.RWMutex
readerRegister map[string]func() io.Reader
readerRegisterLock sync.RWMutex
@@ -37,10 +37,10 @@ func RegisterLocalFile(filePath string) {
fileRegisterLock.Lock()
// lazy map init
if fileRegister == nil {
- fileRegister = make(map[string]bool)
+ fileRegister = make(map[string]struct{})
}
- fileRegister[strings.Trim(filePath, `"`)] = true
+ fileRegister[strings.Trim(filePath, `"`)] = struct{}{}
fileRegisterLock.Unlock()
}
@@ -93,9 +93,8 @@ func deferredClose(err *error, closer io.Closer) {
const defaultPacketSize = 16 * 1024 // 16KB is small enough for disk readahead and large enough for TCP
-func (mc *mysqlConn) handleInFileRequest(name string) (err error) {
+func (mc *okHandler) handleInFileRequest(name string) (err error) {
var rdr io.Reader
- var data []byte
packetSize := defaultPacketSize
if mc.maxWriteSize < packetSize {
packetSize = mc.maxWriteSize
@@ -116,17 +115,17 @@ func (mc *mysqlConn) handleInFileRequest(name string) (err error) {
defer deferredClose(&err, cl)
}
} else {
- err = fmt.Errorf("Reader '%s' is ", name)
+ err = fmt.Errorf("reader '%s' is ", name)
}
} else {
- err = fmt.Errorf("Reader '%s' is not registered", name)
+ err = fmt.Errorf("reader '%s' is not registered", name)
}
} else { // File
name = strings.Trim(name, `"`)
fileRegisterLock.RLock()
- fr := fileRegister[name]
+ _, exists := fileRegister[name]
fileRegisterLock.RUnlock()
- if mc.cfg.AllowAllFiles || fr {
+ if mc.cfg.AllowAllFiles || exists {
var file *os.File
var fi os.FileInfo
@@ -147,14 +146,16 @@ func (mc *mysqlConn) handleInFileRequest(name string) (err error) {
}
// send content packets
+ var data []byte
+
// if packetSize == 0, the Reader contains no data
if err == nil && packetSize > 0 {
- data := make([]byte, 4+packetSize)
+ data = make([]byte, 4+packetSize)
var n int
for err == nil {
n, err = rdr.Read(data[4:])
if n > 0 {
- if ioErr := mc.writePacket(data[:4+n]); ioErr != nil {
+ if ioErr := mc.conn().writePacket(data[:4+n]); ioErr != nil {
return ioErr
}
}
@@ -168,15 +169,16 @@ func (mc *mysqlConn) handleInFileRequest(name string) (err error) {
if data == nil {
data = make([]byte, 4)
}
- if ioErr := mc.writePacket(data[:4]); ioErr != nil {
+ if ioErr := mc.conn().writePacket(data[:4]); ioErr != nil {
return ioErr
}
+ mc.conn().syncSequence()
// read OK packet
if err == nil {
return mc.readResultOK()
}
- mc.readPacket()
+ mc.conn().readPacket()
return err
}
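// Editor's note: the handler above serves LOAD DATA LOCAL INFILE requests for
// files registered with RegisterLocalFile (or any file when allowAllFiles is
// set). A sketch; path, table, and DSN are assumed, and the server must permit
// local infile.
package main

import (
	"database/sql"

	"github.com/go-sql-driver/mysql"
)

func main() {
	mysql.RegisterLocalFile("/tmp/users.csv") // assumed path

	db, _ := sql.Open("mysql", "user:pass@tcp(127.0.0.1:3306)/appdb")
	defer db.Close()

	_, err := db.Exec("LOAD DATA LOCAL INFILE '/tmp/users.csv' INTO TABLE users")
	if err != nil {
		panic(err)
	}
}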
diff --git a/vendor/github.com/go-sql-driver/mysql/nulltime.go b/vendor/github.com/go-sql-driver/mysql/nulltime.go
index 36c8a42c..316a48aa 100644
--- a/vendor/github.com/go-sql-driver/mysql/nulltime.go
+++ b/vendor/github.com/go-sql-driver/mysql/nulltime.go
@@ -38,7 +38,7 @@ type NullTime sql.NullTime
// Scan implements the Scanner interface.
// The value type must be time.Time or string / []byte (formatted time-string),
// otherwise Scan fails.
-func (nt *NullTime) Scan(value interface{}) (err error) {
+func (nt *NullTime) Scan(value any) (err error) {
if value == nil {
nt.Time, nt.Valid = time.Time{}, false
return
@@ -59,7 +59,7 @@ func (nt *NullTime) Scan(value interface{}) (err error) {
}
nt.Valid = false
- return fmt.Errorf("Can't convert %T to time.Time", value)
+ return fmt.Errorf("can't convert %T to time.Time", value)
}
// Value implements the driver Valuer interface.
diff --git a/vendor/github.com/go-sql-driver/mysql/packets.go b/vendor/github.com/go-sql-driver/mysql/packets.go
index ee05c95a..831fca6c 100644
--- a/vendor/github.com/go-sql-driver/mysql/packets.go
+++ b/vendor/github.com/go-sql-driver/mysql/packets.go
@@ -14,75 +14,108 @@ import (
"database/sql/driver"
"encoding/binary"
"encoding/json"
- "errors"
"fmt"
"io"
"math"
+ "os"
+ "strconv"
"time"
)
-// Packets documentation:
-// http://dev.mysql.com/doc/internals/en/client-server-protocol.html
+// MySQL client/server protocol documentations.
+// https://dev.mysql.com/doc/dev/mysql-server/latest/PAGE_PROTOCOL.html
+// https://mariadb.com/kb/en/clientserver-protocol/
+
+// read n bytes from mc.buf
+func (mc *mysqlConn) readNext(n int) ([]byte, error) {
+ if mc.buf.len() < n {
+ err := mc.buf.fill(n, mc.readWithTimeout)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return mc.buf.readNext(n), nil
+}
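// Editor's note: a sketch of the wire framing that readPacket below consumes:
// a 3-byte little-endian payload length plus a 1-byte sequence number (what
// the getUint24/putUint24 helpers encode and decode). decodeHeader is a
// hypothetical standalone equivalent.
package main

import "fmt"

func decodeHeader(h [4]byte) (pktLen int, seq byte) {
	pktLen = int(h[0]) | int(h[1])<<8 | int(h[2])<<16
	return pktLen, h[3]
}

func main() {
	n, seq := decodeHeader([4]byte{0x2c, 0x00, 0x00, 0x01}) // 44-byte payload, seq 1
	fmt.Println(n, seq)
}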
// Read packet to buffer 'data'
func (mc *mysqlConn) readPacket() ([]byte, error) {
var prevData []byte
+ invalidSequence := false
+
+ readNext := mc.readNext
+ if mc.compress {
+ readNext = mc.compIO.readNext
+ }
+
for {
// read packet header
- data, err := mc.buf.readNext(4)
+ data, err := readNext(4)
if err != nil {
+ mc.close()
if cerr := mc.canceled.Value(); cerr != nil {
return nil, cerr
}
- errLog.Print(err)
- mc.Close()
+ mc.log(err)
return nil, ErrInvalidConn
}
// packet length [24 bit]
- pktLen := int(uint32(data[0]) | uint32(data[1])<<8 | uint32(data[2])<<16)
+ pktLen := getUint24(data[:3])
+ seq := data[3]
// check packet sync [8 bit]
- if data[3] != mc.sequence {
- if data[3] > mc.sequence {
- return nil, ErrPktSyncMul
+ if seq != mc.sequence {
+ mc.log(fmt.Sprintf("[warn] unexpected sequence nr: expected %v, got %v", mc.sequence, seq))
+ // MySQL and MariaDB don't check the packet number in compressed packets.
+ if !mc.compress {
+ // For large packets, we stop reading as soon as a sync error occurs.
+ if len(prevData) > 0 {
+ mc.close()
+ return nil, ErrPktSyncMul
+ }
+ invalidSequence = true
}
- return nil, ErrPktSync
}
- mc.sequence++
+ mc.sequence = seq + 1
// packets with length 0 terminate a previous packet which is a
// multiple of (2^24)-1 bytes long
if pktLen == 0 {
// there was no previous packet
if prevData == nil {
- errLog.Print(ErrMalformPkt)
- mc.Close()
+ mc.log(ErrMalformPkt)
+ mc.close()
return nil, ErrInvalidConn
}
-
return prevData, nil
}
// read packet body [pktLen bytes]
- data, err = mc.buf.readNext(pktLen)
+ data, err = readNext(pktLen)
if err != nil {
+ mc.close()
if cerr := mc.canceled.Value(); cerr != nil {
return nil, cerr
}
- errLog.Print(err)
- mc.Close()
+ mc.log(err)
return nil, ErrInvalidConn
}
// return data if this was the last packet
if pktLen < maxPacketSize {
// zero allocations for non-split packets
- if prevData == nil {
- return data, nil
+ if prevData != nil {
+ data = append(prevData, data...)
}
-
- return append(prevData, data...), nil
+ if invalidSequence {
+ mc.close()
+ // return sync error only for regular packet.
+ // error packets may have wrong sequence number.
+ if data[0] != iERR {
+ return nil, ErrPktSync
+ }
+ }
+ return data, nil
}
prevData = append(prevData, data...)
@@ -92,88 +125,52 @@ func (mc *mysqlConn) readPacket() ([]byte, error) {
// Write packet buffer 'data'
func (mc *mysqlConn) writePacket(data []byte) error {
pktLen := len(data) - 4
-
if pktLen > mc.maxAllowedPacket {
return ErrPktTooLarge
}
- // Perform a stale connection check. We only perform this check for
- // the first query on a connection that has been checked out of the
- // connection pool: a fresh connection from the pool is more likely
- // to be stale, and it has not performed any previous writes that
- // could cause data corruption, so it's safe to return ErrBadConn
- // if the check fails.
- if mc.reset {
- mc.reset = false
- conn := mc.netConn
- if mc.rawConn != nil {
- conn = mc.rawConn
- }
- var err error
- if mc.cfg.CheckConnLiveness {
- if mc.cfg.ReadTimeout != 0 {
- err = conn.SetReadDeadline(time.Now().Add(mc.cfg.ReadTimeout))
- }
- if err == nil {
- err = connCheck(conn)
- }
- }
- if err != nil {
- errLog.Print("closing bad idle connection: ", err)
- mc.Close()
- return driver.ErrBadConn
- }
+ writeFunc := mc.writeWithTimeout
+ if mc.compress {
+ writeFunc = mc.compIO.writePackets
}
for {
- var size int
- if pktLen >= maxPacketSize {
- data[0] = 0xff
- data[1] = 0xff
- data[2] = 0xff
- size = maxPacketSize
- } else {
- data[0] = byte(pktLen)
- data[1] = byte(pktLen >> 8)
- data[2] = byte(pktLen >> 16)
- size = pktLen
- }
+ size := min(maxPacketSize, pktLen)
+ putUint24(data[:3], size)
data[3] = mc.sequence
// Write packet
- if mc.writeTimeout > 0 {
- if err := mc.netConn.SetWriteDeadline(time.Now().Add(mc.writeTimeout)); err != nil {
- return err
- }
+ if debug {
+ fmt.Fprintf(os.Stderr, "writePacket: size=%v seq=%v\n", size, mc.sequence)
}
- n, err := mc.netConn.Write(data[:4+size])
- if err == nil && n == 4+size {
- mc.sequence++
- if size != maxPacketSize {
- return nil
- }
- pktLen -= size
- data = data[size:]
- continue
- }
-
- // Handle error
- if err == nil { // n != len(data)
+ n, err := writeFunc(data[:4+size])
+ if err != nil {
mc.cleanup()
- errLog.Print(ErrMalformPkt)
- } else {
if cerr := mc.canceled.Value(); cerr != nil {
return cerr
}
if n == 0 && pktLen == len(data)-4 {
// only for the first loop iteration when nothing was written yet
+ mc.log(err)
return errBadConnNoWrite
+ } else {
+ return err
}
- mc.cleanup()
- errLog.Print(err)
}
- return ErrInvalidConn
+ if n != 4+size {
+ // io.Writer(b) must return a non-nil error if it cannot write len(b) bytes.
+ // The io.ErrShortWrite error is used to indicate that this rule has not been followed.
+ mc.cleanup()
+ return io.ErrShortWrite
+ }
+
+ mc.sequence++
+ if size != maxPacketSize {
+ return nil
+ }
+ pktLen -= size
+ data = data[size:]
}
}
@@ -186,11 +183,6 @@ func (mc *mysqlConn) writePacket(data []byte) error {
func (mc *mysqlConn) readHandshakePacket() (data []byte, plugin string, err error) {
data, err = mc.readPacket()
if err != nil {
- // for init we can rewrite this to ErrBadConn for sql.Driver to retry, since
- // in connection initialization we don't risk retrying non-idempotent actions.
- if err == ErrInvalidConn {
- return nil, "", driver.ErrBadConn
- }
return
}
@@ -234,12 +226,15 @@ func (mc *mysqlConn) readHandshakePacket() (data []byte, plugin string, err erro
if len(data) > pos {
// character set [1 byte]
// status flags [2 bytes]
+ pos += 3
// capability flags (upper 2 bytes) [2 bytes]
+ mc.flags |= clientFlag(binary.LittleEndian.Uint16(data[pos:pos+2])) << 16
+ pos += 2
// length of auth-plugin-data [1 byte]
// reserved (all [00]) [10 bytes]
- pos += 1 + 2 + 2 + 1 + 10
+ pos += 11
- // second part of the password cipher [mininum 13 bytes],
+ // second part of the password cipher [minimum 13 bytes],
// where len=MAX(13, length of auth-plugin-data - 8)
//
// The web documentation is ambiguous about the length. However,
@@ -285,12 +280,17 @@ func (mc *mysqlConn) writeHandshakeResponsePacket(authResp []byte, plugin string
clientLocalFiles |
clientPluginAuth |
clientMultiResults |
+ mc.flags&clientConnectAttrs |
mc.flags&clientLongFlag
+ sendConnectAttrs := mc.flags&clientConnectAttrs != 0
+
if mc.cfg.ClientFoundRows {
clientFlags |= clientFoundRows
}
-
+ if mc.cfg.compress && mc.flags&clientCompress == clientCompress {
+ clientFlags |= clientCompress
+ }
// To enable TLS / SSL
if mc.cfg.TLS != nil {
clientFlags |= clientSSL
@@ -318,34 +318,38 @@ func (mc *mysqlConn) writeHandshakeResponsePacket(authResp []byte, plugin string
pktLen += n + 1
}
+ // encode length of the connection attributes
+ var connAttrsLEI []byte
+ if sendConnectAttrs {
+ var connAttrsLEIBuf [9]byte
+ connAttrsLen := len(mc.connector.encodedAttributes)
+ connAttrsLEI = appendLengthEncodedInteger(connAttrsLEIBuf[:0], uint64(connAttrsLen))
+ pktLen += len(connAttrsLEI) + len(mc.connector.encodedAttributes)
+ }
+
// Calculate packet length and get buffer with that size
- data, err := mc.buf.takeSmallBuffer(pktLen + 4)
+ data, err := mc.buf.takeBuffer(pktLen + 4)
if err != nil {
- // cannot take the buffer. Something must be wrong with the connection
- errLog.Print(err)
- return errBadConnNoWrite
+ mc.cleanup()
+ return err
}
// ClientFlags [32 bit]
- data[4] = byte(clientFlags)
- data[5] = byte(clientFlags >> 8)
- data[6] = byte(clientFlags >> 16)
- data[7] = byte(clientFlags >> 24)
+ binary.LittleEndian.PutUint32(data[4:], uint32(clientFlags))
// MaxPacketSize [32 bit] (none)
- data[8] = 0x00
- data[9] = 0x00
- data[10] = 0x00
- data[11] = 0x00
+ binary.LittleEndian.PutUint32(data[8:], 0)
- // Charset [1 byte]
- var found bool
- data[12], found = collations[mc.cfg.Collation]
- if !found {
- // Note possibility for false negatives:
- // could be triggered although the collation is valid if the
- // collations map does not contain entries the server supports.
- return errors.New("unknown collation")
+ // Collation ID [1 byte]
+ data[12] = defaultCollationID
+ if cname := mc.cfg.Collation; cname != "" {
+ colID, ok := collations[cname]
+ if ok {
+ data[12] = colID
+ } else if len(mc.cfg.charsets) > 0 {
+ // When cfg.charset is set, the collation is set by `SET NAMES <charset> COLLATE <collation>`.
+ return fmt.Errorf("unknown collation: %q", cname)
+ }
}
// Filler [23 bytes] (all 0x00)
@@ -365,11 +369,12 @@ func (mc *mysqlConn) writeHandshakeResponsePacket(authResp []byte, plugin string
// Switch to TLS
tlsConn := tls.Client(mc.netConn, mc.cfg.TLS)
if err := tlsConn.Handshake(); err != nil {
+ if cerr := mc.canceled.Value(); cerr != nil {
+ return cerr
+ }
return err
}
- mc.rawConn = mc.netConn
mc.netConn = tlsConn
- mc.buf.nc = tlsConn
}
// User [null terminated string]
@@ -394,6 +399,12 @@ func (mc *mysqlConn) writeHandshakeResponsePacket(authResp []byte, plugin string
data[pos] = 0x00
pos++
+ // Connection Attributes
+ if sendConnectAttrs {
+ pos += copy(data[pos:], connAttrsLEI)
+ pos += copy(data[pos:], []byte(mc.connector.encodedAttributes))
+ }
+
// Send Auth packet
return mc.writePacket(data[:pos])
}
@@ -401,11 +412,10 @@ func (mc *mysqlConn) writeHandshakeResponsePacket(authResp []byte, plugin string
// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchResponse
func (mc *mysqlConn) writeAuthSwitchPacket(authData []byte) error {
pktLen := 4 + len(authData)
- data, err := mc.buf.takeSmallBuffer(pktLen)
+ data, err := mc.buf.takeBuffer(pktLen)
if err != nil {
- // cannot take the buffer. Something must be wrong with the connection
- errLog.Print(err)
- return errBadConnNoWrite
+ mc.cleanup()
+ return err
}
// Add the auth data [EOF]
@@ -419,32 +429,30 @@ func (mc *mysqlConn) writeAuthSwitchPacket(authData []byte) error {
func (mc *mysqlConn) writeCommandPacket(command byte) error {
// Reset Packet Sequence
- mc.sequence = 0
+ mc.resetSequence()
data, err := mc.buf.takeSmallBuffer(4 + 1)
if err != nil {
- // cannot take the buffer. Something must be wrong with the connection
- errLog.Print(err)
- return errBadConnNoWrite
+ return err
}
// Add command byte
data[4] = command
// Send CMD packet
- return mc.writePacket(data)
+ err = mc.writePacket(data)
+ mc.syncSequence()
+ return err
}
func (mc *mysqlConn) writeCommandPacketStr(command byte, arg string) error {
// Reset Packet Sequence
- mc.sequence = 0
+ mc.resetSequence()
pktLen := 1 + len(arg)
data, err := mc.buf.takeBuffer(pktLen + 4)
if err != nil {
- // cannot take the buffer. Something must be wrong with the connection
- errLog.Print(err)
- return errBadConnNoWrite
+ return err
}
// Add command byte
@@ -454,31 +462,30 @@ func (mc *mysqlConn) writeCommandPacketStr(command byte, arg string) error {
copy(data[5:], arg)
// Send CMD packet
- return mc.writePacket(data)
+ err = mc.writePacket(data)
+ mc.syncSequence()
+ return err
}
func (mc *mysqlConn) writeCommandPacketUint32(command byte, arg uint32) error {
// Reset Packet Sequence
- mc.sequence = 0
+ mc.resetSequence()
data, err := mc.buf.takeSmallBuffer(4 + 1 + 4)
if err != nil {
- // cannot take the buffer. Something must be wrong with the connection
- errLog.Print(err)
- return errBadConnNoWrite
+ return err
}
// Add command byte
data[4] = command
// Add arg [32 bit]
- data[5] = byte(arg)
- data[6] = byte(arg >> 8)
- data[7] = byte(arg >> 16)
- data[8] = byte(arg >> 24)
+ binary.LittleEndian.PutUint32(data[5:], arg)
// Send CMD packet
- return mc.writePacket(data)
+ err = mc.writePacket(data)
+ mc.syncSequence()
+ return err
}
/******************************************************************************
@@ -495,7 +502,9 @@ func (mc *mysqlConn) readAuthResult() ([]byte, string, error) {
switch data[0] {
case iOK:
- return nil, "", mc.handleOkPacket(data)
+ // resultUnchanged, since auth happens before any queries or
+ // commands have been executed.
+ return nil, "", mc.resultUnchanged().handleOkPacket(data)
case iAuthMoreData:
return data[1:], "", err
@@ -511,6 +520,9 @@ func (mc *mysqlConn) readAuthResult() ([]byte, string, error) {
}
plugin := string(data[1:pluginEndIndex])
authData := data[pluginEndIndex+1:]
+ if len(authData) > 0 && authData[len(authData)-1] == 0 {
+ authData = authData[:len(authData)-1]
+ }
return authData, plugin, nil
default: // Error otherwise
@@ -518,9 +530,9 @@ func (mc *mysqlConn) readAuthResult() ([]byte, string, error) {
}
}
-// Returns error if Packet is not an 'Result OK'-Packet
-func (mc *mysqlConn) readResultOK() error {
- data, err := mc.readPacket()
+// Returns error if Packet is not a 'Result OK'-Packet
+func (mc *okHandler) readResultOK() error {
+ data, err := mc.conn().readPacket()
if err != nil {
return err
}
@@ -528,35 +540,37 @@ func (mc *mysqlConn) readResultOK() error {
if data[0] == iOK {
return mc.handleOkPacket(data)
}
- return mc.handleErrorPacket(data)
+ return mc.conn().handleErrorPacket(data)
}
// Result Set Header Packet
-// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-ProtocolText::Resultset
-func (mc *mysqlConn) readResultSetHeaderPacket() (int, error) {
- data, err := mc.readPacket()
- if err == nil {
- switch data[0] {
+// https://dev.mysql.com/doc/dev/mysql-server/latest/page_protocol_com_query_response.html
+func (mc *okHandler) readResultSetHeaderPacket() (int, error) {
+ // handleOkPacket replaces both values; other cases leave the values unchanged.
+ mc.result.affectedRows = append(mc.result.affectedRows, 0)
+ mc.result.insertIds = append(mc.result.insertIds, 0)
- case iOK:
- return 0, mc.handleOkPacket(data)
-
- case iERR:
- return 0, mc.handleErrorPacket(data)
-
- case iLocalInFile:
- return 0, mc.handleInFileRequest(string(data[1:]))
- }
-
- // column count
- num, _, n := readLengthEncodedInteger(data)
- if n-len(data) == 0 {
- return int(num), nil
- }
-
- return 0, ErrMalformPkt
+ data, err := mc.conn().readPacket()
+ if err != nil {
+ return 0, err
}
- return 0, err
+
+ switch data[0] {
+ case iOK:
+ return 0, mc.handleOkPacket(data)
+
+ case iERR:
+ return 0, mc.conn().handleErrorPacket(data)
+
+ case iLocalInFile:
+ return 0, mc.handleInFileRequest(string(data[1:]))
+ }
+
+ // column count
+ // https://dev.mysql.com/doc/dev/mysql-server/latest/page_protocol_com_query_response_text_resultset.html
+ num, _, _ := readLengthEncodedInteger(data)
+ // ignore remaining data in the packet. see #1478.
+ return int(num), nil
}
// Error Packet
@@ -573,7 +587,8 @@ func (mc *mysqlConn) handleErrorPacket(data []byte) error {
// 1792: ER_CANT_EXECUTE_IN_READ_ONLY_TRANSACTION
// 1290: ER_OPTION_PREVENTS_STATEMENT (returned by Aurora during failover)
- if (errno == 1792 || errno == 1290) && mc.cfg.RejectReadOnly {
+ // 1836: ER_READ_ONLY_MODE
+ if (errno == 1792 || errno == 1290 || errno == 1836) && mc.cfg.RejectReadOnly {
// Oops; we are connected to a read-only connection, and won't be able
// to issue any write statements. Since RejectReadOnly is configured,
// we throw away this connection hoping this one would have write
@@ -607,18 +622,61 @@ func readStatus(b []byte) statusFlag {
return statusFlag(b[0]) | statusFlag(b[1])<<8
}
+// Returns an instance of okHandler for codepaths where mysqlConn.result doesn't
+// need to be cleared first (e.g. during authentication, or while additional
+// resultsets are being fetched.)
+func (mc *mysqlConn) resultUnchanged() *okHandler {
+ return (*okHandler)(mc)
+}
+
+// okHandler represents the state of the connection when mysqlConn.result has
+// been prepared for processing of OK packets.
+//
+// To correctly populate mysqlConn.result (updated by handleOkPacket()), all
+// callpaths must either:
+//
+// 1. first clear it using clearResult(), or
+// 2. confirm that they don't need to (by calling resultUnchanged()).
+//
+// Both return an instance of type *okHandler.
+type okHandler mysqlConn
+
+// Exposes the underlying type's methods.
+func (mc *okHandler) conn() *mysqlConn {
+ return (*mysqlConn)(mc)
+}
+
+// clearResult clears the connection's stored affectedRows and insertIds
+// fields.
+//
+// It returns a handler that can process OK responses.
+func (mc *mysqlConn) clearResult() *okHandler {
+ mc.result = mysqlResult{}
+ return (*okHandler)(mc)
+}
+
// Ok Packet
// http://dev.mysql.com/doc/internals/en/generic-response-packets.html#packet-OK_Packet
-func (mc *mysqlConn) handleOkPacket(data []byte) error {
+func (mc *okHandler) handleOkPacket(data []byte) error {
var n, m int
+ var affectedRows, insertId uint64
// 0x00 [1 byte]
// Affected rows [Length Coded Binary]
- mc.affectedRows, _, n = readLengthEncodedInteger(data[1:])
+ affectedRows, _, n = readLengthEncodedInteger(data[1:])
// Insert id [Length Coded Binary]
- mc.insertId, _, m = readLengthEncodedInteger(data[1+n:])
+ insertId, _, m = readLengthEncodedInteger(data[1+n:])
+
+ // Update for the current statement result (only used by
+ // readResultSetHeaderPacket).
+ if len(mc.result.affectedRows) > 0 {
+ mc.result.affectedRows[len(mc.result.affectedRows)-1] = int64(affectedRows)
+ }
+ if len(mc.result.insertIds) > 0 {
+ mc.result.insertIds[len(mc.result.insertIds)-1] = int64(insertId)
+ }
// server_status [2 bytes]
mc.status = readStatus(data[1+n+m : 1+n+m+2])
@@ -769,7 +827,8 @@ func (rows *textRows) readRow(dest []driver.Value) error {
for i := range dest {
// Read bytes and convert to string
- dest[i], isNull, n, err = readLengthEncodedString(data[pos:])
+ var buf []byte
+ buf, isNull, n, err = readLengthEncodedString(data[pos:])
pos += n
if err != nil {
@@ -781,19 +840,40 @@ func (rows *textRows) readRow(dest []driver.Value) error {
continue
}
- if !mc.parseTime {
- continue
- }
-
- // Parse time field
switch rows.rs.columns[i].fieldType {
case fieldTypeTimestamp,
fieldTypeDateTime,
fieldTypeDate,
fieldTypeNewDate:
- if dest[i], err = parseDateTime(dest[i].([]byte), mc.cfg.Loc); err != nil {
- return err
+ if mc.parseTime {
+ dest[i], err = parseDateTime(buf, mc.cfg.Loc)
+ } else {
+ dest[i] = buf
}
+
+ case fieldTypeTiny, fieldTypeShort, fieldTypeInt24, fieldTypeYear, fieldTypeLong:
+ dest[i], err = strconv.ParseInt(string(buf), 10, 64)
+
+ case fieldTypeLongLong:
+ if rows.rs.columns[i].flags&flagUnsigned != 0 {
+ dest[i], err = strconv.ParseUint(string(buf), 10, 64)
+ } else {
+ dest[i], err = strconv.ParseInt(string(buf), 10, 64)
+ }
+
+ case fieldTypeFloat:
+ var d float64
+ d, err = strconv.ParseFloat(string(buf), 32)
+ dest[i] = float32(d)
+
+ case fieldTypeDouble:
+ dest[i], err = strconv.ParseFloat(string(buf), 64)
+
+ default:
+ dest[i] = buf
+ }
+ if err != nil {
+ return err
}
}
@@ -875,32 +955,26 @@ func (stmt *mysqlStmt) writeCommandLongData(paramID int, arg []byte) error {
pktLen = dataOffset + argLen
}
- stmt.mc.sequence = 0
// Add command byte [1 byte]
data[4] = comStmtSendLongData
// Add stmtID [32 bit]
- data[5] = byte(stmt.id)
- data[6] = byte(stmt.id >> 8)
- data[7] = byte(stmt.id >> 16)
- data[8] = byte(stmt.id >> 24)
+ binary.LittleEndian.PutUint32(data[5:], stmt.id)
// Add paramID [16 bit]
- data[9] = byte(paramID)
- data[10] = byte(paramID >> 8)
+ binary.LittleEndian.PutUint16(data[9:], uint16(paramID))
// Send CMD packet
err := stmt.mc.writePacket(data[:4+pktLen])
+ // Every COM_STMT_SEND_LONG_DATA packet resets the packet sequence
+ stmt.mc.resetSequence()
if err == nil {
data = data[pktLen-dataOffset:]
continue
}
return err
-
}
- // Reset Packet Sequence
- stmt.mc.sequence = 0
return nil
}
@@ -925,7 +999,7 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
}
// Reset packet-sequence
- mc.sequence = 0
+ mc.resetSequence()
var data []byte
var err error
@@ -937,28 +1011,20 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
// In this case the len(data) == cap(data) which is used to optimise the flow below.
}
if err != nil {
- // cannot take the buffer. Something must be wrong with the connection
- errLog.Print(err)
- return errBadConnNoWrite
+ return err
}
// command [1 byte]
data[4] = comStmtExecute
// statement_id [4 bytes]
- data[5] = byte(stmt.id)
- data[6] = byte(stmt.id >> 8)
- data[7] = byte(stmt.id >> 16)
- data[8] = byte(stmt.id >> 24)
+ binary.LittleEndian.PutUint32(data[5:], stmt.id)
// flags (0: CURSOR_TYPE_NO_CURSOR) [1 byte]
data[9] = 0x00
// iteration_count (uint32(1)) [4 bytes]
- data[10] = 0x01
- data[11] = 0x00
- data[12] = 0x00
- data[13] = 0x00
+ binary.LittleEndian.PutUint32(data[10:], 1)
if len(args) > 0 {
pos := minPktLen
@@ -1012,50 +1078,17 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
case int64:
paramTypes[i+i] = byte(fieldTypeLongLong)
paramTypes[i+i+1] = 0x00
-
- if cap(paramValues)-len(paramValues)-8 >= 0 {
- paramValues = paramValues[:len(paramValues)+8]
- binary.LittleEndian.PutUint64(
- paramValues[len(paramValues)-8:],
- uint64(v),
- )
- } else {
- paramValues = append(paramValues,
- uint64ToBytes(uint64(v))...,
- )
- }
+ paramValues = binary.LittleEndian.AppendUint64(paramValues, uint64(v))
case uint64:
paramTypes[i+i] = byte(fieldTypeLongLong)
paramTypes[i+i+1] = 0x80 // type is unsigned
-
- if cap(paramValues)-len(paramValues)-8 >= 0 {
- paramValues = paramValues[:len(paramValues)+8]
- binary.LittleEndian.PutUint64(
- paramValues[len(paramValues)-8:],
- uint64(v),
- )
- } else {
- paramValues = append(paramValues,
- uint64ToBytes(uint64(v))...,
- )
- }
+ paramValues = binary.LittleEndian.AppendUint64(paramValues, uint64(v))
case float64:
paramTypes[i+i] = byte(fieldTypeDouble)
paramTypes[i+i+1] = 0x00
-
- if cap(paramValues)-len(paramValues)-8 >= 0 {
- paramValues = paramValues[:len(paramValues)+8]
- binary.LittleEndian.PutUint64(
- paramValues[len(paramValues)-8:],
- math.Float64bits(v),
- )
- } else {
- paramValues = append(paramValues,
- uint64ToBytes(math.Float64bits(v))...,
- )
- }
+ paramValues = binary.LittleEndian.AppendUint64(paramValues, math.Float64bits(v))
case bool:
paramTypes[i+i] = byte(fieldTypeTiny)
@@ -1116,7 +1149,7 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
if v.IsZero() {
b = append(b, "0000-00-00"...)
} else {
- b, err = appendDateTime(b, v.In(mc.cfg.Loc))
+ b, err = appendDateTime(b, v.In(mc.cfg.Loc), mc.cfg.timeTruncate)
if err != nil {
return err
}
@@ -1136,20 +1169,21 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
// In that case we must build the data packet with the new values buffer
if valuesCap != cap(paramValues) {
data = append(data[:pos], paramValues...)
- if err = mc.buf.store(data); err != nil {
- errLog.Print(err)
- return errBadConnNoWrite
- }
+ mc.buf.store(data) // allow this buffer to be reused
}
pos += len(paramValues)
data = data[:pos]
}
- return mc.writePacket(data)
+ err = mc.writePacket(data)
+ mc.syncSequence()
+ return err
}
-func (mc *mysqlConn) discardResults() error {
+// For each remaining resultset in the stream, discards its rows and updates
+// mc.result.affectedRows and mc.result.insertIds.
+func (mc *okHandler) discardResults() error {
for mc.status&statusMoreResultsExists != 0 {
resLen, err := mc.readResultSetHeaderPacket()
if err != nil {
@@ -1157,11 +1191,11 @@ func (mc *mysqlConn) discardResults() error {
}
if resLen > 0 {
// columns
- if err := mc.readUntilEOF(); err != nil {
+ if err := mc.conn().readUntilEOF(); err != nil {
return err
}
// rows
- if err := mc.readUntilEOF(); err != nil {
+ if err := mc.conn().readUntilEOF(); err != nil {
return err
}
}
@@ -1268,7 +1302,8 @@ func (rows *binaryRows) readRow(dest []driver.Value) error {
case fieldTypeDecimal, fieldTypeNewDecimal, fieldTypeVarChar,
fieldTypeBit, fieldTypeEnum, fieldTypeSet, fieldTypeTinyBLOB,
fieldTypeMediumBLOB, fieldTypeLongBLOB, fieldTypeBLOB,
- fieldTypeVarString, fieldTypeString, fieldTypeGeometry, fieldTypeJSON:
+ fieldTypeVarString, fieldTypeString, fieldTypeGeometry, fieldTypeJSON,
+ fieldTypeVector:
var isNull bool
var n int
dest[i], isNull, n, err = readLengthEncodedString(data[pos:])
diff --git a/vendor/github.com/go-sql-driver/mysql/result.go b/vendor/github.com/go-sql-driver/mysql/result.go
index c6438d03..d5163146 100644
--- a/vendor/github.com/go-sql-driver/mysql/result.go
+++ b/vendor/github.com/go-sql-driver/mysql/result.go
@@ -8,15 +8,43 @@
package mysql
+import "database/sql/driver"
+
+// Result exposes data not available through *connection.Result.
+//
+// This is accessible by executing statements using sql.Conn.Raw() and
+// downcasting the returned result:
+//
+// res, err := rawConn.Exec(...)
+// res.(mysql.Result).AllRowsAffected()
+type Result interface {
+ driver.Result
+ // AllRowsAffected returns a slice containing the affected rows for each
+ // executed statement.
+ AllRowsAffected() []int64
+ // AllLastInsertIds returns a slice containing the last inserted ID for each
+ // executed statement.
+ AllLastInsertIds() []int64
+}
+
type mysqlResult struct {
- affectedRows int64
- insertId int64
+ // One entry in both slices is created for every executed statement result.
+ affectedRows []int64
+ insertIds []int64
}
func (res *mysqlResult) LastInsertId() (int64, error) {
- return res.insertId, nil
+ return res.insertIds[len(res.insertIds)-1], nil
}
func (res *mysqlResult) RowsAffected() (int64, error) {
- return res.affectedRows, nil
+ return res.affectedRows[len(res.affectedRows)-1], nil
+}
+
+func (res *mysqlResult) AllLastInsertIds() []int64 {
+ return append([]int64{}, res.insertIds...) // defensive copy
+}
+
+func (res *mysqlResult) AllRowsAffected() []int64 {
+ return append([]int64{}, res.affectedRows...) // defensive copy
}
diff --git a/vendor/github.com/go-sql-driver/mysql/rows.go b/vendor/github.com/go-sql-driver/mysql/rows.go
index 888bdb5f..df98417b 100644
--- a/vendor/github.com/go-sql-driver/mysql/rows.go
+++ b/vendor/github.com/go-sql-driver/mysql/rows.go
@@ -111,19 +111,13 @@ func (rows *mysqlRows) Close() (err error) {
return err
}
- // flip the buffer for this connection if we need to drain it.
- // note that for a successful query (i.e. one where rows.next()
- // has been called until it returns false), `rows.mc` will be nil
- // by the time the user calls `(*Rows).Close`, so we won't reach this
- // see: https://github.com/golang/go/commit/651ddbdb5056ded455f47f9c494c67b389622a47
- mc.buf.flip()
-
// Remove unread packets from stream
if !rows.rs.done {
err = mc.readUntilEOF()
}
if err == nil {
- if err = mc.discardResults(); err != nil {
+ handleOk := mc.clearResult()
+ if err = handleOk.discardResults(); err != nil {
return err
}
}
@@ -160,7 +154,15 @@ func (rows *mysqlRows) nextResultSet() (int, error) {
return 0, io.EOF
}
rows.rs = resultSet{}
- return rows.mc.readResultSetHeaderPacket()
+ // rows.mc.affectedRows and rows.mc.insertIds accumulate on each call to
+ // nextResultSet.
+ resLen, err := rows.mc.resultUnchanged().readResultSetHeaderPacket()
+ if err != nil {
+ // Clean up the multi-results flag
+ rows.rs.done = true
+ rows.mc.status = rows.mc.status & (^statusMoreResultsExists)
+ }
+ return resLen, err
}
func (rows *mysqlRows) nextNotEmptyResultSet() (int, error) {
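For context, this drain path is what runs when a caller walks several result sets and closes early. A hedged sketch of the caller-side pattern (standard database/sql; the query is a placeholder and needs multiStatements=true in the DSN):

```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/go-sql-driver/mysql"
)

// drainAll iterates every result set of a multi-statement query; the deferred
// rows.Close then exercises the discard path patched above for anything left
// unread on the wire.
func drainAll(db *sql.DB) error {
	rows, err := db.Query("SELECT 1; SELECT 2; SELECT 3")
	if err != nil {
		return err
	}
	defer rows.Close()
	for {
		for rows.Next() {
			var n int
			if err := rows.Scan(&n); err != nil {
				return err
			}
			log.Println(n)
		}
		if !rows.NextResultSet() { // advances to the next result set, if any
			break
		}
	}
	return rows.Err()
}
```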
diff --git a/vendor/github.com/go-sql-driver/mysql/statement.go b/vendor/github.com/go-sql-driver/mysql/statement.go
index 10ece8bd..35df8545 100644
--- a/vendor/github.com/go-sql-driver/mysql/statement.go
+++ b/vendor/github.com/go-sql-driver/mysql/statement.go
@@ -24,11 +24,12 @@ type mysqlStmt struct {
func (stmt *mysqlStmt) Close() error {
if stmt.mc == nil || stmt.mc.closed.Load() {
- // driver.Stmt.Close can be called more than once, thus this function
- // has to be idempotent.
- // See also Issue #450 and golang/go#16019.
- //errLog.Print(ErrInvalidConn)
- return driver.ErrBadConn
+ // driver.Stmt.Close could be called more than once, thus this function
+ // had to be idempotent. See also Issue #450 and golang/go#16019.
+ // This bug has been fixed in Go 1.8.
+ // https://github.com/golang/go/commit/90b8a0ca2d0b565c7c7199ffcf77b15ea6b6db3a
+ // But we keep this function idempotent because it is safer.
+ return nil
}
err := stmt.mc.writeCommandPacketUint32(comStmtClose, stmt.id)
@@ -51,7 +52,6 @@ func (stmt *mysqlStmt) CheckNamedValue(nv *driver.NamedValue) (err error) {
func (stmt *mysqlStmt) Exec(args []driver.Value) (driver.Result, error) {
if stmt.mc.closed.Load() {
- errLog.Print(ErrInvalidConn)
return nil, driver.ErrBadConn
}
// Send command
@@ -61,12 +61,10 @@ func (stmt *mysqlStmt) Exec(args []driver.Value) (driver.Result, error) {
}
mc := stmt.mc
-
- mc.affectedRows = 0
- mc.insertId = 0
+ handleOk := stmt.mc.clearResult()
// Read Result
- resLen, err := mc.readResultSetHeaderPacket()
+ resLen, err := handleOk.readResultSetHeaderPacket()
if err != nil {
return nil, err
}
@@ -83,14 +81,12 @@ func (stmt *mysqlStmt) Exec(args []driver.Value) (driver.Result, error) {
}
}
- if err := mc.discardResults(); err != nil {
+ if err := handleOk.discardResults(); err != nil {
return nil, err
}
- return &mysqlResult{
- affectedRows: int64(mc.affectedRows),
- insertId: int64(mc.insertId),
- }, nil
+ copied := mc.result
+ return &copied, nil
}
func (stmt *mysqlStmt) Query(args []driver.Value) (driver.Rows, error) {
@@ -99,7 +95,6 @@ func (stmt *mysqlStmt) Query(args []driver.Value) (driver.Rows, error) {
func (stmt *mysqlStmt) query(args []driver.Value) (*binaryRows, error) {
if stmt.mc.closed.Load() {
- errLog.Print(ErrInvalidConn)
return nil, driver.ErrBadConn
}
// Send command
@@ -111,7 +106,8 @@ func (stmt *mysqlStmt) query(args []driver.Value) (*binaryRows, error) {
mc := stmt.mc
// Read Result
- resLen, err := mc.readResultSetHeaderPacket()
+ handleOk := stmt.mc.clearResult()
+ resLen, err := handleOk.readResultSetHeaderPacket()
if err != nil {
return nil, err
}
@@ -144,7 +140,7 @@ type converter struct{}
// implementation does not. This function should be kept in sync with
// database/sql/driver defaultConverter.ConvertValue() except for that
// deliberate difference.
-func (c converter) ConvertValue(v interface{}) (driver.Value, error) {
+func (c converter) ConvertValue(v any) (driver.Value, error) {
if driver.IsValue(v) {
return v, nil
}
diff --git a/vendor/github.com/go-sql-driver/mysql/transaction.go b/vendor/github.com/go-sql-driver/mysql/transaction.go
index 4a4b6100..8c502f49 100644
--- a/vendor/github.com/go-sql-driver/mysql/transaction.go
+++ b/vendor/github.com/go-sql-driver/mysql/transaction.go
@@ -13,18 +13,32 @@ type mysqlTx struct {
}
func (tx *mysqlTx) Commit() (err error) {
- if tx.mc == nil || tx.mc.closed.Load() {
+ if tx.mc == nil {
return ErrInvalidConn
}
+ if tx.mc.closed.Load() {
+ err = tx.mc.error()
+ if err == nil {
+ err = ErrInvalidConn
+ }
+ return
+ }
err = tx.mc.exec("COMMIT")
tx.mc = nil
return
}
func (tx *mysqlTx) Rollback() (err error) {
- if tx.mc == nil || tx.mc.closed.Load() {
+ if tx.mc == nil {
return ErrInvalidConn
}
+ if tx.mc.closed.Load() {
+ err = tx.mc.error()
+ if err == nil {
+ err = ErrInvalidConn
+ }
+ return
+ }
err = tx.mc.exec("ROLLBACK")
tx.mc = nil
return
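The practical effect is that Commit/Rollback on an already-broken connection now reports the stored root cause instead of a bare ErrInvalidConn. A small sketch of where that surfaces, with placeholder DSN and table names:

```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/go-sql-driver/mysql"
)

func bumpCounter(db *sql.DB) error {
	tx, err := db.Begin()
	if err != nil {
		return err
	}
	if _, err := tx.Exec("UPDATE t SET n = n + 1"); err != nil { // placeholder table
		_ = tx.Rollback()
		return err
	}
	// With the patch above, committing on a connection that died mid-transaction
	// returns the stored connection error (e.g. a network failure) rather than
	// a generic "invalid connection", so the root cause is loggable here.
	return tx.Commit()
}

func main() {
	db, err := sql.Open("mysql", "user:pass@/test") // placeholder DSN
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
	if err := bumpCounter(db); err != nil {
		log.Printf("commit failed: %v", err)
	}
}
```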
diff --git a/vendor/github.com/go-sql-driver/mysql/utils.go b/vendor/github.com/go-sql-driver/mysql/utils.go
index 15dbd8d1..8716c26c 100644
--- a/vendor/github.com/go-sql-driver/mysql/utils.go
+++ b/vendor/github.com/go-sql-driver/mysql/utils.go
@@ -36,7 +36,7 @@ var (
// registering it.
//
// rootCertPool := x509.NewCertPool()
-// pem, err := ioutil.ReadFile("/path/ca-cert.pem")
+// pem, err := os.ReadFile("/path/ca-cert.pem")
// if err != nil {
// log.Fatal(err)
// }
@@ -265,7 +265,11 @@ func parseBinaryDateTime(num uint64, data []byte, loc *time.Location) (driver.Va
return nil, fmt.Errorf("invalid DATETIME packet length %d", num)
}
-func appendDateTime(buf []byte, t time.Time) ([]byte, error) {
+func appendDateTime(buf []byte, t time.Time, timeTruncate time.Duration) ([]byte, error) {
+ if timeTruncate > 0 {
+ t = t.Truncate(timeTruncate)
+ }
+
year, month, day := t.Date()
hour, min, sec := t.Clock()
nsec := t.Nanosecond()
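The new timeTruncate argument simply rounds the value down via time.Truncate before formatting; a quick stdlib-only illustration of the semantics:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	t := time.Date(2024, 3, 1, 12, 34, 56, 789000000, time.UTC)
	// appendDateTime applies exactly this rounding when timeTruncate > 0.
	fmt.Println(t.Truncate(time.Second)) // 2024-03-01 12:34:56 +0000 UTC
	fmt.Println(t.Truncate(time.Minute)) // 2024-03-01 12:34:00 +0000 UTC
}
```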
@@ -486,17 +490,16 @@ func formatBinaryTime(src []byte, length uint8) (driver.Value, error) {
* Convert from and to bytes *
******************************************************************************/
-func uint64ToBytes(n uint64) []byte {
- return []byte{
- byte(n),
- byte(n >> 8),
- byte(n >> 16),
- byte(n >> 24),
- byte(n >> 32),
- byte(n >> 40),
- byte(n >> 48),
- byte(n >> 56),
- }
+// 24bit integer: used for packet headers.
+
+func putUint24(data []byte, n int) {
+ data[2] = byte(n >> 16)
+ data[1] = byte(n >> 8)
+ data[0] = byte(n)
+}
+
+func getUint24(data []byte) int {
+ return int(data[2])<<16 | int(data[1])<<8 | int(data[0])
}
func uint64ToString(n uint64) []byte {
@@ -521,16 +524,6 @@ func uint64ToString(n uint64) []byte {
return a[i:]
}
-// treats string value as unsigned integer representation
-func stringToInt(b []byte) int {
- val := 0
- for i := range b {
- val *= 10
- val += int(b[i] - 0x30)
- }
- return val
-}
-
// returns the string read as a bytes slice, whether the value is NULL,
// the number of bytes read and an error, in case the string is longer than
// the input slice
@@ -582,18 +575,15 @@ func readLengthEncodedInteger(b []byte) (uint64, bool, int) {
// 252: value of following 2
case 0xfc:
- return uint64(b[1]) | uint64(b[2])<<8, false, 3
+ return uint64(binary.LittleEndian.Uint16(b[1:])), false, 3
// 253: value of following 3
case 0xfd:
- return uint64(b[1]) | uint64(b[2])<<8 | uint64(b[3])<<16, false, 4
+ return uint64(getUint24(b[1:])), false, 4
// 254: value of following 8
case 0xfe:
- return uint64(b[1]) | uint64(b[2])<<8 | uint64(b[3])<<16 |
- uint64(b[4])<<24 | uint64(b[5])<<32 | uint64(b[6])<<40 |
- uint64(b[7])<<48 | uint64(b[8])<<56,
- false, 9
+ return uint64(binary.LittleEndian.Uint64(b[1:])), false, 9
}
// 0-250: value of first byte
@@ -607,13 +597,19 @@ func appendLengthEncodedInteger(b []byte, n uint64) []byte {
return append(b, byte(n))
case n <= 0xffff:
- return append(b, 0xfc, byte(n), byte(n>>8))
+ b = append(b, 0xfc)
+ return binary.LittleEndian.AppendUint16(b, uint16(n))
case n <= 0xffffff:
return append(b, 0xfd, byte(n), byte(n>>8), byte(n>>16))
}
- return append(b, 0xfe, byte(n), byte(n>>8), byte(n>>16), byte(n>>24),
- byte(n>>32), byte(n>>40), byte(n>>48), byte(n>>56))
+ b = append(b, 0xfe)
+ return binary.LittleEndian.AppendUint64(b, n)
+}
+
+func appendLengthEncodedString(b []byte, s string) []byte {
+ b = appendLengthEncodedInteger(b, uint64(len(s)))
+ return append(b, s...)
}
// reserveBuffer checks cap(buf) and expand buffer to len(buf) + appendSize.
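The rewritten helpers implement MySQL's length-encoded integer wire format: values up to 250 are one byte; larger values get a 0xfc/0xfd/0xfe prefix followed by 2, 3, or 8 little-endian bytes. A standalone sketch mirroring the patched logic (Go 1.19+ for binary.LittleEndian.AppendUint16/64):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// appendLenEncInt mirrors the patched appendLengthEncodedInteger.
func appendLenEncInt(b []byte, n uint64) []byte {
	switch {
	case n <= 250:
		return append(b, byte(n))
	case n <= 0xffff:
		return binary.LittleEndian.AppendUint16(append(b, 0xfc), uint16(n))
	case n <= 0xffffff:
		return append(b, 0xfd, byte(n), byte(n>>8), byte(n>>16)) // 24-bit little-endian
	}
	return binary.LittleEndian.AppendUint64(append(b, 0xfe), n)
}

func main() {
	for _, n := range []uint64{10, 251, 70000, 1 << 32} {
		fmt.Printf("%d -> %x\n", n, appendLenEncInt(nil, n))
	}
	// 10 -> 0a, 251 -> fcfb00, 70000 -> fd701101, 4294967296 -> fe0000000001000000
}
```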
diff --git a/vendor/github.com/golang-jwt/jwt/MIGRATION_GUIDE.md b/vendor/github.com/golang-jwt/jwt/MIGRATION_GUIDE.md
deleted file mode 100644
index c4efbd2a..00000000
--- a/vendor/github.com/golang-jwt/jwt/MIGRATION_GUIDE.md
+++ /dev/null
@@ -1,22 +0,0 @@
-## Migration Guide (v3.2.1)
-
-Starting from [v3.2.1](https://github.com/golang-jwt/jwt/releases/tag/v3.2.1]), the import path has changed from `github.com/dgrijalva/jwt-go` to `github.com/golang-jwt/jwt`. Future releases will be using the `github.com/golang-jwt/jwt` import path and continue the existing versioning scheme of `v3.x.x+incompatible`. Backwards-compatible patches and fixes will be done on the `v3` release branch, where as new build-breaking features will be developed in a `v4` release, possibly including a SIV-style import path.
-
-### go.mod replacement
-
-In a first step, the easiest way is to use `go mod edit` to issue a replacement.
-
-```
-go mod edit -replace github.com/dgrijalva/jwt-go=github.com/golang-jwt/jwt@v3.2.1+incompatible
-go mod tidy
-```
-
-This will still keep the old import path in your code but replace it with the new package and also introduce a new indirect dependency to `github.com/golang-jwt/jwt`. Try to compile your project; it should still work.
-
-### Cleanup
-
-If your code still consistently builds, you can replace all occurences of `github.com/dgrijalva/jwt-go` with `github.com/golang-jwt/jwt`, either manually or by using tools such as `sed`. Finally, the `replace` directive in the `go.mod` file can be removed.
-
-## Older releases (before v3.2.0)
-
-The original migration guide for older releases can be found at https://github.com/dgrijalva/jwt-go/blob/master/MIGRATION_GUIDE.md.
\ No newline at end of file
diff --git a/vendor/github.com/golang-jwt/jwt/claims.go b/vendor/github.com/golang-jwt/jwt/claims.go
deleted file mode 100644
index f1dba3cb..00000000
--- a/vendor/github.com/golang-jwt/jwt/claims.go
+++ /dev/null
@@ -1,146 +0,0 @@
-package jwt
-
-import (
- "crypto/subtle"
- "fmt"
- "time"
-)
-
-// For a type to be a Claims object, it must just have a Valid method that determines
-// if the token is invalid for any supported reason
-type Claims interface {
- Valid() error
-}
-
-// Structured version of Claims Section, as referenced at
-// https://tools.ietf.org/html/rfc7519#section-4.1
-// See examples for how to use this with your own claim types
-type StandardClaims struct {
- Audience string `json:"aud,omitempty"`
- ExpiresAt int64 `json:"exp,omitempty"`
- Id string `json:"jti,omitempty"`
- IssuedAt int64 `json:"iat,omitempty"`
- Issuer string `json:"iss,omitempty"`
- NotBefore int64 `json:"nbf,omitempty"`
- Subject string `json:"sub,omitempty"`
-}
-
-// Validates time based claims "exp, iat, nbf".
-// There is no accounting for clock skew.
-// As well, if any of the above claims are not in the token, it will still
-// be considered a valid claim.
-func (c StandardClaims) Valid() error {
- vErr := new(ValidationError)
- now := TimeFunc().Unix()
-
- // The claims below are optional, by default, so if they are set to the
- // default value in Go, let's not fail the verification for them.
- if !c.VerifyExpiresAt(now, false) {
- delta := time.Unix(now, 0).Sub(time.Unix(c.ExpiresAt, 0))
- vErr.Inner = fmt.Errorf("token is expired by %v", delta)
- vErr.Errors |= ValidationErrorExpired
- }
-
- if !c.VerifyIssuedAt(now, false) {
- vErr.Inner = fmt.Errorf("Token used before issued")
- vErr.Errors |= ValidationErrorIssuedAt
- }
-
- if !c.VerifyNotBefore(now, false) {
- vErr.Inner = fmt.Errorf("token is not valid yet")
- vErr.Errors |= ValidationErrorNotValidYet
- }
-
- if vErr.valid() {
- return nil
- }
-
- return vErr
-}
-
-// Compares the aud claim against cmp.
-// If required is false, this method will return true if the value matches or is unset
-func (c *StandardClaims) VerifyAudience(cmp string, req bool) bool {
- return verifyAud([]string{c.Audience}, cmp, req)
-}
-
-// Compares the exp claim against cmp.
-// If required is false, this method will return true if the value matches or is unset
-func (c *StandardClaims) VerifyExpiresAt(cmp int64, req bool) bool {
- return verifyExp(c.ExpiresAt, cmp, req)
-}
-
-// Compares the iat claim against cmp.
-// If required is false, this method will return true if the value matches or is unset
-func (c *StandardClaims) VerifyIssuedAt(cmp int64, req bool) bool {
- return verifyIat(c.IssuedAt, cmp, req)
-}
-
-// Compares the iss claim against cmp.
-// If required is false, this method will return true if the value matches or is unset
-func (c *StandardClaims) VerifyIssuer(cmp string, req bool) bool {
- return verifyIss(c.Issuer, cmp, req)
-}
-
-// Compares the nbf claim against cmp.
-// If required is false, this method will return true if the value matches or is unset
-func (c *StandardClaims) VerifyNotBefore(cmp int64, req bool) bool {
- return verifyNbf(c.NotBefore, cmp, req)
-}
-
-// ----- helpers
-
-func verifyAud(aud []string, cmp string, required bool) bool {
- if len(aud) == 0 {
- return !required
- }
- // use a var here to keep constant time compare when looping over a number of claims
- result := false
-
- var stringClaims string
- for _, a := range aud {
- if subtle.ConstantTimeCompare([]byte(a), []byte(cmp)) != 0 {
- result = true
- }
- stringClaims = stringClaims + a
- }
-
- // case where "" is sent in one or many aud claims
- if len(stringClaims) == 0 {
- return !required
- }
-
- return result
-}
-
-func verifyExp(exp int64, now int64, required bool) bool {
- if exp == 0 {
- return !required
- }
- return now <= exp
-}
-
-func verifyIat(iat int64, now int64, required bool) bool {
- if iat == 0 {
- return !required
- }
- return now >= iat
-}
-
-func verifyIss(iss string, cmp string, required bool) bool {
- if iss == "" {
- return !required
- }
- if subtle.ConstantTimeCompare([]byte(iss), []byte(cmp)) != 0 {
- return true
- } else {
- return false
- }
-}
-
-func verifyNbf(nbf int64, now int64, required bool) bool {
- if nbf == 0 {
- return !required
- }
- return now >= nbf
-}
diff --git a/vendor/github.com/golang-jwt/jwt/errors.go b/vendor/github.com/golang-jwt/jwt/errors.go
deleted file mode 100644
index 1c93024a..00000000
--- a/vendor/github.com/golang-jwt/jwt/errors.go
+++ /dev/null
@@ -1,59 +0,0 @@
-package jwt
-
-import (
- "errors"
-)
-
-// Error constants
-var (
- ErrInvalidKey = errors.New("key is invalid")
- ErrInvalidKeyType = errors.New("key is of invalid type")
- ErrHashUnavailable = errors.New("the requested hash function is unavailable")
-)
-
-// The errors that might occur when parsing and validating a token
-const (
- ValidationErrorMalformed uint32 = 1 << iota // Token is malformed
- ValidationErrorUnverifiable // Token could not be verified because of signing problems
- ValidationErrorSignatureInvalid // Signature validation failed
-
- // Standard Claim validation errors
- ValidationErrorAudience // AUD validation failed
- ValidationErrorExpired // EXP validation failed
- ValidationErrorIssuedAt // IAT validation failed
- ValidationErrorIssuer // ISS validation failed
- ValidationErrorNotValidYet // NBF validation failed
- ValidationErrorId // JTI validation failed
- ValidationErrorClaimsInvalid // Generic claims validation error
-)
-
-// Helper for constructing a ValidationError with a string error message
-func NewValidationError(errorText string, errorFlags uint32) *ValidationError {
- return &ValidationError{
- text: errorText,
- Errors: errorFlags,
- }
-}
-
-// The error from Parse if token is not valid
-type ValidationError struct {
- Inner error // stores the error returned by external dependencies, i.e.: KeyFunc
- Errors uint32 // bitfield. see ValidationError... constants
- text string // errors that do not have a valid error just have text
-}
-
-// Validation error is an error type
-func (e ValidationError) Error() string {
- if e.Inner != nil {
- return e.Inner.Error()
- } else if e.text != "" {
- return e.text
- } else {
- return "token is invalid"
- }
-}
-
-// No errors
-func (e *ValidationError) valid() bool {
- return e.Errors == 0
-}
diff --git a/vendor/github.com/golang-jwt/jwt/token.go b/vendor/github.com/golang-jwt/jwt/token.go
deleted file mode 100644
index 6b30ced1..00000000
--- a/vendor/github.com/golang-jwt/jwt/token.go
+++ /dev/null
@@ -1,104 +0,0 @@
-package jwt
-
-import (
- "encoding/base64"
- "encoding/json"
- "strings"
- "time"
-)
-
-// TimeFunc provides the current time when parsing token to validate "exp" claim (expiration time).
-// You can override it to use another time value. This is useful for testing or if your
-// server uses a different time zone than your tokens.
-var TimeFunc = time.Now
-
-// Parse methods use this callback function to supply
-// the key for verification. The function receives the parsed,
-// but unverified Token. This allows you to use properties in the
-// Header of the token (such as `kid`) to identify which key to use.
-type Keyfunc func(*Token) (interface{}, error)
-
-// A JWT Token. Different fields will be used depending on whether you're
-// creating or parsing/verifying a token.
-type Token struct {
- Raw string // The raw token. Populated when you Parse a token
- Method SigningMethod // The signing method used or to be used
- Header map[string]interface{} // The first segment of the token
- Claims Claims // The second segment of the token
- Signature string // The third segment of the token. Populated when you Parse a token
- Valid bool // Is the token valid? Populated when you Parse/Verify a token
-}
-
-// Create a new Token. Takes a signing method
-func New(method SigningMethod) *Token {
- return NewWithClaims(method, MapClaims{})
-}
-
-func NewWithClaims(method SigningMethod, claims Claims) *Token {
- return &Token{
- Header: map[string]interface{}{
- "typ": "JWT",
- "alg": method.Alg(),
- },
- Claims: claims,
- Method: method,
- }
-}
-
-// Get the complete, signed token
-func (t *Token) SignedString(key interface{}) (string, error) {
- var sig, sstr string
- var err error
- if sstr, err = t.SigningString(); err != nil {
- return "", err
- }
- if sig, err = t.Method.Sign(sstr, key); err != nil {
- return "", err
- }
- return strings.Join([]string{sstr, sig}, "."), nil
-}
-
-// Generate the signing string. This is the
-// most expensive part of the whole deal. Unless you
-// need this for something special, just go straight for
-// the SignedString.
-func (t *Token) SigningString() (string, error) {
- var err error
- parts := make([]string, 2)
- for i := range parts {
- var jsonValue []byte
- if i == 0 {
- if jsonValue, err = json.Marshal(t.Header); err != nil {
- return "", err
- }
- } else {
- if jsonValue, err = json.Marshal(t.Claims); err != nil {
- return "", err
- }
- }
-
- parts[i] = EncodeSegment(jsonValue)
- }
- return strings.Join(parts, "."), nil
-}
-
-// Parse, validate, and return a token.
-// keyFunc will receive the parsed token and should return the key for validating.
-// If everything is kosher, err will be nil
-func Parse(tokenString string, keyFunc Keyfunc) (*Token, error) {
- return new(Parser).Parse(tokenString, keyFunc)
-}
-
-func ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) {
- return new(Parser).ParseWithClaims(tokenString, claims, keyFunc)
-}
-
-// Encode JWT specific base64url encoding with padding stripped
-func EncodeSegment(seg []byte) string {
- return base64.RawURLEncoding.EncodeToString(seg)
-}
-
-// Decode JWT specific base64url encoding with padding stripped
-func DecodeSegment(seg string) ([]byte, error) {
- return base64.RawURLEncoding.DecodeString(seg)
-}
diff --git a/vendor/github.com/golang-jwt/jwt/.gitignore b/vendor/github.com/golang-jwt/jwt/v4/.gitignore
similarity index 100%
rename from vendor/github.com/golang-jwt/jwt/.gitignore
rename to vendor/github.com/golang-jwt/jwt/v4/.gitignore
diff --git a/vendor/github.com/golang-jwt/jwt/LICENSE b/vendor/github.com/golang-jwt/jwt/v4/LICENSE
similarity index 100%
rename from vendor/github.com/golang-jwt/jwt/LICENSE
rename to vendor/github.com/golang-jwt/jwt/v4/LICENSE
diff --git a/vendor/github.com/golang-jwt/jwt/v4/MIGRATION_GUIDE.md b/vendor/github.com/golang-jwt/jwt/v4/MIGRATION_GUIDE.md
new file mode 100644
index 00000000..32966f59
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v4/MIGRATION_GUIDE.md
@@ -0,0 +1,22 @@
+## Migration Guide (v4.0.0)
+
+Starting from [v4.0.0](https://github.com/golang-jwt/jwt/releases/tag/v4.0.0), the import path will be:
+
+ "github.com/golang-jwt/jwt/v4"
+
+The `/v4` version will be backwards compatible with existing `v3.x.y` tags in this repo, as well as
+`github.com/dgrijalva/jwt-go`. For most users this should be a drop-in replacement, if you're having
+troubles migrating, please open an issue.
+
+You can replace all occurrences of `github.com/dgrijalva/jwt-go` or `github.com/golang-jwt/jwt` with `github.com/golang-jwt/jwt/v4`, either manually or by using tools such as `sed` or `gofmt`.
+
+And then you'd typically run:
+
+```
+go get github.com/golang-jwt/jwt/v4
+go mod tidy
+```
+
+## Older releases (before v3.2.0)
+
+The original migration guide for older releases can be found at https://github.com/dgrijalva/jwt-go/blob/master/MIGRATION_GUIDE.md.
diff --git a/vendor/github.com/golang-jwt/jwt/README.md b/vendor/github.com/golang-jwt/jwt/v4/README.md
similarity index 66%
rename from vendor/github.com/golang-jwt/jwt/README.md
rename to vendor/github.com/golang-jwt/jwt/v4/README.md
index 9b653e46..30f2f2a6 100644
--- a/vendor/github.com/golang-jwt/jwt/README.md
+++ b/vendor/github.com/golang-jwt/jwt/v4/README.md
@@ -1,13 +1,15 @@
# jwt-go
[](https://github.com/golang-jwt/jwt/actions/workflows/build.yml)
-[](https://pkg.go.dev/github.com/golang-jwt/jwt)
+[](https://pkg.go.dev/github.com/golang-jwt/jwt/v4)
A [go](http://www.golang.org) (or 'golang' for search engine friendliness) implementation of [JSON Web Tokens](https://datatracker.ietf.org/doc/html/rfc7519).
-**IMPORT PATH CHANGE:** Starting from [v3.2.1](https://github.com/golang-jwt/jwt/releases/tag/v3.2.1), the import path has changed from `github.com/dgrijalva/jwt-go` to `github.com/golang-jwt/jwt`. After the original author of the library suggested migrating the maintenance of `jwt-go`, a dedicated team of open source maintainers decided to clone the existing library into this repository. See [dgrijalva/jwt-go#462](https://github.com/dgrijalva/jwt-go/issues/462) for a detailed discussion on this topic.
+Starting with [v4.0.0](https://github.com/golang-jwt/jwt/releases/tag/v4.0.0) this project adds Go module support, but maintains backwards compatibility with older `v3.x.y` tags and upstream `github.com/dgrijalva/jwt-go`.
+See the [`MIGRATION_GUIDE.md`](./MIGRATION_GUIDE.md) for more information.
+
+> After the original author of the library suggested migrating the maintenance of `jwt-go`, a dedicated team of open source maintainers decided to clone the existing library into this repository. See [dgrijalva/jwt-go#462](https://github.com/dgrijalva/jwt-go/issues/462) for a detailed discussion on this topic.
-Future releases will be using the `github.com/golang-jwt/jwt` import path and continue the existing versioning scheme of `v3.x.x+incompatible`. Backwards-compatible patches and fixes will be done on the `v3` release branch, where as new build-breaking features will be developed in a `v4` release, possibly including a SIV-style import path.
**SECURITY NOTICE:** Some older versions of Go have a security issue in the crypto/elliptic. Recommendation is to upgrade to at least 1.15 See issue [dgrijalva/jwt-go#216](https://github.com/dgrijalva/jwt-go/issues/216) for more detail.
@@ -34,23 +36,45 @@ The part in the middle is the interesting bit. It's called the Claims and conta
This library supports the parsing and verification as well as the generation and signing of JWTs. Current supported signing algorithms are HMAC SHA, RSA, RSA-PSS, and ECDSA, though hooks are present for adding your own.
+## Installation Guidelines
+
+1. To install the jwt package, you first need to have [Go](https://go.dev/doc/install) installed, then you can use the command below to add `jwt-go` as a dependency in your Go program.
+
+```sh
+go get -u github.com/golang-jwt/jwt/v4
+```
+
+2. Import it in your code:
+
+```go
+import "github.com/golang-jwt/jwt/v4"
+```
+
## Examples
-See [the project documentation](https://pkg.go.dev/github.com/golang-jwt/jwt) for examples of usage:
+See [the project documentation](https://pkg.go.dev/github.com/golang-jwt/jwt/v4) for examples of usage:
-* [Simple example of parsing and validating a token](https://pkg.go.dev/github.com/golang-jwt/jwt#example-Parse-Hmac)
-* [Simple example of building and signing a token](https://pkg.go.dev/github.com/golang-jwt/jwt#example-New-Hmac)
-* [Directory of Examples](https://pkg.go.dev/github.com/golang-jwt/jwt#pkg-examples)
+* [Simple example of parsing and validating a token](https://pkg.go.dev/github.com/golang-jwt/jwt/v4#example-Parse-Hmac)
+* [Simple example of building and signing a token](https://pkg.go.dev/github.com/golang-jwt/jwt/v4#example-New-Hmac)
+* [Directory of Examples](https://pkg.go.dev/github.com/golang-jwt/jwt/v4#pkg-examples)
## Extensions
-This library publishes all the necessary components for adding your own signing methods. Simply implement the `SigningMethod` interface and register a factory method using `RegisterSigningMethod`.
+This library publishes all the necessary components for adding your own signing methods or key functions. Simply implement the `SigningMethod` interface and register a factory method using `RegisterSigningMethod` or provide a `jwt.Keyfunc`.
-Here's an example of an extension that integrates with multiple Google Cloud Platform signing tools (AppEngine, IAM API, Cloud KMS): https://github.com/someone1/gcp-jwt-go
+A common use case would be integrating with different 3rd party signature providers, like key management services from various cloud providers or Hardware Security Modules (HSMs) or to implement additional standards.
+
+| Extension | Purpose | Repo |
+| --------- | -------------------------------------------------------------------------------------------------------- | ------------------------------------------ |
+| GCP | Integrates with multiple Google Cloud Platform signing tools (AppEngine, IAM API, Cloud KMS) | https://github.com/someone1/gcp-jwt-go |
+| AWS | Integrates with AWS Key Management Service, KMS | https://github.com/matelang/jwt-go-aws-kms |
+| JWKS | Provides support for JWKS ([RFC 7517](https://datatracker.ietf.org/doc/html/rfc7517)) as a `jwt.Keyfunc` | https://github.com/MicahParks/keyfunc |
+
+*Disclaimer*: Unless otherwise specified, these integrations are maintained by third parties and should not be considered as a primary offer by any of the mentioned cloud providers
## Compliance
-This library was last reviewed to comply with [RTF 7519](https://datatracker.ietf.org/doc/html/rfc7519) dated May 2015 with a few notable differences:
+This library was last reviewed to comply with [RFC 7519](https://datatracker.ietf.org/doc/html/rfc7519) dated May 2015 with a few notable differences:
* In order to protect against accidental use of [Unsecured JWTs](https://datatracker.ietf.org/doc/html/rfc7519#section-6), tokens using `alg=none` will only be accepted if the constant `jwt.UnsafeAllowNoneSignatureType` is provided as the key.
@@ -60,10 +84,8 @@ This library is considered production ready. Feedback and feature requests are
This project uses [Semantic Versioning 2.0.0](http://semver.org). Accepted pull requests will land on `main`. Periodically, versions will be tagged from `main`. You can find all the releases on [the project releases page](https://github.com/golang-jwt/jwt/releases).
-While we try to make it obvious when we make breaking changes, there isn't a great mechanism for pushing announcements out to users. You may want to use this alternative package include: `gopkg.in/golang-jwt/jwt.v3`. It will do the right thing WRT semantic versioning.
-
**BREAKING CHANGES:***
-* Version 3.0.0 includes _a lot_ of changes from the 2.x line, including a few that break the API. We've tried to break as few things as possible, so there should just be a few type signature changes. A full list of breaking changes is available in `VERSION_HISTORY.md`. See `MIGRATION_GUIDE.md` for more information on updating your code.
+A full list of breaking changes is available in `VERSION_HISTORY.md`. See `MIGRATION_GUIDE.md` for more information on updating your code.
## Usage Tips
@@ -74,7 +96,7 @@ A token is simply a JSON object that is signed by its author. this tells you exa
* The author of the token was in the possession of the signing secret
* The data has not been modified since it was signed
-It's important to know that JWT does not provide encryption, which means anyone who has access to the token can read its contents. If you need to protect (encrypt) the data, there is a companion spec, `JWE`, that provides this functionality. JWE is currently outside the scope of this library.
+It's important to know that JWT does not provide encryption, which means anyone who has access to the token can read its contents. If you need to protect (encrypt) the data, there is a companion spec, `JWE`, that provides this functionality. The companion project https://github.com/golang-jwt/jwe aims at a (very) experimental implementation of the JWE standard.
### Choosing a Signing Method
@@ -88,9 +110,10 @@ Asymmetric signing methods, such as RSA, use different keys for signing and veri
Each signing method expects a different object type for its signing keys. See the package documentation for details. Here are the most common ones:
-* The [HMAC signing method](https://pkg.go.dev/github.com/golang-jwt/jwt#SigningMethodHMAC) (`HS256`,`HS384`,`HS512`) expect `[]byte` values for signing and validation
-* The [RSA signing method](https://pkg.go.dev/github.com/golang-jwt/jwt#SigningMethodRSA) (`RS256`,`RS384`,`RS512`) expect `*rsa.PrivateKey` for signing and `*rsa.PublicKey` for validation
-* The [ECDSA signing method](https://pkg.go.dev/github.com/golang-jwt/jwt#SigningMethodECDSA) (`ES256`,`ES384`,`ES512`) expect `*ecdsa.PrivateKey` for signing and `*ecdsa.PublicKey` for validation
+* The [HMAC signing method](https://pkg.go.dev/github.com/golang-jwt/jwt/v4#SigningMethodHMAC) (`HS256`,`HS384`,`HS512`) expect `[]byte` values for signing and validation
+* The [RSA signing method](https://pkg.go.dev/github.com/golang-jwt/jwt/v4#SigningMethodRSA) (`RS256`,`RS384`,`RS512`) expect `*rsa.PrivateKey` for signing and `*rsa.PublicKey` for validation
+* The [ECDSA signing method](https://pkg.go.dev/github.com/golang-jwt/jwt/v4#SigningMethodECDSA) (`ES256`,`ES384`,`ES512`) expect `*ecdsa.PrivateKey` for signing and `*ecdsa.PublicKey` for validation
+* The [EdDSA signing method](https://pkg.go.dev/github.com/golang-jwt/jwt/v4#SigningMethodEd25519) (`Ed25519`) expect `ed25519.PrivateKey` for signing and `ed25519.PublicKey` for validation
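A minimal HS256 example matching the key expectations in the list above (the []byte secret is a placeholder):

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/golang-jwt/jwt/v4"
)

func main() {
	key := []byte("secret") // HS256 expects a []byte key
	token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.RegisteredClaims{
		Issuer:    "example",
		ExpiresAt: jwt.NewNumericDate(time.Now().Add(time.Hour)),
	})
	signed, err := token.SignedString(key)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(signed)
}
```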
### JWT and OAuth
@@ -108,6 +131,8 @@ This library uses descriptive error messages whenever possible. If you are not g
## More
-Documentation can be found [on pkg.go.dev](https://pkg.go.dev/github.com/golang-jwt/jwt).
+Documentation can be found [on pkg.go.dev](https://pkg.go.dev/github.com/golang-jwt/jwt/v4).
The command line utility included in this project (cmd/jwt) provides a straightforward example of token creation and parsing as well as a useful tool for debugging your own integration. You'll also find several implementation examples in the documentation.
+
+[golang-jwt](https://github.com/orgs/golang-jwt) incorporates a modified version of the JWT logo, which is distributed under the terms of the [MIT License](https://github.com/jsonwebtoken/jsonwebtoken.github.io/blob/master/LICENSE.txt).
diff --git a/vendor/github.com/golang-jwt/jwt/v4/SECURITY.md b/vendor/github.com/golang-jwt/jwt/v4/SECURITY.md
new file mode 100644
index 00000000..b08402c3
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v4/SECURITY.md
@@ -0,0 +1,19 @@
+# Security Policy
+
+## Supported Versions
+
+As of February 2022 (and until this document is updated), the latest version `v4` is supported.
+
+## Reporting a Vulnerability
+
+If you think you found a vulnerability, and even if you are not sure, please report it to jwt-go-security@googlegroups.com or one of the other [golang-jwt maintainers](https://github.com/orgs/golang-jwt/people). Please try to be explicit and describe steps to reproduce the security issue with code example(s).
+
+You will receive a response in a timely manner. If the issue is confirmed, we will do our best to release a patch as soon as possible given the complexity of the problem.
+
+## Public Discussions
+
+Please avoid publicly discussing a potential security vulnerability.
+
+Let's take this offline and find a solution first; this limits the potential impact as much as possible.
+
+We appreciate your help!
diff --git a/vendor/github.com/golang-jwt/jwt/VERSION_HISTORY.md b/vendor/github.com/golang-jwt/jwt/v4/VERSION_HISTORY.md
similarity index 98%
rename from vendor/github.com/golang-jwt/jwt/VERSION_HISTORY.md
rename to vendor/github.com/golang-jwt/jwt/v4/VERSION_HISTORY.md
index 637f2ba6..afbfc4e4 100644
--- a/vendor/github.com/golang-jwt/jwt/VERSION_HISTORY.md
+++ b/vendor/github.com/golang-jwt/jwt/v4/VERSION_HISTORY.md
@@ -1,5 +1,9 @@
## `jwt-go` Version History
+#### 4.0.0
+
+* Introduces support for Go modules. The `v4` version will be backwards compatible with `v3.x.y`.
+
#### 3.2.2
* Starting from this release, we are adopting the policy to support the most 2 recent versions of Go currently available. By the time of this release, this is Go 1.15 and 1.16 ([#28](https://github.com/golang-jwt/jwt/pull/28)).
diff --git a/vendor/github.com/golang-jwt/jwt/v4/claims.go b/vendor/github.com/golang-jwt/jwt/v4/claims.go
new file mode 100644
index 00000000..364cec87
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v4/claims.go
@@ -0,0 +1,269 @@
+package jwt
+
+import (
+ "crypto/subtle"
+ "fmt"
+ "time"
+)
+
+// Claims must just have a Valid method that determines
+// if the token is invalid for any supported reason
+type Claims interface {
+ Valid() error
+}
+
+// RegisteredClaims are a structured version of the JWT Claims Set,
+// restricted to Registered Claim Names, as referenced at
+// https://datatracker.ietf.org/doc/html/rfc7519#section-4.1
+//
+// This type can be used on its own, but then additional private and
+// public claims embedded in the JWT will not be parsed. The typical use case
+// therefore is to embed this in a user-defined claim type.
+//
+// See examples for how to use this with your own claim types.
+type RegisteredClaims struct {
+ // the `iss` (Issuer) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.1
+ Issuer string `json:"iss,omitempty"`
+
+ // the `sub` (Subject) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.2
+ Subject string `json:"sub,omitempty"`
+
+ // the `aud` (Audience) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.3
+ Audience ClaimStrings `json:"aud,omitempty"`
+
+ // the `exp` (Expiration Time) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.4
+ ExpiresAt *NumericDate `json:"exp,omitempty"`
+
+ // the `nbf` (Not Before) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.5
+ NotBefore *NumericDate `json:"nbf,omitempty"`
+
+ // the `iat` (Issued At) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.6
+ IssuedAt *NumericDate `json:"iat,omitempty"`
+
+ // the `jti` (JWT ID) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.7
+ ID string `json:"jti,omitempty"`
+}
+
+// Valid validates time based claims "exp, iat, nbf".
+// There is no accounting for clock skew.
+// As well, if any of the above claims are not in the token, it will still
+// be considered a valid claim.
+func (c RegisteredClaims) Valid() error {
+ vErr := new(ValidationError)
+ now := TimeFunc()
+
+ // The claims below are optional, by default, so if they are set to the
+ // default value in Go, let's not fail the verification for them.
+ if !c.VerifyExpiresAt(now, false) {
+ delta := now.Sub(c.ExpiresAt.Time)
+ vErr.Inner = fmt.Errorf("%s by %s", ErrTokenExpired, delta)
+ vErr.Errors |= ValidationErrorExpired
+ }
+
+ if !c.VerifyIssuedAt(now, false) {
+ vErr.Inner = ErrTokenUsedBeforeIssued
+ vErr.Errors |= ValidationErrorIssuedAt
+ }
+
+ if !c.VerifyNotBefore(now, false) {
+ vErr.Inner = ErrTokenNotValidYet
+ vErr.Errors |= ValidationErrorNotValidYet
+ }
+
+ if vErr.valid() {
+ return nil
+ }
+
+ return vErr
+}
+
+// VerifyAudience compares the aud claim against cmp.
+// If required is false, this method will return true if the value matches or is unset
+func (c *RegisteredClaims) VerifyAudience(cmp string, req bool) bool {
+ return verifyAud(c.Audience, cmp, req)
+}
+
+// VerifyExpiresAt compares the exp claim against cmp (cmp < exp).
+// If req is false, it will return true, if exp is unset.
+func (c *RegisteredClaims) VerifyExpiresAt(cmp time.Time, req bool) bool {
+ if c.ExpiresAt == nil {
+ return verifyExp(nil, cmp, req)
+ }
+
+ return verifyExp(&c.ExpiresAt.Time, cmp, req)
+}
+
+// VerifyIssuedAt compares the iat claim against cmp (cmp >= iat).
+// If req is false, it will return true, if iat is unset.
+func (c *RegisteredClaims) VerifyIssuedAt(cmp time.Time, req bool) bool {
+ if c.IssuedAt == nil {
+ return verifyIat(nil, cmp, req)
+ }
+
+ return verifyIat(&c.IssuedAt.Time, cmp, req)
+}
+
+// VerifyNotBefore compares the nbf claim against cmp (cmp >= nbf).
+// If req is false, it will return true, if nbf is unset.
+func (c *RegisteredClaims) VerifyNotBefore(cmp time.Time, req bool) bool {
+ if c.NotBefore == nil {
+ return verifyNbf(nil, cmp, req)
+ }
+
+ return verifyNbf(&c.NotBefore.Time, cmp, req)
+}
+
+// VerifyIssuer compares the iss claim against cmp.
+// If required is false, this method will return true if the value matches or is unset
+func (c *RegisteredClaims) VerifyIssuer(cmp string, req bool) bool {
+ return verifyIss(c.Issuer, cmp, req)
+}
+
+// StandardClaims are a structured version of the JWT Claims Set, as referenced at
+// https://datatracker.ietf.org/doc/html/rfc7519#section-4. They do not follow the
+// specification exactly, since they were based on an earlier draft of the
+// specification and not updated. The main difference is that they only
+// support integer-based date fields and singular audiences. This might lead to
+// incompatibilities with other JWT implementations. The use of this is discouraged; instead,
+// the newer RegisteredClaims struct should be used.
+//
+// Deprecated: Use RegisteredClaims instead for a forward-compatible way to access registered claims in a struct.
+type StandardClaims struct {
+ Audience string `json:"aud,omitempty"`
+ ExpiresAt int64 `json:"exp,omitempty"`
+ Id string `json:"jti,omitempty"`
+ IssuedAt int64 `json:"iat,omitempty"`
+ Issuer string `json:"iss,omitempty"`
+ NotBefore int64 `json:"nbf,omitempty"`
+ Subject string `json:"sub,omitempty"`
+}
+
+// Valid validates time based claims "exp, iat, nbf". There is no accounting for clock skew.
+// As well, if any of the above claims are not in the token, it will still
+// be considered a valid claim.
+func (c StandardClaims) Valid() error {
+ vErr := new(ValidationError)
+ now := TimeFunc().Unix()
+
+ // The claims below are optional, by default, so if they are set to the
+ // default value in Go, let's not fail the verification for them.
+ if !c.VerifyExpiresAt(now, false) {
+ delta := time.Unix(now, 0).Sub(time.Unix(c.ExpiresAt, 0))
+ vErr.Inner = fmt.Errorf("%s by %s", ErrTokenExpired, delta)
+ vErr.Errors |= ValidationErrorExpired
+ }
+
+ if !c.VerifyIssuedAt(now, false) {
+ vErr.Inner = ErrTokenUsedBeforeIssued
+ vErr.Errors |= ValidationErrorIssuedAt
+ }
+
+ if !c.VerifyNotBefore(now, false) {
+ vErr.Inner = ErrTokenNotValidYet
+ vErr.Errors |= ValidationErrorNotValidYet
+ }
+
+ if vErr.valid() {
+ return nil
+ }
+
+ return vErr
+}
+
+// VerifyAudience compares the aud claim against cmp.
+// If required is false, this method will return true if the value matches or is unset
+func (c *StandardClaims) VerifyAudience(cmp string, req bool) bool {
+ return verifyAud([]string{c.Audience}, cmp, req)
+}
+
+// VerifyExpiresAt compares the exp claim against cmp (cmp < exp).
+// If req is false, it will return true, if exp is unset.
+func (c *StandardClaims) VerifyExpiresAt(cmp int64, req bool) bool {
+ if c.ExpiresAt == 0 {
+ return verifyExp(nil, time.Unix(cmp, 0), req)
+ }
+
+ t := time.Unix(c.ExpiresAt, 0)
+ return verifyExp(&t, time.Unix(cmp, 0), req)
+}
+
+// VerifyIssuedAt compares the iat claim against cmp (cmp >= iat).
+// If req is false, it will return true, if iat is unset.
+func (c *StandardClaims) VerifyIssuedAt(cmp int64, req bool) bool {
+ if c.IssuedAt == 0 {
+ return verifyIat(nil, time.Unix(cmp, 0), req)
+ }
+
+ t := time.Unix(c.IssuedAt, 0)
+ return verifyIat(&t, time.Unix(cmp, 0), req)
+}
+
+// VerifyNotBefore compares the nbf claim against cmp (cmp >= nbf).
+// If req is false, it will return true, if nbf is unset.
+func (c *StandardClaims) VerifyNotBefore(cmp int64, req bool) bool {
+ if c.NotBefore == 0 {
+ return verifyNbf(nil, time.Unix(cmp, 0), req)
+ }
+
+ t := time.Unix(c.NotBefore, 0)
+ return verifyNbf(&t, time.Unix(cmp, 0), req)
+}
+
+// VerifyIssuer compares the iss claim against cmp.
+// If required is false, this method will return true if the value matches or is unset
+func (c *StandardClaims) VerifyIssuer(cmp string, req bool) bool {
+ return verifyIss(c.Issuer, cmp, req)
+}
+
+// ----- helpers
+
+func verifyAud(aud []string, cmp string, required bool) bool {
+ if len(aud) == 0 {
+ return !required
+ }
+ // use a var here to keep constant time compare when looping over a number of claims
+ result := false
+
+ var stringClaims string
+ for _, a := range aud {
+ if subtle.ConstantTimeCompare([]byte(a), []byte(cmp)) != 0 {
+ result = true
+ }
+ stringClaims = stringClaims + a
+ }
+
+ // case where "" is sent in one or many aud claims
+ if len(stringClaims) == 0 {
+ return !required
+ }
+
+ return result
+}
+
+func verifyExp(exp *time.Time, now time.Time, required bool) bool {
+ if exp == nil {
+ return !required
+ }
+ return now.Before(*exp)
+}
+
+func verifyIat(iat *time.Time, now time.Time, required bool) bool {
+ if iat == nil {
+ return !required
+ }
+ return now.After(*iat) || now.Equal(*iat)
+}
+
+func verifyNbf(nbf *time.Time, now time.Time, required bool) bool {
+ if nbf == nil {
+ return !required
+ }
+ return now.After(*nbf) || now.Equal(*nbf)
+}
+
+func verifyIss(iss string, cmp string, required bool) bool {
+ if iss == "" {
+ return !required
+ }
+ return subtle.ConstantTimeCompare([]byte(iss), []byte(cmp)) != 0
+}
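A hedged usage sketch for RegisteredClaims as its doc comment recommends: embed it in a user-defined claim type so private claims parse alongside the registered ones (type name, claim names, and key are placeholders):

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/golang-jwt/jwt/v4"
)

// MyClaims embeds RegisteredClaims, so it inherits its Valid method and the
// registered claims parse next to the private "scope" claim.
type MyClaims struct {
	Scope string `json:"scope,omitempty"`
	jwt.RegisteredClaims
}

func main() {
	key := []byte("secret") // placeholder HMAC key
	claims := MyClaims{
		Scope: "read",
		RegisteredClaims: jwt.RegisteredClaims{
			Issuer:    "example",
			IssuedAt:  jwt.NewNumericDate(time.Now()),
			ExpiresAt: jwt.NewNumericDate(time.Now().Add(time.Hour)),
		},
	}
	signed, err := jwt.NewWithClaims(jwt.SigningMethodHS256, claims).SignedString(key)
	if err != nil {
		log.Fatal(err)
	}

	var out MyClaims
	_, err = jwt.ParseWithClaims(signed, &out, func(t *jwt.Token) (interface{}, error) {
		return key, nil
	})
	if err != nil {
		log.Fatal(err) // Valid() ran during parsing: exp/iat/nbf were checked
	}
	fmt.Println(out.Scope, out.Issuer)
}
```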
diff --git a/vendor/github.com/golang-jwt/jwt/doc.go b/vendor/github.com/golang-jwt/jwt/v4/doc.go
similarity index 100%
rename from vendor/github.com/golang-jwt/jwt/doc.go
rename to vendor/github.com/golang-jwt/jwt/v4/doc.go
diff --git a/vendor/github.com/golang-jwt/jwt/ecdsa.go b/vendor/github.com/golang-jwt/jwt/v4/ecdsa.go
similarity index 94%
rename from vendor/github.com/golang-jwt/jwt/ecdsa.go
rename to vendor/github.com/golang-jwt/jwt/v4/ecdsa.go
index 15e23435..eac023fc 100644
--- a/vendor/github.com/golang-jwt/jwt/ecdsa.go
+++ b/vendor/github.com/golang-jwt/jwt/v4/ecdsa.go
@@ -13,7 +13,7 @@ var (
ErrECDSAVerification = errors.New("crypto/ecdsa: verification error")
)
-// Implements the ECDSA family of signing methods signing methods
+// SigningMethodECDSA implements the ECDSA family of signing methods.
// Expects *ecdsa.PrivateKey for signing and *ecdsa.PublicKey for verification
type SigningMethodECDSA struct {
Name string
@@ -53,7 +53,7 @@ func (m *SigningMethodECDSA) Alg() string {
return m.Name
}
-// Implements the Verify method from SigningMethod
+// Verify implements token verification for the SigningMethod.
// For this verify method, key must be an ecdsa.PublicKey struct
func (m *SigningMethodECDSA) Verify(signingString, signature string, key interface{}) error {
var err error
@@ -95,7 +95,7 @@ func (m *SigningMethodECDSA) Verify(signingString, signature string, key interfa
return ErrECDSAVerification
}
-// Implements the Sign method from SigningMethod
+// Sign implements token signing for the SigningMethod.
// For this signing method, key must be an ecdsa.PrivateKey struct
func (m *SigningMethodECDSA) Sign(signingString string, key interface{}) (string, error) {
// Get the key
diff --git a/vendor/github.com/golang-jwt/jwt/ecdsa_utils.go b/vendor/github.com/golang-jwt/jwt/v4/ecdsa_utils.go
similarity index 81%
rename from vendor/github.com/golang-jwt/jwt/ecdsa_utils.go
rename to vendor/github.com/golang-jwt/jwt/v4/ecdsa_utils.go
index db9f4be7..5700636d 100644
--- a/vendor/github.com/golang-jwt/jwt/ecdsa_utils.go
+++ b/vendor/github.com/golang-jwt/jwt/v4/ecdsa_utils.go
@@ -8,11 +8,11 @@ import (
)
var (
- ErrNotECPublicKey = errors.New("Key is not a valid ECDSA public key")
- ErrNotECPrivateKey = errors.New("Key is not a valid ECDSA private key")
+ ErrNotECPublicKey = errors.New("key is not a valid ECDSA public key")
+ ErrNotECPrivateKey = errors.New("key is not a valid ECDSA private key")
)
-// Parse PEM encoded Elliptic Curve Private Key Structure
+// ParseECPrivateKeyFromPEM parses a PEM encoded Elliptic Curve Private Key Structure
func ParseECPrivateKeyFromPEM(key []byte) (*ecdsa.PrivateKey, error) {
var err error
@@ -39,7 +39,7 @@ func ParseECPrivateKeyFromPEM(key []byte) (*ecdsa.PrivateKey, error) {
return pkey, nil
}
-// Parse PEM encoded PKCS1 or PKCS8 public key
+// ParseECPublicKeyFromPEM parses a PEM encoded PKCS1 or PKCS8 public key
func ParseECPublicKeyFromPEM(key []byte) (*ecdsa.PublicKey, error) {
var err error
diff --git a/vendor/github.com/golang-jwt/jwt/ed25519.go b/vendor/github.com/golang-jwt/jwt/v4/ed25519.go
similarity index 70%
rename from vendor/github.com/golang-jwt/jwt/ed25519.go
rename to vendor/github.com/golang-jwt/jwt/v4/ed25519.go
index a2f8ddbe..07d3aacd 100644
--- a/vendor/github.com/golang-jwt/jwt/ed25519.go
+++ b/vendor/github.com/golang-jwt/jwt/v4/ed25519.go
@@ -3,14 +3,16 @@ package jwt
import (
"errors"
+ "crypto"
"crypto/ed25519"
+ "crypto/rand"
)
var (
ErrEd25519Verification = errors.New("ed25519: verification error")
)
-// Implements the EdDSA family
+// SigningMethodEd25519 implements the EdDSA family.
// Expects ed25519.PrivateKey for signing and ed25519.PublicKey for verification
type SigningMethodEd25519 struct{}
@@ -30,7 +32,7 @@ func (m *SigningMethodEd25519) Alg() string {
return "EdDSA"
}
-// Implements the Verify method from SigningMethod
+// Verify implements token verification for the SigningMethod.
// For this verify method, key must be an ed25519.PublicKey
func (m *SigningMethodEd25519) Verify(signingString, signature string, key interface{}) error {
var err error
@@ -59,23 +61,25 @@ func (m *SigningMethodEd25519) Verify(signingString, signature string, key inter
return nil
}
-// Implements the Sign method from SigningMethod
+// Sign implements token signing for the SigningMethod.
// For this signing method, key must be an ed25519.PrivateKey
func (m *SigningMethodEd25519) Sign(signingString string, key interface{}) (string, error) {
- var ed25519Key ed25519.PrivateKey
+ var ed25519Key crypto.Signer
var ok bool
- if ed25519Key, ok = key.(ed25519.PrivateKey); !ok {
+ if ed25519Key, ok = key.(crypto.Signer); !ok {
return "", ErrInvalidKeyType
}
- // ed25519.Sign panics if private key not equal to ed25519.PrivateKeySize
- // this allows to avoid recover usage
- if len(ed25519Key) != ed25519.PrivateKeySize {
+ if _, ok := ed25519Key.Public().(ed25519.PublicKey); !ok {
return "", ErrInvalidKey
}
// Sign the string and return the encoded result
- sig := ed25519.Sign(ed25519Key, []byte(signingString))
+ // ed25519 performs a two-pass hash as part of its algorithm. Therefore, we need to pass a non-prehashed message into the Sign function, as indicated by crypto.Hash(0)
+ sig, err := ed25519Key.Sign(rand.Reader, []byte(signingString), crypto.Hash(0))
+ if err != nil {
+ return "", err
+ }
return EncodeSegment(sig), nil
}
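Loosening the key type to crypto.Signer means any signer whose public key is an ed25519.PublicKey now works, not just an in-memory ed25519.PrivateKey (external signers such as HSM or KMS wrappers qualify). A sketch using the stdlib key, which already implements crypto.Signer:

```go
package main

import (
	"crypto/ed25519"
	"crypto/rand"
	"fmt"
	"log"

	"github.com/golang-jwt/jwt/v4"
)

func main() {
	pub, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		log.Fatal(err)
	}
	// ed25519.PrivateKey satisfies the broadened crypto.Signer check in the
	// patched Sign method; so would any wrapper whose Public() is an
	// ed25519.PublicKey.
	token := jwt.NewWithClaims(jwt.SigningMethodEdDSA, jwt.MapClaims{"sub": "demo"})
	signed, err := token.SignedString(priv)
	if err != nil {
		log.Fatal(err)
	}

	parsed, err := jwt.Parse(signed, func(t *jwt.Token) (interface{}, error) {
		return pub, nil
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(parsed.Valid) // true
}
```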
diff --git a/vendor/github.com/golang-jwt/jwt/ed25519_utils.go b/vendor/github.com/golang-jwt/jwt/v4/ed25519_utils.go
similarity index 80%
rename from vendor/github.com/golang-jwt/jwt/ed25519_utils.go
rename to vendor/github.com/golang-jwt/jwt/v4/ed25519_utils.go
index c6357275..cdb5e68e 100644
--- a/vendor/github.com/golang-jwt/jwt/ed25519_utils.go
+++ b/vendor/github.com/golang-jwt/jwt/v4/ed25519_utils.go
@@ -9,11 +9,11 @@ import (
)
var (
- ErrNotEdPrivateKey = errors.New("Key is not a valid Ed25519 private key")
- ErrNotEdPublicKey = errors.New("Key is not a valid Ed25519 public key")
+ ErrNotEdPrivateKey = errors.New("key is not a valid Ed25519 private key")
+ ErrNotEdPublicKey = errors.New("key is not a valid Ed25519 public key")
)
-// Parse PEM-encoded Edwards curve private key
+// ParseEdPrivateKeyFromPEM parses a PEM-encoded Edwards curve private key
func ParseEdPrivateKeyFromPEM(key []byte) (crypto.PrivateKey, error) {
var err error
@@ -38,7 +38,7 @@ func ParseEdPrivateKeyFromPEM(key []byte) (crypto.PrivateKey, error) {
return pkey, nil
}
-// Parse PEM-encoded Edwards curve public key
+// ParseEdPublicKeyFromPEM parses a PEM-encoded Edwards curve public key
func ParseEdPublicKeyFromPEM(key []byte) (crypto.PublicKey, error) {
var err error
diff --git a/vendor/github.com/golang-jwt/jwt/v4/errors.go b/vendor/github.com/golang-jwt/jwt/v4/errors.go
new file mode 100644
index 00000000..10ac8835
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v4/errors.go
@@ -0,0 +1,112 @@
+package jwt
+
+import (
+ "errors"
+)
+
+// Error constants
+var (
+ ErrInvalidKey = errors.New("key is invalid")
+ ErrInvalidKeyType = errors.New("key is of invalid type")
+ ErrHashUnavailable = errors.New("the requested hash function is unavailable")
+
+ ErrTokenMalformed = errors.New("token is malformed")
+ ErrTokenUnverifiable = errors.New("token is unverifiable")
+ ErrTokenSignatureInvalid = errors.New("token signature is invalid")
+
+ ErrTokenInvalidAudience = errors.New("token has invalid audience")
+ ErrTokenExpired = errors.New("token is expired")
+ ErrTokenUsedBeforeIssued = errors.New("token used before issued")
+ ErrTokenInvalidIssuer = errors.New("token has invalid issuer")
+ ErrTokenNotValidYet = errors.New("token is not valid yet")
+ ErrTokenInvalidId = errors.New("token has invalid id")
+ ErrTokenInvalidClaims = errors.New("token has invalid claims")
+)
+
+// The errors that might occur when parsing and validating a token
+const (
+ ValidationErrorMalformed uint32 = 1 << iota // Token is malformed
+ ValidationErrorUnverifiable // Token could not be verified because of signing problems
+ ValidationErrorSignatureInvalid // Signature validation failed
+
+ // Standard Claim validation errors
+ ValidationErrorAudience // AUD validation failed
+ ValidationErrorExpired // EXP validation failed
+ ValidationErrorIssuedAt // IAT validation failed
+ ValidationErrorIssuer // ISS validation failed
+ ValidationErrorNotValidYet // NBF validation failed
+ ValidationErrorId // JTI validation failed
+ ValidationErrorClaimsInvalid // Generic claims validation error
+)
+
+// NewValidationError is a helper for constructing a ValidationError with a string error message
+func NewValidationError(errorText string, errorFlags uint32) *ValidationError {
+ return &ValidationError{
+ text: errorText,
+ Errors: errorFlags,
+ }
+}
+
+// ValidationError represents an error from Parse if token is not valid
+type ValidationError struct {
+ Inner error // stores the error returned by external dependencies, i.e.: KeyFunc
+ Errors uint32 // bitfield. see ValidationError... constants
+ text string // errors that do not have a valid error just have text
+}
+
+// Error is the implementation of the err interface.
+func (e ValidationError) Error() string {
+ if e.Inner != nil {
+ return e.Inner.Error()
+ } else if e.text != "" {
+ return e.text
+ } else {
+ return "token is invalid"
+ }
+}
+
+// Unwrap gives errors.Is and errors.As access to the inner error.
+func (e *ValidationError) Unwrap() error {
+ return e.Inner
+}
+
+// No errors
+func (e *ValidationError) valid() bool {
+ return e.Errors == 0
+}
+
+// Is checks if this ValidationError is of the supplied error. We are first checking for the exact error message
+// by comparing the inner error message. If that fails, we compare using the error flags. This way we can use
+// custom error messages (mainly for backwards compatibility) and still leverage errors.Is using the global error variables.
+func (e *ValidationError) Is(err error) bool {
+ // Check, if our inner error is a direct match
+ if errors.Is(errors.Unwrap(e), err) {
+ return true
+ }
+
+ // Otherwise, we need to match using our error flags
+ switch err {
+ case ErrTokenMalformed:
+ return e.Errors&ValidationErrorMalformed != 0
+ case ErrTokenUnverifiable:
+ return e.Errors&ValidationErrorUnverifiable != 0
+ case ErrTokenSignatureInvalid:
+ return e.Errors&ValidationErrorSignatureInvalid != 0
+ case ErrTokenInvalidAudience:
+ return e.Errors&ValidationErrorAudience != 0
+ case ErrTokenExpired:
+ return e.Errors&ValidationErrorExpired != 0
+ case ErrTokenUsedBeforeIssued:
+ return e.Errors&ValidationErrorIssuedAt != 0
+ case ErrTokenInvalidIssuer:
+ return e.Errors&ValidationErrorIssuer != 0
+ case ErrTokenNotValidYet:
+ return e.Errors&ValidationErrorNotValidYet != 0
+ case ErrTokenInvalidId:
+ return e.Errors&ValidationErrorId != 0
+ case ErrTokenInvalidClaims:
+ return e.Errors&ValidationErrorClaimsInvalid != 0
+ }
+
+ return false
+}
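Because Is maps the flag bits onto the exported sentinels, callers can classify parse failures with errors.Is regardless of the inner error text. A small helper sketch:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/golang-jwt/jwt/v4"
)

// classify sorts a Parse error into coarse buckets; the flag-based Is above
// makes this work even when the wrapped inner message was customized.
func classify(err error) string {
	switch {
	case err == nil:
		return "ok"
	case errors.Is(err, jwt.ErrTokenMalformed):
		return "malformed"
	case errors.Is(err, jwt.ErrTokenExpired):
		return "expired"
	case errors.Is(err, jwt.ErrTokenNotValidYet):
		return "not valid yet"
	case errors.Is(err, jwt.ErrTokenSignatureInvalid):
		return "bad signature"
	default:
		return "invalid"
	}
}

func main() {
	_, err := jwt.Parse("not.a.token", func(t *jwt.Token) (interface{}, error) {
		return []byte("secret"), nil // placeholder key
	})
	fmt.Println(classify(err)) // malformed
}
```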
diff --git a/vendor/github.com/golang-jwt/jwt/hmac.go b/vendor/github.com/golang-jwt/jwt/v4/hmac.go
similarity index 90%
rename from vendor/github.com/golang-jwt/jwt/hmac.go
rename to vendor/github.com/golang-jwt/jwt/v4/hmac.go
index addbe5d4..011f68a2 100644
--- a/vendor/github.com/golang-jwt/jwt/hmac.go
+++ b/vendor/github.com/golang-jwt/jwt/v4/hmac.go
@@ -6,7 +6,7 @@ import (
"errors"
)
-// Implements the HMAC-SHA family of signing methods signing methods
+// SigningMethodHMAC implements the HMAC-SHA family of signing methods.
// Expects key type of []byte for both signing and validation
type SigningMethodHMAC struct {
Name string
@@ -45,7 +45,7 @@ func (m *SigningMethodHMAC) Alg() string {
return m.Name
}
-// Verify the signature of HSXXX tokens. Returns nil if the signature is valid.
+// Verify implements token verification for the SigningMethod. Returns nil if the signature is valid.
func (m *SigningMethodHMAC) Verify(signingString, signature string, key interface{}) error {
// Verify the key is the right type
keyBytes, ok := key.([]byte)
@@ -77,7 +77,7 @@ func (m *SigningMethodHMAC) Verify(signingString, signature string, key interfac
return nil
}
-// Implements the Sign method from SigningMethod for this signing method.
+// Sign implements token signing for the SigningMethod.
// Key must be []byte
func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) (string, error) {
if keyBytes, ok := key.([]byte); ok {
diff --git a/vendor/github.com/golang-jwt/jwt/map_claims.go b/vendor/github.com/golang-jwt/jwt/v4/map_claims.go
similarity index 54%
rename from vendor/github.com/golang-jwt/jwt/map_claims.go
rename to vendor/github.com/golang-jwt/jwt/v4/map_claims.go
index 72c79f92..2700d64a 100644
--- a/vendor/github.com/golang-jwt/jwt/map_claims.go
+++ b/vendor/github.com/golang-jwt/jwt/v4/map_claims.go
@@ -3,10 +3,11 @@ package jwt
import (
"encoding/json"
"errors"
+ "time"
// "fmt"
)
-// Claims type that uses the map[string]interface{} for JSON decoding
+// MapClaims is a claims type that uses the map[string]interface{} for JSON decoding.
// This is the default claims type if you don't supply one
type MapClaims map[string]interface{}
@@ -31,65 +32,92 @@ func (m MapClaims) VerifyAudience(cmp string, req bool) bool {
return verifyAud(aud, cmp, req)
}
-// Compares the exp claim against cmp.
-// If required is false, this method will return true if the value matches or is unset
+// VerifyExpiresAt compares the exp claim against cmp (cmp < exp).
+// If req is false, it will return true, if exp is unset.
func (m MapClaims) VerifyExpiresAt(cmp int64, req bool) bool {
- exp, ok := m["exp"]
+ cmpTime := time.Unix(cmp, 0)
+
+ v, ok := m["exp"]
if !ok {
return !req
}
- switch expType := exp.(type) {
+
+ switch exp := v.(type) {
case float64:
- return verifyExp(int64(expType), cmp, req)
+ if exp == 0 {
+ return verifyExp(nil, cmpTime, req)
+ }
+
+ return verifyExp(&newNumericDateFromSeconds(exp).Time, cmpTime, req)
case json.Number:
- v, _ := expType.Int64()
- return verifyExp(v, cmp, req)
+ v, _ := exp.Float64()
+
+ return verifyExp(&newNumericDateFromSeconds(v).Time, cmpTime, req)
}
+
return false
}
-// Compares the iat claim against cmp.
-// If required is false, this method will return true if the value matches or is unset
+// VerifyIssuedAt compares the iat claim against cmp (cmp >= iat).
+// If req is false, it will return true if iat is unset.
func (m MapClaims) VerifyIssuedAt(cmp int64, req bool) bool {
- iat, ok := m["iat"]
+ cmpTime := time.Unix(cmp, 0)
+
+ v, ok := m["iat"]
if !ok {
return !req
}
- switch iatType := iat.(type) {
+
+ switch iat := v.(type) {
case float64:
- return verifyIat(int64(iatType), cmp, req)
+ if iat == 0 {
+ return verifyIat(nil, cmpTime, req)
+ }
+
+ return verifyIat(&newNumericDateFromSeconds(iat).Time, cmpTime, req)
case json.Number:
- v, _ := iatType.Int64()
- return verifyIat(v, cmp, req)
+ v, _ := iat.Float64()
+
+ return verifyIat(&newNumericDateFromSeconds(v).Time, cmpTime, req)
}
+
return false
}
-// Compares the iss claim against cmp.
+// VerifyNotBefore compares the nbf claim against cmp (cmp >= nbf).
+// If req is false, it will return true if nbf is unset.
+func (m MapClaims) VerifyNotBefore(cmp int64, req bool) bool {
+ cmpTime := time.Unix(cmp, 0)
+
+ v, ok := m["nbf"]
+ if !ok {
+ return !req
+ }
+
+ switch nbf := v.(type) {
+ case float64:
+ if nbf == 0 {
+ return verifyNbf(nil, cmpTime, req)
+ }
+
+ return verifyNbf(&newNumericDateFromSeconds(nbf).Time, cmpTime, req)
+ case json.Number:
+ v, _ := nbf.Float64()
+
+ return verifyNbf(&newNumericDateFromSeconds(v).Time, cmpTime, req)
+ }
+
+ return false
+}
+
+// VerifyIssuer compares the iss claim against cmp.
// If required is false, this method will return true if the value matches or is unset
func (m MapClaims) VerifyIssuer(cmp string, req bool) bool {
iss, _ := m["iss"].(string)
return verifyIss(iss, cmp, req)
}
-// Compares the nbf claim against cmp.
-// If required is false, this method will return true if the value matches or is unset
-func (m MapClaims) VerifyNotBefore(cmp int64, req bool) bool {
- nbf, ok := m["nbf"]
- if !ok {
- return !req
- }
- switch nbfType := nbf.(type) {
- case float64:
- return verifyNbf(int64(nbfType), cmp, req)
- case json.Number:
- v, _ := nbfType.Int64()
- return verifyNbf(v, cmp, req)
- }
- return false
-}
-
-// Validates time based claims "exp, iat, nbf".
+// Valid validates time based claims "exp, iat, nbf".
// There is no accounting for clock skew.
// As well, if any of the above claims are not in the token, it will still
// be considered a valid claim.
@@ -98,16 +126,19 @@ func (m MapClaims) Valid() error {
now := TimeFunc().Unix()
if !m.VerifyExpiresAt(now, false) {
+ // TODO(oxisto): this should be replaced with ErrTokenExpired
vErr.Inner = errors.New("Token is expired")
vErr.Errors |= ValidationErrorExpired
}
if !m.VerifyIssuedAt(now, false) {
+ // TODO(oxisto): this should be replaced with ErrTokenUsedBeforeIssued
vErr.Inner = errors.New("Token used before issued")
vErr.Errors |= ValidationErrorIssuedAt
}
if !m.VerifyNotBefore(now, false) {
+ // TODO(oxisto): this should be replaced with ErrTokenNotValidYet
vErr.Inner = errors.New("Token is not valid yet")
vErr.Errors |= ValidationErrorNotValidYet
}
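
A brief sketch of the reworked time-claim verification from the caller's side, assuming the v4 `MapClaims` API shown in this hunk; the claims map is fabricated for illustration:

```go
package main

import (
	"fmt"
	"time"

	"github.com/golang-jwt/jwt/v4"
)

func main() {
	claims := jwt.MapClaims{
		"exp": float64(time.Now().Add(time.Hour).Unix()),
	}

	// exp is an hour away, so cmp <= exp holds.
	fmt.Println(claims.VerifyExpiresAt(time.Now().Unix(), true)) // true

	// Two hours from now the claim is considered expired.
	fmt.Println(claims.VerifyExpiresAt(time.Now().Add(2*time.Hour).Unix(), true)) // false
}
```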
diff --git a/vendor/github.com/golang-jwt/jwt/none.go b/vendor/github.com/golang-jwt/jwt/v4/none.go
similarity index 94%
rename from vendor/github.com/golang-jwt/jwt/none.go
rename to vendor/github.com/golang-jwt/jwt/v4/none.go
index f04d189d..f19835d2 100644
--- a/vendor/github.com/golang-jwt/jwt/none.go
+++ b/vendor/github.com/golang-jwt/jwt/v4/none.go
@@ -1,6 +1,6 @@
package jwt
-// Implements the none signing method. This is required by the spec
+// SigningMethodNone implements the none signing method. This is required by the spec
// but you probably should never use it.
var SigningMethodNone *signingMethodNone
diff --git a/vendor/github.com/golang-jwt/jwt/parser.go b/vendor/github.com/golang-jwt/jwt/v4/parser.go
similarity index 53%
rename from vendor/github.com/golang-jwt/jwt/parser.go
rename to vendor/github.com/golang-jwt/jwt/v4/parser.go
index d6901d9a..0fc510a0 100644
--- a/vendor/github.com/golang-jwt/jwt/parser.go
+++ b/vendor/github.com/golang-jwt/jwt/v4/parser.go
@@ -7,19 +7,52 @@ import (
"strings"
)
+const tokenDelimiter = "."
+
type Parser struct {
- ValidMethods []string // If populated, only these methods will be considered valid
- UseJSONNumber bool // Use JSON Number format in JSON decoder
- SkipClaimsValidation bool // Skip claims validation during token parsing
+ // If populated, only these methods will be considered valid.
+ //
+ // Deprecated: In future releases, this field will not be exported anymore and should be set with an option to NewParser instead.
+ ValidMethods []string
+
+ // Use JSON Number format in JSON decoder.
+ //
+ // Deprecated: In future releases, this field will not be exported anymore and should be set with an option to NewParser instead.
+ UseJSONNumber bool
+
+ // Skip claims validation during token parsing.
+ //
+ // Deprecated: In future releases, this field will not be exported anymore and should be set with an option to NewParser instead.
+ SkipClaimsValidation bool
}
-// Parse, validate, and return a token.
-// keyFunc will receive the parsed token and should return the key for validating.
-// If everything is kosher, err will be nil
+// NewParser creates a new Parser with the specified options
+func NewParser(options ...ParserOption) *Parser {
+ p := &Parser{}
+
+ // loop through our parsing options and apply them
+ for _, option := range options {
+ option(p)
+ }
+
+ return p
+}
+
+// Parse parses, validates, verifies the signature and returns the parsed token. keyFunc will
+// receive the parsed token and should return the key for validating.
func (p *Parser) Parse(tokenString string, keyFunc Keyfunc) (*Token, error) {
return p.ParseWithClaims(tokenString, MapClaims{}, keyFunc)
}
+// ParseWithClaims parses, validates, and verifies like Parse, but supplies a default object
+// implementing the Claims interface. This provides default values which can be overridden and
+// allows a caller to use their own type, rather than the default MapClaims implementation of
+// Claims.
+//
+// Note: If you provide a custom claim implementation that embeds one of the standard claims (such
+// as RegisteredClaims), make sure that a) you either embed a non-pointer version of the claims or
+// b) if you are using a pointer, allocate the proper memory for it before passing in the overall
+// claims, otherwise you might run into a panic.
func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) {
token, parts, err := p.ParseUnverified(tokenString, claims)
if err != nil {
@@ -56,12 +89,17 @@ func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyf
return token, &ValidationError{Inner: err, Errors: ValidationErrorUnverifiable}
}
+ // Perform validation
+ token.Signature = parts[2]
+ if err := token.Method.Verify(strings.Join(parts[0:2], "."), token.Signature, key); err != nil {
+ return token, &ValidationError{Inner: err, Errors: ValidationErrorSignatureInvalid}
+ }
+
vErr := &ValidationError{}
// Validate Claims
if !p.SkipClaimsValidation {
if err := token.Claims.Valid(); err != nil {
-
// If the Claims Valid returned an error, check if it is a validation error,
// If it was another error type, create a ValidationError with a generic ClaimsInvalid flag set
if e, ok := err.(*ValidationError); !ok {
@@ -69,34 +107,27 @@ func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyf
} else {
vErr = e
}
+ return token, vErr
}
}
- // Perform validation
- token.Signature = parts[2]
- if err = token.Method.Verify(strings.Join(parts[0:2], "."), token.Signature, key); err != nil {
- vErr.Inner = err
- vErr.Errors |= ValidationErrorSignatureInvalid
- }
+ // No errors so far, token is valid.
+ token.Valid = true
- if vErr.valid() {
- token.Valid = true
- return token, nil
- }
-
- return token, vErr
+ return token, nil
}
-// WARNING: Don't use this method unless you know what you're doing
+// ParseUnverified parses the token but doesn't validate the signature.
//
-// This method parses the token but doesn't validate the signature. It's only
-// ever useful in cases where you know the signature is valid (because it has
-// been checked previously in the stack) and you want to extract values from
-// it.
+// WARNING: Don't use this method unless you know what you're doing.
+//
+// It's only ever useful in cases where you know the signature is valid (because it has
+// been checked previously in the stack) and you want to extract values from it.
func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Token, parts []string, err error) {
- parts = strings.Split(tokenString, ".")
- if len(parts) != 3 {
- return nil, parts, NewValidationError("token contains an invalid number of segments", ValidationErrorMalformed)
+ var ok bool
+ parts, ok = splitToken(tokenString)
+ if !ok {
+ return nil, nil, NewValidationError("token contains an invalid number of segments", ValidationErrorMalformed)
}
token = &Token{Raw: tokenString}
@@ -146,3 +177,30 @@ func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Toke
return token, parts, nil
}
+
+// splitToken splits a token string into three parts: header, claims, and signature. It will only
+// return true if the token contains exactly two delimiters and three parts. In all other cases, it
+// will return nil parts and false.
+func splitToken(token string) ([]string, bool) {
+ parts := make([]string, 3)
+ header, remain, ok := strings.Cut(token, tokenDelimiter)
+ if !ok {
+ return nil, false
+ }
+ parts[0] = header
+ claims, remain, ok := strings.Cut(remain, tokenDelimiter)
+ if !ok {
+ return nil, false
+ }
+ parts[1] = claims
+ // One more cut to ensure the signature is the last part of the token and there are no more
+ // delimiters. This avoids an issue where malicious input could contain additional delimiters
+ // causing unnecessary overhead when parsing tokens.
+ signature, _, unexpected := strings.Cut(remain, tokenDelimiter)
+ if unexpected {
+ return nil, false
+ }
+ parts[2] = signature
+
+ return parts, true
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v4/parser_option.go b/vendor/github.com/golang-jwt/jwt/v4/parser_option.go
new file mode 100644
index 00000000..6ea6f952
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v4/parser_option.go
@@ -0,0 +1,29 @@
+package jwt
+
+// ParserOption is used to implement functional-style options that modify the behavior of the parser. To add
+// new options, just create a function (ideally beginning with With or Without) that returns an anonymous function that
+// takes a *Parser type as input and manipulates its configuration accordingly.
+type ParserOption func(*Parser)
+
+// WithValidMethods is an option to supply algorithm methods that the parser will check. Only those methods will be considered valid.
+// Using this option is strongly encouraged in order to prevent attacks such as https://auth0.com/blog/critical-vulnerabilities-in-json-web-token-libraries/.
+func WithValidMethods(methods []string) ParserOption {
+ return func(p *Parser) {
+ p.ValidMethods = methods
+ }
+}
+
+// WithJSONNumber is an option to configure the underlying JSON parser with UseNumber
+func WithJSONNumber() ParserOption {
+ return func(p *Parser) {
+ p.UseJSONNumber = true
+ }
+}
+
+// WithoutClaimsValidation is an option to disable claims validation. This option should only be used if you know exactly
+// what you are doing.
+func WithoutClaimsValidation() ParserOption {
+ return func(p *Parser) {
+ p.SkipClaimsValidation = true
+ }
+}
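
A hedged sketch of these functional options in use, assuming the v4 import path; the token string and HMAC key are placeholders:

```go
package main

import (
	"fmt"

	"github.com/golang-jwt/jwt/v4"
)

func main() {
	// Restrict the parser to HMAC-SHA256 and decode numbers as json.Number.
	parser := jwt.NewParser(
		jwt.WithValidMethods([]string{"HS256"}),
		jwt.WithJSONNumber(),
	)

	keyFunc := func(t *jwt.Token) (interface{}, error) {
		return []byte("placeholder-secret"), nil // hypothetical HMAC key
	}

	token, err := parser.Parse("some.jwt.token", keyFunc) // placeholder token string
	fmt.Println(token, err)
}
```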
diff --git a/vendor/github.com/golang-jwt/jwt/rsa.go b/vendor/github.com/golang-jwt/jwt/v4/rsa.go
similarity index 92%
rename from vendor/github.com/golang-jwt/jwt/rsa.go
rename to vendor/github.com/golang-jwt/jwt/v4/rsa.go
index e4caf1ca..b910b19c 100644
--- a/vendor/github.com/golang-jwt/jwt/rsa.go
+++ b/vendor/github.com/golang-jwt/jwt/v4/rsa.go
@@ -6,7 +6,7 @@ import (
"crypto/rsa"
)
-// Implements the RSA family of signing methods signing methods
+// SigningMethodRSA implements the RSA family of signing methods.
// Expects *rsa.PrivateKey for signing and *rsa.PublicKey for validation
type SigningMethodRSA struct {
Name string
@@ -44,7 +44,7 @@ func (m *SigningMethodRSA) Alg() string {
return m.Name
}
-// Implements the Verify method from SigningMethod
+// Verify implements token verification for the SigningMethod
// For this signing method, must be an *rsa.PublicKey structure.
func (m *SigningMethodRSA) Verify(signingString, signature string, key interface{}) error {
var err error
@@ -73,7 +73,7 @@ func (m *SigningMethodRSA) Verify(signingString, signature string, key interface
return rsa.VerifyPKCS1v15(rsaKey, m.Hash, hasher.Sum(nil), sig)
}
-// Implements the Sign method from SigningMethod
+// Sign implements token signing for the SigningMethod
// For this signing method, must be an *rsa.PrivateKey structure.
func (m *SigningMethodRSA) Sign(signingString string, key interface{}) (string, error) {
var rsaKey *rsa.PrivateKey
diff --git a/vendor/github.com/golang-jwt/jwt/rsa_pss.go b/vendor/github.com/golang-jwt/jwt/v4/rsa_pss.go
similarity index 93%
rename from vendor/github.com/golang-jwt/jwt/rsa_pss.go
rename to vendor/github.com/golang-jwt/jwt/v4/rsa_pss.go
index c0147086..4fd6f9e6 100644
--- a/vendor/github.com/golang-jwt/jwt/rsa_pss.go
+++ b/vendor/github.com/golang-jwt/jwt/v4/rsa_pss.go
@@ -1,3 +1,4 @@
+//go:build go1.4
// +build go1.4
package jwt
@@ -8,7 +9,7 @@ import (
"crypto/rsa"
)
-// Implements the RSAPSS family of signing methods signing methods
+// SigningMethodRSAPSS implements the RSAPSS family of signing methods.
type SigningMethodRSAPSS struct {
*SigningMethodRSA
Options *rsa.PSSOptions
@@ -79,7 +80,7 @@ func init() {
})
}
-// Implements the Verify method from SigningMethod
+// Verify implements token verification for the SigningMethod.
// For this verify method, key must be an rsa.PublicKey struct
func (m *SigningMethodRSAPSS) Verify(signingString, signature string, key interface{}) error {
var err error
@@ -113,7 +114,7 @@ func (m *SigningMethodRSAPSS) Verify(signingString, signature string, key interf
return rsa.VerifyPSS(rsaKey, m.Hash, hasher.Sum(nil), sig, opts)
}
-// Implements the Sign method from SigningMethod
+// Sign implements token signing for the SigningMethod.
// For this signing method, key must be an rsa.PrivateKey struct
func (m *SigningMethodRSAPSS) Sign(signingString string, key interface{}) (string, error) {
var rsaKey *rsa.PrivateKey
diff --git a/vendor/github.com/golang-jwt/jwt/rsa_utils.go b/vendor/github.com/golang-jwt/jwt/v4/rsa_utils.go
similarity index 72%
rename from vendor/github.com/golang-jwt/jwt/rsa_utils.go
rename to vendor/github.com/golang-jwt/jwt/v4/rsa_utils.go
index 14c78c29..1966c450 100644
--- a/vendor/github.com/golang-jwt/jwt/rsa_utils.go
+++ b/vendor/github.com/golang-jwt/jwt/v4/rsa_utils.go
@@ -8,12 +8,12 @@ import (
)
var (
- ErrKeyMustBePEMEncoded = errors.New("Invalid Key: Key must be a PEM encoded PKCS1 or PKCS8 key")
- ErrNotRSAPrivateKey = errors.New("Key is not a valid RSA private key")
- ErrNotRSAPublicKey = errors.New("Key is not a valid RSA public key")
+ ErrKeyMustBePEMEncoded = errors.New("invalid key: Key must be a PEM encoded PKCS1 or PKCS8 key")
+ ErrNotRSAPrivateKey = errors.New("key is not a valid RSA private key")
+ ErrNotRSAPublicKey = errors.New("key is not a valid RSA public key")
)
-// Parse PEM encoded PKCS1 or PKCS8 private key
+// ParseRSAPrivateKeyFromPEM parses a PEM encoded PKCS1 or PKCS8 private key
func ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error) {
var err error
@@ -39,7 +39,11 @@ func ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error) {
return pkey, nil
}
-// Parse PEM encoded PKCS1 or PKCS8 private key protected with password
+// ParseRSAPrivateKeyFromPEMWithPassword parses a PEM encoded PKCS1 or PKCS8 private key protected with password
+//
+// Deprecated: This function is deprecated and should not be used anymore. It uses the deprecated x509.DecryptPEMBlock
+// function, which was deprecated since RFC 1423 is regarded insecure by design. Unfortunately, there is no alternative
+// in the Go standard library for now. See https://github.com/golang/go/issues/8860.
func ParseRSAPrivateKeyFromPEMWithPassword(key []byte, password string) (*rsa.PrivateKey, error) {
var err error
@@ -71,7 +75,7 @@ func ParseRSAPrivateKeyFromPEMWithPassword(key []byte, password string) (*rsa.Pr
return pkey, nil
}
-// Parse PEM encoded PKCS1 or PKCS8 public key
+// ParseRSAPublicKeyFromPEM parses a PEM encoded PKCS1 or PKCS8 public key
func ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error) {
var err error
diff --git a/vendor/github.com/golang-jwt/jwt/signing_method.go b/vendor/github.com/golang-jwt/jwt/v4/signing_method.go
similarity index 66%
rename from vendor/github.com/golang-jwt/jwt/signing_method.go
rename to vendor/github.com/golang-jwt/jwt/v4/signing_method.go
index ed1f212b..241ae9c6 100644
--- a/vendor/github.com/golang-jwt/jwt/signing_method.go
+++ b/vendor/github.com/golang-jwt/jwt/v4/signing_method.go
@@ -7,14 +7,14 @@ import (
var signingMethods = map[string]func() SigningMethod{}
var signingMethodLock = new(sync.RWMutex)
-// Implement SigningMethod to add new methods for signing or verifying tokens.
+// SigningMethod can be used to add new methods for signing or verifying tokens.
type SigningMethod interface {
Verify(signingString, signature string, key interface{}) error // Returns nil if signature is valid
Sign(signingString string, key interface{}) (string, error) // Returns encoded signature or error
Alg() string // returns the alg identifier for this method (example: 'HS256')
}
-// Register the "alg" name and a factory function for signing method.
+// RegisterSigningMethod registers the "alg" name and a factory function for signing method.
// This is typically done during init() in the method's implementation
func RegisterSigningMethod(alg string, f func() SigningMethod) {
signingMethodLock.Lock()
@@ -23,7 +23,7 @@ func RegisterSigningMethod(alg string, f func() SigningMethod) {
signingMethods[alg] = f
}
-// Get a signing method from an "alg" string
+// GetSigningMethod retrieves a signing method from an "alg" string
func GetSigningMethod(alg string) (method SigningMethod) {
signingMethodLock.RLock()
defer signingMethodLock.RUnlock()
@@ -33,3 +33,14 @@ func GetSigningMethod(alg string) (method SigningMethod) {
}
return
}
+
+// GetAlgorithms returns a list of registered "alg" names
+func GetAlgorithms() (algs []string) {
+ signingMethodLock.RLock()
+ defer signingMethodLock.RUnlock()
+
+ for alg := range signingMethods {
+ algs = append(algs, alg)
+ }
+ return
+}
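
A small usage sketch for the new `GetAlgorithms` helper, assuming the v4 import path. Note the returned list includes "none" (registered by the none signing method), so it is usually too permissive to feed directly into `WithValidMethods`; prefer an explicit allow-list such as `{"HS256"}`:

```go
package main

import (
	"fmt"

	"github.com/golang-jwt/jwt/v4"
)

func main() {
	// Every registered "alg" value, e.g. for logging supported algorithms at startup.
	fmt.Println(jwt.GetAlgorithms())
}
```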
diff --git a/vendor/github.com/golang-jwt/jwt/v4/staticcheck.conf b/vendor/github.com/golang-jwt/jwt/v4/staticcheck.conf
new file mode 100644
index 00000000..53745d51
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v4/staticcheck.conf
@@ -0,0 +1 @@
+checks = ["all", "-ST1000", "-ST1003", "-ST1016", "-ST1023"]
diff --git a/vendor/github.com/golang-jwt/jwt/v4/token.go b/vendor/github.com/golang-jwt/jwt/v4/token.go
new file mode 100644
index 00000000..786b275c
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v4/token.go
@@ -0,0 +1,143 @@
+package jwt
+
+import (
+ "encoding/base64"
+ "encoding/json"
+ "strings"
+ "time"
+)
+
+// DecodePaddingAllowed will switch the codec used for decoding JWTs to accept padded Base64url input.
+// Note that the JWS RFC 7515 states that tokens must use Base64url encoding with no padding. Unfortunately,
+// some implementations of JWT produce such non-standard tokens, which therefore require support for decoding.
+// Note that this is a global variable; updating it will change the behavior on a package level, and it is
+// also NOT go-routine safe. To use the non-recommended decoding, set this boolean to `true` prior to using
+// this package.
+var DecodePaddingAllowed bool
+
+// DecodeStrict will switch the codec used for decoding JWTs into strict mode.
+// In this mode, the decoder requires that trailing padding bits are zero, as described in RFC 4648 section 3.5.
+// Note that this is a global variable, and updating it will change the behavior on a package level, and is also NOT go-routine safe.
+// To use strict decoding, set this boolean to `true` prior to using this package.
+var DecodeStrict bool
+
+// TimeFunc provides the current time when parsing token to validate "exp" claim (expiration time).
+// You can override it to use another time value. This is useful for testing or if your
+// server uses a different time zone than your tokens.
+var TimeFunc = time.Now
+
+// Keyfunc will be used by the Parse methods as a callback function to supply
+// the key for verification. The function receives the parsed,
+// but unverified Token. This allows you to use properties in the
+// Header of the token (such as `kid`) to identify which key to use.
+type Keyfunc func(*Token) (interface{}, error)
+
+// Token represents a JWT Token. Different fields will be used depending on whether you're
+// creating or parsing/verifying a token.
+type Token struct {
+ Raw string // The raw token. Populated when you Parse a token
+ Method SigningMethod // The signing method used or to be used
+ Header map[string]interface{} // The first segment of the token
+ Claims Claims // The second segment of the token
+ Signature string // The third segment of the token. Populated when you Parse a token
+ Valid bool // Is the token valid? Populated when you Parse/Verify a token
+}
+
+// New creates a new Token with the specified signing method and an empty map of claims.
+func New(method SigningMethod) *Token {
+ return NewWithClaims(method, MapClaims{})
+}
+
+// NewWithClaims creates a new Token with the specified signing method and claims.
+func NewWithClaims(method SigningMethod, claims Claims) *Token {
+ return &Token{
+ Header: map[string]interface{}{
+ "typ": "JWT",
+ "alg": method.Alg(),
+ },
+ Claims: claims,
+ Method: method,
+ }
+}
+
+// SignedString creates and returns a complete, signed JWT.
+// The token is signed using the SigningMethod specified in the token.
+func (t *Token) SignedString(key interface{}) (string, error) {
+ var sig, sstr string
+ var err error
+ if sstr, err = t.SigningString(); err != nil {
+ return "", err
+ }
+ if sig, err = t.Method.Sign(sstr, key); err != nil {
+ return "", err
+ }
+ return strings.Join([]string{sstr, sig}, "."), nil
+}
+
+// SigningString generates the signing string. This is the
+// most expensive part of the whole deal. Unless you
+// need this for something special, just go straight for
+// the SignedString.
+func (t *Token) SigningString() (string, error) {
+ var err error
+ var jsonValue []byte
+
+ if jsonValue, err = json.Marshal(t.Header); err != nil {
+ return "", err
+ }
+ header := EncodeSegment(jsonValue)
+
+ if jsonValue, err = json.Marshal(t.Claims); err != nil {
+ return "", err
+ }
+ claim := EncodeSegment(jsonValue)
+
+ return strings.Join([]string{header, claim}, "."), nil
+}
+
+// Parse parses, validates, verifies the signature and returns the parsed token.
+// keyFunc will receive the parsed token and should return the cryptographic key
+// for verifying the signature.
+// The caller is strongly encouraged to set the WithValidMethods option to
+// validate the 'alg' claim in the token matches the expected algorithm.
+// For more details about the importance of validating the 'alg' claim,
+// see https://auth0.com/blog/critical-vulnerabilities-in-json-web-token-libraries/
+func Parse(tokenString string, keyFunc Keyfunc, options ...ParserOption) (*Token, error) {
+ return NewParser(options...).Parse(tokenString, keyFunc)
+}
+
+// ParseWithClaims is a shortcut for NewParser().ParseWithClaims().
+//
+// Note: If you provide a custom claim implementation that embeds one of the standard claims (such as RegisteredClaims),
+// make sure that a) you either embed a non-pointer version of the claims or b) if you are using a pointer, allocate the
+// proper memory for it before passing in the overall claims, otherwise you might run into a panic.
+func ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc, options ...ParserOption) (*Token, error) {
+ return NewParser(options...).ParseWithClaims(tokenString, claims, keyFunc)
+}
+
+// EncodeSegment encodes a JWT specific base64url encoding with padding stripped
+//
+// Deprecated: In a future release, we will demote this function to a non-exported function, since it
+// should only be used internally
+func EncodeSegment(seg []byte) string {
+ return base64.RawURLEncoding.EncodeToString(seg)
+}
+
+// DecodeSegment decodes a JWT specific base64url encoding with padding stripped
+//
+// Deprecated: In a future release, we will demote this function to a non-exported function, since it
+// should only be used internally
+func DecodeSegment(seg string) ([]byte, error) {
+ encoding := base64.RawURLEncoding
+
+ if DecodePaddingAllowed {
+ if l := len(seg) % 4; l > 0 {
+ seg += strings.Repeat("=", 4-l)
+ }
+ encoding = base64.URLEncoding
+ }
+
+ if DecodeStrict {
+ encoding = encoding.Strict()
+ }
+ return encoding.DecodeString(seg)
+}
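
To see how `NewWithClaims`, `SigningString`, and `SignedString` fit together, a minimal sketch assuming the v4 import path; the subject and secret are placeholders:

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/golang-jwt/jwt/v4"
)

func main() {
	token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{
		"sub": "1234567890",                                  // placeholder subject
		"exp": jwt.NewNumericDate(time.Now().Add(time.Hour)), // serialized via NumericDate
	})

	// SignedString calls SigningString and then Method.Sign under the hood.
	signed, err := token.SignedString([]byte("placeholder-secret")) // hypothetical key
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(signed)
}
```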
diff --git a/vendor/github.com/golang-jwt/jwt/v4/types.go b/vendor/github.com/golang-jwt/jwt/v4/types.go
new file mode 100644
index 00000000..ac8e140e
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v4/types.go
@@ -0,0 +1,145 @@
+package jwt
+
+import (
+ "encoding/json"
+ "fmt"
+ "math"
+ "reflect"
+ "strconv"
+ "time"
+)
+
+// TimePrecision sets the precision of times and dates within this library.
+// This has an influence on the precision of times when comparing expiry or
+// other related time fields. Furthermore, it is also the precision of times
+// when serializing.
+//
+// For backwards compatibility the default precision is set to seconds, so that
+// no fractional timestamps are generated.
+var TimePrecision = time.Second
+
+// MarshalSingleStringAsArray modifies the behaviour of the ClaimStrings type, especially
+// its MarshalJSON function.
+//
+// If it is set to true (the default), it will always serialize the type as an
+// array of strings, even if it just contains one element, defaulting to the behaviour
+// of the underlying []string. If it is set to false, it will serialize to a single
+// string if it contains one element. Otherwise, it will serialize to an array of strings.
+var MarshalSingleStringAsArray = true
+
+// NumericDate represents a JSON numeric date value, as referenced at
+// https://datatracker.ietf.org/doc/html/rfc7519#section-2.
+type NumericDate struct {
+ time.Time
+}
+
+// NewNumericDate constructs a new *NumericDate from a standard library time.Time struct.
+// It will truncate the timestamp according to the precision specified in TimePrecision.
+func NewNumericDate(t time.Time) *NumericDate {
+ return &NumericDate{t.Truncate(TimePrecision)}
+}
+
+// newNumericDateFromSeconds creates a new *NumericDate out of a float64 representing a
+// UNIX epoch with the float fraction representing non-integer seconds.
+func newNumericDateFromSeconds(f float64) *NumericDate {
+ round, frac := math.Modf(f)
+ return NewNumericDate(time.Unix(int64(round), int64(frac*1e9)))
+}
+
+// MarshalJSON is an implementation of the json.Marshaler interface and serializes the UNIX epoch
+// represented in NumericDate to a byte array, using the precision specified in TimePrecision.
+func (date NumericDate) MarshalJSON() (b []byte, err error) {
+ var prec int
+ if TimePrecision < time.Second {
+ prec = int(math.Log10(float64(time.Second) / float64(TimePrecision)))
+ }
+ truncatedDate := date.Truncate(TimePrecision)
+
+ // For very large timestamps, UnixNano would overflow an int64, but this
+ // function requires nanosecond level precision, so we have to use the
+ // following technique to get around the issue:
+ // 1. Take the normal unix timestamp to form the whole number part of the
+ // output,
+ // 2. Take the result of the Nanosecond function, which returns the offset
+ // within the second of the particular unix time instance, to form the
+ // decimal part of the output
+ // 3. Concatenate them to produce the final result
+ seconds := strconv.FormatInt(truncatedDate.Unix(), 10)
+ nanosecondsOffset := strconv.FormatFloat(float64(truncatedDate.Nanosecond())/float64(time.Second), 'f', prec, 64)
+
+ output := append([]byte(seconds), []byte(nanosecondsOffset)[1:]...)
+
+ return output, nil
+}
+
+// UnmarshalJSON is an implementation of the json.Unmarshaler interface and deserializes a
+// NumericDate from a JSON representation, i.e. a json.Number. This number represents a UNIX epoch
+// with either integer or non-integer seconds.
+func (date *NumericDate) UnmarshalJSON(b []byte) (err error) {
+ var (
+ number json.Number
+ f float64
+ )
+
+ if err = json.Unmarshal(b, &number); err != nil {
+ return fmt.Errorf("could not parse NumericDate: %w", err)
+ }
+
+ if f, err = number.Float64(); err != nil {
+ return fmt.Errorf("could not convert json number value to float: %w", err)
+ }
+
+ n := newNumericDateFromSeconds(f)
+ *date = *n
+
+ return nil
+}
+
+// ClaimStrings is basically just a slice of strings, but it can be either serialized from a string array or just a string.
+// This type is necessary, since the "aud" claim can either be a single string or an array.
+type ClaimStrings []string
+
+func (s *ClaimStrings) UnmarshalJSON(data []byte) (err error) {
+ var value interface{}
+
+ if err = json.Unmarshal(data, &value); err != nil {
+ return err
+ }
+
+ var aud []string
+
+ switch v := value.(type) {
+ case string:
+ aud = append(aud, v)
+ case []string:
+ aud = ClaimStrings(v)
+ case []interface{}:
+ for _, vv := range v {
+ vs, ok := vv.(string)
+ if !ok {
+ return &json.UnsupportedTypeError{Type: reflect.TypeOf(vv)}
+ }
+ aud = append(aud, vs)
+ }
+ case nil:
+ return nil
+ default:
+ return &json.UnsupportedTypeError{Type: reflect.TypeOf(v)}
+ }
+
+ *s = aud
+
+ return
+}
+
+func (s ClaimStrings) MarshalJSON() (b []byte, err error) {
+ // This handles a special case in the JWT RFC. If the string array, e.g. used by the "aud" field,
+ // only contains one element, it MAY be serialized as a single string. This may or may not be
+ // desired based on the ecosystem of other JWT libraries used, so we make it configurable by the
+ // variable MarshalSingleStringAsArray.
+ if len(s) == 1 && !MarshalSingleStringAsArray {
+ return json.Marshal(s[0])
+ }
+
+ return json.Marshal([]string(s))
+}
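
A short sketch of how `TimePrecision` interacts with `NumericDate` serialization, assuming the v4 import path; with millisecond precision, `prec` above evaluates to 3:

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"

	"github.com/golang-jwt/jwt/v4"
)

func main() {
	// With millisecond precision, three fractional digits are emitted.
	jwt.TimePrecision = time.Millisecond
	defer func() { jwt.TimePrecision = time.Second }() // restore the default

	d := jwt.NewNumericDate(time.Date(2023, 1, 1, 0, 0, 0, 123456789, time.UTC))
	b, _ := json.Marshal(d)
	fmt.Println(string(b)) // 1672531200.123
}
```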
diff --git a/vendor/github.com/golang-jwt/jwt/v5/.gitignore b/vendor/github.com/golang-jwt/jwt/v5/.gitignore
new file mode 100644
index 00000000..09573e01
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/.gitignore
@@ -0,0 +1,4 @@
+.DS_Store
+bin
+.idea/
+
diff --git a/vendor/github.com/golang-jwt/jwt/v5/LICENSE b/vendor/github.com/golang-jwt/jwt/v5/LICENSE
new file mode 100644
index 00000000..35dbc252
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/LICENSE
@@ -0,0 +1,9 @@
+Copyright (c) 2012 Dave Grijalva
+Copyright (c) 2021 golang-jwt maintainers
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
diff --git a/vendor/github.com/golang-jwt/jwt/v5/MIGRATION_GUIDE.md b/vendor/github.com/golang-jwt/jwt/v5/MIGRATION_GUIDE.md
new file mode 100644
index 00000000..b3178e75
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/MIGRATION_GUIDE.md
@@ -0,0 +1,195 @@
+# Migration Guide (v5.0.0)
+
+Version `v5` contains a major rework of core functionalities in the `jwt-go`
+library. This includes support for several validation options as well as a
+re-design of the `Claims` interface. Lastly, we reworked how errors work under
+the hood, which should provide a better overall developer experience.
+
+Starting from [v5.0.0](https://github.com/golang-jwt/jwt/releases/tag/v5.0.0),
+the import path will be:
+
+ "github.com/golang-jwt/jwt/v5"
+
+For most users, changing the import path *should* suffice. However, since we
+intentionally changed and cleaned some of the public API, existing programs
+might need to be updated. The following sections describe significant changes
+and corresponding updates for existing programs.
+
+## Parsing and Validation Options
+
+Under the hood, a new `Validator` struct takes care of validating the claims. A
+long-awaited feature has been the option to fine-tune the validation of tokens.
+This is now possible with several `ParserOption` functions that can be appended
+to most `Parse` functions, such as `ParseWithClaims`. The most important options
+and changes are listed below; a usage sketch follows the list:
+ * Added `WithLeeway` to support specifying the leeway that is allowed when
+ validating time-based claims, such as `exp` or `nbf`.
+ * Changed default behavior to not check the `iat` claim. Usage of this claim
+ is OPTIONAL according to the JWT RFC. The claim itself is also purely
+ informational according to the RFC, so a strict validation failure is not
+ recommended. If you want to check for sensible values in these claims,
+ please use the `WithIssuedAt` parser option.
+ * Added `WithAudience`, `WithSubject` and `WithIssuer` to support checking for
+ expected `aud`, `sub` and `iss`.
+ * Added `WithStrictDecoding` and `WithPaddingAllowed` options to allow
+ previously global settings to enable base64 strict encoding and the parsing
+ of base64 strings with padding. The latter is strictly speaking against the
+ standard, but unfortunately some of the major identity providers issue some
+ of these incorrect tokens. Both options are disabled by default.
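
A usage sketch combining several of these options, assuming v5's variadic `ParseWithClaims` signature; `tokenString`, `keyFunc`, and the issuer/audience values are placeholders:

```go
token, err := jwt.ParseWithClaims(tokenString, &jwt.RegisteredClaims{}, keyFunc,
	jwt.WithValidMethods([]string{"HS256"}),
	jwt.WithLeeway(30*time.Second), // tolerate small clock skew on exp/nbf
	jwt.WithIssuer("example-issuer"),
	jwt.WithAudience("example-audience"),
	jwt.WithIssuedAt(), // opt back in to iat checking
)
```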
+
+## Changes to the `Claims` interface
+
+### Complete Restructuring
+
+Previously, the claims interface was satisfied with an implementation of a
+`Valid() error` function. This had several issues:
+ * The different claim types (struct claims, map claims, etc.) then contained
+ similar (but not 100% identical) code for how this validation was done. This
+ led to a lot of (almost) duplicate code and was hard to maintain.
+ * It was not really semantically close to what a "claim" (or a set of claims)
+ really is, which is a list of defined key/value pairs with a certain
+ semantic meaning.
+
+Since all the validation functionality is now extracted into the validator, all
+`VerifyXXX` and `Valid` functions have been removed from the `Claims` interface.
+Instead, the interface now represents a list of getters to retrieve values with
+a specific meaning. This allows us to completely decouple the validation logic
+with the underlying storage representation of the claim, which could be a
+struct, a map or even something stored in a database.
+
+```go
+type Claims interface {
+ GetExpirationTime() (*NumericDate, error)
+ GetIssuedAt() (*NumericDate, error)
+ GetNotBefore() (*NumericDate, error)
+ GetIssuer() (string, error)
+ GetSubject() (string, error)
+ GetAudience() (ClaimStrings, error)
+}
+```
+
+Users that previously directly called the `Valid` function on their claims,
+e.g., to perform validation independently of parsing/verifying a token, can now
+use the `jwt.NewValidator` function to create a `Validator` independently of the
+`Parser`.
+
+```go
+var v = jwt.NewValidator(jwt.WithLeeway(5*time.Second))
+v.Validate(myClaims)
+```
+
+### Supported Claim Types and Removal of `StandardClaims`
+
+The two standard claim types supported by this library, `MapClaims` and
+`RegisteredClaims`, both implement the necessary functions of this interface. The
+old `StandardClaims` struct, which was already deprecated in `v4`, is now
+removed.
+
+Users using custom claims, in most cases, will not experience any changes in the
+behavior as long as they embedded `RegisteredClaims`. If they created a new
+claim type from scratch, they now need to implement the proper getter
+functions.
+
+### Migrating Application Specific Logic of the old `Valid`
+
+Previously, users could override the `Valid` method in a custom claim, for
+example to extend the validation with application-specific claims. However, this
+was always very dangerous, since one could easily disable the standard
+validation and signature checking.
+
+In order to avoid that, while still supporting the use-case, a new
+`ClaimsValidator` interface has been introduced. This interface consists of the
+`Validate() error` function. If the validator sees that a `Claims` struct
+implements this interface, the errors returned by the `Validate` function will
+be *appended* to the regular standard validation. It is not possible to disable
+the standard validation anymore (even only by accident).
+
+Usage examples can be found in [example_test.go](./example_test.go), which show
+how to build claims structs like the following.
+
+```go
+// MyCustomClaims includes all registered claims, plus Foo.
+type MyCustomClaims struct {
+ Foo string `json:"foo"`
+ jwt.RegisteredClaims
+}
+
+// Validate can be used to execute additional application-specific claims
+// validation.
+func (m MyCustomClaims) Validate() error {
+ if m.Foo != "bar" {
+ return errors.New("must be foobar")
+ }
+
+ return nil
+}
+```
+
+## Changes to the `Token` and `Parser` struct
+
+The previously global functions `DecodeSegment` and `EncodeSegment` were moved
+to the `Parser` and `Token` struct respectively. This will allow us in the
+future to configure the behavior of these two based on options supplied on the
+parser or the token (creation). This also removes two previously global
+variables and moves them to parser options `WithStrictDecoding` and
+`WithPaddingAllowed`.
+
+In order to do that, we had to adjust the way signing methods work. Previously
+they were given a base64 encoded signature in `Verify` and were expected to
+return a base64 encoded version of the signature in `Sign`, both as a `string`.
+However, this made it necessary to have `DecodeSegment` and `EncodeSegment`
+global and was a less than perfect design because we were repeating
+encoding/decoding steps for all signing methods. Now, `Sign` and `Verify`
+operate on a decoded signature as a `[]byte`, which feels more natural for a
+cryptographic operation anyway. Lastly, `Parse` and `SignedString` take care of
+the final encoding/decoding part.
+
+In addition to that, we also changed the `Signature` field on `Token` from a
+`string` to `[]byte` and this is also now populated with the decoded form. This
+is also more consistent, because the other parts of the JWT, mainly `Header` and
+`Claims` were already stored in decoded form in `Token`. Only the signature was
+stored in base64 encoded form, which was redundant with the information in the
+`Raw` field, which contains the complete token as base64.
+
+```go
+type Token struct {
+ Raw string // Raw contains the raw token
+ Method SigningMethod // Method is the signing method used or to be used
+ Header map[string]any // Header is the first segment of the token in decoded form
+ Claims Claims // Claims is the second segment of the token in decoded form
+ Signature []byte // Signature is the third segment of the token in decoded form
+ Valid bool // Valid specifies if the token is valid
+}
+```
+
+Most (if not all) of these changes should not impact the normal usage of this
+library. Only users directly accessing the `Signature` field as well as
+developers of custom signing methods should be affected.
+
+# Migration Guide (v4.0.0)
+
+Starting from [v4.0.0](https://github.com/golang-jwt/jwt/releases/tag/v4.0.0),
+the import path will be:
+
+ "github.com/golang-jwt/jwt/v4"
+
+The `/v4` version will be backwards compatible with existing `v3.x.y` tags in
+this repo, as well as `github.com/dgrijalva/jwt-go`. For most users this should
+be a drop-in replacement, if you're having troubles migrating, please open an
+issue.
+
+You can replace all occurrences of `github.com/dgrijalva/jwt-go` or
+`github.com/golang-jwt/jwt` with `github.com/golang-jwt/jwt/v4`, either manually
+or by using tools such as `sed` or `gofmt`.
+
+And then you'd typically run:
+
+```
+go get github.com/golang-jwt/jwt/v4
+go mod tidy
+```
+
+# Older releases (before v3.2.0)
+
+The original migration guide for older releases can be found at
+https://github.com/dgrijalva/jwt-go/blob/master/MIGRATION_GUIDE.md.
diff --git a/vendor/github.com/golang-jwt/jwt/v5/README.md b/vendor/github.com/golang-jwt/jwt/v5/README.md
new file mode 100644
index 00000000..0bb636f2
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/README.md
@@ -0,0 +1,167 @@
+# jwt-go
+
+[](https://github.com/golang-jwt/jwt/actions/workflows/build.yml)
+[](https://pkg.go.dev/github.com/golang-jwt/jwt/v5)
+[](https://coveralls.io/github/golang-jwt/jwt?branch=main)
+
+A [go](http://www.golang.org) (or 'golang' for search engine friendliness)
+implementation of [JSON Web
+Tokens](https://datatracker.ietf.org/doc/html/rfc7519).
+
+Starting with [v4.0.0](https://github.com/golang-jwt/jwt/releases/tag/v4.0.0)
+this project adds Go module support, but maintains backward compatibility with
+older `v3.x.y` tags and upstream `github.com/dgrijalva/jwt-go`. See the
+[`MIGRATION_GUIDE.md`](./MIGRATION_GUIDE.md) for more information. Version
+v5.0.0 introduces major improvements to the validation of tokens, but is not
+entirely backward compatible.
+
+> After the original author of the library suggested migrating the maintenance
+> of `jwt-go`, a dedicated team of open source maintainers decided to clone the
+> existing library into this repository. See
+> [dgrijalva/jwt-go#462](https://github.com/dgrijalva/jwt-go/issues/462) for a
+> detailed discussion on this topic.
+
+
+**SECURITY NOTICE:** Some older versions of Go have a security issue in the
+crypto/elliptic package. The recommendation is to upgrade to at least 1.15. See issue
+[dgrijalva/jwt-go#216](https://github.com/dgrijalva/jwt-go/issues/216) for more
+detail.
+
+**SECURITY NOTICE:** It's important that you [validate the `alg` presented is
+what you
+expect](https://auth0.com/blog/critical-vulnerabilities-in-json-web-token-libraries/).
+This library attempts to make it easy to do the right thing by requiring key
+types to match the expected alg, but you should take the extra step to verify it in
+your usage. See the examples provided.
+
+### Supported Go versions
+
+Our support of Go versions is aligned with Go's [version release
+policy](https://golang.org/doc/devel/release#policy). So we will support a major
+version of Go until there are two newer major releases. We no longer support
+building jwt-go with unsupported Go versions, as these contain security
+vulnerabilities that will not be fixed.
+
+## What the heck is a JWT?
+
+JWT.io has [a great introduction](https://jwt.io/introduction) to JSON Web
+Tokens.
+
+In short, it's a signed JSON object that does something useful (for example,
+authentication). It's commonly used for `Bearer` tokens in OAuth 2. A token is
+made of three parts, separated by `.`'s. The first two parts are JSON objects,
+that have been [base64url](https://datatracker.ietf.org/doc/html/rfc4648)
+encoded. The last part is the signature, encoded the same way.
+
+The first part is called the header. It contains the necessary information for
+verifying the last part, the signature. For example, which encryption method
+was used for signing and what key was used.
+
+The part in the middle is the interesting bit. It's called the Claims and
+contains the actual stuff you care about. Refer to [RFC
+7519](https://datatracker.ietf.org/doc/html/rfc7519) for information about
+reserved keys and the proper way to add your own.
+
+## What's in the box?
+
+This library supports the parsing and verification as well as the generation and
+signing of JWTs. Current supported signing algorithms are HMAC SHA, RSA,
+RSA-PSS, and ECDSA, though hooks are present for adding your own.
+
+## Installation Guidelines
+
+1. To install the jwt package, you first need to have
+ [Go](https://go.dev/doc/install) installed, then you can use the command
+ below to add `jwt-go` as a dependency in your Go program.
+
+```sh
+go get -u github.com/golang-jwt/jwt/v5
+```
+
+2. Import it in your code:
+
+```go
+import "github.com/golang-jwt/jwt/v5"
+```
+
+## Usage
+
+A detailed usage guide, including how to sign and verify tokens, can be found on
+our [documentation website](https://golang-jwt.github.io/jwt/usage/create/).
+
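As a quick orientation before diving into the guide, a hedged sketch of parsing and validating an HMAC-signed token; `tokenString` and `hmacSecret` are placeholders:

```go
token, err := jwt.Parse(tokenString, func(t *jwt.Token) (interface{}, error) {
	return hmacSecret, nil // hmacSecret is a placeholder []byte key
}, jwt.WithValidMethods([]string{"HS256"}))
if err != nil {
	log.Fatal(err)
}

if claims, ok := token.Claims.(jwt.MapClaims); ok {
	fmt.Println(claims["sub"])
}
```
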
+## Examples
+
+See [the project documentation](https://pkg.go.dev/github.com/golang-jwt/jwt/v5)
+for examples of usage:
+
+* [Simple example of parsing and validating a
+ token](https://pkg.go.dev/github.com/golang-jwt/jwt/v5#example-Parse-Hmac)
+* [Simple example of building and signing a
+ token](https://pkg.go.dev/github.com/golang-jwt/jwt/v5#example-New-Hmac)
+* [Directory of
+ Examples](https://pkg.go.dev/github.com/golang-jwt/jwt/v5#pkg-examples)
+
+## Compliance
+
+This library was last reviewed to comply with [RFC
+7519](https://datatracker.ietf.org/doc/html/rfc7519) dated May 2015 with a few
+notable differences:
+
+* In order to protect against accidental use of [Unsecured
+ JWTs](https://datatracker.ietf.org/doc/html/rfc7519#section-6), tokens using
+ `alg=none` will only be accepted if the constant
+ `jwt.UnsafeAllowNoneSignatureType` is provided as the key.
+
+## Project Status & Versioning
+
+This library is considered production ready. Feedback and feature requests are
+appreciated. The API should be considered stable. There should be very few
+backward-incompatible changes outside of major version updates (and only with
+good reason).
+
+This project uses [Semantic Versioning 2.0.0](http://semver.org). Accepted pull
+requests will land on `main`. Periodically, versions will be tagged from
+`main`. You can find all the releases on [the project releases
+page](https://github.com/golang-jwt/jwt/releases).
+
+**BREAKING CHANGES:** A full list of breaking changes is available in
+`VERSION_HISTORY.md`. See [`MIGRATION_GUIDE.md`](./MIGRATION_GUIDE.md) for more information on updating
+your code.
+
+## Extensions
+
+This library publishes all the necessary components for adding your own signing
+methods or key functions. Simply implement the `SigningMethod` interface and
+register a factory method using `RegisterSigningMethod` or provide a
+`jwt.Keyfunc`.
+
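As a rough sketch of such an extension, the skeleton below wires a hypothetical external signer (e.g. a KMS client) into the v5 `SigningMethod` interface; the `EXT256` name and the key shapes are assumptions, not a real provider integration:

```go
package extsign // hypothetical package

import "github.com/golang-jwt/jwt/v5"

// SigningMethodExternal delegates signing to an external signer, such as a KMS client.
type SigningMethodExternal struct {
	alg string
}

func (m *SigningMethodExternal) Alg() string { return m.alg }

// Sign expects a key implementing a hypothetical Sign([]byte) ([]byte, error) method.
func (m *SigningMethodExternal) Sign(signingString string, key interface{}) ([]byte, error) {
	signer, ok := key.(interface{ Sign([]byte) ([]byte, error) })
	if !ok {
		return nil, jwt.ErrInvalidKeyType
	}
	return signer.Sign([]byte(signingString))
}

// Verify expects a key implementing a hypothetical Verify(msg, sig []byte) error method.
func (m *SigningMethodExternal) Verify(signingString string, sig []byte, key interface{}) error {
	verifier, ok := key.(interface{ Verify(msg, sig []byte) error })
	if !ok {
		return jwt.ErrInvalidKeyType
	}
	return verifier.Verify([]byte(signingString), sig)
}

func init() {
	jwt.RegisterSigningMethod("EXT256", func() jwt.SigningMethod {
		return &SigningMethodExternal{alg: "EXT256"}
	})
}
```
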
+A common use case would be integrating with different 3rd party signature
+providers, like key management services from various cloud providers or Hardware
+Security Modules (HSMs) or to implement additional standards.
+
+| Extension | Purpose | Repo |
+| --------- | -------------------------------------------------------------------------------------------------------- | ------------------------------------------ |
+| GCP | Integrates with multiple Google Cloud Platform signing tools (AppEngine, IAM API, Cloud KMS) | https://github.com/someone1/gcp-jwt-go |
+| AWS | Integrates with AWS Key Management Service, KMS | https://github.com/matelang/jwt-go-aws-kms |
+| JWKS | Provides support for JWKS ([RFC 7517](https://datatracker.ietf.org/doc/html/rfc7517)) as a `jwt.Keyfunc` | https://github.com/MicahParks/keyfunc |
+
+*Disclaimer*: Unless otherwise specified, these integrations are maintained by
+third parties and should not be considered a primary offering by any of the
+mentioned cloud providers.
+
+## More
+
+Go package documentation can be found [on
+pkg.go.dev](https://pkg.go.dev/github.com/golang-jwt/jwt/v5). Additional
+documentation can be found on [our project
+page](https://golang-jwt.github.io/jwt/).
+
+The command line utility included in this project (cmd/jwt) provides a
+straightforward example of token creation and parsing as well as a useful tool
+for debugging your own integration. You'll also find several implementation
+examples in the documentation.
+
+[golang-jwt](https://github.com/orgs/golang-jwt) incorporates a modified version
+of the JWT logo, which is distributed under the terms of the [MIT
+License](https://github.com/jsonwebtoken/jsonwebtoken.github.io/blob/master/LICENSE.txt).
diff --git a/vendor/github.com/golang-jwt/jwt/v5/SECURITY.md b/vendor/github.com/golang-jwt/jwt/v5/SECURITY.md
new file mode 100644
index 00000000..2740597f
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/SECURITY.md
@@ -0,0 +1,19 @@
+# Security Policy
+
+## Supported Versions
+
+As of November 2024 (and until this document is updated), the latest version `v5` is supported. In critical cases, we might supply back-ported patches for `v4`.
+
+## Reporting a Vulnerability
+
+If you think you found a vulnerability, and even if you are not sure, please report it via a [GitHub Security Advisory](https://github.com/golang-jwt/jwt/security/advisories/new). Please try to be explicit and describe steps to reproduce the security issue with code example(s).
+
+You will receive a response within a timely manner. If the issue is confirmed, we will do our best to release a patch as soon as possible given the complexity of the problem.
+
+## Public Discussions
+
+Please avoid publicly discussing a potential security vulnerability.
+
+Let's take this offline and find a solution first; this limits the potential impact as much as possible.
+
+We appreciate your help!
diff --git a/vendor/github.com/golang-jwt/jwt/v5/VERSION_HISTORY.md b/vendor/github.com/golang-jwt/jwt/v5/VERSION_HISTORY.md
new file mode 100644
index 00000000..b5039e49
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/VERSION_HISTORY.md
@@ -0,0 +1,137 @@
+# `jwt-go` Version History
+
+The following version history is kept for historic purposes. To retrieve the current changes of each version, please refer to the change-log of the specific release versions on https://github.com/golang-jwt/jwt/releases.
+
+## 4.0.0
+
+* Introduces support for Go modules. The `v4` version will be backwards compatible with `v3.x.y`.
+
+## 3.2.2
+
+* Starting from this release, we are adopting the policy to support the 2 most recent versions of Go currently available. By the time of this release, this is Go 1.15 and 1.16 ([#28](https://github.com/golang-jwt/jwt/pull/28)).
+* Fixed a potential issue that could occur when the verification of `exp`, `iat` or `nbf` was not required and contained invalid contents, i.e. non-numeric/date. Thanks to @thaJeztah for making us aware of that and @giorgos-f3 for originally reporting it to the formtech fork ([#40](https://github.com/golang-jwt/jwt/pull/40)).
+* Added support for EdDSA / ED25519 ([#36](https://github.com/golang-jwt/jwt/pull/36)).
+* Optimized allocations ([#33](https://github.com/golang-jwt/jwt/pull/33)).
+
+## 3.2.1
+
+* **Import Path Change**: See MIGRATION_GUIDE.md for tips on updating your code
+ * Changed the import path from `github.com/dgrijalva/jwt-go` to `github.com/golang-jwt/jwt`
+* Fixed type confusion issue between `string` and `[]string` in `VerifyAudience` ([#12](https://github.com/golang-jwt/jwt/pull/12)). This fixes CVE-2020-26160
+
+#### 3.2.0
+
+* Added method `ParseUnverified` to allow users to split up the tasks of parsing and validation
+* HMAC signing method returns `ErrInvalidKeyType` instead of `ErrInvalidKey` where appropriate
+* Added options to `request.ParseFromRequest`, which allows for an arbitrary list of modifiers to parsing behavior. Initial set include `WithClaims` and `WithParser`. Existing usage of this function will continue to work as before.
+* Deprecated `ParseFromRequestWithClaims` to simplify API in the future.
+
+#### 3.1.0
+
+* Improvements to `jwt` command line tool
+* Added `SkipClaimsValidation` option to `Parser`
+* Documentation updates
+
+#### 3.0.0
+
+* **Compatibility Breaking Changes**: See MIGRATION_GUIDE.md for tips on updating your code
+ * Dropped support for `[]byte` keys when using RSA signing methods. This convenience feature could contribute to security vulnerabilities involving mismatched key types with signing methods.
+ * `ParseFromRequest` has been moved to `request` subpackage and usage has changed
+ * The `Claims` property on `Token` is now type `Claims` instead of `map[string]interface{}`. The default value is type `MapClaims`, which is an alias to `map[string]interface{}`. This makes it possible to use a custom type when decoding claims.
+* Other Additions and Changes
+ * Added `Claims` interface type to allow users to decode the claims into a custom type
+ * Added `ParseWithClaims`, which takes a third argument of type `Claims`. Use this function instead of `Parse` if you have a custom type you'd like to decode into.
+ * Dramatically improved the functionality and flexibility of `ParseFromRequest`, which is now in the `request` subpackage
+ * Added `ParseFromRequestWithClaims` which is the `FromRequest` equivalent of `ParseWithClaims`
+ * Added new interface type `Extractor`, which is used for extracting JWT strings from http requests. Used with `ParseFromRequest` and `ParseFromRequestWithClaims`.
+ * Added several new, more specific, validation errors to error type bitmask
+ * Moved examples from README to executable example files
+ * Signing method registry is now thread safe
+ * Added new property to `ValidationError`, which contains the raw error returned by calls made by parse/verify (such as those returned by keyfunc or json parser)
+
+#### 2.7.0
+
+This will likely be the last backwards compatible release before 3.0.0, excluding essential bug fixes.
+
+* Added new option `-show` to the `jwt` command that will just output the decoded token without verifying
+* Error text for expired tokens includes how long it's been expired
+* Fixed incorrect error returned from `ParseRSAPublicKeyFromPEM`
+* Documentation updates
+
+#### 2.6.0
+
+* Exposed inner error within ValidationError
+* Fixed validation errors when using UseJSONNumber flag
+* Added several unit tests
+
+#### 2.5.0
+
+* Added support for signing method none. You shouldn't use this. The API tries to make this clear.
+* Updated/fixed some documentation
+* Added more helpful error message when trying to parse tokens that begin with `BEARER `
+
+#### 2.4.0
+
+* Added new type, Parser, to allow for configuration of various parsing parameters
+ * You can now specify a list of valid signing methods. Anything outside this set will be rejected.
+ * You can now opt to use the `json.Number` type instead of `float64` when parsing token JSON
+* Added support for [Travis CI](https://travis-ci.org/dgrijalva/jwt-go)
+* Fixed some bugs with ECDSA parsing
+
+#### 2.3.0
+
+* Added support for ECDSA signing methods
+* Added support for RSA PSS signing methods (requires go v1.4)
+
+#### 2.2.0
+
+* Gracefully handle a `nil` `Keyfunc` being passed to `Parse`. Result will now be the parsed token and an error, instead of a panic.
+
+#### 2.1.0
+
+Backwards compatible API change that was missed in 2.0.0.
+
+* The `SignedString` method on `Token` now takes `interface{}` instead of `[]byte`
+
+#### 2.0.0
+
+There were two major reasons for breaking backwards compatibility with this update. The first was a refactor required to expand the width of the RSA and HMAC-SHA signing implementations. There will likely be no required code changes to support this change.
+
+The second update, while unfortunately requiring a small change in integration, is required to open up this library to other signing methods. Not all keys used for all signing methods have a single standard on-disk representation. Requiring `[]byte` as the type for all keys proved too limiting. Additionally, this implementation allows for pre-parsed tokens to be reused, which might matter in an application that parses a high volume of tokens with a small set of keys. Backwards compatibility has been maintained for passing `[]byte` to the RSA signing methods, but they will also accept `*rsa.PublicKey` and `*rsa.PrivateKey`.
+
+It is likely the only integration change required here will be to change `func(t *jwt.Token) ([]byte, error)` to `func(t *jwt.Token) (interface{}, error)` when calling `Parse`.
+
+* **Compatibility Breaking Changes**
+ * `SigningMethodHS256` is now `*SigningMethodHMAC` instead of `type struct`
+ * `SigningMethodRS256` is now `*SigningMethodRSA` instead of `type struct`
+ * `KeyFunc` now returns `interface{}` instead of `[]byte`
+ * `SigningMethod.Sign` now takes `interface{}` instead of `[]byte` for the key
+ * `SigningMethod.Verify` now takes `interface{}` instead of `[]byte` for the key
+* Renamed type `SigningMethodHS256` to `SigningMethodHMAC`. Specific sizes are now just instances of this type.
+ * Added public package global `SigningMethodHS256`
+ * Added public package global `SigningMethodHS384`
+ * Added public package global `SigningMethodHS512`
+* Renamed type `SigningMethodRS256` to `SigningMethodRSA`. Specific sizes are now just instances of this type.
+ * Added public package global `SigningMethodRS256`
+ * Added public package global `SigningMethodRS384`
+ * Added public package global `SigningMethodRS512`
+* Moved sample private key for HMAC tests from an inline value to a file on disk. Value is unchanged.
+* Refactored the RSA implementation to be easier to read
+* Exposed helper methods `ParseRSAPrivateKeyFromPEM` and `ParseRSAPublicKeyFromPEM`
+
+## 1.0.2
+
+* Fixed bug in parsing public keys from certificates
+* Added more tests around the parsing of keys for RS256
+* Code refactoring in RS256 implementation. No functional changes
+
+## 1.0.1
+
+* Fixed panic if RS256 signing method was passed an invalid key
+
+## 1.0.0
+
+* First versioned release
+* API stabilized
+* Supports creating, signing, parsing, and validating JWT tokens
+* Supports RS256 and HS256 signing methods
diff --git a/vendor/github.com/golang-jwt/jwt/v5/claims.go b/vendor/github.com/golang-jwt/jwt/v5/claims.go
new file mode 100644
index 00000000..d50ff3da
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/claims.go
@@ -0,0 +1,16 @@
+package jwt
+
+// Claims represents any form of a JWT Claims Set according to
+// https://datatracker.ietf.org/doc/html/rfc7519#section-4. In order to have a
+// common basis for validation, it is required that an implementation is able to
+// supply at least the claim names provided in
+// https://datatracker.ietf.org/doc/html/rfc7519#section-4.1 namely `exp`,
+// `iat`, `nbf`, `iss`, `sub` and `aud`.
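+//
+// A minimal consumption sketch (claims here is any value implementing this
+// interface; MapClaims and RegisteredClaims in this package both do):
+//
+//	exp, err := claims.GetExpirationTime()
+//	if err == nil && exp != nil {
+//		fmt.Println("expires at:", exp.Time)
+//	}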
+type Claims interface {
+ GetExpirationTime() (*NumericDate, error)
+ GetIssuedAt() (*NumericDate, error)
+ GetNotBefore() (*NumericDate, error)
+ GetIssuer() (string, error)
+ GetSubject() (string, error)
+ GetAudience() (ClaimStrings, error)
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/doc.go b/vendor/github.com/golang-jwt/jwt/v5/doc.go
new file mode 100644
index 00000000..a86dc1a3
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/doc.go
@@ -0,0 +1,4 @@
+// Package jwt is a Go implementation of JSON Web Tokens: http://self-issued.info/docs/draft-jones-json-web-token.html
+//
+// See README.md for more info.
+package jwt
diff --git a/vendor/github.com/golang-jwt/jwt/v5/ecdsa.go b/vendor/github.com/golang-jwt/jwt/v5/ecdsa.go
new file mode 100644
index 00000000..06cd94d2
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/ecdsa.go
@@ -0,0 +1,134 @@
+package jwt
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/rand"
+ "errors"
+ "math/big"
+)
+
+var (
+ // Sadly this is missing from crypto/ecdsa compared to crypto/rsa
+ ErrECDSAVerification = errors.New("crypto/ecdsa: verification error")
+)
+
+// SigningMethodECDSA implements the ECDSA family of signing methods.
+// Expects *ecdsa.PrivateKey for signing and *ecdsa.PublicKey for verification
+type SigningMethodECDSA struct {
+ Name string
+ Hash crypto.Hash
+ KeySize int
+ CurveBits int
+}
+
+// Specific instances for ES256 and company
+var (
+ SigningMethodES256 *SigningMethodECDSA
+ SigningMethodES384 *SigningMethodECDSA
+ SigningMethodES512 *SigningMethodECDSA
+)
+
+func init() {
+ // ES256
+ SigningMethodES256 = &SigningMethodECDSA{"ES256", crypto.SHA256, 32, 256}
+ RegisterSigningMethod(SigningMethodES256.Alg(), func() SigningMethod {
+ return SigningMethodES256
+ })
+
+ // ES384
+ SigningMethodES384 = &SigningMethodECDSA{"ES384", crypto.SHA384, 48, 384}
+ RegisterSigningMethod(SigningMethodES384.Alg(), func() SigningMethod {
+ return SigningMethodES384
+ })
+
+ // ES512
+ SigningMethodES512 = &SigningMethodECDSA{"ES512", crypto.SHA512, 66, 521}
+ RegisterSigningMethod(SigningMethodES512.Alg(), func() SigningMethod {
+ return SigningMethodES512
+ })
+}
+
+func (m *SigningMethodECDSA) Alg() string {
+ return m.Name
+}
+
+// Verify implements token verification for the SigningMethod.
+// For this verify method, key must be an *ecdsa.PublicKey.
+func (m *SigningMethodECDSA) Verify(signingString string, sig []byte, key any) error {
+ // Get the key
+ var ecdsaKey *ecdsa.PublicKey
+ switch k := key.(type) {
+ case *ecdsa.PublicKey:
+ ecdsaKey = k
+ default:
+ return newError("ECDSA verify expects *ecdsa.PublicKey", ErrInvalidKeyType)
+ }
+
+ if len(sig) != 2*m.KeySize {
+ return ErrECDSAVerification
+ }
+
+ r := big.NewInt(0).SetBytes(sig[:m.KeySize])
+ s := big.NewInt(0).SetBytes(sig[m.KeySize:])
+
+ // Create hasher
+ if !m.Hash.Available() {
+ return ErrHashUnavailable
+ }
+ hasher := m.Hash.New()
+ hasher.Write([]byte(signingString))
+
+ // Verify the signature
+ if verifystatus := ecdsa.Verify(ecdsaKey, hasher.Sum(nil), r, s); verifystatus {
+ return nil
+ }
+
+ return ErrECDSAVerification
+}
+
+// Sign implements token signing for the SigningMethod.
+// For this signing method, key must be an *ecdsa.PrivateKey.
+func (m *SigningMethodECDSA) Sign(signingString string, key any) ([]byte, error) {
+ // Get the key
+ var ecdsaKey *ecdsa.PrivateKey
+ switch k := key.(type) {
+ case *ecdsa.PrivateKey:
+ ecdsaKey = k
+ default:
+ return nil, newError("ECDSA sign expects *ecdsa.PrivateKey", ErrInvalidKeyType)
+ }
+
+ // Create the hasher
+ if !m.Hash.Available() {
+ return nil, ErrHashUnavailable
+ }
+
+ hasher := m.Hash.New()
+ hasher.Write([]byte(signingString))
+
+ // Sign the string and return r, s
+ if r, s, err := ecdsa.Sign(rand.Reader, ecdsaKey, hasher.Sum(nil)); err == nil {
+ curveBits := ecdsaKey.Curve.Params().BitSize
+
+ if m.CurveBits != curveBits {
+ return nil, ErrInvalidKey
+ }
+
+ keyBytes := curveBits / 8
+ if curveBits%8 > 0 {
+ keyBytes += 1
+ }
+
+ // We serialize the outputs (r and s) into big-endian byte arrays
+ // padded with zeros on the left to make sure the sizes work out.
+ // Output must be 2*keyBytes long.
+ out := make([]byte, 2*keyBytes)
+ r.FillBytes(out[0:keyBytes]) // r is assigned to the first half of output.
+ s.FillBytes(out[keyBytes:]) // s is assigned to the second half of output.
+
+ return out, nil
+ } else {
+ return nil, err
+ }
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/ecdsa_utils.go b/vendor/github.com/golang-jwt/jwt/v5/ecdsa_utils.go
new file mode 100644
index 00000000..44a3b7a1
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/ecdsa_utils.go
@@ -0,0 +1,69 @@
+package jwt
+
+import (
+ "crypto/ecdsa"
+ "crypto/x509"
+ "encoding/pem"
+ "errors"
+)
+
+var (
+ ErrNotECPublicKey = errors.New("key is not a valid ECDSA public key")
+ ErrNotECPrivateKey = errors.New("key is not a valid ECDSA private key")
+)
+
+// ParseECPrivateKeyFromPEM parses a PEM encoded Elliptic Curve Private Key Structure
+func ParseECPrivateKeyFromPEM(key []byte) (*ecdsa.PrivateKey, error) {
+ var err error
+
+ // Parse PEM block
+ var block *pem.Block
+ if block, _ = pem.Decode(key); block == nil {
+ return nil, ErrKeyMustBePEMEncoded
+ }
+
+ // Parse the key
+ var parsedKey any
+ if parsedKey, err = x509.ParseECPrivateKey(block.Bytes); err != nil {
+ if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil {
+ return nil, err
+ }
+ }
+
+ var pkey *ecdsa.PrivateKey
+ var ok bool
+ if pkey, ok = parsedKey.(*ecdsa.PrivateKey); !ok {
+ return nil, ErrNotECPrivateKey
+ }
+
+ return pkey, nil
+}
+
+// ParseECPublicKeyFromPEM parses a PEM encoded PKIX public key or a certificate containing an ECDSA public key
+func ParseECPublicKeyFromPEM(key []byte) (*ecdsa.PublicKey, error) {
+ var err error
+
+ // Parse PEM block
+ var block *pem.Block
+ if block, _ = pem.Decode(key); block == nil {
+ return nil, ErrKeyMustBePEMEncoded
+ }
+
+ // Parse the key
+ var parsedKey any
+ if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil {
+ if cert, err := x509.ParseCertificate(block.Bytes); err == nil {
+ parsedKey = cert.PublicKey
+ } else {
+ return nil, err
+ }
+ }
+
+ var pkey *ecdsa.PublicKey
+ var ok bool
+ if pkey, ok = parsedKey.(*ecdsa.PublicKey); !ok {
+ return nil, ErrNotECPublicKey
+ }
+
+ return pkey, nil
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/ed25519.go b/vendor/github.com/golang-jwt/jwt/v5/ed25519.go
new file mode 100644
index 00000000..4159e57b
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/ed25519.go
@@ -0,0 +1,79 @@
+package jwt
+
+import (
+ "crypto"
+ "crypto/ed25519"
+ "crypto/rand"
+ "errors"
+)
+
+var (
+ ErrEd25519Verification = errors.New("ed25519: verification error")
+)
+
+// SigningMethodEd25519 implements the EdDSA family.
+// Expects ed25519.PrivateKey for signing and ed25519.PublicKey for verification
+type SigningMethodEd25519 struct{}
+
+// Specific instance for EdDSA
+var (
+ SigningMethodEdDSA *SigningMethodEd25519
+)
+
+func init() {
+ SigningMethodEdDSA = &SigningMethodEd25519{}
+ RegisterSigningMethod(SigningMethodEdDSA.Alg(), func() SigningMethod {
+ return SigningMethodEdDSA
+ })
+}
+
+func (m *SigningMethodEd25519) Alg() string {
+ return "EdDSA"
+}
+
+// Verify implements token verification for the SigningMethod.
+// For this verify method, key must be an ed25519.PublicKey
+func (m *SigningMethodEd25519) Verify(signingString string, sig []byte, key any) error {
+ var ed25519Key ed25519.PublicKey
+ var ok bool
+
+ if ed25519Key, ok = key.(ed25519.PublicKey); !ok {
+ return newError("Ed25519 verify expects ed25519.PublicKey", ErrInvalidKeyType)
+ }
+
+ if len(ed25519Key) != ed25519.PublicKeySize {
+ return ErrInvalidKey
+ }
+
+ // Verify the signature
+ if !ed25519.Verify(ed25519Key, []byte(signingString), sig) {
+ return ErrEd25519Verification
+ }
+
+ return nil
+}
+
+// Sign implements token signing for the SigningMethod.
+// For this signing method, key must be a crypto.Signer whose public key is an ed25519.PublicKey (for example, an ed25519.PrivateKey)
+func (m *SigningMethodEd25519) Sign(signingString string, key any) ([]byte, error) {
+ var ed25519Key crypto.Signer
+ var ok bool
+
+ if ed25519Key, ok = key.(crypto.Signer); !ok {
+ return nil, newError("Ed25519 sign expects crypto.Signer", ErrInvalidKeyType)
+ }
+
+ if _, ok := ed25519Key.Public().(ed25519.PublicKey); !ok {
+ return nil, ErrInvalidKey
+ }
+
+ // Sign the string and return the result. ed25519 performs a two-pass hash
+ // as part of its algorithm. Therefore, we need to pass a non-prehashed
+ // message into the Sign function, as indicated by crypto.Hash(0)
+ sig, err := ed25519Key.Sign(rand.Reader, []byte(signingString), crypto.Hash(0))
+ if err != nil {
+ return nil, err
+ }
+
+ return sig, nil
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/ed25519_utils.go b/vendor/github.com/golang-jwt/jwt/v5/ed25519_utils.go
new file mode 100644
index 00000000..6f46e886
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/ed25519_utils.go
@@ -0,0 +1,64 @@
+package jwt
+
+import (
+ "crypto"
+ "crypto/ed25519"
+ "crypto/x509"
+ "encoding/pem"
+ "errors"
+)
+
+var (
+ ErrNotEdPrivateKey = errors.New("key is not a valid Ed25519 private key")
+ ErrNotEdPublicKey = errors.New("key is not a valid Ed25519 public key")
+)
+
+// ParseEdPrivateKeyFromPEM parses a PEM-encoded Edwards curve private key
+func ParseEdPrivateKeyFromPEM(key []byte) (crypto.PrivateKey, error) {
+ var err error
+
+ // Parse PEM block
+ var block *pem.Block
+ if block, _ = pem.Decode(key); block == nil {
+ return nil, ErrKeyMustBePEMEncoded
+ }
+
+ // Parse the key
+ var parsedKey any
+ if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil {
+ return nil, err
+ }
+
+ var pkey ed25519.PrivateKey
+ var ok bool
+ if pkey, ok = parsedKey.(ed25519.PrivateKey); !ok {
+ return nil, ErrNotEdPrivateKey
+ }
+
+ return pkey, nil
+}
+
+// ParseEdPublicKeyFromPEM parses a PEM-encoded Edwards curve public key
+func ParseEdPublicKeyFromPEM(key []byte) (crypto.PublicKey, error) {
+ var err error
+
+ // Parse PEM block
+ var block *pem.Block
+ if block, _ = pem.Decode(key); block == nil {
+ return nil, ErrKeyMustBePEMEncoded
+ }
+
+ // Parse the key
+ var parsedKey any
+ if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil {
+ return nil, err
+ }
+
+ var pkey ed25519.PublicKey
+ var ok bool
+ if pkey, ok = parsedKey.(ed25519.PublicKey); !ok {
+ return nil, ErrNotEdPublicKey
+ }
+
+ return pkey, nil
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/errors.go b/vendor/github.com/golang-jwt/jwt/v5/errors.go
new file mode 100644
index 00000000..14e00751
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/errors.go
@@ -0,0 +1,89 @@
+package jwt
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+)
+
+var (
+ ErrInvalidKey = errors.New("key is invalid")
+ ErrInvalidKeyType = errors.New("key is of invalid type")
+ ErrHashUnavailable = errors.New("the requested hash function is unavailable")
+ ErrTokenMalformed = errors.New("token is malformed")
+ ErrTokenUnverifiable = errors.New("token is unverifiable")
+ ErrTokenSignatureInvalid = errors.New("token signature is invalid")
+ ErrTokenRequiredClaimMissing = errors.New("token is missing required claim")
+ ErrTokenInvalidAudience = errors.New("token has invalid audience")
+ ErrTokenExpired = errors.New("token is expired")
+ ErrTokenUsedBeforeIssued = errors.New("token used before issued")
+ ErrTokenInvalidIssuer = errors.New("token has invalid issuer")
+ ErrTokenInvalidSubject = errors.New("token has invalid subject")
+ ErrTokenNotValidYet = errors.New("token is not valid yet")
+ ErrTokenInvalidId = errors.New("token has invalid id")
+ ErrTokenInvalidClaims = errors.New("token has invalid claims")
+ ErrInvalidType = errors.New("invalid type for claim")
+)
+
+// joinedError is an error type that works similarly to what [errors.Join]
+// produces, with the exception that it has a nicer error string: its
+// error messages are concatenated using a comma, rather than a newline.
+type joinedError struct {
+ errs []error
+}
+
+func (je joinedError) Error() string {
+ msg := []string{}
+ for _, err := range je.errs {
+ msg = append(msg, err.Error())
+ }
+
+ return strings.Join(msg, ", ")
+}
+
+// joinErrors joins together multiple errors. Useful for scenarios where
+// multiple errors occur next to each other, e.g., in claims validation.
+func joinErrors(errs ...error) error {
+ return &joinedError{
+ errs: errs,
+ }
+}
+
+// Unwrap implements the multiple error unwrapping for this error type, which is
+// possible in Go 1.20.
+func (je joinedError) Unwrap() []error {
+ return je.errs
+}
+
+// newError creates a new error with a detailed error message. The
+// message will be prefixed with the contents of the supplied error type.
+// Additionally, more errors that provide further context can be supplied;
+// they will be appended to the message. This makes use of Go 1.20's ability to
+// include more than one %w formatting directive in [fmt.Errorf].
+//
+// For example,
+//
+// newError("no keyfunc was provided", ErrTokenUnverifiable)
+//
+// will produce the error string
+//
+// "token is unverifiable: no keyfunc was provided"
+func newError(message string, err error, more ...error) error {
+ var format string
+ var args []any
+ if message != "" {
+ format = "%w: %s"
+ args = []any{err, message}
+ } else {
+ format = "%w"
+ args = []any{err}
+ }
+
+ for _, e := range more {
+ format += ": %w"
+ args = append(args, e)
+ }
+
+ err = fmt.Errorf(format, args...)
+ return err
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/hmac.go b/vendor/github.com/golang-jwt/jwt/v5/hmac.go
new file mode 100644
index 00000000..1bef138c
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/hmac.go
@@ -0,0 +1,104 @@
+package jwt
+
+import (
+ "crypto"
+ "crypto/hmac"
+ "errors"
+)
+
+// SigningMethodHMAC implements the HMAC-SHA family of signing methods.
+// Expects key type of []byte for both signing and validation
+type SigningMethodHMAC struct {
+ Name string
+ Hash crypto.Hash
+}
+
+// Specific instances for HS256 and company
+var (
+ SigningMethodHS256 *SigningMethodHMAC
+ SigningMethodHS384 *SigningMethodHMAC
+ SigningMethodHS512 *SigningMethodHMAC
+ ErrSignatureInvalid = errors.New("signature is invalid")
+)
+
+func init() {
+ // HS256
+ SigningMethodHS256 = &SigningMethodHMAC{"HS256", crypto.SHA256}
+ RegisterSigningMethod(SigningMethodHS256.Alg(), func() SigningMethod {
+ return SigningMethodHS256
+ })
+
+ // HS384
+ SigningMethodHS384 = &SigningMethodHMAC{"HS384", crypto.SHA384}
+ RegisterSigningMethod(SigningMethodHS384.Alg(), func() SigningMethod {
+ return SigningMethodHS384
+ })
+
+ // HS512
+ SigningMethodHS512 = &SigningMethodHMAC{"HS512", crypto.SHA512}
+ RegisterSigningMethod(SigningMethodHS512.Alg(), func() SigningMethod {
+ return SigningMethodHS512
+ })
+}
+
+func (m *SigningMethodHMAC) Alg() string {
+ return m.Name
+}
+
+// Verify implements token verification for the SigningMethod. Returns nil if
+// the signature is valid. Key must be []byte.
+//
+// Note it is not advised to provide a []byte which was converted from a 'human
+// readable' string using a subset of ASCII characters. To maximize entropy, you
+// should ideally be providing a []byte key which was produced from a
+// cryptographically random source, e.g. crypto/rand. Additional information
+// about this, and why we intentionally are not supporting string as a key can
+// be found on our usage guide
+// https://golang-jwt.github.io/jwt/usage/signing_methods/#signing-methods-and-key-types.
+func (m *SigningMethodHMAC) Verify(signingString string, sig []byte, key any) error {
+ // Verify the key is the right type
+ keyBytes, ok := key.([]byte)
+ if !ok {
+ return newError("HMAC verify expects []byte", ErrInvalidKeyType)
+ }
+
+ // Can we use the specified hashing method?
+ if !m.Hash.Available() {
+ return ErrHashUnavailable
+ }
+
+ // This signing method is symmetric, so we validate the signature
+ // by reproducing the signature from the signing string and key, then
+ // comparing that against the provided signature.
+ hasher := hmac.New(m.Hash.New, keyBytes)
+ hasher.Write([]byte(signingString))
+ if !hmac.Equal(sig, hasher.Sum(nil)) {
+ return ErrSignatureInvalid
+ }
+
+ // No validation errors. Signature is good.
+ return nil
+}
+
+// Sign implements token signing for the SigningMethod. Key must be []byte.
+//
+// Note it is not advised to provide a []byte which was converted from a 'human
+// readable' string using a subset of ASCII characters. To maximize entropy, you
+// should ideally be providing a []byte key which was produced from a
+// cryptographically random source, e.g. crypto/rand. Additional information
+// about this, and why we intentionally are not supporting string as a key can
+// be found on our usage guide https://golang-jwt.github.io/jwt/usage/signing_methods/.
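+//
+// A minimal signing sketch (the 32-byte key drawn from crypto/rand is an
+// illustrative choice, not an API requirement):
+//
+//	key := make([]byte, 32)
+//	if _, err := rand.Read(key); err != nil {
+//		panic(err)
+//	}
+//	token := NewWithClaims(SigningMethodHS256, MapClaims{"sub": "1234567890"})
+//	signed, err := token.SignedString(key)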
+func (m *SigningMethodHMAC) Sign(signingString string, key any) ([]byte, error) {
+ if keyBytes, ok := key.([]byte); ok {
+ if !m.Hash.Available() {
+ return nil, ErrHashUnavailable
+ }
+
+ hasher := hmac.New(m.Hash.New, keyBytes)
+ hasher.Write([]byte(signingString))
+
+ return hasher.Sum(nil), nil
+ }
+
+ return nil, newError("HMAC sign expects []byte", ErrInvalidKeyType)
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/map_claims.go b/vendor/github.com/golang-jwt/jwt/v5/map_claims.go
new file mode 100644
index 00000000..3b920527
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/map_claims.go
@@ -0,0 +1,109 @@
+package jwt
+
+import (
+ "encoding/json"
+ "fmt"
+)
+
+// MapClaims is a claims type that uses a map[string]any for JSON
+// decoding. This is the default claims type if you don't supply one.
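+//
+// A short usage sketch (the claim values are illustrative):
+//
+//	claims := MapClaims{
+//		"sub": "1234567890",
+//		"exp": float64(time.Now().Add(time.Hour).Unix()),
+//	}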
+type MapClaims map[string]any
+
+// GetExpirationTime implements the Claims interface.
+func (m MapClaims) GetExpirationTime() (*NumericDate, error) {
+ return m.parseNumericDate("exp")
+}
+
+// GetNotBefore implements the Claims interface.
+func (m MapClaims) GetNotBefore() (*NumericDate, error) {
+ return m.parseNumericDate("nbf")
+}
+
+// GetIssuedAt implements the Claims interface.
+func (m MapClaims) GetIssuedAt() (*NumericDate, error) {
+ return m.parseNumericDate("iat")
+}
+
+// GetAudience implements the Claims interface.
+func (m MapClaims) GetAudience() (ClaimStrings, error) {
+ return m.parseClaimsString("aud")
+}
+
+// GetIssuer implements the Claims interface.
+func (m MapClaims) GetIssuer() (string, error) {
+ return m.parseString("iss")
+}
+
+// GetSubject implements the Claims interface.
+func (m MapClaims) GetSubject() (string, error) {
+ return m.parseString("sub")
+}
+
+// parseNumericDate tries to parse a key in the map claims type as a numeric
+// date. This will succeed if the underlying type is either a [float64] or a
+// [json.Number]. If the key is absent, nil is returned without an error; any
+// other type results in an error.
+func (m MapClaims) parseNumericDate(key string) (*NumericDate, error) {
+ v, ok := m[key]
+ if !ok {
+ return nil, nil
+ }
+
+ switch exp := v.(type) {
+ case float64:
+ if exp == 0 {
+ return nil, nil
+ }
+
+ return newNumericDateFromSeconds(exp), nil
+ case json.Number:
+ v, _ := exp.Float64()
+
+ return newNumericDateFromSeconds(v), nil
+ }
+
+ return nil, newError(fmt.Sprintf("%s is invalid", key), ErrInvalidType)
+}
+
+// parseClaimsString tries to parse a key in the map claims type as a
+// [ClaimStrings] type, which can either be a string or an array of strings.
+func (m MapClaims) parseClaimsString(key string) (ClaimStrings, error) {
+ var cs []string
+ switch v := m[key].(type) {
+ case string:
+ cs = append(cs, v)
+ case []string:
+ cs = v
+ case []any:
+ for _, a := range v {
+ vs, ok := a.(string)
+ if !ok {
+ return nil, newError(fmt.Sprintf("%s is invalid", key), ErrInvalidType)
+ }
+ cs = append(cs, vs)
+ }
+ }
+
+ return cs, nil
+}
+
+// parseString tries to parse a key in the map claims type as a [string] type.
+// If the key does not exist, an empty string is returned. If the key has the
+// wrong type, an error is returned.
+func (m MapClaims) parseString(key string) (string, error) {
+ var (
+ ok bool
+ raw any
+ iss string
+ )
+ raw, ok = m[key]
+ if !ok {
+ return "", nil
+ }
+
+ iss, ok = raw.(string)
+ if !ok {
+ return "", newError(fmt.Sprintf("%s is invalid", key), ErrInvalidType)
+ }
+
+ return iss, nil
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/none.go b/vendor/github.com/golang-jwt/jwt/v5/none.go
new file mode 100644
index 00000000..624ad55e
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/none.go
@@ -0,0 +1,50 @@
+package jwt
+
+// SigningMethodNone implements the none signing method. This is required by
+// the spec, but you probably should never use it.
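+//
+// If you must parse unsigned tokens (e.g., in tests), the Keyfunc has to
+// return UnsafeAllowNoneSignatureType as the key; a plausible sketch:
+//
+//	token, err := Parse(tok, func(t *Token) (any, error) {
+//		return UnsafeAllowNoneSignatureType, nil
+//	})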
+var SigningMethodNone *signingMethodNone
+
+const UnsafeAllowNoneSignatureType unsafeNoneMagicConstant = "none signing method allowed"
+
+var NoneSignatureTypeDisallowedError error
+
+type signingMethodNone struct{}
+type unsafeNoneMagicConstant string
+
+func init() {
+ SigningMethodNone = &signingMethodNone{}
+ NoneSignatureTypeDisallowedError = newError("'none' signature type is not allowed", ErrTokenUnverifiable)
+
+ RegisterSigningMethod(SigningMethodNone.Alg(), func() SigningMethod {
+ return SigningMethodNone
+ })
+}
+
+func (m *signingMethodNone) Alg() string {
+ return "none"
+}
+
+// Only allow 'none' alg type if UnsafeAllowNoneSignatureType is specified as the key
+func (m *signingMethodNone) Verify(signingString string, sig []byte, key any) (err error) {
+ // Key must be UnsafeAllowNoneSignatureType to prevent accidentally
+ // accepting 'none' signing method
+ if _, ok := key.(unsafeNoneMagicConstant); !ok {
+ return NoneSignatureTypeDisallowedError
+ }
+ // If signing method is none, signature must be an empty string
+ if len(sig) != 0 {
+ return newError("'none' signing method with non-empty signature", ErrTokenUnverifiable)
+ }
+
+ // Accept 'none' signing method.
+ return nil
+}
+
+// Only allow 'none' signing if UnsafeAllowNoneSignatureType is specified as the key
+func (m *signingMethodNone) Sign(signingString string, key any) ([]byte, error) {
+ if _, ok := key.(unsafeNoneMagicConstant); ok {
+ return []byte{}, nil
+ }
+
+ return nil, NoneSignatureTypeDisallowedError
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/parser.go b/vendor/github.com/golang-jwt/jwt/v5/parser.go
new file mode 100644
index 00000000..054c7eb6
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/parser.go
@@ -0,0 +1,268 @@
+package jwt
+
+import (
+ "bytes"
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "strings"
+)
+
+const tokenDelimiter = "."
+
+type Parser struct {
+ // If populated, only these methods will be considered valid.
+ validMethods []string
+
+ // Use JSON Number format in JSON decoder.
+ useJSONNumber bool
+
+ // Skip claims validation during token parsing.
+ skipClaimsValidation bool
+
+ validator *Validator
+
+ decodeStrict bool
+
+ decodePaddingAllowed bool
+}
+
+// NewParser creates a new Parser with the specified options
+func NewParser(options ...ParserOption) *Parser {
+ p := &Parser{
+ validator: &Validator{},
+ }
+
+ // Loop through our parsing options and apply them
+ for _, option := range options {
+ option(p)
+ }
+
+ return p
+}
+
+// Parse parses, validates, verifies the signature and returns the parsed token.
+// keyFunc will receive the parsed token and should return the key for verifying the signature.
+func (p *Parser) Parse(tokenString string, keyFunc Keyfunc) (*Token, error) {
+ return p.ParseWithClaims(tokenString, MapClaims{}, keyFunc)
+}
+
+// ParseWithClaims parses, validates, and verifies like Parse, but supplies a default object implementing the Claims
+// interface. This provides default values which can be overridden and allows a caller to use their own type, rather
+// than the default MapClaims implementation of Claims.
+//
+// Note: If you provide a custom claim implementation that embeds one of the standard claims (such as RegisteredClaims),
+// make sure that a) you either embed a non-pointer version of the claims or b) if you are using a pointer, allocate the
+// proper memory for it before passing in the overall claims, otherwise you might run into a panic.
+func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) {
+ token, parts, err := p.ParseUnverified(tokenString, claims)
+ if err != nil {
+ return token, err
+ }
+
+ // Verify signing method is in the required set
+ if p.validMethods != nil {
+ var signingMethodValid = false
+ var alg = token.Method.Alg()
+ for _, m := range p.validMethods {
+ if m == alg {
+ signingMethodValid = true
+ break
+ }
+ }
+ if !signingMethodValid {
+ // signing method is not in the listed set
+ return token, newError(fmt.Sprintf("signing method %v is invalid", alg), ErrTokenSignatureInvalid)
+ }
+ }
+
+ // Decode signature
+ token.Signature, err = p.DecodeSegment(parts[2])
+ if err != nil {
+ return token, newError("could not base64 decode signature", ErrTokenMalformed, err)
+ }
+ text := strings.Join(parts[0:2], ".")
+
+ // Lookup key(s)
+ if keyFunc == nil {
+ // keyFunc was not provided, short-circuiting validation
+ return token, newError("no keyfunc was provided", ErrTokenUnverifiable)
+ }
+
+ got, err := keyFunc(token)
+ if err != nil {
+ return token, newError("error while executing keyfunc", ErrTokenUnverifiable, err)
+ }
+
+ switch have := got.(type) {
+ case VerificationKeySet:
+ if len(have.Keys) == 0 {
+ return token, newError("keyfunc returned empty verification key set", ErrTokenUnverifiable)
+ }
+ // Iterate through keys and verify signature, skipping the rest when a match is found.
+ // Return the last error if no match is found.
+ for _, key := range have.Keys {
+ if err = token.Method.Verify(text, token.Signature, key); err == nil {
+ break
+ }
+ }
+ default:
+ err = token.Method.Verify(text, token.Signature, have)
+ }
+ if err != nil {
+ return token, newError("", ErrTokenSignatureInvalid, err)
+ }
+
+ // Validate Claims
+ if !p.skipClaimsValidation {
+ // Make sure we have at least a default validator
+ if p.validator == nil {
+ p.validator = NewValidator()
+ }
+
+ if err := p.validator.Validate(claims); err != nil {
+ return token, newError("", ErrTokenInvalidClaims, err)
+ }
+ }
+
+ // No errors so far, token is valid.
+ token.Valid = true
+
+ return token, nil
+}
+
+// ParseUnverified parses the token but doesn't validate the signature.
+//
+// WARNING: Don't use this method unless you know what you're doing.
+//
+// It's only ever useful in cases where you know the signature is valid (since it has already
+// been or will be checked elsewhere in the stack) and you want to extract values from it.
+func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Token, parts []string, err error) {
+ var ok bool
+ parts, ok = splitToken(tokenString)
+ if !ok {
+ return nil, nil, newError("token contains an invalid number of segments", ErrTokenMalformed)
+ }
+
+ token = &Token{Raw: tokenString}
+
+ // parse Header
+ var headerBytes []byte
+ if headerBytes, err = p.DecodeSegment(parts[0]); err != nil {
+ return token, parts, newError("could not base64 decode header", ErrTokenMalformed, err)
+ }
+ if err = json.Unmarshal(headerBytes, &token.Header); err != nil {
+ return token, parts, newError("could not JSON decode header", ErrTokenMalformed, err)
+ }
+
+ // parse Claims
+ token.Claims = claims
+
+ claimBytes, err := p.DecodeSegment(parts[1])
+ if err != nil {
+ return token, parts, newError("could not base64 decode claim", ErrTokenMalformed, err)
+ }
+
+ // If `useJSONNumber` is enabled then we must use *json.Decoder to decode
+ // the claims. However, this comes with a performance penalty, so we only
+ // use it when we must and simply use json.Unmarshal otherwise.
+ if !p.useJSONNumber {
+ // JSON Unmarshal. Special case for map type to avoid weird pointer behavior.
+ if c, ok := token.Claims.(MapClaims); ok {
+ err = json.Unmarshal(claimBytes, &c)
+ } else {
+ err = json.Unmarshal(claimBytes, &claims)
+ }
+ } else {
+ dec := json.NewDecoder(bytes.NewBuffer(claimBytes))
+ dec.UseNumber()
+ // JSON Decode. Special case for map type to avoid weird pointer behavior.
+ if c, ok := token.Claims.(MapClaims); ok {
+ err = dec.Decode(&c)
+ } else {
+ err = dec.Decode(&claims)
+ }
+ }
+ if err != nil {
+ return token, parts, newError("could not JSON decode claim", ErrTokenMalformed, err)
+ }
+
+ // Lookup signature method
+ if method, ok := token.Header["alg"].(string); ok {
+ if token.Method = GetSigningMethod(method); token.Method == nil {
+ return token, parts, newError("signing method (alg) is unavailable", ErrTokenUnverifiable)
+ }
+ } else {
+ return token, parts, newError("signing method (alg) is unspecified", ErrTokenUnverifiable)
+ }
+
+ return token, parts, nil
+}
+
+// splitToken splits a token string into three parts: header, claims, and signature. It will only
+// return true if the token contains exactly two delimiters and three parts. In all other cases, it
+// will return nil parts and false.
+func splitToken(token string) ([]string, bool) {
+ parts := make([]string, 3)
+ header, remain, ok := strings.Cut(token, tokenDelimiter)
+ if !ok {
+ return nil, false
+ }
+ parts[0] = header
+ claims, remain, ok := strings.Cut(remain, tokenDelimiter)
+ if !ok {
+ return nil, false
+ }
+ parts[1] = claims
+ // One more cut to ensure the signature is the last part of the token and there are no more
+ // delimiters. This avoids an issue where malicious input could contain additional delimiters
+ // causing unnecessary overhead when parsing tokens.
+ signature, _, unexpected := strings.Cut(remain, tokenDelimiter)
+ if unexpected {
+ return nil, false
+ }
+ parts[2] = signature
+
+ return parts, true
+}
+
+// DecodeSegment decodes a JWT specific base64url encoding. This function will
+// take into account whether the [Parser] is configured with additional options,
+// such as [WithStrictDecoding] or [WithPaddingAllowed].
+func (p *Parser) DecodeSegment(seg string) ([]byte, error) {
+ encoding := base64.RawURLEncoding
+
+ if p.decodePaddingAllowed {
+ if l := len(seg) % 4; l > 0 {
+ seg += strings.Repeat("=", 4-l)
+ }
+ encoding = base64.URLEncoding
+ }
+
+ if p.decodeStrict {
+ encoding = encoding.Strict()
+ }
+ return encoding.DecodeString(seg)
+}
+
+// Parse parses, validates, verifies the signature and returns the parsed token.
+// keyFunc will receive the parsed token and should return the cryptographic key
+// for verifying the signature. The caller is strongly encouraged to set the
+// WithValidMethods option to validate the 'alg' claim in the token matches the
+// expected algorithm. For more details about the importance of validating the
+// 'alg' claim, see
+// https://auth0.com/blog/critical-vulnerabilities-in-json-web-token-libraries/
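+//
+// A minimal sketch pinning the accepted algorithm (hmacKey is a hypothetical
+// []byte secret):
+//
+//	token, err := Parse(tokenString, func(t *Token) (any, error) {
+//		return hmacKey, nil
+//	}, WithValidMethods([]string{"HS256"}))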
+func Parse(tokenString string, keyFunc Keyfunc, options ...ParserOption) (*Token, error) {
+ return NewParser(options...).Parse(tokenString, keyFunc)
+}
+
+// ParseWithClaims is a shortcut for NewParser().ParseWithClaims().
+//
+// Note: If you provide a custom claim implementation that embeds one of the
+// standard claims (such as RegisteredClaims), make sure that a) you either
+// embed a non-pointer version of the claims or b) if you are using a pointer,
+// allocate the proper memory for it before passing in the overall claims,
+// otherwise you might run into a panic.
+func ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc, options ...ParserOption) (*Token, error) {
+ return NewParser(options...).ParseWithClaims(tokenString, claims, keyFunc)
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/parser_option.go b/vendor/github.com/golang-jwt/jwt/v5/parser_option.go
new file mode 100644
index 00000000..43157355
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/parser_option.go
@@ -0,0 +1,145 @@
+package jwt
+
+import "time"
+
+// ParserOption is used to implement functional-style options that modify the
+// behavior of the parser. To add new options, just create a function (ideally
+// beginning with With or Without) that returns an anonymous function that takes
+// a *Parser type as input and manipulates its configuration accordingly.
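+//
+// Options are passed to [NewParser] or the package-level parse functions; for
+// example:
+//
+//	p := NewParser(WithJSONNumber(), WithLeeway(5*time.Second))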
+type ParserOption func(*Parser)
+
+// WithValidMethods is an option to supply algorithm methods that the parser
+// will check. Only those methods will be considered valid. It is heavily
+// encouraged to use this option in order to prevent attacks such as
+// https://auth0.com/blog/critical-vulnerabilities-in-json-web-token-libraries/.
+func WithValidMethods(methods []string) ParserOption {
+ return func(p *Parser) {
+ p.validMethods = methods
+ }
+}
+
+// WithJSONNumber is an option to configure the underlying JSON parser with
+// UseNumber.
+func WithJSONNumber() ParserOption {
+ return func(p *Parser) {
+ p.useJSONNumber = true
+ }
+}
+
+// WithoutClaimsValidation is an option to disable claims validation. This
+// option should only be used if you know exactly what you are doing.
+func WithoutClaimsValidation() ParserOption {
+ return func(p *Parser) {
+ p.skipClaimsValidation = true
+ }
+}
+
+// WithLeeway returns the ParserOption for specifying the leeway window.
+func WithLeeway(leeway time.Duration) ParserOption {
+ return func(p *Parser) {
+ p.validator.leeway = leeway
+ }
+}
+
+// WithTimeFunc returns the ParserOption for specifying the time func. The
+// primary use-case for this is testing. If you are looking for a way to account
+// for clock-skew, WithLeeway should be used instead.
+func WithTimeFunc(f func() time.Time) ParserOption {
+ return func(p *Parser) {
+ p.validator.timeFunc = f
+ }
+}
+
+// WithIssuedAt returns the ParserOption to enable verification
+// of issued-at.
+func WithIssuedAt() ParserOption {
+ return func(p *Parser) {
+ p.validator.verifyIat = true
+ }
+}
+
+// WithExpirationRequired returns the ParserOption to make exp claim required.
+// By default exp claim is optional.
+func WithExpirationRequired() ParserOption {
+ return func(p *Parser) {
+ p.validator.requireExp = true
+ }
+}
+
+// WithAudience configures the validator to require any of the specified
+// audiences in the `aud` claim. Validation will fail if the audience is not
+// listed in the token or the `aud` claim is missing.
+//
+// NOTE: While the `aud` claim is OPTIONAL in a JWT, the handling of it is
+// application-specific. Since this validation API is meant to help developers
+// write secure applications, we decided to REQUIRE the existence of the claim
+// if an audience is expected.
+func WithAudience(aud ...string) ParserOption {
+ return func(p *Parser) {
+ p.validator.expectedAud = aud
+ }
+}
+
+// WithAllAudiences configures the validator to require all the specified
+// audiences in the `aud` claim. Validation will fail if the specified audiences
+// are not listed in the token or the `aud` claim is missing. Duplicates within
+// the list are de-duplicated, since we internally use a map to look up the
+// audiences.
+//
+// NOTE: While the `aud` claim is OPTIONAL in a JWT, the handling of it is
+// application-specific. Since this validation API is meant to help developers
+// write secure applications, we decided to REQUIRE the existence of the claim
+// if an audience is expected.
+func WithAllAudiences(aud ...string) ParserOption {
+ return func(p *Parser) {
+ p.validator.expectedAud = aud
+ p.validator.expectAllAud = true
+ }
+}
+
+// WithIssuer configures the validator to require the specified issuer in the
+// `iss` claim. Validation will fail if a different issuer is specified in the
+// token or the `iss` claim is missing.
+//
+// NOTE: While the `iss` claim is OPTIONAL in a JWT, the handling of it is
+// application-specific. Since this validation API is meant to help developers
+// write secure applications, we decided to REQUIRE the existence of the claim
+// if an issuer is expected.
+func WithIssuer(iss string) ParserOption {
+ return func(p *Parser) {
+ p.validator.expectedIss = iss
+ }
+}
+
+// WithSubject configures the validator to require the specified subject in the
+// `sub` claim. Validation will fail if a different subject is specified in the
+// token or the `sub` claim is missing.
+//
+// NOTE: While the `sub` claim is OPTIONAL in a JWT, the handling of it is
+// application-specific. Since this validation API is meant to help developers
+// write secure applications, we decided to REQUIRE the existence of the claim
+// if a subject is expected.
+func WithSubject(sub string) ParserOption {
+ return func(p *Parser) {
+ p.validator.expectedSub = sub
+ }
+}
+
+// WithPaddingAllowed will enable the codec used for decoding JWTs to allow
+// padding. Note that the JWS RFC7515 states that the tokens will utilize a
+// Base64url encoding with no padding. Unfortunately, some implementations of
+// JWT are producing non-standard tokens, and thus require support for decoding.
+func WithPaddingAllowed() ParserOption {
+ return func(p *Parser) {
+ p.decodePaddingAllowed = true
+ }
+}
+
+// WithStrictDecoding will switch the codec used for decoding JWTs into strict
+// mode. In this mode, the decoder requires that trailing padding bits are zero,
+// as described in RFC 4648 section 3.5.
+func WithStrictDecoding() ParserOption {
+ return func(p *Parser) {
+ p.decodeStrict = true
+ }
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/registered_claims.go b/vendor/github.com/golang-jwt/jwt/v5/registered_claims.go
new file mode 100644
index 00000000..77951a53
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/registered_claims.go
@@ -0,0 +1,63 @@
+package jwt
+
+// RegisteredClaims are a structured version of the JWT Claims Set,
+// restricted to Registered Claim Names, as referenced at
+// https://datatracker.ietf.org/doc/html/rfc7519#section-4.1
+//
+// This type can be used on its own, but then additional private and
+// public claims embedded in the JWT will not be parsed. The typical use-case
+// therefore is to embed this in a user-defined claim type.
+//
+// See examples for how to use this with your own claim types.
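+//
+// A typical embedding sketch (MyClaims and its Scope field are illustrative):
+//
+//	type MyClaims struct {
+//		Scope string `json:"scope,omitempty"`
+//		RegisteredClaims
+//	}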
+type RegisteredClaims struct {
+ // the `iss` (Issuer) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.1
+ Issuer string `json:"iss,omitempty"`
+
+ // the `sub` (Subject) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.2
+ Subject string `json:"sub,omitempty"`
+
+ // the `aud` (Audience) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.3
+ Audience ClaimStrings `json:"aud,omitempty"`
+
+ // the `exp` (Expiration Time) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.4
+ ExpiresAt *NumericDate `json:"exp,omitempty"`
+
+ // the `nbf` (Not Before) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.5
+ NotBefore *NumericDate `json:"nbf,omitempty"`
+
+ // the `iat` (Issued At) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.6
+ IssuedAt *NumericDate `json:"iat,omitempty"`
+
+ // the `jti` (JWT ID) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.7
+ ID string `json:"jti,omitempty"`
+}
+
+// GetExpirationTime implements the Claims interface.
+func (c RegisteredClaims) GetExpirationTime() (*NumericDate, error) {
+ return c.ExpiresAt, nil
+}
+
+// GetNotBefore implements the Claims interface.
+func (c RegisteredClaims) GetNotBefore() (*NumericDate, error) {
+ return c.NotBefore, nil
+}
+
+// GetIssuedAt implements the Claims interface.
+func (c RegisteredClaims) GetIssuedAt() (*NumericDate, error) {
+ return c.IssuedAt, nil
+}
+
+// GetAudience implements the Claims interface.
+func (c RegisteredClaims) GetAudience() (ClaimStrings, error) {
+ return c.Audience, nil
+}
+
+// GetIssuer implements the Claims interface.
+func (c RegisteredClaims) GetIssuer() (string, error) {
+ return c.Issuer, nil
+}
+
+// GetSubject implements the Claims interface.
+func (c RegisteredClaims) GetSubject() (string, error) {
+ return c.Subject, nil
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/rsa.go b/vendor/github.com/golang-jwt/jwt/v5/rsa.go
new file mode 100644
index 00000000..98b960a7
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/rsa.go
@@ -0,0 +1,93 @@
+package jwt
+
+import (
+ "crypto"
+ "crypto/rand"
+ "crypto/rsa"
+)
+
+// SigningMethodRSA implements the RSA family of signing methods.
+// Expects *rsa.PrivateKey for signing and *rsa.PublicKey for validation
+type SigningMethodRSA struct {
+ Name string
+ Hash crypto.Hash
+}
+
+// Specific instances for RS256 and company
+var (
+ SigningMethodRS256 *SigningMethodRSA
+ SigningMethodRS384 *SigningMethodRSA
+ SigningMethodRS512 *SigningMethodRSA
+)
+
+func init() {
+ // RS256
+ SigningMethodRS256 = &SigningMethodRSA{"RS256", crypto.SHA256}
+ RegisterSigningMethod(SigningMethodRS256.Alg(), func() SigningMethod {
+ return SigningMethodRS256
+ })
+
+ // RS384
+ SigningMethodRS384 = &SigningMethodRSA{"RS384", crypto.SHA384}
+ RegisterSigningMethod(SigningMethodRS384.Alg(), func() SigningMethod {
+ return SigningMethodRS384
+ })
+
+ // RS512
+ SigningMethodRS512 = &SigningMethodRSA{"RS512", crypto.SHA512}
+ RegisterSigningMethod(SigningMethodRS512.Alg(), func() SigningMethod {
+ return SigningMethodRS512
+ })
+}
+
+func (m *SigningMethodRSA) Alg() string {
+ return m.Name
+}
+
+// Verify implements token verification for the SigningMethod.
+// For this verify method, key must be an *rsa.PublicKey.
+func (m *SigningMethodRSA) Verify(signingString string, sig []byte, key any) error {
+ var rsaKey *rsa.PublicKey
+ var ok bool
+
+ if rsaKey, ok = key.(*rsa.PublicKey); !ok {
+ return newError("RSA verify expects *rsa.PublicKey", ErrInvalidKeyType)
+ }
+
+ // Create hasher
+ if !m.Hash.Available() {
+ return ErrHashUnavailable
+ }
+ hasher := m.Hash.New()
+ hasher.Write([]byte(signingString))
+
+ // Verify the signature
+ return rsa.VerifyPKCS1v15(rsaKey, m.Hash, hasher.Sum(nil), sig)
+}
+
+// Sign implements token signing for the SigningMethod.
+// For this signing method, key must be an *rsa.PrivateKey.
+func (m *SigningMethodRSA) Sign(signingString string, key any) ([]byte, error) {
+ var rsaKey *rsa.PrivateKey
+ var ok bool
+
+ // Validate type of key
+ if rsaKey, ok = key.(*rsa.PrivateKey); !ok {
+ return nil, newError("RSA sign expects *rsa.PrivateKey", ErrInvalidKeyType)
+ }
+
+ // Create the hasher
+ if !m.Hash.Available() {
+ return nil, ErrHashUnavailable
+ }
+
+ hasher := m.Hash.New()
+ hasher.Write([]byte(signingString))
+
+ // Sign the string and return the encoded bytes
+ if sigBytes, err := rsa.SignPKCS1v15(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil)); err == nil {
+ return sigBytes, nil
+ } else {
+ return nil, err
+ }
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/rsa_pss.go b/vendor/github.com/golang-jwt/jwt/v5/rsa_pss.go
new file mode 100644
index 00000000..f17590cc
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/rsa_pss.go
@@ -0,0 +1,132 @@
+package jwt
+
+import (
+ "crypto"
+ "crypto/rand"
+ "crypto/rsa"
+)
+
+// SigningMethodRSAPSS implements the RSA-PSS family of signing methods.
+type SigningMethodRSAPSS struct {
+ *SigningMethodRSA
+ Options *rsa.PSSOptions
+ // VerifyOptions is optional. If set, it overrides Options for rsa.VerifyPSS.
+ // Used to accept tokens signed with rsa.PSSSaltLengthAuto, which doesn't follow
+ // https://tools.ietf.org/html/rfc7518#section-3.5 but was used previously.
+ // See https://github.com/dgrijalva/jwt-go/issues/285#issuecomment-437451244 for details.
+ VerifyOptions *rsa.PSSOptions
+}
+
+// Specific instances for PS256 and company.
+var (
+ SigningMethodPS256 *SigningMethodRSAPSS
+ SigningMethodPS384 *SigningMethodRSAPSS
+ SigningMethodPS512 *SigningMethodRSAPSS
+)
+
+func init() {
+ // PS256
+ SigningMethodPS256 = &SigningMethodRSAPSS{
+ SigningMethodRSA: &SigningMethodRSA{
+ Name: "PS256",
+ Hash: crypto.SHA256,
+ },
+ Options: &rsa.PSSOptions{
+ SaltLength: rsa.PSSSaltLengthEqualsHash,
+ },
+ VerifyOptions: &rsa.PSSOptions{
+ SaltLength: rsa.PSSSaltLengthAuto,
+ },
+ }
+ RegisterSigningMethod(SigningMethodPS256.Alg(), func() SigningMethod {
+ return SigningMethodPS256
+ })
+
+ // PS384
+ SigningMethodPS384 = &SigningMethodRSAPSS{
+ SigningMethodRSA: &SigningMethodRSA{
+ Name: "PS384",
+ Hash: crypto.SHA384,
+ },
+ Options: &rsa.PSSOptions{
+ SaltLength: rsa.PSSSaltLengthEqualsHash,
+ },
+ VerifyOptions: &rsa.PSSOptions{
+ SaltLength: rsa.PSSSaltLengthAuto,
+ },
+ }
+ RegisterSigningMethod(SigningMethodPS384.Alg(), func() SigningMethod {
+ return SigningMethodPS384
+ })
+
+ // PS512
+ SigningMethodPS512 = &SigningMethodRSAPSS{
+ SigningMethodRSA: &SigningMethodRSA{
+ Name: "PS512",
+ Hash: crypto.SHA512,
+ },
+ Options: &rsa.PSSOptions{
+ SaltLength: rsa.PSSSaltLengthEqualsHash,
+ },
+ VerifyOptions: &rsa.PSSOptions{
+ SaltLength: rsa.PSSSaltLengthAuto,
+ },
+ }
+ RegisterSigningMethod(SigningMethodPS512.Alg(), func() SigningMethod {
+ return SigningMethodPS512
+ })
+}
+
+// Verify implements token verification for the SigningMethod.
+// For this verify method, key must be an *rsa.PublicKey.
+func (m *SigningMethodRSAPSS) Verify(signingString string, sig []byte, key any) error {
+ var rsaKey *rsa.PublicKey
+ switch k := key.(type) {
+ case *rsa.PublicKey:
+ rsaKey = k
+ default:
+ return newError("RSA-PSS verify expects *rsa.PublicKey", ErrInvalidKeyType)
+ }
+
+ // Create hasher
+ if !m.Hash.Available() {
+ return ErrHashUnavailable
+ }
+ hasher := m.Hash.New()
+ hasher.Write([]byte(signingString))
+
+ opts := m.Options
+ if m.VerifyOptions != nil {
+ opts = m.VerifyOptions
+ }
+
+ return rsa.VerifyPSS(rsaKey, m.Hash, hasher.Sum(nil), sig, opts)
+}
+
+// Sign implements token signing for the SigningMethod.
+// For this signing method, key must be an *rsa.PrivateKey.
+func (m *SigningMethodRSAPSS) Sign(signingString string, key any) ([]byte, error) {
+ var rsaKey *rsa.PrivateKey
+
+ switch k := key.(type) {
+ case *rsa.PrivateKey:
+ rsaKey = k
+ default:
+ return nil, newError("RSA-PSS sign expects *rsa.PrivateKey", ErrInvalidKeyType)
+ }
+
+ // Create the hasher
+ if !m.Hash.Available() {
+ return nil, ErrHashUnavailable
+ }
+
+ hasher := m.Hash.New()
+ hasher.Write([]byte(signingString))
+
+ // Sign the string and return the encoded bytes
+ if sigBytes, err := rsa.SignPSS(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil), m.Options); err == nil {
+ return sigBytes, nil
+ } else {
+ return nil, err
+ }
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/rsa_utils.go b/vendor/github.com/golang-jwt/jwt/v5/rsa_utils.go
new file mode 100644
index 00000000..f22c3d06
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/rsa_utils.go
@@ -0,0 +1,107 @@
+package jwt
+
+import (
+ "crypto/rsa"
+ "crypto/x509"
+ "encoding/pem"
+ "errors"
+)
+
+var (
+ ErrKeyMustBePEMEncoded = errors.New("invalid key: Key must be a PEM encoded PKCS1 or PKCS8 key")
+ ErrNotRSAPrivateKey = errors.New("key is not a valid RSA private key")
+ ErrNotRSAPublicKey = errors.New("key is not a valid RSA public key")
+)
+
+// ParseRSAPrivateKeyFromPEM parses a PEM encoded PKCS1 or PKCS8 private key
+func ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error) {
+ var err error
+
+ // Parse PEM block
+ var block *pem.Block
+ if block, _ = pem.Decode(key); block == nil {
+ return nil, ErrKeyMustBePEMEncoded
+ }
+
+ var parsedKey any
+ if parsedKey, err = x509.ParsePKCS1PrivateKey(block.Bytes); err != nil {
+ if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil {
+ return nil, err
+ }
+ }
+
+ var pkey *rsa.PrivateKey
+ var ok bool
+ if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok {
+ return nil, ErrNotRSAPrivateKey
+ }
+
+ return pkey, nil
+}
+
+// ParseRSAPrivateKeyFromPEMWithPassword parses a PEM encoded PKCS1 or PKCS8 private key protected with password
+//
+// Deprecated: This function should not be used anymore. It relies on the deprecated
+// x509.DecryptPEMBlock function, whose RFC 1423 encryption scheme is regarded as
+// insecure by design. Unfortunately, there is no alternative in the Go standard
+// library for now. See https://github.com/golang/go/issues/8860.
+func ParseRSAPrivateKeyFromPEMWithPassword(key []byte, password string) (*rsa.PrivateKey, error) {
+ var err error
+
+ // Parse PEM block
+ var block *pem.Block
+ if block, _ = pem.Decode(key); block == nil {
+ return nil, ErrKeyMustBePEMEncoded
+ }
+
+ var parsedKey any
+
+ var blockDecrypted []byte
+ if blockDecrypted, err = x509.DecryptPEMBlock(block, []byte(password)); err != nil {
+ return nil, err
+ }
+
+ if parsedKey, err = x509.ParsePKCS1PrivateKey(blockDecrypted); err != nil {
+ if parsedKey, err = x509.ParsePKCS8PrivateKey(blockDecrypted); err != nil {
+ return nil, err
+ }
+ }
+
+ var pkey *rsa.PrivateKey
+ var ok bool
+ if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok {
+ return nil, ErrNotRSAPrivateKey
+ }
+
+ return pkey, nil
+}
+
+// ParseRSAPublicKeyFromPEM parses a certificate or a PEM encoded PKCS1 or PKIX public key
+func ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error) {
+ var err error
+
+ // Parse PEM block
+ var block *pem.Block
+ if block, _ = pem.Decode(key); block == nil {
+ return nil, ErrKeyMustBePEMEncoded
+ }
+
+ // Parse the key
+ var parsedKey any
+ if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil {
+ if cert, err := x509.ParseCertificate(block.Bytes); err == nil {
+ parsedKey = cert.PublicKey
+ } else {
+ if parsedKey, err = x509.ParsePKCS1PublicKey(block.Bytes); err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ var pkey *rsa.PublicKey
+ var ok bool
+ if pkey, ok = parsedKey.(*rsa.PublicKey); !ok {
+ return nil, ErrNotRSAPublicKey
+ }
+
+ return pkey, nil
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/signing_method.go b/vendor/github.com/golang-jwt/jwt/v5/signing_method.go
new file mode 100644
index 00000000..096d0ed4
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/signing_method.go
@@ -0,0 +1,49 @@
+package jwt
+
+import (
+ "sync"
+)
+
+var signingMethods = map[string]func() SigningMethod{}
+var signingMethodLock = new(sync.RWMutex)
+
+// SigningMethod can be used to add new methods for signing or verifying tokens. It
+// takes a decoded signature as an input in the Verify function and produces a
+// signature in Sign. The signature is then usually base64 encoded as part of a
+// JWT.
+type SigningMethod interface {
+ Verify(signingString string, sig []byte, key any) error // Returns nil if signature is valid
+ Sign(signingString string, key any) ([]byte, error) // Returns signature or error
+ Alg() string // returns the alg identifier for this method (example: 'HS256')
+}
+
+// RegisterSigningMethod registers the "alg" name and a factory function for a
+// signing method. This is typically done during init() in the method's implementation.
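+//
+// For example (myMethod is a hypothetical SigningMethod implementation):
+//
+//	RegisterSigningMethod("XYZ256", func() SigningMethod { return myMethod })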
+func RegisterSigningMethod(alg string, f func() SigningMethod) {
+ signingMethodLock.Lock()
+ defer signingMethodLock.Unlock()
+
+ signingMethods[alg] = f
+}
+
+// GetSigningMethod retrieves a signing method from an "alg" string
+func GetSigningMethod(alg string) (method SigningMethod) {
+ signingMethodLock.RLock()
+ defer signingMethodLock.RUnlock()
+
+ if methodF, ok := signingMethods[alg]; ok {
+ method = methodF()
+ }
+ return
+}
+
+// GetAlgorithms returns a list of registered "alg" names
+func GetAlgorithms() (algs []string) {
+ signingMethodLock.RLock()
+ defer signingMethodLock.RUnlock()
+
+ for alg := range signingMethods {
+ algs = append(algs, alg)
+ }
+ return
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/staticcheck.conf b/vendor/github.com/golang-jwt/jwt/v5/staticcheck.conf
new file mode 100644
index 00000000..53745d51
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/staticcheck.conf
@@ -0,0 +1 @@
+checks = ["all", "-ST1000", "-ST1003", "-ST1016", "-ST1023"]
diff --git a/vendor/github.com/golang-jwt/jwt/v5/token.go b/vendor/github.com/golang-jwt/jwt/v5/token.go
new file mode 100644
index 00000000..3f715588
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/token.go
@@ -0,0 +1,100 @@
+package jwt
+
+import (
+ "crypto"
+ "encoding/base64"
+ "encoding/json"
+)
+
+// Keyfunc will be used by the Parse methods as a callback function to supply
+// the key for verification. The function receives the parsed, but unverified
+// Token. This allows you to use properties in the Header of the token (such as
+// `kid`) to identify which key to use.
+//
+// The returned value may be a single key or a VerificationKeySet containing
+// multiple keys.
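+//
+// A common sketch selects the key by the token's `kid` header (keysByID is a
+// hypothetical map from key ID to verification key):
+//
+//	keyFunc := func(t *Token) (any, error) {
+//		kid, _ := t.Header["kid"].(string)
+//		key, ok := keysByID[kid]
+//		if !ok {
+//			return nil, fmt.Errorf("unknown kid %q", kid)
+//		}
+//		return key, nil
+//	}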
+type Keyfunc func(*Token) (any, error)
+
+// VerificationKey represents a public or secret key for verifying a token's signature.
+type VerificationKey interface {
+ crypto.PublicKey | []uint8
+}
+
+// VerificationKeySet is a set of public or secret keys. It is used by the parser to verify a token.
+type VerificationKeySet struct {
+ Keys []VerificationKey
+}
+
+// Token represents a JWT Token. Different fields will be used depending on
+// whether you're creating or parsing/verifying a token.
+type Token struct {
+ Raw string // Raw contains the raw token. Populated when you [Parse] a token
+ Method SigningMethod // Method is the signing method used or to be used
+ Header map[string]any // Header is the first segment of the token in decoded form
+ Claims Claims // Claims is the second segment of the token in decoded form
+ Signature []byte // Signature is the third segment of the token in decoded form. Populated when you Parse a token
+ Valid bool // Valid specifies if the token is valid. Populated when you Parse/Verify a token
+}
+
+// New creates a new [Token] with the specified signing method and an empty map
+// of claims. Additional options can be specified, but are currently unused.
+func New(method SigningMethod, opts ...TokenOption) *Token {
+ return NewWithClaims(method, MapClaims{}, opts...)
+}
+
+// NewWithClaims creates a new [Token] with the specified signing method and
+// claims. Additional options can be specified, but are currently unused.
+func NewWithClaims(method SigningMethod, claims Claims, opts ...TokenOption) *Token {
+ return &Token{
+ Header: map[string]any{
+ "typ": "JWT",
+ "alg": method.Alg(),
+ },
+ Claims: claims,
+ Method: method,
+ }
+}
+
+// SignedString creates and returns a complete, signed JWT. The token is signed
+// using the SigningMethod specified in the token. Please refer to
+// https://golang-jwt.github.io/jwt/usage/signing_methods/#signing-methods-and-key-types
+// for an overview of the different signing methods and their respective key
+// types.
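+//
+// A short sketch with an RSA key (rsaPrivateKey is a hypothetical
+// *rsa.PrivateKey):
+//
+//	token := NewWithClaims(SigningMethodRS256, RegisteredClaims{Subject: "user"})
+//	signed, err := token.SignedString(rsaPrivateKey)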
+func (t *Token) SignedString(key any) (string, error) {
+ sstr, err := t.SigningString()
+ if err != nil {
+ return "", err
+ }
+
+ sig, err := t.Method.Sign(sstr, key)
+ if err != nil {
+ return "", err
+ }
+
+ return sstr + "." + t.EncodeSegment(sig), nil
+}
+
+// SigningString generates the signing string. This is the most expensive part
+// of the whole deal. Unless you need this for something special, just go
+// straight for the SignedString.
+func (t *Token) SigningString() (string, error) {
+ h, err := json.Marshal(t.Header)
+ if err != nil {
+ return "", err
+ }
+
+ c, err := json.Marshal(t.Claims)
+ if err != nil {
+ return "", err
+ }
+
+ return t.EncodeSegment(h) + "." + t.EncodeSegment(c), nil
+}
+
+// EncodeSegment encodes a segment using the JWT-specific base64url encoding
+// with padding stripped. In the future, this function might take into account a
+// [TokenOption]. Therefore, this function exists as a method of [Token], rather
+// than a global function.
+func (*Token) EncodeSegment(seg []byte) string {
+ return base64.RawURLEncoding.EncodeToString(seg)
+}
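
`NewWithClaims` plus `SignedString` is the entire creation path added above: marshal the header and claims, base64url-encode both segments, then sign their joined form. A short sketch using the HMAC-SHA256 method; the key and claim values are illustrative:

```go
package main

import (
	"fmt"
	"time"

	"github.com/golang-jwt/jwt/v5"
)

func main() {
	claims := jwt.MapClaims{
		"sub": "gopher",
		"exp": jwt.NewNumericDate(time.Now().Add(time.Hour)),
	}

	// NewWithClaims fills in the "typ" and "alg" header fields; SignedString
	// then emits header.claims.signature with padding-free base64url segments.
	token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)
	signed, err := token.SignedString([]byte("demo-secret")) // illustrative key
	if err != nil {
		panic(err)
	}
	fmt.Println(signed)
}
```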
diff --git a/vendor/github.com/golang-jwt/jwt/v5/token_option.go b/vendor/github.com/golang-jwt/jwt/v5/token_option.go
new file mode 100644
index 00000000..b4ae3bad
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/token_option.go
@@ -0,0 +1,5 @@
+package jwt
+
+// TokenOption is a reserved type, which provides some forward compatibility,
+// if we ever want to introduce token creation-related options.
+type TokenOption func(*Token)
diff --git a/vendor/github.com/golang-jwt/jwt/v5/types.go b/vendor/github.com/golang-jwt/jwt/v5/types.go
new file mode 100644
index 00000000..a3e0ef12
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/types.go
@@ -0,0 +1,149 @@
+package jwt
+
+import (
+ "encoding/json"
+ "fmt"
+ "math"
+ "strconv"
+ "time"
+)
+
+// TimePrecision sets the precision of times and dates within this library. This
+// has an influence on the precision of times when comparing expiry or other
+// related time fields. Furthermore, it is also the precision of times when
+// serializing.
+//
+// For backwards compatibility the default precision is set to seconds, so that
+// no fractional timestamps are generated.
+var TimePrecision = time.Second
+
+// MarshalSingleStringAsArray modifies the behavior of the ClaimStrings type,
+// especially its MarshalJSON function.
+//
+// If it is set to true (the default), it will always serialize the type as an
+// array of strings, even if it just contains one element, defaulting to the
+// behavior of the underlying []string. If it is set to false, it will serialize
+// to a single string, if it contains one element. Otherwise, it will serialize
+// to an array of strings.
+var MarshalSingleStringAsArray = true
+
+// NumericDate represents a JSON numeric date value, as referenced at
+// https://datatracker.ietf.org/doc/html/rfc7519#section-2.
+type NumericDate struct {
+ time.Time
+}
+
+// NewNumericDate constructs a new *NumericDate from a standard library time.Time struct.
+// It will truncate the timestamp according to the precision specified in TimePrecision.
+func NewNumericDate(t time.Time) *NumericDate {
+ return &NumericDate{t.Truncate(TimePrecision)}
+}
+
+// newNumericDateFromSeconds creates a new *NumericDate out of a float64 representing a
+// UNIX epoch with the float fraction representing non-integer seconds.
+func newNumericDateFromSeconds(f float64) *NumericDate {
+ round, frac := math.Modf(f)
+ return NewNumericDate(time.Unix(int64(round), int64(frac*1e9)))
+}
+
+// MarshalJSON is an implementation of the [json.Marshaler] interface and serializes the UNIX epoch
+// represented in NumericDate to a byte array, using the precision specified in TimePrecision.
+func (date NumericDate) MarshalJSON() (b []byte, err error) {
+ var prec int
+ if TimePrecision < time.Second {
+ prec = int(math.Log10(float64(time.Second) / float64(TimePrecision)))
+ }
+ truncatedDate := date.Truncate(TimePrecision)
+
+ // For very large timestamps, UnixNano would overflow an int64, but this
+ // function requires nanosecond-level precision, so we have to use the
+ // following technique to get around the issue:
+ //
+ // 1. Take the normal unix timestamp to form the whole number part of the
+ // output,
+ // 2. Take the result of the Nanosecond function, which returns the offset
+ // within the second of the particular unix time instance, to form the
+ // decimal part of the output
+ // 3. Concatenate them to produce the final result
+ seconds := strconv.FormatInt(truncatedDate.Unix(), 10)
+ nanosecondsOffset := strconv.FormatFloat(float64(truncatedDate.Nanosecond())/float64(time.Second), 'f', prec, 64)
+
+ output := append([]byte(seconds), []byte(nanosecondsOffset)[1:]...)
+
+ return output, nil
+}
+
+// UnmarshalJSON is an implementation of the [json.Unmarshaler] interface and
+// deserializes a [NumericDate] from a JSON representation, i.e. a
+// [json.Number]. This number represents an UNIX epoch with either integer or
+// non-integer seconds.
+func (date *NumericDate) UnmarshalJSON(b []byte) (err error) {
+ var (
+ number json.Number
+ f float64
+ )
+
+ if err = json.Unmarshal(b, &number); err != nil {
+ return fmt.Errorf("could not parse NumericData: %w", err)
+ }
+
+ if f, err = number.Float64(); err != nil {
+ return fmt.Errorf("could not convert json number value to float: %w", err)
+ }
+
+ n := newNumericDateFromSeconds(f)
+ *date = *n
+
+ return nil
+}
+
+// ClaimStrings is basically just a slice of strings, but it can be
+// deserialized from either a string array or a single string. This type is
+// necessary, since the "aud" claim can be either a single string or an array.
+type ClaimStrings []string
+
+func (s *ClaimStrings) UnmarshalJSON(data []byte) (err error) {
+ var value any
+
+ if err = json.Unmarshal(data, &value); err != nil {
+ return err
+ }
+
+ var aud []string
+
+ switch v := value.(type) {
+ case string:
+ aud = append(aud, v)
+ case []string:
+ aud = ClaimStrings(v)
+ case []any:
+ for _, vv := range v {
+ vs, ok := vv.(string)
+ if !ok {
+ return ErrInvalidType
+ }
+ aud = append(aud, vs)
+ }
+ case nil:
+ return nil
+ default:
+ return ErrInvalidType
+ }
+
+ *s = aud
+
+ return
+}
+
+func (s ClaimStrings) MarshalJSON() (b []byte, err error) {
+ // This handles a special case in the JWT RFC. If the string array, e.g.
+ // used by the "aud" field, only contains one element, it MAY be serialized
+ // as a single string. This may or may not be desired, depending on the
+ // ecosystem of other JWT libraries in use, so we make it configurable via
+ // the variable MarshalSingleStringAsArray.
+ if len(s) == 1 && !MarshalSingleStringAsArray {
+ return json.Marshal(s[0])
+ }
+
+ return json.Marshal([]string(s))
+}
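
The two package-level knobs above, `TimePrecision` and `MarshalSingleStringAsArray`, are easiest to see in a quick round-trip; a small sketch of the default behavior and the single-string `aud` mode:

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"

	"github.com/golang-jwt/jwt/v5"
)

func main() {
	// With the default TimePrecision of time.Second, fractional seconds are
	// truncated and the date serializes as a whole number of seconds.
	d := jwt.NewNumericDate(time.Unix(1700000000, 500_000_000))
	b, _ := json.Marshal(d)
	fmt.Println(string(b)) // 1700000000

	// A one-element audience serializes as an array by default...
	aud := jwt.ClaimStrings{"api"}
	b, _ = json.Marshal(aud)
	fmt.Println(string(b)) // ["api"]

	// ...and as a bare string once MarshalSingleStringAsArray is disabled.
	jwt.MarshalSingleStringAsArray = false
	b, _ = json.Marshal(aud)
	fmt.Println(string(b)) // "api"
}
```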
diff --git a/vendor/github.com/golang-jwt/jwt/v5/validator.go b/vendor/github.com/golang-jwt/jwt/v5/validator.go
new file mode 100644
index 00000000..92b5c057
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/validator.go
@@ -0,0 +1,326 @@
+package jwt
+
+import (
+ "fmt"
+ "slices"
+ "time"
+)
+
+// ClaimsValidator is an interface that can be implemented by custom claims that
+// wish to execute any additional claims validation based on
+// application-specific logic. The Validate function is then executed in
+// addition to the regular claims validation and any error returned is appended
+// to the final validation result.
+//
+// type MyCustomClaims struct {
+// Foo string `json:"foo"`
+// jwt.RegisteredClaims
+// }
+//
+// func (m MyCustomClaims) Validate() error {
+// if m.Foo != "bar" {
+// return errors.New("must be foobar")
+// }
+// return nil
+// }
+type ClaimsValidator interface {
+ Claims
+ Validate() error
+}
+
+// Validator is the core of the new Validation API. It is automatically used by
+// a [Parser] during parsing and can be modified with various parser options.
+//
+// The [NewValidator] function should be used to create an instance of this
+// struct.
+type Validator struct {
+ // leeway is an optional leeway that can be provided to account for clock skew.
+ leeway time.Duration
+
+ // timeFunc is used to supply the current time that is needed for
+ // validation. If unspecified, this defaults to time.Now.
+ timeFunc func() time.Time
+
+ // requireExp specifies whether the exp claim is required
+ requireExp bool
+
+ // verifyIat specifies whether the iat (Issued At) claim will be verified.
+ // According to https://www.rfc-editor.org/rfc/rfc7519#section-4.1.6 this
+ // only specifies the age of the token, but no validation check is
+ // necessary. However, if wanted, it can be checked if the iat is
+ // unrealistic, i.e., in the future.
+ verifyIat bool
+
+ // expectedAud contains the audience this token expects. Supplying an empty
+ // slice will disable aud checking.
+ expectedAud []string
+
+ // expectAllAud specifies whether all expected audiences must be present in
+ // the token. If false, only one of the expected audiences must be present.
+ expectAllAud bool
+
+ // expectedIss contains the issuer this token expects. Supplying an empty
+ // string will disable iss checking.
+ expectedIss string
+
+ // expectedSub contains the subject this token expects. Supplying an empty
+ // string will disable sub checking.
+ expectedSub string
+}
+
+// NewValidator can be used to create a stand-alone validator with the supplied
+// options. This validator can then be used to validate already parsed claims.
+//
+// Note: Under normal circumstances, explicitly creating a validator is not
+// needed and can potentially be dangerous; instead, functions of the [Parser]
+// type should be used.
+//
+// The [Validator] is only checking the *validity* of the claims, such as its
+// expiration time, but it does NOT perform *signature verification* of the
+// token.
+func NewValidator(opts ...ParserOption) *Validator {
+ p := NewParser(opts...)
+ return p.validator
+}
+
+// Validate validates the given claims. It will also perform any custom
+// validation if claims implements the [ClaimsValidator] interface.
+//
+// Note: It will NOT perform any *signature verification* on the token that
+// contains the claims and expects that the [Claims] were already successfully
+// verified.
+func (v *Validator) Validate(claims Claims) error {
+ var (
+ now time.Time
+ errs = make([]error, 0, 6)
+ err error
+ )
+
+ // Check if we have a time func
+ if v.timeFunc != nil {
+ now = v.timeFunc()
+ } else {
+ now = time.Now()
+ }
+
+ // We always need to check the expiration time, but usage of the claim
+ // itself is OPTIONAL by default. requireExp overrides this behavior
+ // and makes the exp claim mandatory.
+ if err = v.verifyExpiresAt(claims, now, v.requireExp); err != nil {
+ errs = append(errs, err)
+ }
+
+ // We always need to check not-before, but usage of the claim itself is
+ // OPTIONAL.
+ if err = v.verifyNotBefore(claims, now, false); err != nil {
+ errs = append(errs, err)
+ }
+
+ // Check issued-at if the option is enabled
+ if v.verifyIat {
+ if err = v.verifyIssuedAt(claims, now, false); err != nil {
+ errs = append(errs, err)
+ }
+ }
+
+ // If we have an expected audience, we also require the audience claim
+ if len(v.expectedAud) > 0 {
+ if err = v.verifyAudience(claims, v.expectedAud, v.expectAllAud); err != nil {
+ errs = append(errs, err)
+ }
+ }
+
+ // If we have an expected issuer, we also require the issuer claim
+ if v.expectedIss != "" {
+ if err = v.verifyIssuer(claims, v.expectedIss, true); err != nil {
+ errs = append(errs, err)
+ }
+ }
+
+ // If we have an expected subject, we also require the subject claim
+ if v.expectedSub != "" {
+ if err = v.verifySubject(claims, v.expectedSub, true); err != nil {
+ errs = append(errs, err)
+ }
+ }
+
+ // Finally, we want to give the claims themselves the chance to perform
+ // additional custom validation via a custom Validate function.
+ cvt, ok := claims.(ClaimsValidator)
+ if ok {
+ if err := cvt.Validate(); err != nil {
+ errs = append(errs, err)
+ }
+ }
+
+ if len(errs) == 0 {
+ return nil
+ }
+
+ return joinErrors(errs...)
+}
+
+// verifyExpiresAt compares the exp claim in claims against cmp. This function
+// will succeed if cmp < exp. Additional leeway is taken into account.
+//
+// If exp is not set, it will succeed if the claim is not required,
+// otherwise ErrTokenRequiredClaimMissing will be returned.
+//
+// Additionally, if any error occurs while retrieving the claim, e.g., when it's
+// the wrong type, an ErrTokenUnverifiable error will be returned.
+func (v *Validator) verifyExpiresAt(claims Claims, cmp time.Time, required bool) error {
+ exp, err := claims.GetExpirationTime()
+ if err != nil {
+ return err
+ }
+
+ if exp == nil {
+ return errorIfRequired(required, "exp")
+ }
+
+ return errorIfFalse(cmp.Before((exp.Time).Add(+v.leeway)), ErrTokenExpired)
+}
+
+// verifyIssuedAt compares the iat claim in claims against cmp. This function
+// will succeed if cmp >= iat. Additional leeway is taken into account.
+//
+// If iat is not set, it will succeed if the claim is not required,
+// otherwise ErrTokenRequiredClaimMissing will be returned.
+//
+// Additionally, if any error occurs while retrieving the claim, e.g., when it's
+// the wrong type, an ErrTokenUnverifiable error will be returned.
+func (v *Validator) verifyIssuedAt(claims Claims, cmp time.Time, required bool) error {
+ iat, err := claims.GetIssuedAt()
+ if err != nil {
+ return err
+ }
+
+ if iat == nil {
+ return errorIfRequired(required, "iat")
+ }
+
+ return errorIfFalse(!cmp.Before(iat.Add(-v.leeway)), ErrTokenUsedBeforeIssued)
+}
+
+// verifyNotBefore compares the nbf claim in claims against cmp. This function
+// will succeed if cmp >= nbf. Additional leeway is taken into account.
+//
+// If nbf is not set, it will succeed if the claim is not required,
+// otherwise ErrTokenRequiredClaimMissing will be returned.
+//
+// Additionally, if any error occurs while retrieving the claim, e.g., when it's
+// the wrong type, an ErrTokenUnverifiable error will be returned.
+func (v *Validator) verifyNotBefore(claims Claims, cmp time.Time, required bool) error {
+ nbf, err := claims.GetNotBefore()
+ if err != nil {
+ return err
+ }
+
+ if nbf == nil {
+ return errorIfRequired(required, "nbf")
+ }
+
+ return errorIfFalse(!cmp.Before(nbf.Add(-v.leeway)), ErrTokenNotValidYet)
+}
+
+// verifyAudience compares the aud claim against cmp.
+//
+// If aud is not set or an empty list, it will succeed if the claim is not required,
+// otherwise ErrTokenRequiredClaimMissing will be returned.
+//
+// Additionally, if any error occurs while retrieving the claim, e.g., when it's
+// the wrong type, an ErrTokenUnverifiable error will be returned.
+func (v *Validator) verifyAudience(claims Claims, cmp []string, expectAllAud bool) error {
+ aud, err := claims.GetAudience()
+ if err != nil {
+ return err
+ }
+
+ // Check that aud exists and is not empty. We only require the aud claim
+ // if we expect at least one audience to be present.
+ if len(aud) == 0 || len(aud) == 1 && aud[0] == "" {
+ required := len(v.expectedAud) > 0
+ return errorIfRequired(required, "aud")
+ }
+
+ if !expectAllAud {
+ for _, a := range aud {
+ // If we only expect one match, we can stop early if we find a match
+ if slices.Contains(cmp, a) {
+ return nil
+ }
+ }
+
+ return ErrTokenInvalidAudience
+ }
+
+ // Note that we are looping over cmp here to ensure that all expected audiences
+ // are present in the aud claim.
+ for _, a := range cmp {
+ if !slices.Contains(aud, a) {
+ return ErrTokenInvalidAudience
+ }
+ }
+
+ return nil
+}
+
+// verifyIssuer compares the iss claim in claims against cmp.
+//
+// If iss is not set, it will succeed if the claim is not required,
+// otherwise ErrTokenRequiredClaimMissing will be returned.
+//
+// Additionally, if any error occurs while retrieving the claim, e.g., when it's
+// the wrong type, an ErrTokenUnverifiable error will be returned.
+func (v *Validator) verifyIssuer(claims Claims, cmp string, required bool) error {
+ iss, err := claims.GetIssuer()
+ if err != nil {
+ return err
+ }
+
+ if iss == "" {
+ return errorIfRequired(required, "iss")
+ }
+
+ return errorIfFalse(iss == cmp, ErrTokenInvalidIssuer)
+}
+
+// verifySubject compares the sub claim against cmp.
+//
+// If sub is not set, it will succeed if the claim is not required,
+// otherwise ErrTokenRequiredClaimMissing will be returned.
+//
+// Additionally, if any error occurs while retrieving the claim, e.g., when it's
+// the wrong type, an ErrTokenUnverifiable error will be returned.
+func (v *Validator) verifySubject(claims Claims, cmp string, required bool) error {
+ sub, err := claims.GetSubject()
+ if err != nil {
+ return err
+ }
+
+ if sub == "" {
+ return errorIfRequired(required, "sub")
+ }
+
+ return errorIfFalse(sub == cmp, ErrTokenInvalidSubject)
+}
+
+// errorIfFalse returns the error specified in err, if the value is false.
+// Otherwise, nil is returned.
+func errorIfFalse(value bool, err error) error {
+ if value {
+ return nil
+ } else {
+ return err
+ }
+}
+
+// errorIfRequired returns an ErrTokenRequiredClaimMissing error if required is
+// true. Otherwise, nil is returned.
+func errorIfRequired(required bool, claim string) error {
+ if required {
+ return newError(fmt.Sprintf("%s claim is required", claim), ErrTokenRequiredClaimMissing)
+ } else {
+ return nil
+ }
+}
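
As the `NewValidator` note says, the validator is normally driven through the parser. A sketch that exercises both the built-in checks (`exp` plus leeway) and the `ClaimsValidator` hook; the claims type, key, and values are illustrative:

```go
package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/golang-jwt/jwt/v5"
)

// OrgClaims is an illustrative claims type; Validate is picked up during
// parsing because the type satisfies the ClaimsValidator interface.
type OrgClaims struct {
	Org string `json:"org"`
	jwt.RegisteredClaims
}

func (c OrgClaims) Validate() error {
	if c.Org == "" {
		return errors.New("org claim is required")
	}
	return nil
}

func main() {
	secret := []byte("demo-secret") // illustrative key

	token := jwt.NewWithClaims(jwt.SigningMethodHS256, OrgClaims{
		Org: "cloudbase",
		RegisteredClaims: jwt.RegisteredClaims{
			ExpiresAt: jwt.NewNumericDate(time.Now().Add(time.Minute)),
		},
	})
	signed, _ := token.SignedString(secret)

	// WithLeeway feeds the validator's leeway field; WithValidMethods
	// restricts the accepted "alg" values before signature verification.
	parsed, err := jwt.ParseWithClaims(signed, &OrgClaims{},
		func(t *jwt.Token) (any, error) { return secret, nil },
		jwt.WithLeeway(30*time.Second),
		jwt.WithValidMethods([]string{"HS256"}),
	)
	if err != nil {
		panic(err)
	}
	fmt.Println("valid:", parsed.Valid)
}
```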
diff --git a/vendor/github.com/golang/protobuf/AUTHORS b/vendor/github.com/golang/protobuf/AUTHORS
deleted file mode 100644
index 15167cd7..00000000
--- a/vendor/github.com/golang/protobuf/AUTHORS
+++ /dev/null
@@ -1,3 +0,0 @@
-# This source code refers to The Go Authors for copyright purposes.
-# The master list of authors is in the main Go distribution,
-# visible at http://tip.golang.org/AUTHORS.
diff --git a/vendor/github.com/golang/protobuf/CONTRIBUTORS b/vendor/github.com/golang/protobuf/CONTRIBUTORS
deleted file mode 100644
index 1c4577e9..00000000
--- a/vendor/github.com/golang/protobuf/CONTRIBUTORS
+++ /dev/null
@@ -1,3 +0,0 @@
-# This source code was written by the Go contributors.
-# The master list of contributors is in the main Go distribution,
-# visible at http://tip.golang.org/CONTRIBUTORS.
diff --git a/vendor/github.com/golang/protobuf/proto/buffer.go b/vendor/github.com/golang/protobuf/proto/buffer.go
deleted file mode 100644
index e810e6fe..00000000
--- a/vendor/github.com/golang/protobuf/proto/buffer.go
+++ /dev/null
@@ -1,324 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package proto
-
-import (
- "errors"
- "fmt"
-
- "google.golang.org/protobuf/encoding/prototext"
- "google.golang.org/protobuf/encoding/protowire"
- "google.golang.org/protobuf/runtime/protoimpl"
-)
-
-const (
- WireVarint = 0
- WireFixed32 = 5
- WireFixed64 = 1
- WireBytes = 2
- WireStartGroup = 3
- WireEndGroup = 4
-)
-
-// EncodeVarint returns the varint encoded bytes of v.
-func EncodeVarint(v uint64) []byte {
- return protowire.AppendVarint(nil, v)
-}
-
-// SizeVarint returns the length of the varint encoded bytes of v.
-// This is equal to len(EncodeVarint(v)).
-func SizeVarint(v uint64) int {
- return protowire.SizeVarint(v)
-}
-
-// DecodeVarint parses a varint encoded integer from b,
-// returning the integer value and the length of the varint.
-// It returns (0, 0) if there is a parse error.
-func DecodeVarint(b []byte) (uint64, int) {
- v, n := protowire.ConsumeVarint(b)
- if n < 0 {
- return 0, 0
- }
- return v, n
-}
-
-// Buffer is a buffer for encoding and decoding the protobuf wire format.
-// It may be reused between invocations to reduce memory usage.
-type Buffer struct {
- buf []byte
- idx int
- deterministic bool
-}
-
-// NewBuffer allocates a new Buffer initialized with buf,
-// where the contents of buf are considered the unread portion of the buffer.
-func NewBuffer(buf []byte) *Buffer {
- return &Buffer{buf: buf}
-}
-
-// SetDeterministic specifies whether to use deterministic serialization.
-//
-// Deterministic serialization guarantees that for a given binary, equal
-// messages will always be serialized to the same bytes. This implies:
-//
-// - Repeated serialization of a message will return the same bytes.
-// - Different processes of the same binary (which may be executing on
-// different machines) will serialize equal messages to the same bytes.
-//
-// Note that the deterministic serialization is NOT canonical across
-// languages. It is not guaranteed to remain stable over time. It is unstable
-// across different builds with schema changes due to unknown fields.
-// Users who need canonical serialization (e.g., persistent storage in a
-// canonical form, fingerprinting, etc.) should define their own
-// canonicalization specification and implement their own serializer rather
-// than relying on this API.
-//
-// If deterministic serialization is requested, map entries will be sorted
-// by keys in lexographical order. This is an implementation detail and
-// subject to change.
-func (b *Buffer) SetDeterministic(deterministic bool) {
- b.deterministic = deterministic
-}
-
-// SetBuf sets buf as the internal buffer,
-// where the contents of buf are considered the unread portion of the buffer.
-func (b *Buffer) SetBuf(buf []byte) {
- b.buf = buf
- b.idx = 0
-}
-
-// Reset clears the internal buffer of all written and unread data.
-func (b *Buffer) Reset() {
- b.buf = b.buf[:0]
- b.idx = 0
-}
-
-// Bytes returns the internal buffer.
-func (b *Buffer) Bytes() []byte {
- return b.buf
-}
-
-// Unread returns the unread portion of the buffer.
-func (b *Buffer) Unread() []byte {
- return b.buf[b.idx:]
-}
-
-// Marshal appends the wire-format encoding of m to the buffer.
-func (b *Buffer) Marshal(m Message) error {
- var err error
- b.buf, err = marshalAppend(b.buf, m, b.deterministic)
- return err
-}
-
-// Unmarshal parses the wire-format message in the buffer and
-// places the decoded results in m.
-// It does not reset m before unmarshaling.
-func (b *Buffer) Unmarshal(m Message) error {
- err := UnmarshalMerge(b.Unread(), m)
- b.idx = len(b.buf)
- return err
-}
-
-type unknownFields struct{ XXX_unrecognized protoimpl.UnknownFields }
-
-func (m *unknownFields) String() string { panic("not implemented") }
-func (m *unknownFields) Reset() { panic("not implemented") }
-func (m *unknownFields) ProtoMessage() { panic("not implemented") }
-
-// DebugPrint dumps the encoded bytes of b with a header and footer including s
-// to stdout. This is only intended for debugging.
-func (*Buffer) DebugPrint(s string, b []byte) {
- m := MessageReflect(new(unknownFields))
- m.SetUnknown(b)
- b, _ = prototext.MarshalOptions{AllowPartial: true, Indent: "\t"}.Marshal(m.Interface())
- fmt.Printf("==== %s ====\n%s==== %s ====\n", s, b, s)
-}
-
-// EncodeVarint appends an unsigned varint encoding to the buffer.
-func (b *Buffer) EncodeVarint(v uint64) error {
- b.buf = protowire.AppendVarint(b.buf, v)
- return nil
-}
-
-// EncodeZigzag32 appends a 32-bit zig-zag varint encoding to the buffer.
-func (b *Buffer) EncodeZigzag32(v uint64) error {
- return b.EncodeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31))))
-}
-
-// EncodeZigzag64 appends a 64-bit zig-zag varint encoding to the buffer.
-func (b *Buffer) EncodeZigzag64(v uint64) error {
- return b.EncodeVarint(uint64((uint64(v) << 1) ^ uint64((int64(v) >> 63))))
-}
-
-// EncodeFixed32 appends a 32-bit little-endian integer to the buffer.
-func (b *Buffer) EncodeFixed32(v uint64) error {
- b.buf = protowire.AppendFixed32(b.buf, uint32(v))
- return nil
-}
-
-// EncodeFixed64 appends a 64-bit little-endian integer to the buffer.
-func (b *Buffer) EncodeFixed64(v uint64) error {
- b.buf = protowire.AppendFixed64(b.buf, uint64(v))
- return nil
-}
-
-// EncodeRawBytes appends a length-prefixed raw bytes to the buffer.
-func (b *Buffer) EncodeRawBytes(v []byte) error {
- b.buf = protowire.AppendBytes(b.buf, v)
- return nil
-}
-
-// EncodeStringBytes appends a length-prefixed raw bytes to the buffer.
-// It does not validate whether v contains valid UTF-8.
-func (b *Buffer) EncodeStringBytes(v string) error {
- b.buf = protowire.AppendString(b.buf, v)
- return nil
-}
-
-// EncodeMessage appends a length-prefixed encoded message to the buffer.
-func (b *Buffer) EncodeMessage(m Message) error {
- var err error
- b.buf = protowire.AppendVarint(b.buf, uint64(Size(m)))
- b.buf, err = marshalAppend(b.buf, m, b.deterministic)
- return err
-}
-
-// DecodeVarint consumes an encoded unsigned varint from the buffer.
-func (b *Buffer) DecodeVarint() (uint64, error) {
- v, n := protowire.ConsumeVarint(b.buf[b.idx:])
- if n < 0 {
- return 0, protowire.ParseError(n)
- }
- b.idx += n
- return uint64(v), nil
-}
-
-// DecodeZigzag32 consumes an encoded 32-bit zig-zag varint from the buffer.
-func (b *Buffer) DecodeZigzag32() (uint64, error) {
- v, err := b.DecodeVarint()
- if err != nil {
- return 0, err
- }
- return uint64((uint32(v) >> 1) ^ uint32((int32(v&1)<<31)>>31)), nil
-}
-
-// DecodeZigzag64 consumes an encoded 64-bit zig-zag varint from the buffer.
-func (b *Buffer) DecodeZigzag64() (uint64, error) {
- v, err := b.DecodeVarint()
- if err != nil {
- return 0, err
- }
- return uint64((uint64(v) >> 1) ^ uint64((int64(v&1)<<63)>>63)), nil
-}
-
-// DecodeFixed32 consumes a 32-bit little-endian integer from the buffer.
-func (b *Buffer) DecodeFixed32() (uint64, error) {
- v, n := protowire.ConsumeFixed32(b.buf[b.idx:])
- if n < 0 {
- return 0, protowire.ParseError(n)
- }
- b.idx += n
- return uint64(v), nil
-}
-
-// DecodeFixed64 consumes a 64-bit little-endian integer from the buffer.
-func (b *Buffer) DecodeFixed64() (uint64, error) {
- v, n := protowire.ConsumeFixed64(b.buf[b.idx:])
- if n < 0 {
- return 0, protowire.ParseError(n)
- }
- b.idx += n
- return uint64(v), nil
-}
-
-// DecodeRawBytes consumes a length-prefixed raw bytes from the buffer.
-// If alloc is specified, it returns a copy the raw bytes
-// rather than a sub-slice of the buffer.
-func (b *Buffer) DecodeRawBytes(alloc bool) ([]byte, error) {
- v, n := protowire.ConsumeBytes(b.buf[b.idx:])
- if n < 0 {
- return nil, protowire.ParseError(n)
- }
- b.idx += n
- if alloc {
- v = append([]byte(nil), v...)
- }
- return v, nil
-}
-
-// DecodeStringBytes consumes a length-prefixed raw bytes from the buffer.
-// It does not validate whether the raw bytes contain valid UTF-8.
-func (b *Buffer) DecodeStringBytes() (string, error) {
- v, n := protowire.ConsumeString(b.buf[b.idx:])
- if n < 0 {
- return "", protowire.ParseError(n)
- }
- b.idx += n
- return v, nil
-}
-
-// DecodeMessage consumes a length-prefixed message from the buffer.
-// It does not reset m before unmarshaling.
-func (b *Buffer) DecodeMessage(m Message) error {
- v, err := b.DecodeRawBytes(false)
- if err != nil {
- return err
- }
- return UnmarshalMerge(v, m)
-}
-
-// DecodeGroup consumes a message group from the buffer.
-// It assumes that the start group marker has already been consumed and
-// consumes all bytes until (and including the end group marker).
-// It does not reset m before unmarshaling.
-func (b *Buffer) DecodeGroup(m Message) error {
- v, n, err := consumeGroup(b.buf[b.idx:])
- if err != nil {
- return err
- }
- b.idx += n
- return UnmarshalMerge(v, m)
-}
-
-// consumeGroup parses b until it finds an end group marker, returning
-// the raw bytes of the message (excluding the end group marker) and the
-// the total length of the message (including the end group marker).
-func consumeGroup(b []byte) ([]byte, int, error) {
- b0 := b
- depth := 1 // assume this follows a start group marker
- for {
- _, wtyp, tagLen := protowire.ConsumeTag(b)
- if tagLen < 0 {
- return nil, 0, protowire.ParseError(tagLen)
- }
- b = b[tagLen:]
-
- var valLen int
- switch wtyp {
- case protowire.VarintType:
- _, valLen = protowire.ConsumeVarint(b)
- case protowire.Fixed32Type:
- _, valLen = protowire.ConsumeFixed32(b)
- case protowire.Fixed64Type:
- _, valLen = protowire.ConsumeFixed64(b)
- case protowire.BytesType:
- _, valLen = protowire.ConsumeBytes(b)
- case protowire.StartGroupType:
- depth++
- case protowire.EndGroupType:
- depth--
- default:
- return nil, 0, errors.New("proto: cannot parse reserved wire type")
- }
- if valLen < 0 {
- return nil, 0, protowire.ParseError(valLen)
- }
- b = b[valLen:]
-
- if depth == 0 {
- return b0[:len(b0)-len(b)-tagLen], len(b0) - len(b), nil
- }
- }
-}
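
The deleted `Buffer` type was a thin shim over `google.golang.org/protobuf/encoding/protowire`, as its imports show. A hedged migration sketch using those protowire primitives directly; the field numbers and values are illustrative, and the decode loop assumes well-formed input since it was built one step earlier:

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	// Append-style encoding replaces Buffer.EncodeVarint/EncodeRawBytes:
	// these are the same protowire calls the removed shim delegated to.
	var buf []byte
	buf = protowire.AppendTag(buf, 1, protowire.VarintType)
	buf = protowire.AppendVarint(buf, 150)
	buf = protowire.AppendTag(buf, 2, protowire.BytesType)
	buf = protowire.AppendBytes(buf, []byte("garm"))

	// Consume-style decoding replaces Buffer.DecodeVarint/DecodeRawBytes.
	for len(buf) > 0 {
		num, typ, n := protowire.ConsumeTag(buf)
		if n < 0 {
			panic(protowire.ParseError(n))
		}
		buf = buf[n:]
		switch typ {
		case protowire.VarintType:
			v, m := protowire.ConsumeVarint(buf)
			fmt.Printf("field %d: varint %d\n", num, v)
			buf = buf[m:]
		case protowire.BytesType:
			bs, m := protowire.ConsumeBytes(buf)
			fmt.Printf("field %d: bytes %q\n", num, bs)
			buf = buf[m:]
		}
	}
}
```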
diff --git a/vendor/github.com/golang/protobuf/proto/defaults.go b/vendor/github.com/golang/protobuf/proto/defaults.go
deleted file mode 100644
index d399bf06..00000000
--- a/vendor/github.com/golang/protobuf/proto/defaults.go
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package proto
-
-import (
- "google.golang.org/protobuf/reflect/protoreflect"
-)
-
-// SetDefaults sets unpopulated scalar fields to their default values.
-// Fields within a oneof are not set even if they have a default value.
-// SetDefaults is recursively called upon any populated message fields.
-func SetDefaults(m Message) {
- if m != nil {
- setDefaults(MessageReflect(m))
- }
-}
-
-func setDefaults(m protoreflect.Message) {
- fds := m.Descriptor().Fields()
- for i := 0; i < fds.Len(); i++ {
- fd := fds.Get(i)
- if !m.Has(fd) {
- if fd.HasDefault() && fd.ContainingOneof() == nil {
- v := fd.Default()
- if fd.Kind() == protoreflect.BytesKind {
- v = protoreflect.ValueOf(append([]byte(nil), v.Bytes()...)) // copy the default bytes
- }
- m.Set(fd, v)
- }
- continue
- }
- }
-
- m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
- switch {
- // Handle singular message.
- case fd.Cardinality() != protoreflect.Repeated:
- if fd.Message() != nil {
- setDefaults(m.Get(fd).Message())
- }
- // Handle list of messages.
- case fd.IsList():
- if fd.Message() != nil {
- ls := m.Get(fd).List()
- for i := 0; i < ls.Len(); i++ {
- setDefaults(ls.Get(i).Message())
- }
- }
- // Handle map of messages.
- case fd.IsMap():
- if fd.MapValue().Message() != nil {
- ms := m.Get(fd).Map()
- ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool {
- setDefaults(v.Message())
- return true
- })
- }
- }
- return true
- })
-}
diff --git a/vendor/github.com/golang/protobuf/proto/deprecated.go b/vendor/github.com/golang/protobuf/proto/deprecated.go
deleted file mode 100644
index e8db57e0..00000000
--- a/vendor/github.com/golang/protobuf/proto/deprecated.go
+++ /dev/null
@@ -1,113 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package proto
-
-import (
- "encoding/json"
- "errors"
- "fmt"
- "strconv"
-
- protoV2 "google.golang.org/protobuf/proto"
-)
-
-var (
- // Deprecated: No longer returned.
- ErrNil = errors.New("proto: Marshal called with nil")
-
- // Deprecated: No longer returned.
- ErrTooLarge = errors.New("proto: message encodes to over 2 GB")
-
- // Deprecated: No longer returned.
- ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
-)
-
-// Deprecated: Do not use.
-type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 }
-
-// Deprecated: Do not use.
-func GetStats() Stats { return Stats{} }
-
-// Deprecated: Do not use.
-func MarshalMessageSet(interface{}) ([]byte, error) {
- return nil, errors.New("proto: not implemented")
-}
-
-// Deprecated: Do not use.
-func UnmarshalMessageSet([]byte, interface{}) error {
- return errors.New("proto: not implemented")
-}
-
-// Deprecated: Do not use.
-func MarshalMessageSetJSON(interface{}) ([]byte, error) {
- return nil, errors.New("proto: not implemented")
-}
-
-// Deprecated: Do not use.
-func UnmarshalMessageSetJSON([]byte, interface{}) error {
- return errors.New("proto: not implemented")
-}
-
-// Deprecated: Do not use.
-func RegisterMessageSetType(Message, int32, string) {}
-
-// Deprecated: Do not use.
-func EnumName(m map[int32]string, v int32) string {
- s, ok := m[v]
- if ok {
- return s
- }
- return strconv.Itoa(int(v))
-}
-
-// Deprecated: Do not use.
-func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
- if data[0] == '"' {
- // New style: enums are strings.
- var repr string
- if err := json.Unmarshal(data, &repr); err != nil {
- return -1, err
- }
- val, ok := m[repr]
- if !ok {
- return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
- }
- return val, nil
- }
- // Old style: enums are ints.
- var val int32
- if err := json.Unmarshal(data, &val); err != nil {
- return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
- }
- return val, nil
-}
-
-// Deprecated: Do not use; this type existed for intenal-use only.
-type InternalMessageInfo struct{}
-
-// Deprecated: Do not use; this method existed for intenal-use only.
-func (*InternalMessageInfo) DiscardUnknown(m Message) {
- DiscardUnknown(m)
-}
-
-// Deprecated: Do not use; this method existed for intenal-use only.
-func (*InternalMessageInfo) Marshal(b []byte, m Message, deterministic bool) ([]byte, error) {
- return protoV2.MarshalOptions{Deterministic: deterministic}.MarshalAppend(b, MessageV2(m))
-}
-
-// Deprecated: Do not use; this method existed for intenal-use only.
-func (*InternalMessageInfo) Merge(dst, src Message) {
- protoV2.Merge(MessageV2(dst), MessageV2(src))
-}
-
-// Deprecated: Do not use; this method existed for intenal-use only.
-func (*InternalMessageInfo) Size(m Message) int {
- return protoV2.Size(MessageV2(m))
-}
-
-// Deprecated: Do not use; this method existed for intenal-use only.
-func (*InternalMessageInfo) Unmarshal(m Message, b []byte) error {
- return protoV2.UnmarshalOptions{Merge: true}.Unmarshal(b, MessageV2(m))
-}
diff --git a/vendor/github.com/golang/protobuf/proto/discard.go b/vendor/github.com/golang/protobuf/proto/discard.go
deleted file mode 100644
index 2187e877..00000000
--- a/vendor/github.com/golang/protobuf/proto/discard.go
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package proto
-
-import (
- "google.golang.org/protobuf/reflect/protoreflect"
-)
-
-// DiscardUnknown recursively discards all unknown fields from this message
-// and all embedded messages.
-//
-// When unmarshaling a message with unrecognized fields, the tags and values
-// of such fields are preserved in the Message. This allows a later call to
-// marshal to be able to produce a message that continues to have those
-// unrecognized fields. To avoid this, DiscardUnknown is used to
-// explicitly clear the unknown fields after unmarshaling.
-func DiscardUnknown(m Message) {
- if m != nil {
- discardUnknown(MessageReflect(m))
- }
-}
-
-func discardUnknown(m protoreflect.Message) {
- m.Range(func(fd protoreflect.FieldDescriptor, val protoreflect.Value) bool {
- switch {
- // Handle singular message.
- case fd.Cardinality() != protoreflect.Repeated:
- if fd.Message() != nil {
- discardUnknown(m.Get(fd).Message())
- }
- // Handle list of messages.
- case fd.IsList():
- if fd.Message() != nil {
- ls := m.Get(fd).List()
- for i := 0; i < ls.Len(); i++ {
- discardUnknown(ls.Get(i).Message())
- }
- }
- // Handle map of messages.
- case fd.IsMap():
- if fd.MapValue().Message() != nil {
- ms := m.Get(fd).Map()
- ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool {
- discardUnknown(v.Message())
- return true
- })
- }
- }
- return true
- })
-
- // Discard unknown fields.
- if len(m.GetUnknown()) > 0 {
- m.SetUnknown(nil)
- }
-}
diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go
deleted file mode 100644
index 42fc120c..00000000
--- a/vendor/github.com/golang/protobuf/proto/extensions.go
+++ /dev/null
@@ -1,356 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package proto
-
-import (
- "errors"
- "fmt"
- "reflect"
-
- "google.golang.org/protobuf/encoding/protowire"
- "google.golang.org/protobuf/proto"
- "google.golang.org/protobuf/reflect/protoreflect"
- "google.golang.org/protobuf/reflect/protoregistry"
- "google.golang.org/protobuf/runtime/protoiface"
- "google.golang.org/protobuf/runtime/protoimpl"
-)
-
-type (
- // ExtensionDesc represents an extension descriptor and
- // is used to interact with an extension field in a message.
- //
- // Variables of this type are generated in code by protoc-gen-go.
- ExtensionDesc = protoimpl.ExtensionInfo
-
- // ExtensionRange represents a range of message extensions.
- // Used in code generated by protoc-gen-go.
- ExtensionRange = protoiface.ExtensionRangeV1
-
- // Deprecated: Do not use; this is an internal type.
- Extension = protoimpl.ExtensionFieldV1
-
- // Deprecated: Do not use; this is an internal type.
- XXX_InternalExtensions = protoimpl.ExtensionFields
-)
-
-// ErrMissingExtension reports whether the extension was not present.
-var ErrMissingExtension = errors.New("proto: missing extension")
-
-var errNotExtendable = errors.New("proto: not an extendable proto.Message")
-
-// HasExtension reports whether the extension field is present in m
-// either as an explicitly populated field or as an unknown field.
-func HasExtension(m Message, xt *ExtensionDesc) (has bool) {
- mr := MessageReflect(m)
- if mr == nil || !mr.IsValid() {
- return false
- }
-
- // Check whether any populated known field matches the field number.
- xtd := xt.TypeDescriptor()
- if isValidExtension(mr.Descriptor(), xtd) {
- has = mr.Has(xtd)
- } else {
- mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
- has = int32(fd.Number()) == xt.Field
- return !has
- })
- }
-
- // Check whether any unknown field matches the field number.
- for b := mr.GetUnknown(); !has && len(b) > 0; {
- num, _, n := protowire.ConsumeField(b)
- has = int32(num) == xt.Field
- b = b[n:]
- }
- return has
-}
-
-// ClearExtension removes the extension field from m
-// either as an explicitly populated field or as an unknown field.
-func ClearExtension(m Message, xt *ExtensionDesc) {
- mr := MessageReflect(m)
- if mr == nil || !mr.IsValid() {
- return
- }
-
- xtd := xt.TypeDescriptor()
- if isValidExtension(mr.Descriptor(), xtd) {
- mr.Clear(xtd)
- } else {
- mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
- if int32(fd.Number()) == xt.Field {
- mr.Clear(fd)
- return false
- }
- return true
- })
- }
- clearUnknown(mr, fieldNum(xt.Field))
-}
-
-// ClearAllExtensions clears all extensions from m.
-// This includes populated fields and unknown fields in the extension range.
-func ClearAllExtensions(m Message) {
- mr := MessageReflect(m)
- if mr == nil || !mr.IsValid() {
- return
- }
-
- mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
- if fd.IsExtension() {
- mr.Clear(fd)
- }
- return true
- })
- clearUnknown(mr, mr.Descriptor().ExtensionRanges())
-}
-
-// GetExtension retrieves a proto2 extended field from m.
-//
-// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil),
-// then GetExtension parses the encoded field and returns a Go value of the specified type.
-// If the field is not present, then the default value is returned (if one is specified),
-// otherwise ErrMissingExtension is reported.
-//
-// If the descriptor is type incomplete (i.e., ExtensionDesc.ExtensionType is nil),
-// then GetExtension returns the raw encoded bytes for the extension field.
-func GetExtension(m Message, xt *ExtensionDesc) (interface{}, error) {
- mr := MessageReflect(m)
- if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 {
- return nil, errNotExtendable
- }
-
- // Retrieve the unknown fields for this extension field.
- var bo protoreflect.RawFields
- for bi := mr.GetUnknown(); len(bi) > 0; {
- num, _, n := protowire.ConsumeField(bi)
- if int32(num) == xt.Field {
- bo = append(bo, bi[:n]...)
- }
- bi = bi[n:]
- }
-
- // For type incomplete descriptors, only retrieve the unknown fields.
- if xt.ExtensionType == nil {
- return []byte(bo), nil
- }
-
- // If the extension field only exists as unknown fields, unmarshal it.
- // This is rarely done since proto.Unmarshal eagerly unmarshals extensions.
- xtd := xt.TypeDescriptor()
- if !isValidExtension(mr.Descriptor(), xtd) {
- return nil, fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m)
- }
- if !mr.Has(xtd) && len(bo) > 0 {
- m2 := mr.New()
- if err := (proto.UnmarshalOptions{
- Resolver: extensionResolver{xt},
- }.Unmarshal(bo, m2.Interface())); err != nil {
- return nil, err
- }
- if m2.Has(xtd) {
- mr.Set(xtd, m2.Get(xtd))
- clearUnknown(mr, fieldNum(xt.Field))
- }
- }
-
- // Check whether the message has the extension field set or a default.
- var pv protoreflect.Value
- switch {
- case mr.Has(xtd):
- pv = mr.Get(xtd)
- case xtd.HasDefault():
- pv = xtd.Default()
- default:
- return nil, ErrMissingExtension
- }
-
- v := xt.InterfaceOf(pv)
- rv := reflect.ValueOf(v)
- if isScalarKind(rv.Kind()) {
- rv2 := reflect.New(rv.Type())
- rv2.Elem().Set(rv)
- v = rv2.Interface()
- }
- return v, nil
-}
-
-// extensionResolver is a custom extension resolver that stores a single
-// extension type that takes precedence over the global registry.
-type extensionResolver struct{ xt protoreflect.ExtensionType }
-
-func (r extensionResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) {
- if xtd := r.xt.TypeDescriptor(); xtd.FullName() == field {
- return r.xt, nil
- }
- return protoregistry.GlobalTypes.FindExtensionByName(field)
-}
-
-func (r extensionResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) {
- if xtd := r.xt.TypeDescriptor(); xtd.ContainingMessage().FullName() == message && xtd.Number() == field {
- return r.xt, nil
- }
- return protoregistry.GlobalTypes.FindExtensionByNumber(message, field)
-}
-
-// GetExtensions returns a list of the extensions values present in m,
-// corresponding with the provided list of extension descriptors, xts.
-// If an extension is missing in m, the corresponding value is nil.
-func GetExtensions(m Message, xts []*ExtensionDesc) ([]interface{}, error) {
- mr := MessageReflect(m)
- if mr == nil || !mr.IsValid() {
- return nil, errNotExtendable
- }
-
- vs := make([]interface{}, len(xts))
- for i, xt := range xts {
- v, err := GetExtension(m, xt)
- if err != nil {
- if err == ErrMissingExtension {
- continue
- }
- return vs, err
- }
- vs[i] = v
- }
- return vs, nil
-}
-
-// SetExtension sets an extension field in m to the provided value.
-func SetExtension(m Message, xt *ExtensionDesc, v interface{}) error {
- mr := MessageReflect(m)
- if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 {
- return errNotExtendable
- }
-
- rv := reflect.ValueOf(v)
- if reflect.TypeOf(v) != reflect.TypeOf(xt.ExtensionType) {
- return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", v, xt.ExtensionType)
- }
- if rv.Kind() == reflect.Ptr {
- if rv.IsNil() {
- return fmt.Errorf("proto: SetExtension called with nil value of type %T", v)
- }
- if isScalarKind(rv.Elem().Kind()) {
- v = rv.Elem().Interface()
- }
- }
-
- xtd := xt.TypeDescriptor()
- if !isValidExtension(mr.Descriptor(), xtd) {
- return fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m)
- }
- mr.Set(xtd, xt.ValueOf(v))
- clearUnknown(mr, fieldNum(xt.Field))
- return nil
-}
-
-// SetRawExtension inserts b into the unknown fields of m.
-//
-// Deprecated: Use Message.ProtoReflect.SetUnknown instead.
-func SetRawExtension(m Message, fnum int32, b []byte) {
- mr := MessageReflect(m)
- if mr == nil || !mr.IsValid() {
- return
- }
-
- // Verify that the raw field is valid.
- for b0 := b; len(b0) > 0; {
- num, _, n := protowire.ConsumeField(b0)
- if int32(num) != fnum {
- panic(fmt.Sprintf("mismatching field number: got %d, want %d", num, fnum))
- }
- b0 = b0[n:]
- }
-
- ClearExtension(m, &ExtensionDesc{Field: fnum})
- mr.SetUnknown(append(mr.GetUnknown(), b...))
-}
-
-// ExtensionDescs returns a list of extension descriptors found in m,
-// containing descriptors for both populated extension fields in m and
-// also unknown fields of m that are in the extension range.
-// For the later case, an type incomplete descriptor is provided where only
-// the ExtensionDesc.Field field is populated.
-// The order of the extension descriptors is undefined.
-func ExtensionDescs(m Message) ([]*ExtensionDesc, error) {
- mr := MessageReflect(m)
- if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 {
- return nil, errNotExtendable
- }
-
- // Collect a set of known extension descriptors.
- extDescs := make(map[protoreflect.FieldNumber]*ExtensionDesc)
- mr.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
- if fd.IsExtension() {
- xt := fd.(protoreflect.ExtensionTypeDescriptor)
- if xd, ok := xt.Type().(*ExtensionDesc); ok {
- extDescs[fd.Number()] = xd
- }
- }
- return true
- })
-
- // Collect a set of unknown extension descriptors.
- extRanges := mr.Descriptor().ExtensionRanges()
- for b := mr.GetUnknown(); len(b) > 0; {
- num, _, n := protowire.ConsumeField(b)
- if extRanges.Has(num) && extDescs[num] == nil {
- extDescs[num] = nil
- }
- b = b[n:]
- }
-
- // Transpose the set of descriptors into a list.
- var xts []*ExtensionDesc
- for num, xt := range extDescs {
- if xt == nil {
- xt = &ExtensionDesc{Field: int32(num)}
- }
- xts = append(xts, xt)
- }
- return xts, nil
-}
-
-// isValidExtension reports whether xtd is a valid extension descriptor for md.
-func isValidExtension(md protoreflect.MessageDescriptor, xtd protoreflect.ExtensionTypeDescriptor) bool {
- return xtd.ContainingMessage() == md && md.ExtensionRanges().Has(xtd.Number())
-}
-
-// isScalarKind reports whether k is a protobuf scalar kind (except bytes).
-// This function exists for historical reasons since the representation of
-// scalars differs between v1 and v2, where v1 uses *T and v2 uses T.
-func isScalarKind(k reflect.Kind) bool {
- switch k {
- case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
- return true
- default:
- return false
- }
-}
-
-// clearUnknown removes unknown fields from m where remover.Has reports true.
-func clearUnknown(m protoreflect.Message, remover interface {
- Has(protoreflect.FieldNumber) bool
-}) {
- var bo protoreflect.RawFields
- for bi := m.GetUnknown(); len(bi) > 0; {
- num, _, n := protowire.ConsumeField(bi)
- if !remover.Has(num) {
- bo = append(bo, bi[:n]...)
- }
- bi = bi[n:]
- }
- if bi := m.GetUnknown(); len(bi) != len(bo) {
- m.SetUnknown(bo)
- }
-}
-
-type fieldNum protoreflect.FieldNumber
-
-func (n1 fieldNum) Has(n2 protoreflect.FieldNumber) bool {
- return protoreflect.FieldNumber(n1) == n2
-}
diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go
deleted file mode 100644
index dcdc2202..00000000
--- a/vendor/github.com/golang/protobuf/proto/properties.go
+++ /dev/null
@@ -1,306 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package proto
-
-import (
- "fmt"
- "reflect"
- "strconv"
- "strings"
- "sync"
-
- "google.golang.org/protobuf/reflect/protoreflect"
- "google.golang.org/protobuf/runtime/protoimpl"
-)
-
-// StructProperties represents protocol buffer type information for a
-// generated protobuf message in the open-struct API.
-//
-// Deprecated: Do not use.
-type StructProperties struct {
- // Prop are the properties for each field.
- //
- // Fields belonging to a oneof are stored in OneofTypes instead, with a
- // single Properties representing the parent oneof held here.
- //
- // The order of Prop matches the order of fields in the Go struct.
- // Struct fields that are not related to protobufs have a "XXX_" prefix
- // in the Properties.Name and must be ignored by the user.
- Prop []*Properties
-
- // OneofTypes contains information about the oneof fields in this message.
- // It is keyed by the protobuf field name.
- OneofTypes map[string]*OneofProperties
-}
-
-// Properties represents the type information for a protobuf message field.
-//
-// Deprecated: Do not use.
-type Properties struct {
- // Name is a placeholder name with little meaningful semantic value.
- // If the name has an "XXX_" prefix, the entire Properties must be ignored.
- Name string
- // OrigName is the protobuf field name or oneof name.
- OrigName string
- // JSONName is the JSON name for the protobuf field.
- JSONName string
- // Enum is a placeholder name for enums.
- // For historical reasons, this is neither the Go name for the enum,
- // nor the protobuf name for the enum.
- Enum string // Deprecated: Do not use.
- // Weak contains the full name of the weakly referenced message.
- Weak string
- // Wire is a string representation of the wire type.
- Wire string
- // WireType is the protobuf wire type for the field.
- WireType int
- // Tag is the protobuf field number.
- Tag int
- // Required reports whether this is a required field.
- Required bool
- // Optional reports whether this is a optional field.
- Optional bool
- // Repeated reports whether this is a repeated field.
- Repeated bool
- // Packed reports whether this is a packed repeated field of scalars.
- Packed bool
- // Proto3 reports whether this field operates under the proto3 syntax.
- Proto3 bool
- // Oneof reports whether this field belongs within a oneof.
- Oneof bool
-
- // Default is the default value in string form.
- Default string
- // HasDefault reports whether the field has a default value.
- HasDefault bool
-
- // MapKeyProp is the properties for the key field for a map field.
- MapKeyProp *Properties
- // MapValProp is the properties for the value field for a map field.
- MapValProp *Properties
-}
-
-// OneofProperties represents the type information for a protobuf oneof.
-//
-// Deprecated: Do not use.
-type OneofProperties struct {
- // Type is a pointer to the generated wrapper type for the field value.
- // This is nil for messages that are not in the open-struct API.
- Type reflect.Type
- // Field is the index into StructProperties.Prop for the containing oneof.
- Field int
- // Prop is the properties for the field.
- Prop *Properties
-}
-
-// String formats the properties in the protobuf struct field tag style.
-func (p *Properties) String() string {
- s := p.Wire
- s += "," + strconv.Itoa(p.Tag)
- if p.Required {
- s += ",req"
- }
- if p.Optional {
- s += ",opt"
- }
- if p.Repeated {
- s += ",rep"
- }
- if p.Packed {
- s += ",packed"
- }
- s += ",name=" + p.OrigName
- if p.JSONName != "" {
- s += ",json=" + p.JSONName
- }
- if len(p.Enum) > 0 {
- s += ",enum=" + p.Enum
- }
- if len(p.Weak) > 0 {
- s += ",weak=" + p.Weak
- }
- if p.Proto3 {
- s += ",proto3"
- }
- if p.Oneof {
- s += ",oneof"
- }
- if p.HasDefault {
- s += ",def=" + p.Default
- }
- return s
-}
-
-// Parse populates p by parsing a string in the protobuf struct field tag style.
-func (p *Properties) Parse(tag string) {
- // For example: "bytes,49,opt,name=foo,def=hello!"
- for len(tag) > 0 {
- i := strings.IndexByte(tag, ',')
- if i < 0 {
- i = len(tag)
- }
- switch s := tag[:i]; {
- case strings.HasPrefix(s, "name="):
- p.OrigName = s[len("name="):]
- case strings.HasPrefix(s, "json="):
- p.JSONName = s[len("json="):]
- case strings.HasPrefix(s, "enum="):
- p.Enum = s[len("enum="):]
- case strings.HasPrefix(s, "weak="):
- p.Weak = s[len("weak="):]
- case strings.Trim(s, "0123456789") == "":
- n, _ := strconv.ParseUint(s, 10, 32)
- p.Tag = int(n)
- case s == "opt":
- p.Optional = true
- case s == "req":
- p.Required = true
- case s == "rep":
- p.Repeated = true
- case s == "varint" || s == "zigzag32" || s == "zigzag64":
- p.Wire = s
- p.WireType = WireVarint
- case s == "fixed32":
- p.Wire = s
- p.WireType = WireFixed32
- case s == "fixed64":
- p.Wire = s
- p.WireType = WireFixed64
- case s == "bytes":
- p.Wire = s
- p.WireType = WireBytes
- case s == "group":
- p.Wire = s
- p.WireType = WireStartGroup
- case s == "packed":
- p.Packed = true
- case s == "proto3":
- p.Proto3 = true
- case s == "oneof":
- p.Oneof = true
- case strings.HasPrefix(s, "def="):
- // The default tag is special in that everything afterwards is the
- // default regardless of the presence of commas.
- p.HasDefault = true
- p.Default, i = tag[len("def="):], len(tag)
- }
- tag = strings.TrimPrefix(tag[i:], ",")
- }
-}
-
-// Init populates the properties from a protocol buffer struct tag.
-//
-// Deprecated: Do not use.
-func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
- p.Name = name
- p.OrigName = name
- if tag == "" {
- return
- }
- p.Parse(tag)
-
- if typ != nil && typ.Kind() == reflect.Map {
- p.MapKeyProp = new(Properties)
- p.MapKeyProp.Init(nil, "Key", f.Tag.Get("protobuf_key"), nil)
- p.MapValProp = new(Properties)
- p.MapValProp.Init(nil, "Value", f.Tag.Get("protobuf_val"), nil)
- }
-}
-
-var propertiesCache sync.Map // map[reflect.Type]*StructProperties
-
-// GetProperties returns the list of properties for the type represented by t,
-// which must be a generated protocol buffer message in the open-struct API,
-// where protobuf message fields are represented by exported Go struct fields.
-//
-// Deprecated: Use protobuf reflection instead.
-func GetProperties(t reflect.Type) *StructProperties {
- if p, ok := propertiesCache.Load(t); ok {
- return p.(*StructProperties)
- }
- p, _ := propertiesCache.LoadOrStore(t, newProperties(t))
- return p.(*StructProperties)
-}
-
-func newProperties(t reflect.Type) *StructProperties {
- if t.Kind() != reflect.Struct {
- panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t))
- }
-
- var hasOneof bool
- prop := new(StructProperties)
-
- // Construct a list of properties for each field in the struct.
- for i := 0; i < t.NumField(); i++ {
- p := new(Properties)
- f := t.Field(i)
- tagField := f.Tag.Get("protobuf")
- p.Init(f.Type, f.Name, tagField, &f)
-
- tagOneof := f.Tag.Get("protobuf_oneof")
- if tagOneof != "" {
- hasOneof = true
- p.OrigName = tagOneof
- }
-
- // Rename unrelated struct fields with the "XXX_" prefix since so much
- // user code simply checks for this to exclude special fields.
- if tagField == "" && tagOneof == "" && !strings.HasPrefix(p.Name, "XXX_") {
- p.Name = "XXX_" + p.Name
- p.OrigName = "XXX_" + p.OrigName
- } else if p.Weak != "" {
- p.Name = p.OrigName // avoid possible "XXX_" prefix on weak field
- }
-
- prop.Prop = append(prop.Prop, p)
- }
-
- // Construct a mapping of oneof field names to properties.
- if hasOneof {
- var oneofWrappers []interface{}
- if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofFuncs"); ok {
- oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[3].Interface().([]interface{})
- }
- if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofWrappers"); ok {
- oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[0].Interface().([]interface{})
- }
- if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(protoreflect.ProtoMessage); ok {
- if m, ok := m.ProtoReflect().(interface{ ProtoMessageInfo() *protoimpl.MessageInfo }); ok {
- oneofWrappers = m.ProtoMessageInfo().OneofWrappers
- }
- }
-
- prop.OneofTypes = make(map[string]*OneofProperties)
- for _, wrapper := range oneofWrappers {
- p := &OneofProperties{
- Type: reflect.ValueOf(wrapper).Type(), // *T
- Prop: new(Properties),
- }
- f := p.Type.Elem().Field(0)
- p.Prop.Name = f.Name
- p.Prop.Parse(f.Tag.Get("protobuf"))
-
- // Determine the struct field that contains this oneof.
- // Each wrapper is assignable to exactly one parent field.
- var foundOneof bool
- for i := 0; i < t.NumField() && !foundOneof; i++ {
- if p.Type.AssignableTo(t.Field(i).Type) {
- p.Field = i
- foundOneof = true
- }
- }
- if !foundOneof {
- panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t))
- }
- prop.OneofTypes[p.Prop.OrigName] = p
- }
- }
-
- return prop
-}
-
-func (sp *StructProperties) Len() int { return len(sp.Prop) }
-func (sp *StructProperties) Less(i, j int) bool { return false }
-func (sp *StructProperties) Swap(i, j int) { return }
diff --git a/vendor/github.com/golang/protobuf/proto/proto.go b/vendor/github.com/golang/protobuf/proto/proto.go
deleted file mode 100644
index 5aee89c3..00000000
--- a/vendor/github.com/golang/protobuf/proto/proto.go
+++ /dev/null
@@ -1,167 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package proto provides functionality for handling protocol buffer messages.
-// In particular, it provides marshaling and unmarshaling between a protobuf
-// message and the binary wire format.
-//
-// See https://developers.google.com/protocol-buffers/docs/gotutorial for
-// more information.
-//
-// Deprecated: Use the "google.golang.org/protobuf/proto" package instead.
-package proto
-
-import (
- protoV2 "google.golang.org/protobuf/proto"
- "google.golang.org/protobuf/reflect/protoreflect"
- "google.golang.org/protobuf/runtime/protoiface"
- "google.golang.org/protobuf/runtime/protoimpl"
-)
-
-const (
- ProtoPackageIsVersion1 = true
- ProtoPackageIsVersion2 = true
- ProtoPackageIsVersion3 = true
- ProtoPackageIsVersion4 = true
-)
-
-// GeneratedEnum is any enum type generated by protoc-gen-go
-// which is a named int32 kind.
-// This type exists for documentation purposes.
-type GeneratedEnum interface{}
-
-// GeneratedMessage is any message type generated by protoc-gen-go
-// which is a pointer to a named struct kind.
-// This type exists for documentation purposes.
-type GeneratedMessage interface{}
-
-// Message is a protocol buffer message.
-//
-// This is the v1 version of the message interface and is marginally better
-// than an empty interface as it lacks any method to programmatically interact
-// with the contents of the message.
-//
-// A v2 message is declared in "google.golang.org/protobuf/proto".Message and
-// exposes protobuf reflection as a first-class feature of the interface.
-//
-// To convert a v1 message to a v2 message, use the MessageV2 function.
-// To convert a v2 message to a v1 message, use the MessageV1 function.
-type Message = protoiface.MessageV1
-
-// MessageV1 converts either a v1 or v2 message to a v1 message.
-// It returns nil if m is nil.
-func MessageV1(m GeneratedMessage) protoiface.MessageV1 {
- return protoimpl.X.ProtoMessageV1Of(m)
-}
-
-// MessageV2 converts either a v1 or v2 message to a v2 message.
-// It returns nil if m is nil.
-func MessageV2(m GeneratedMessage) protoV2.Message {
- return protoimpl.X.ProtoMessageV2Of(m)
-}
-
-// MessageReflect returns a reflective view for a message.
-// It returns nil if m is nil.
-func MessageReflect(m Message) protoreflect.Message {
- return protoimpl.X.MessageOf(m)
-}
-
-// Marshaler is implemented by messages that can marshal themselves.
-// This interface is used by the following functions: Size, Marshal,
-// Buffer.Marshal, and Buffer.EncodeMessage.
-//
-// Deprecated: Do not implement.
-type Marshaler interface {
-	// Marshal returns the encoded bytes of the message.
- // It should be deterministic and emit valid protobuf wire data.
- // The caller takes ownership of the returned buffer.
- Marshal() ([]byte, error)
-}
-
-// Unmarshaler is implemented by messages that can unmarshal themselves.
-// This interface is used by the following functions: Unmarshal, UnmarshalMerge,
-// Buffer.Unmarshal, Buffer.DecodeMessage, and Buffer.DecodeGroup.
-//
-// Deprecated: Do not implement.
-type Unmarshaler interface {
- // Unmarshal parses the encoded bytes of the protobuf wire input.
-	// The provided buffer is only valid during the method call.
- // It should not reset the receiver message.
- Unmarshal([]byte) error
-}
-
-// Merger is implemented by messages that can merge themselves.
-// This interface is used by the following functions: Clone and Merge.
-//
-// Deprecated: Do not implement.
-type Merger interface {
- // Merge merges the contents of src into the receiver message.
- // It clones all data structures in src such that it aliases no mutable
- // memory referenced by src.
- Merge(src Message)
-}
-
-// RequiredNotSetError is an error type returned when
-// marshaling or unmarshaling a message with missing required fields.
-type RequiredNotSetError struct {
- err error
-}
-
-func (e *RequiredNotSetError) Error() string {
- if e.err != nil {
- return e.err.Error()
- }
- return "proto: required field not set"
-}
-func (e *RequiredNotSetError) RequiredNotSet() bool {
- return true
-}
-
-func checkRequiredNotSet(m protoV2.Message) error {
- if err := protoV2.CheckInitialized(m); err != nil {
- return &RequiredNotSetError{err: err}
- }
- return nil
-}
-
-// Clone returns a deep copy of src.
-func Clone(src Message) Message {
- return MessageV1(protoV2.Clone(MessageV2(src)))
-}
-
-// Merge merges src into dst, which must be messages of the same type.
-//
-// Populated scalar fields in src are copied to dst, while populated
-// singular messages in src are merged into dst by recursively calling Merge.
-// The elements of every list field in src are appended to the corresponding
-// list fields in dst. The entries of every map field in src are copied into
-// the corresponding map field in dst, possibly replacing existing entries.
-// The unknown fields of src are appended to the unknown fields of dst.
-func Merge(dst, src Message) {
- protoV2.Merge(MessageV2(dst), MessageV2(src))
-}
-
-// Equal reports whether two messages are equal.
-// If two messages marshal to the same bytes under deterministic serialization,
-// then Equal is guaranteed to report true.
-//
-// Two messages are equal if they are the same protobuf message type,
-// have the same set of populated known and extension field values,
-// and the same set of unknown field values.
-//
-// Scalar values are compared with the equivalent of the == operator in Go,
-// except bytes values which are compared using bytes.Equal and
-// floating point values which specially treat NaNs as equal.
-// Message values are compared by recursively calling Equal.
-// Lists are equal if each element value is also equal.
-// Maps are equal if they have the same set of keys, where the pair of values
-// for each key is also equal.
-func Equal(x, y Message) bool {
- return protoV2.Equal(MessageV2(x), MessageV2(y))
-}
-
-func isMessageSet(md protoreflect.MessageDescriptor) bool {
- ms, ok := md.(interface{ IsMessageSet() bool })
- return ok && ms.IsMessageSet()
-}
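Reviewer note: everything this shim exported is a thin wrapper over google.golang.org/protobuf/proto, so dropping the vendored copy only requires switching the import path. A hedged sketch of the direct v2 calls (wrapperspb is illustrative):

    package main

    import (
    	"fmt"

    	"google.golang.org/protobuf/proto"
    	"google.golang.org/protobuf/types/known/wrapperspb"
    )

    func main() {
    	a := wrapperspb.String("garm")
    	b := proto.Clone(a).(*wrapperspb.StringValue) // deep copy, as the v1 Clone wrapper did
    	fmt.Println(proto.Equal(a, b))                // true

    	proto.Merge(b, wrapperspb.String("merged")) // populated scalar fields in src overwrite dst
    	fmt.Println(b.GetValue())                   // "merged"
    }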
diff --git a/vendor/github.com/golang/protobuf/proto/registry.go b/vendor/github.com/golang/protobuf/proto/registry.go
deleted file mode 100644
index 066b4323..00000000
--- a/vendor/github.com/golang/protobuf/proto/registry.go
+++ /dev/null
@@ -1,317 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package proto
-
-import (
- "bytes"
- "compress/gzip"
- "fmt"
- "io/ioutil"
- "reflect"
- "strings"
- "sync"
-
- "google.golang.org/protobuf/reflect/protodesc"
- "google.golang.org/protobuf/reflect/protoreflect"
- "google.golang.org/protobuf/reflect/protoregistry"
- "google.golang.org/protobuf/runtime/protoimpl"
-)
-
-// filePath is the path to the proto source file.
-type filePath = string // e.g., "google/protobuf/descriptor.proto"
-
-// fileDescGZIP is the compressed contents of the encoded FileDescriptorProto.
-type fileDescGZIP = []byte
-
-var fileCache sync.Map // map[filePath]fileDescGZIP
-
-// RegisterFile is called from generated code to register the compressed
-// FileDescriptorProto with the file path for a proto source file.
-//
-// Deprecated: Use protoregistry.GlobalFiles.RegisterFile instead.
-func RegisterFile(s filePath, d fileDescGZIP) {
- // Decompress the descriptor.
- zr, err := gzip.NewReader(bytes.NewReader(d))
- if err != nil {
- panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err))
- }
- b, err := ioutil.ReadAll(zr)
- if err != nil {
- panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err))
- }
-
- // Construct a protoreflect.FileDescriptor from the raw descriptor.
- // Note that DescBuilder.Build automatically registers the constructed
- // file descriptor with the v2 registry.
- protoimpl.DescBuilder{RawDescriptor: b}.Build()
-
- // Locally cache the raw descriptor form for the file.
- fileCache.Store(s, d)
-}
-
-// FileDescriptor returns the compressed FileDescriptorProto given the file path
-// for a proto source file. It returns nil if not found.
-//
-// Deprecated: Use protoregistry.GlobalFiles.FindFileByPath instead.
-func FileDescriptor(s filePath) fileDescGZIP {
- if v, ok := fileCache.Load(s); ok {
- return v.(fileDescGZIP)
- }
-
- // Find the descriptor in the v2 registry.
- var b []byte
- if fd, _ := protoregistry.GlobalFiles.FindFileByPath(s); fd != nil {
- b, _ = Marshal(protodesc.ToFileDescriptorProto(fd))
- }
-
- // Locally cache the raw descriptor form for the file.
- if len(b) > 0 {
- v, _ := fileCache.LoadOrStore(s, protoimpl.X.CompressGZIP(b))
- return v.(fileDescGZIP)
- }
- return nil
-}
-
-// enumName is the name of an enum. For historical reasons, the enum name is
-// neither the full Go name nor the full protobuf name of the enum.
-// The name is the dot-separated combination of just the proto package that the
-// enum is declared within followed by the Go type name of the generated enum.
-type enumName = string // e.g., "my.proto.package.GoMessage_GoEnum"
-
-// enumsByName maps enum values by name to their numeric counterpart.
-type enumsByName = map[string]int32
-
-// enumsByNumber maps enum values by number to their name counterpart.
-type enumsByNumber = map[int32]string
-
-var enumCache sync.Map // map[enumName]enumsByName
-var numFilesCache sync.Map // map[protoreflect.FullName]int
-
-// RegisterEnum is called from the generated code to register the mapping of
-// enum value names to enum numbers for the enum identified by s.
-//
-// Deprecated: Use protoregistry.GlobalTypes.RegisterEnum instead.
-func RegisterEnum(s enumName, _ enumsByNumber, m enumsByName) {
- if _, ok := enumCache.Load(s); ok {
- panic("proto: duplicate enum registered: " + s)
- }
- enumCache.Store(s, m)
-
- // This does not forward registration to the v2 registry since this API
- // lacks sufficient information to construct a complete v2 enum descriptor.
-}
-
-// EnumValueMap returns the mapping from enum value names to enum numbers for
-// the enum of the given name. It returns nil if not found.
-//
-// Deprecated: Use protoregistry.GlobalTypes.FindEnumByName instead.
-func EnumValueMap(s enumName) enumsByName {
- if v, ok := enumCache.Load(s); ok {
- return v.(enumsByName)
- }
-
- // Check whether the cache is stale. If the number of files in the current
- // package differs, then it means that some enums may have been recently
- // registered upstream that we do not know about.
- var protoPkg protoreflect.FullName
- if i := strings.LastIndexByte(s, '.'); i >= 0 {
- protoPkg = protoreflect.FullName(s[:i])
- }
- v, _ := numFilesCache.Load(protoPkg)
- numFiles, _ := v.(int)
- if protoregistry.GlobalFiles.NumFilesByPackage(protoPkg) == numFiles {
- return nil // cache is up-to-date; was not found earlier
- }
-
- // Update the enum cache for all enums declared in the given proto package.
- numFiles = 0
- protoregistry.GlobalFiles.RangeFilesByPackage(protoPkg, func(fd protoreflect.FileDescriptor) bool {
- walkEnums(fd, func(ed protoreflect.EnumDescriptor) {
- name := protoimpl.X.LegacyEnumName(ed)
- if _, ok := enumCache.Load(name); !ok {
- m := make(enumsByName)
- evs := ed.Values()
- for i := evs.Len() - 1; i >= 0; i-- {
- ev := evs.Get(i)
- m[string(ev.Name())] = int32(ev.Number())
- }
- enumCache.LoadOrStore(name, m)
- }
- })
- numFiles++
- return true
- })
- numFilesCache.Store(protoPkg, numFiles)
-
- // Check cache again for enum map.
- if v, ok := enumCache.Load(s); ok {
- return v.(enumsByName)
- }
- return nil
-}
-
-// walkEnums recursively walks all enums declared in d.
-func walkEnums(d interface {
- Enums() protoreflect.EnumDescriptors
- Messages() protoreflect.MessageDescriptors
-}, f func(protoreflect.EnumDescriptor)) {
- eds := d.Enums()
- for i := eds.Len() - 1; i >= 0; i-- {
- f(eds.Get(i))
- }
- mds := d.Messages()
- for i := mds.Len() - 1; i >= 0; i-- {
- walkEnums(mds.Get(i), f)
- }
-}
-
-// messageName is the full name of protobuf message.
-type messageName = string
-
-var messageTypeCache sync.Map // map[messageName]reflect.Type
-
-// RegisterType is called from generated code to register the message Go type
-// for a message of the given name.
-//
-// Deprecated: Use protoregistry.GlobalTypes.RegisterMessage instead.
-func RegisterType(m Message, s messageName) {
- mt := protoimpl.X.LegacyMessageTypeOf(m, protoreflect.FullName(s))
- if err := protoregistry.GlobalTypes.RegisterMessage(mt); err != nil {
- panic(err)
- }
- messageTypeCache.Store(s, reflect.TypeOf(m))
-}
-
-// RegisterMapType is called from generated code to register the Go map type
-// for a protobuf message representing a map entry.
-//
-// Deprecated: Do not use.
-func RegisterMapType(m interface{}, s messageName) {
- t := reflect.TypeOf(m)
- if t.Kind() != reflect.Map {
- panic(fmt.Sprintf("invalid map kind: %v", t))
- }
- if _, ok := messageTypeCache.Load(s); ok {
- panic(fmt.Errorf("proto: duplicate proto message registered: %s", s))
- }
- messageTypeCache.Store(s, t)
-}
-
-// MessageType returns the message type for a named message.
-// It returns nil if not found.
-//
-// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead.
-func MessageType(s messageName) reflect.Type {
- if v, ok := messageTypeCache.Load(s); ok {
- return v.(reflect.Type)
- }
-
- // Derive the message type from the v2 registry.
- var t reflect.Type
- if mt, _ := protoregistry.GlobalTypes.FindMessageByName(protoreflect.FullName(s)); mt != nil {
- t = messageGoType(mt)
- }
-
- // If we could not get a concrete type, it is possible that it is a
- // pseudo-message for a map entry.
- if t == nil {
- d, _ := protoregistry.GlobalFiles.FindDescriptorByName(protoreflect.FullName(s))
- if md, _ := d.(protoreflect.MessageDescriptor); md != nil && md.IsMapEntry() {
- kt := goTypeForField(md.Fields().ByNumber(1))
- vt := goTypeForField(md.Fields().ByNumber(2))
- t = reflect.MapOf(kt, vt)
- }
- }
-
- // Locally cache the message type for the given name.
- if t != nil {
- v, _ := messageTypeCache.LoadOrStore(s, t)
- return v.(reflect.Type)
- }
- return nil
-}
-
-func goTypeForField(fd protoreflect.FieldDescriptor) reflect.Type {
- switch k := fd.Kind(); k {
- case protoreflect.EnumKind:
- if et, _ := protoregistry.GlobalTypes.FindEnumByName(fd.Enum().FullName()); et != nil {
- return enumGoType(et)
- }
- return reflect.TypeOf(protoreflect.EnumNumber(0))
- case protoreflect.MessageKind, protoreflect.GroupKind:
- if mt, _ := protoregistry.GlobalTypes.FindMessageByName(fd.Message().FullName()); mt != nil {
- return messageGoType(mt)
- }
- return reflect.TypeOf((*protoreflect.Message)(nil)).Elem()
- default:
- return reflect.TypeOf(fd.Default().Interface())
- }
-}
-
-func enumGoType(et protoreflect.EnumType) reflect.Type {
- return reflect.TypeOf(et.New(0))
-}
-
-func messageGoType(mt protoreflect.MessageType) reflect.Type {
- return reflect.TypeOf(MessageV1(mt.Zero().Interface()))
-}
-
-// MessageName returns the full protobuf name for the given message type.
-//
-// Deprecated: Use protoreflect.MessageDescriptor.FullName instead.
-func MessageName(m Message) messageName {
- if m == nil {
- return ""
- }
- if m, ok := m.(interface{ XXX_MessageName() messageName }); ok {
- return m.XXX_MessageName()
- }
- return messageName(protoimpl.X.MessageDescriptorOf(m).FullName())
-}
-
-// RegisterExtension is called from the generated code to register
-// the extension descriptor.
-//
-// Deprecated: Use protoregistry.GlobalTypes.RegisterExtension instead.
-func RegisterExtension(d *ExtensionDesc) {
- if err := protoregistry.GlobalTypes.RegisterExtension(d); err != nil {
- panic(err)
- }
-}
-
-type extensionsByNumber = map[int32]*ExtensionDesc
-
-var extensionCache sync.Map // map[messageName]extensionsByNumber
-
-// RegisteredExtensions returns a map of the registered extensions for the
-// provided protobuf message, indexed by the extension field number.
-//
-// Deprecated: Use protoregistry.GlobalTypes.RangeExtensionsByMessage instead.
-func RegisteredExtensions(m Message) extensionsByNumber {
- // Check whether the cache is stale. If the number of extensions for
- // the given message differs, then it means that some extensions were
- // recently registered upstream that we do not know about.
- s := MessageName(m)
- v, _ := extensionCache.Load(s)
- xs, _ := v.(extensionsByNumber)
- if protoregistry.GlobalTypes.NumExtensionsByMessage(protoreflect.FullName(s)) == len(xs) {
- return xs // cache is up-to-date
- }
-
- // Cache is stale, re-compute the extensions map.
- xs = make(extensionsByNumber)
- protoregistry.GlobalTypes.RangeExtensionsByMessage(protoreflect.FullName(s), func(xt protoreflect.ExtensionType) bool {
- if xd, ok := xt.(*ExtensionDesc); ok {
- xs[int32(xt.TypeDescriptor().Number())] = xd
- } else {
- // TODO: This implies that the protoreflect.ExtensionType is a
- // custom type not generated by protoc-gen-go. We could try and
- // convert the type to an ExtensionDesc.
- }
- return true
- })
- extensionCache.Store(s, xs)
- return xs
-}
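Reviewer note: the deprecated registration helpers above forward to (or shadow) the v2 global registries, which callers can query directly. A minimal lookup sketch, assuming the relevant generated package has been imported for its registration side effect:

    package main

    import (
    	"fmt"

    	"google.golang.org/protobuf/reflect/protoreflect"
    	"google.golang.org/protobuf/reflect/protoregistry"
    	_ "google.golang.org/protobuf/types/known/timestamppb" // registers the type in init()
    )

    func main() {
    	mt, err := protoregistry.GlobalTypes.FindMessageByName(
    		protoreflect.FullName("google.protobuf.Timestamp"))
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(mt.Descriptor().FullName()) // google.protobuf.Timestamp
    }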
diff --git a/vendor/github.com/golang/protobuf/proto/text_decode.go b/vendor/github.com/golang/protobuf/proto/text_decode.go
deleted file mode 100644
index 47eb3e44..00000000
--- a/vendor/github.com/golang/protobuf/proto/text_decode.go
+++ /dev/null
@@ -1,801 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package proto
-
-import (
- "encoding"
- "errors"
- "fmt"
- "reflect"
- "strconv"
- "strings"
- "unicode/utf8"
-
- "google.golang.org/protobuf/encoding/prototext"
- protoV2 "google.golang.org/protobuf/proto"
- "google.golang.org/protobuf/reflect/protoreflect"
- "google.golang.org/protobuf/reflect/protoregistry"
-)
-
-const wrapTextUnmarshalV2 = false
-
-// ParseError is returned by UnmarshalText.
-type ParseError struct {
- Message string
-
- // Deprecated: Do not use.
- Line, Offset int
-}
-
-func (e *ParseError) Error() string {
- if wrapTextUnmarshalV2 {
- return e.Message
- }
- if e.Line == 1 {
- return fmt.Sprintf("line 1.%d: %v", e.Offset, e.Message)
- }
- return fmt.Sprintf("line %d: %v", e.Line, e.Message)
-}
-
-// UnmarshalText parses a proto text formatted string into m.
-func UnmarshalText(s string, m Message) error {
- if u, ok := m.(encoding.TextUnmarshaler); ok {
- return u.UnmarshalText([]byte(s))
- }
-
- m.Reset()
- mi := MessageV2(m)
-
- if wrapTextUnmarshalV2 {
- err := prototext.UnmarshalOptions{
- AllowPartial: true,
- }.Unmarshal([]byte(s), mi)
- if err != nil {
- return &ParseError{Message: err.Error()}
- }
- return checkRequiredNotSet(mi)
- } else {
- if err := newTextParser(s).unmarshalMessage(mi.ProtoReflect(), ""); err != nil {
- return err
- }
- return checkRequiredNotSet(mi)
- }
-}
-
-type textParser struct {
- s string // remaining input
- done bool // whether the parsing is finished (success or error)
- backed bool // whether back() was called
- offset, line int
- cur token
-}
-
-type token struct {
- value string
- err *ParseError
- line int // line number
- offset int // byte number from start of input, not start of line
- unquoted string // the unquoted version of value, if it was a quoted string
-}
-
-func newTextParser(s string) *textParser {
- p := new(textParser)
- p.s = s
- p.line = 1
- p.cur.line = 1
- return p
-}
-
-func (p *textParser) unmarshalMessage(m protoreflect.Message, terminator string) (err error) {
- md := m.Descriptor()
- fds := md.Fields()
-
- // A struct is a sequence of "name: value", terminated by one of
- // '>' or '}', or the end of the input. A name may also be
- // "[extension]" or "[type/url]".
- //
- // The whole struct can also be an expanded Any message, like:
- // [type/url] < ... struct contents ... >
- seen := make(map[protoreflect.FieldNumber]bool)
- for {
- tok := p.next()
- if tok.err != nil {
- return tok.err
- }
- if tok.value == terminator {
- break
- }
- if tok.value == "[" {
- if err := p.unmarshalExtensionOrAny(m, seen); err != nil {
- return err
- }
- continue
- }
-
- // This is a normal, non-extension field.
- name := protoreflect.Name(tok.value)
- fd := fds.ByName(name)
- switch {
- case fd == nil:
- gd := fds.ByName(protoreflect.Name(strings.ToLower(string(name))))
- if gd != nil && gd.Kind() == protoreflect.GroupKind && gd.Message().Name() == name {
- fd = gd
- }
- case fd.Kind() == protoreflect.GroupKind && fd.Message().Name() != name:
- fd = nil
- case fd.IsWeak() && fd.Message().IsPlaceholder():
- fd = nil
- }
- if fd == nil {
- typeName := string(md.FullName())
- if m, ok := m.Interface().(Message); ok {
- t := reflect.TypeOf(m)
- if t.Kind() == reflect.Ptr {
- typeName = t.Elem().String()
- }
- }
- return p.errorf("unknown field name %q in %v", name, typeName)
- }
- if od := fd.ContainingOneof(); od != nil && m.WhichOneof(od) != nil {
- return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, od.Name())
- }
- if fd.Cardinality() != protoreflect.Repeated && seen[fd.Number()] {
- return p.errorf("non-repeated field %q was repeated", fd.Name())
- }
- seen[fd.Number()] = true
-
- // Consume any colon.
- if err := p.checkForColon(fd); err != nil {
- return err
- }
-
- // Parse into the field.
- v := m.Get(fd)
- if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) {
- v = m.Mutable(fd)
- }
- if v, err = p.unmarshalValue(v, fd); err != nil {
- return err
- }
- m.Set(fd, v)
-
- if err := p.consumeOptionalSeparator(); err != nil {
- return err
- }
- }
- return nil
-}
-
-func (p *textParser) unmarshalExtensionOrAny(m protoreflect.Message, seen map[protoreflect.FieldNumber]bool) error {
- name, err := p.consumeExtensionOrAnyName()
- if err != nil {
- return err
- }
-
- // If it contains a slash, it's an Any type URL.
- if slashIdx := strings.LastIndex(name, "/"); slashIdx >= 0 {
- tok := p.next()
- if tok.err != nil {
- return tok.err
- }
- // consume an optional colon
- if tok.value == ":" {
- tok = p.next()
- if tok.err != nil {
- return tok.err
- }
- }
-
- var terminator string
- switch tok.value {
- case "<":
- terminator = ">"
- case "{":
- terminator = "}"
- default:
- return p.errorf("expected '{' or '<', found %q", tok.value)
- }
-
- mt, err := protoregistry.GlobalTypes.FindMessageByURL(name)
- if err != nil {
- return p.errorf("unrecognized message %q in google.protobuf.Any", name[slashIdx+len("/"):])
- }
- m2 := mt.New()
- if err := p.unmarshalMessage(m2, terminator); err != nil {
- return err
- }
- b, err := protoV2.Marshal(m2.Interface())
- if err != nil {
- return p.errorf("failed to marshal message of type %q: %v", name[slashIdx+len("/"):], err)
- }
-
- urlFD := m.Descriptor().Fields().ByName("type_url")
- valFD := m.Descriptor().Fields().ByName("value")
- if seen[urlFD.Number()] {
- return p.errorf("Any message unpacked multiple times, or %q already set", urlFD.Name())
- }
- if seen[valFD.Number()] {
- return p.errorf("Any message unpacked multiple times, or %q already set", valFD.Name())
- }
- m.Set(urlFD, protoreflect.ValueOfString(name))
- m.Set(valFD, protoreflect.ValueOfBytes(b))
- seen[urlFD.Number()] = true
- seen[valFD.Number()] = true
- return nil
- }
-
- xname := protoreflect.FullName(name)
- xt, _ := protoregistry.GlobalTypes.FindExtensionByName(xname)
- if xt == nil && isMessageSet(m.Descriptor()) {
- xt, _ = protoregistry.GlobalTypes.FindExtensionByName(xname.Append("message_set_extension"))
- }
- if xt == nil {
- return p.errorf("unrecognized extension %q", name)
- }
- fd := xt.TypeDescriptor()
- if fd.ContainingMessage().FullName() != m.Descriptor().FullName() {
- return p.errorf("extension field %q does not extend message %q", name, m.Descriptor().FullName())
- }
-
- if err := p.checkForColon(fd); err != nil {
- return err
- }
-
- v := m.Get(fd)
- if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) {
- v = m.Mutable(fd)
- }
- v, err = p.unmarshalValue(v, fd)
- if err != nil {
- return err
- }
- m.Set(fd, v)
- return p.consumeOptionalSeparator()
-}
-
-func (p *textParser) unmarshalValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
- tok := p.next()
- if tok.err != nil {
- return v, tok.err
- }
- if tok.value == "" {
- return v, p.errorf("unexpected EOF")
- }
-
- switch {
- case fd.IsList():
- lv := v.List()
- var err error
- if tok.value == "[" {
- // Repeated field with list notation, like [1,2,3].
- for {
- vv := lv.NewElement()
- vv, err = p.unmarshalSingularValue(vv, fd)
- if err != nil {
- return v, err
- }
- lv.Append(vv)
-
- tok := p.next()
- if tok.err != nil {
- return v, tok.err
- }
- if tok.value == "]" {
- break
- }
- if tok.value != "," {
- return v, p.errorf("Expected ']' or ',' found %q", tok.value)
- }
- }
- return v, nil
- }
-
- // One value of the repeated field.
- p.back()
- vv := lv.NewElement()
- vv, err = p.unmarshalSingularValue(vv, fd)
- if err != nil {
- return v, err
- }
- lv.Append(vv)
- return v, nil
- case fd.IsMap():
- // The map entry should be this sequence of tokens:
- // < key : KEY value : VALUE >
- // However, implementations may omit key or value, and technically
- // we should support them in any order.
- var terminator string
- switch tok.value {
- case "<":
- terminator = ">"
- case "{":
- terminator = "}"
- default:
- return v, p.errorf("expected '{' or '<', found %q", tok.value)
- }
-
- keyFD := fd.MapKey()
- valFD := fd.MapValue()
-
- mv := v.Map()
- kv := keyFD.Default()
- vv := mv.NewValue()
- for {
- tok := p.next()
- if tok.err != nil {
- return v, tok.err
- }
- if tok.value == terminator {
- break
- }
- var err error
- switch tok.value {
- case "key":
- if err := p.consumeToken(":"); err != nil {
- return v, err
- }
- if kv, err = p.unmarshalSingularValue(kv, keyFD); err != nil {
- return v, err
- }
- if err := p.consumeOptionalSeparator(); err != nil {
- return v, err
- }
- case "value":
- if err := p.checkForColon(valFD); err != nil {
- return v, err
- }
- if vv, err = p.unmarshalSingularValue(vv, valFD); err != nil {
- return v, err
- }
- if err := p.consumeOptionalSeparator(); err != nil {
- return v, err
- }
- default:
- p.back()
- return v, p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value)
- }
- }
- mv.Set(kv.MapKey(), vv)
- return v, nil
- default:
- p.back()
- return p.unmarshalSingularValue(v, fd)
- }
-}
-
-func (p *textParser) unmarshalSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
- tok := p.next()
- if tok.err != nil {
- return v, tok.err
- }
- if tok.value == "" {
- return v, p.errorf("unexpected EOF")
- }
-
- switch fd.Kind() {
- case protoreflect.BoolKind:
- switch tok.value {
- case "true", "1", "t", "True":
- return protoreflect.ValueOfBool(true), nil
- case "false", "0", "f", "False":
- return protoreflect.ValueOfBool(false), nil
- }
- case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
- if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
- return protoreflect.ValueOfInt32(int32(x)), nil
- }
-
-		// The C++ parser accepts large positive hex numbers that use
- // two's complement arithmetic to represent negative numbers.
- // This feature is here for backwards compatibility with C++.
- if strings.HasPrefix(tok.value, "0x") {
- if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
- return protoreflect.ValueOfInt32(int32(-(int64(^x) + 1))), nil
- }
- }
- case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
- if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
- return protoreflect.ValueOfInt64(int64(x)), nil
- }
-
-		// The C++ parser accepts large positive hex numbers that use
- // two's complement arithmetic to represent negative numbers.
- // This feature is here for backwards compatibility with C++.
- if strings.HasPrefix(tok.value, "0x") {
- if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
- return protoreflect.ValueOfInt64(int64(-(int64(^x) + 1))), nil
- }
- }
- case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
- if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
- return protoreflect.ValueOfUint32(uint32(x)), nil
- }
- case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
- if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
- return protoreflect.ValueOfUint64(uint64(x)), nil
- }
- case protoreflect.FloatKind:
- // Ignore 'f' for compatibility with output generated by C++,
- // but don't remove 'f' when the value is "-inf" or "inf".
- v := tok.value
- if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" {
- v = v[:len(v)-len("f")]
- }
- if x, err := strconv.ParseFloat(v, 32); err == nil {
- return protoreflect.ValueOfFloat32(float32(x)), nil
- }
- case protoreflect.DoubleKind:
- // Ignore 'f' for compatibility with output generated by C++,
- // but don't remove 'f' when the value is "-inf" or "inf".
- v := tok.value
- if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" {
- v = v[:len(v)-len("f")]
- }
- if x, err := strconv.ParseFloat(v, 64); err == nil {
- return protoreflect.ValueOfFloat64(float64(x)), nil
- }
- case protoreflect.StringKind:
- if isQuote(tok.value[0]) {
- return protoreflect.ValueOfString(tok.unquoted), nil
- }
- case protoreflect.BytesKind:
- if isQuote(tok.value[0]) {
- return protoreflect.ValueOfBytes([]byte(tok.unquoted)), nil
- }
- case protoreflect.EnumKind:
- if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
- return protoreflect.ValueOfEnum(protoreflect.EnumNumber(x)), nil
- }
- vd := fd.Enum().Values().ByName(protoreflect.Name(tok.value))
- if vd != nil {
- return protoreflect.ValueOfEnum(vd.Number()), nil
- }
- case protoreflect.MessageKind, protoreflect.GroupKind:
- var terminator string
- switch tok.value {
- case "{":
- terminator = "}"
- case "<":
- terminator = ">"
- default:
- return v, p.errorf("expected '{' or '<', found %q", tok.value)
- }
- err := p.unmarshalMessage(v.Message(), terminator)
- return v, err
- default:
- panic(fmt.Sprintf("invalid kind %v", fd.Kind()))
- }
- return v, p.errorf("invalid %v: %v", fd.Kind(), tok.value)
-}
-
-// Consume a ':' from the input stream (if the next token is a colon),
-// returning an error if a colon is needed but not present.
-func (p *textParser) checkForColon(fd protoreflect.FieldDescriptor) *ParseError {
- tok := p.next()
- if tok.err != nil {
- return tok.err
- }
- if tok.value != ":" {
- if fd.Message() == nil {
- return p.errorf("expected ':', found %q", tok.value)
- }
- p.back()
- }
- return nil
-}
-
-// consumeExtensionOrAnyName consumes an extension name or an Any type URL and
-// the following ']'. It returns the name or URL consumed.
-func (p *textParser) consumeExtensionOrAnyName() (string, error) {
- tok := p.next()
- if tok.err != nil {
- return "", tok.err
- }
-
- // If extension name or type url is quoted, it's a single token.
- if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
- name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
- if err != nil {
- return "", err
- }
- return name, p.consumeToken("]")
- }
-
- // Consume everything up to "]"
- var parts []string
- for tok.value != "]" {
- parts = append(parts, tok.value)
- tok = p.next()
- if tok.err != nil {
- return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
- }
- if p.done && tok.value != "]" {
- return "", p.errorf("unclosed type_url or extension name")
- }
- }
- return strings.Join(parts, ""), nil
-}
-
-// consumeOptionalSeparator consumes an optional semicolon or comma.
-// It is used in unmarshalMessage to provide backward compatibility.
-func (p *textParser) consumeOptionalSeparator() error {
- tok := p.next()
- if tok.err != nil {
- return tok.err
- }
- if tok.value != ";" && tok.value != "," {
- p.back()
- }
- return nil
-}
-
-func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
- pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
- p.cur.err = pe
- p.done = true
- return pe
-}
-
-func (p *textParser) skipWhitespace() {
- i := 0
- for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
- if p.s[i] == '#' {
- // comment; skip to end of line or input
- for i < len(p.s) && p.s[i] != '\n' {
- i++
- }
- if i == len(p.s) {
- break
- }
- }
- if p.s[i] == '\n' {
- p.line++
- }
- i++
- }
- p.offset += i
- p.s = p.s[i:len(p.s)]
- if len(p.s) == 0 {
- p.done = true
- }
-}
-
-func (p *textParser) advance() {
- // Skip whitespace
- p.skipWhitespace()
- if p.done {
- return
- }
-
- // Start of non-whitespace
- p.cur.err = nil
- p.cur.offset, p.cur.line = p.offset, p.line
- p.cur.unquoted = ""
- switch p.s[0] {
- case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
- // Single symbol
- p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
- case '"', '\'':
- // Quoted string
- i := 1
- for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
- if p.s[i] == '\\' && i+1 < len(p.s) {
- // skip escaped char
- i++
- }
- i++
- }
- if i >= len(p.s) || p.s[i] != p.s[0] {
- p.errorf("unmatched quote")
- return
- }
- unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
- if err != nil {
- p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
- return
- }
- p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
- p.cur.unquoted = unq
- default:
- i := 0
- for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
- i++
- }
- if i == 0 {
- p.errorf("unexpected byte %#x", p.s[0])
- return
- }
- p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
- }
- p.offset += len(p.cur.value)
-}
-
-// Back off the parser by one token. Can only be done between calls to next().
-// It makes the next advance() a no-op.
-func (p *textParser) back() { p.backed = true }
-
-// Advances the parser and returns the new current token.
-func (p *textParser) next() *token {
- if p.backed || p.done {
- p.backed = false
- return &p.cur
- }
- p.advance()
- if p.done {
- p.cur.value = ""
- } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
- // Look for multiple quoted strings separated by whitespace,
- // and concatenate them.
- cat := p.cur
- for {
- p.skipWhitespace()
- if p.done || !isQuote(p.s[0]) {
- break
- }
- p.advance()
- if p.cur.err != nil {
- return &p.cur
- }
- cat.value += " " + p.cur.value
- cat.unquoted += p.cur.unquoted
- }
- p.done = false // parser may have seen EOF, but we want to return cat
- p.cur = cat
- }
- return &p.cur
-}
-
-func (p *textParser) consumeToken(s string) error {
- tok := p.next()
- if tok.err != nil {
- return tok.err
- }
- if tok.value != s {
- p.back()
- return p.errorf("expected %q, found %q", s, tok.value)
- }
- return nil
-}
-
-var errBadUTF8 = errors.New("proto: bad UTF-8")
-
-func unquoteC(s string, quote rune) (string, error) {
- // This is based on C++'s tokenizer.cc.
- // Despite its name, this is *not* parsing C syntax.
- // For instance, "\0" is an invalid quoted string.
-
- // Avoid allocation in trivial cases.
- simple := true
- for _, r := range s {
- if r == '\\' || r == quote {
- simple = false
- break
- }
- }
- if simple {
- return s, nil
- }
-
- buf := make([]byte, 0, 3*len(s)/2)
- for len(s) > 0 {
- r, n := utf8.DecodeRuneInString(s)
- if r == utf8.RuneError && n == 1 {
- return "", errBadUTF8
- }
- s = s[n:]
- if r != '\\' {
- if r < utf8.RuneSelf {
- buf = append(buf, byte(r))
- } else {
- buf = append(buf, string(r)...)
- }
- continue
- }
-
- ch, tail, err := unescape(s)
- if err != nil {
- return "", err
- }
- buf = append(buf, ch...)
- s = tail
- }
- return string(buf), nil
-}
-
-func unescape(s string) (ch string, tail string, err error) {
- r, n := utf8.DecodeRuneInString(s)
- if r == utf8.RuneError && n == 1 {
- return "", "", errBadUTF8
- }
- s = s[n:]
- switch r {
- case 'a':
- return "\a", s, nil
- case 'b':
- return "\b", s, nil
- case 'f':
- return "\f", s, nil
- case 'n':
- return "\n", s, nil
- case 'r':
- return "\r", s, nil
- case 't':
- return "\t", s, nil
- case 'v':
- return "\v", s, nil
- case '?':
- return "?", s, nil // trigraph workaround
- case '\'', '"', '\\':
- return string(r), s, nil
- case '0', '1', '2', '3', '4', '5', '6', '7':
- if len(s) < 2 {
- return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
- }
- ss := string(r) + s[:2]
- s = s[2:]
- i, err := strconv.ParseUint(ss, 8, 8)
- if err != nil {
- return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss)
- }
- return string([]byte{byte(i)}), s, nil
- case 'x', 'X', 'u', 'U':
- var n int
- switch r {
- case 'x', 'X':
- n = 2
- case 'u':
- n = 4
- case 'U':
- n = 8
- }
- if len(s) < n {
- return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n)
- }
- ss := s[:n]
- s = s[n:]
- i, err := strconv.ParseUint(ss, 16, 64)
- if err != nil {
- return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss)
- }
- if r == 'x' || r == 'X' {
- return string([]byte{byte(i)}), s, nil
- }
- if i > utf8.MaxRune {
- return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss)
- }
- return string(rune(i)), s, nil
- }
- return "", "", fmt.Errorf(`unknown escape \%c`, r)
-}
-
-func isIdentOrNumberChar(c byte) bool {
- switch {
- case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
- return true
- case '0' <= c && c <= '9':
- return true
- }
- switch c {
- case '-', '+', '.', '_':
- return true
- }
- return false
-}
-
-func isWhitespace(c byte) bool {
- switch c {
- case ' ', '\t', '\n', '\r':
- return true
- }
- return false
-}
-
-func isQuote(c byte) bool {
- switch c {
- case '"', '\'':
- return true
- }
- return false
-}
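Reviewer note: prototext is the maintained replacement for this hand-rolled text parser. A minimal sketch of the equivalent decode path (the input literal is illustrative):

    package main

    import (
    	"fmt"

    	"google.golang.org/protobuf/encoding/prototext"
    	"google.golang.org/protobuf/types/known/durationpb"
    )

    func main() {
    	var d durationpb.Duration
    	if err := prototext.Unmarshal([]byte(`seconds: 30`), &d); err != nil {
    		panic(err)
    	}
    	fmt.Println(d.GetSeconds()) // 30
    }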
diff --git a/vendor/github.com/golang/protobuf/proto/text_encode.go b/vendor/github.com/golang/protobuf/proto/text_encode.go
deleted file mode 100644
index a31134ee..00000000
--- a/vendor/github.com/golang/protobuf/proto/text_encode.go
+++ /dev/null
@@ -1,560 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package proto
-
-import (
- "bytes"
- "encoding"
- "fmt"
- "io"
- "math"
- "sort"
- "strings"
-
- "google.golang.org/protobuf/encoding/prototext"
- "google.golang.org/protobuf/encoding/protowire"
- "google.golang.org/protobuf/proto"
- "google.golang.org/protobuf/reflect/protoreflect"
- "google.golang.org/protobuf/reflect/protoregistry"
-)
-
-const wrapTextMarshalV2 = false
-
-// TextMarshaler is a configurable text format marshaler.
-type TextMarshaler struct {
- Compact bool // use compact text format (one line)
- ExpandAny bool // expand google.protobuf.Any messages of known types
-}
-
-// Marshal writes the proto text format of m to w.
-func (tm *TextMarshaler) Marshal(w io.Writer, m Message) error {
- b, err := tm.marshal(m)
- if len(b) > 0 {
- if _, err := w.Write(b); err != nil {
- return err
- }
- }
- return err
-}
-
-// Text returns a proto text formatted string of m.
-func (tm *TextMarshaler) Text(m Message) string {
- b, _ := tm.marshal(m)
- return string(b)
-}
-
-func (tm *TextMarshaler) marshal(m Message) ([]byte, error) {
- mr := MessageReflect(m)
- if mr == nil || !mr.IsValid() {
- return []byte(""), nil
- }
-
- if wrapTextMarshalV2 {
- if m, ok := m.(encoding.TextMarshaler); ok {
- return m.MarshalText()
- }
-
- opts := prototext.MarshalOptions{
- AllowPartial: true,
- EmitUnknown: true,
- }
- if !tm.Compact {
- opts.Indent = " "
- }
- if !tm.ExpandAny {
- opts.Resolver = (*protoregistry.Types)(nil)
- }
- return opts.Marshal(mr.Interface())
- } else {
- w := &textWriter{
- compact: tm.Compact,
- expandAny: tm.ExpandAny,
- complete: true,
- }
-
- if m, ok := m.(encoding.TextMarshaler); ok {
- b, err := m.MarshalText()
- if err != nil {
- return nil, err
- }
- w.Write(b)
- return w.buf, nil
- }
-
- err := w.writeMessage(mr)
- return w.buf, err
- }
-}
-
-var (
- defaultTextMarshaler = TextMarshaler{}
- compactTextMarshaler = TextMarshaler{Compact: true}
-)
-
-// MarshalText writes the proto text format of m to w.
-func MarshalText(w io.Writer, m Message) error { return defaultTextMarshaler.Marshal(w, m) }
-
-// MarshalTextString returns a proto text formatted string of m.
-func MarshalTextString(m Message) string { return defaultTextMarshaler.Text(m) }
-
-// CompactText writes the compact proto text format of m to w.
-func CompactText(w io.Writer, m Message) error { return compactTextMarshaler.Marshal(w, m) }
-
-// CompactTextString returns a compact proto text formatted string of m.
-func CompactTextString(m Message) string { return compactTextMarshaler.Text(m) }
-
-var (
- newline = []byte("\n")
- endBraceNewline = []byte("}\n")
- posInf = []byte("inf")
- negInf = []byte("-inf")
- nan = []byte("nan")
-)
-
-// textWriter is an io.Writer that tracks its indentation level.
-type textWriter struct {
- compact bool // same as TextMarshaler.Compact
- expandAny bool // same as TextMarshaler.ExpandAny
- complete bool // whether the current position is a complete line
- indent int // indentation level; never negative
- buf []byte
-}
-
-func (w *textWriter) Write(p []byte) (n int, _ error) {
- newlines := bytes.Count(p, newline)
- if newlines == 0 {
- if !w.compact && w.complete {
- w.writeIndent()
- }
- w.buf = append(w.buf, p...)
- w.complete = false
- return len(p), nil
- }
-
- frags := bytes.SplitN(p, newline, newlines+1)
- if w.compact {
- for i, frag := range frags {
- if i > 0 {
- w.buf = append(w.buf, ' ')
- n++
- }
- w.buf = append(w.buf, frag...)
- n += len(frag)
- }
- return n, nil
- }
-
- for i, frag := range frags {
- if w.complete {
- w.writeIndent()
- }
- w.buf = append(w.buf, frag...)
- n += len(frag)
- if i+1 < len(frags) {
- w.buf = append(w.buf, '\n')
- n++
- }
- }
- w.complete = len(frags[len(frags)-1]) == 0
- return n, nil
-}
-
-func (w *textWriter) WriteByte(c byte) error {
- if w.compact && c == '\n' {
- c = ' '
- }
- if !w.compact && w.complete {
- w.writeIndent()
- }
- w.buf = append(w.buf, c)
- w.complete = c == '\n'
- return nil
-}
-
-func (w *textWriter) writeName(fd protoreflect.FieldDescriptor) {
- if !w.compact && w.complete {
- w.writeIndent()
- }
- w.complete = false
-
- if fd.Kind() != protoreflect.GroupKind {
- w.buf = append(w.buf, fd.Name()...)
- w.WriteByte(':')
- } else {
- // Use message type name for group field name.
- w.buf = append(w.buf, fd.Message().Name()...)
- }
-
- if !w.compact {
- w.WriteByte(' ')
- }
-}
-
-func requiresQuotes(u string) bool {
-	// If the type URL contains any character outside [0-9A-Za-z._/], it must be quoted.
- for _, ch := range u {
- switch {
- case ch == '.' || ch == '/' || ch == '_':
- continue
- case '0' <= ch && ch <= '9':
- continue
- case 'A' <= ch && ch <= 'Z':
- continue
- case 'a' <= ch && ch <= 'z':
- continue
- default:
- return true
- }
- }
- return false
-}
-
-// writeProto3Any writes an expanded google.protobuf.Any message.
-//
-// It returns (false, nil) if the value can't be unmarshaled (e.g. because
-// required messages are not linked in).
-//
-// It returns (true, error) when the value was written in expanded format or an error
-// was encountered.
-func (w *textWriter) writeProto3Any(m protoreflect.Message) (bool, error) {
- md := m.Descriptor()
- fdURL := md.Fields().ByName("type_url")
- fdVal := md.Fields().ByName("value")
-
- url := m.Get(fdURL).String()
- mt, err := protoregistry.GlobalTypes.FindMessageByURL(url)
- if err != nil {
- return false, nil
- }
-
- b := m.Get(fdVal).Bytes()
- m2 := mt.New()
- if err := proto.Unmarshal(b, m2.Interface()); err != nil {
- return false, nil
- }
- w.Write([]byte("["))
- if requiresQuotes(url) {
- w.writeQuotedString(url)
- } else {
- w.Write([]byte(url))
- }
- if w.compact {
- w.Write([]byte("]:<"))
- } else {
- w.Write([]byte("]: <\n"))
- w.indent++
- }
- if err := w.writeMessage(m2); err != nil {
- return true, err
- }
- if w.compact {
- w.Write([]byte("> "))
- } else {
- w.indent--
- w.Write([]byte(">\n"))
- }
- return true, nil
-}
-
-func (w *textWriter) writeMessage(m protoreflect.Message) error {
- md := m.Descriptor()
- if w.expandAny && md.FullName() == "google.protobuf.Any" {
- if canExpand, err := w.writeProto3Any(m); canExpand {
- return err
- }
- }
-
- fds := md.Fields()
- for i := 0; i < fds.Len(); {
- fd := fds.Get(i)
- if od := fd.ContainingOneof(); od != nil {
- fd = m.WhichOneof(od)
- i += od.Fields().Len()
- } else {
- i++
- }
- if fd == nil || !m.Has(fd) {
- continue
- }
-
- switch {
- case fd.IsList():
- lv := m.Get(fd).List()
- for j := 0; j < lv.Len(); j++ {
- w.writeName(fd)
- v := lv.Get(j)
- if err := w.writeSingularValue(v, fd); err != nil {
- return err
- }
- w.WriteByte('\n')
- }
- case fd.IsMap():
- kfd := fd.MapKey()
- vfd := fd.MapValue()
- mv := m.Get(fd).Map()
-
- type entry struct{ key, val protoreflect.Value }
- var entries []entry
- mv.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool {
- entries = append(entries, entry{k.Value(), v})
- return true
- })
- sort.Slice(entries, func(i, j int) bool {
- switch kfd.Kind() {
- case protoreflect.BoolKind:
- return !entries[i].key.Bool() && entries[j].key.Bool()
- case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
- return entries[i].key.Int() < entries[j].key.Int()
- case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
- return entries[i].key.Uint() < entries[j].key.Uint()
- case protoreflect.StringKind:
- return entries[i].key.String() < entries[j].key.String()
- default:
- panic("invalid kind")
- }
- })
- for _, entry := range entries {
- w.writeName(fd)
- w.WriteByte('<')
- if !w.compact {
- w.WriteByte('\n')
- }
- w.indent++
- w.writeName(kfd)
- if err := w.writeSingularValue(entry.key, kfd); err != nil {
- return err
- }
- w.WriteByte('\n')
- w.writeName(vfd)
- if err := w.writeSingularValue(entry.val, vfd); err != nil {
- return err
- }
- w.WriteByte('\n')
- w.indent--
- w.WriteByte('>')
- w.WriteByte('\n')
- }
- default:
- w.writeName(fd)
- if err := w.writeSingularValue(m.Get(fd), fd); err != nil {
- return err
- }
- w.WriteByte('\n')
- }
- }
-
- if b := m.GetUnknown(); len(b) > 0 {
- w.writeUnknownFields(b)
- }
- return w.writeExtensions(m)
-}
-
-func (w *textWriter) writeSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) error {
- switch fd.Kind() {
- case protoreflect.FloatKind, protoreflect.DoubleKind:
- switch vf := v.Float(); {
- case math.IsInf(vf, +1):
- w.Write(posInf)
- case math.IsInf(vf, -1):
- w.Write(negInf)
- case math.IsNaN(vf):
- w.Write(nan)
- default:
- fmt.Fprint(w, v.Interface())
- }
- case protoreflect.StringKind:
- // NOTE: This does not validate UTF-8 for historical reasons.
- w.writeQuotedString(string(v.String()))
- case protoreflect.BytesKind:
- w.writeQuotedString(string(v.Bytes()))
- case protoreflect.MessageKind, protoreflect.GroupKind:
- var bra, ket byte = '<', '>'
- if fd.Kind() == protoreflect.GroupKind {
- bra, ket = '{', '}'
- }
- w.WriteByte(bra)
- if !w.compact {
- w.WriteByte('\n')
- }
- w.indent++
- m := v.Message()
- if m2, ok := m.Interface().(encoding.TextMarshaler); ok {
- b, err := m2.MarshalText()
- if err != nil {
- return err
- }
- w.Write(b)
- } else {
- w.writeMessage(m)
- }
- w.indent--
- w.WriteByte(ket)
- case protoreflect.EnumKind:
- if ev := fd.Enum().Values().ByNumber(v.Enum()); ev != nil {
- fmt.Fprint(w, ev.Name())
- } else {
- fmt.Fprint(w, v.Enum())
- }
- default:
- fmt.Fprint(w, v.Interface())
- }
- return nil
-}
-
-// writeQuotedString writes a quoted string in the protocol buffer text format.
-func (w *textWriter) writeQuotedString(s string) {
- w.WriteByte('"')
- for i := 0; i < len(s); i++ {
- switch c := s[i]; c {
- case '\n':
- w.buf = append(w.buf, `\n`...)
- case '\r':
- w.buf = append(w.buf, `\r`...)
- case '\t':
- w.buf = append(w.buf, `\t`...)
- case '"':
- w.buf = append(w.buf, `\"`...)
- case '\\':
- w.buf = append(w.buf, `\\`...)
- default:
- if isPrint := c >= 0x20 && c < 0x7f; isPrint {
- w.buf = append(w.buf, c)
- } else {
- w.buf = append(w.buf, fmt.Sprintf(`\%03o`, c)...)
- }
- }
- }
- w.WriteByte('"')
-}
-
-func (w *textWriter) writeUnknownFields(b []byte) {
- if !w.compact {
- fmt.Fprintf(w, "/* %d unknown bytes */\n", len(b))
- }
-
- for len(b) > 0 {
- num, wtyp, n := protowire.ConsumeTag(b)
- if n < 0 {
- return
- }
- b = b[n:]
-
- if wtyp == protowire.EndGroupType {
- w.indent--
- w.Write(endBraceNewline)
- continue
- }
- fmt.Fprint(w, num)
- if wtyp != protowire.StartGroupType {
- w.WriteByte(':')
- }
- if !w.compact || wtyp == protowire.StartGroupType {
- w.WriteByte(' ')
- }
- switch wtyp {
- case protowire.VarintType:
- v, n := protowire.ConsumeVarint(b)
- if n < 0 {
- return
- }
- b = b[n:]
- fmt.Fprint(w, v)
- case protowire.Fixed32Type:
- v, n := protowire.ConsumeFixed32(b)
- if n < 0 {
- return
- }
- b = b[n:]
- fmt.Fprint(w, v)
- case protowire.Fixed64Type:
- v, n := protowire.ConsumeFixed64(b)
- if n < 0 {
- return
- }
- b = b[n:]
- fmt.Fprint(w, v)
- case protowire.BytesType:
- v, n := protowire.ConsumeBytes(b)
- if n < 0 {
- return
- }
- b = b[n:]
- fmt.Fprintf(w, "%q", v)
- case protowire.StartGroupType:
- w.WriteByte('{')
- w.indent++
- default:
- fmt.Fprintf(w, "/* unknown wire type %d */", wtyp)
- }
- w.WriteByte('\n')
- }
-}
-
-// writeExtensions writes all the extensions in m.
-func (w *textWriter) writeExtensions(m protoreflect.Message) error {
- md := m.Descriptor()
- if md.ExtensionRanges().Len() == 0 {
- return nil
- }
-
- type ext struct {
- desc protoreflect.FieldDescriptor
- val protoreflect.Value
- }
- var exts []ext
- m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
- if fd.IsExtension() {
- exts = append(exts, ext{fd, v})
- }
- return true
- })
- sort.Slice(exts, func(i, j int) bool {
- return exts[i].desc.Number() < exts[j].desc.Number()
- })
-
- for _, ext := range exts {
- // For message set, use the name of the message as the extension name.
- name := string(ext.desc.FullName())
- if isMessageSet(ext.desc.ContainingMessage()) {
- name = strings.TrimSuffix(name, ".message_set_extension")
- }
-
- if !ext.desc.IsList() {
- if err := w.writeSingularExtension(name, ext.val, ext.desc); err != nil {
- return err
- }
- } else {
- lv := ext.val.List()
- for i := 0; i < lv.Len(); i++ {
- if err := w.writeSingularExtension(name, lv.Get(i), ext.desc); err != nil {
- return err
- }
- }
- }
- }
- return nil
-}
-
-func (w *textWriter) writeSingularExtension(name string, v protoreflect.Value, fd protoreflect.FieldDescriptor) error {
- fmt.Fprintf(w, "[%s]:", name)
- if !w.compact {
- w.WriteByte(' ')
- }
- if err := w.writeSingularValue(v, fd); err != nil {
- return err
- }
- w.WriteByte('\n')
- return nil
-}
-
-func (w *textWriter) writeIndent() {
- if !w.complete {
- return
- }
- for i := 0; i < w.indent*2; i++ {
- w.buf = append(w.buf, ' ')
- }
- w.complete = false
-}
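Reviewer note: the encoding side maps onto prototext as well; Multiline plus Indent roughly corresponds to the non-Compact output above, though the v2 formatter does not promise byte-for-byte identical text. A sketch:

    package main

    import (
    	"fmt"
    	"time"

    	"google.golang.org/protobuf/encoding/prototext"
    	"google.golang.org/protobuf/types/known/durationpb"
    )

    func main() {
    	d := durationpb.New(90 * time.Second)
    	// Multiline/Indent stand in for the old non-Compact TextMarshaler.
    	out, err := prototext.MarshalOptions{Multiline: true, Indent: "  "}.Marshal(d)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Print(string(out))
    }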
diff --git a/vendor/github.com/golang/protobuf/proto/wire.go b/vendor/github.com/golang/protobuf/proto/wire.go
deleted file mode 100644
index d7c28da5..00000000
--- a/vendor/github.com/golang/protobuf/proto/wire.go
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package proto
-
-import (
- protoV2 "google.golang.org/protobuf/proto"
- "google.golang.org/protobuf/runtime/protoiface"
-)
-
-// Size returns the size in bytes of the wire-format encoding of m.
-func Size(m Message) int {
- if m == nil {
- return 0
- }
- mi := MessageV2(m)
- return protoV2.Size(mi)
-}
-
-// Marshal returns the wire-format encoding of m.
-func Marshal(m Message) ([]byte, error) {
- b, err := marshalAppend(nil, m, false)
- if b == nil {
- b = zeroBytes
- }
- return b, err
-}
-
-var zeroBytes = make([]byte, 0, 0)
-
-func marshalAppend(buf []byte, m Message, deterministic bool) ([]byte, error) {
- if m == nil {
- return nil, ErrNil
- }
- mi := MessageV2(m)
- nbuf, err := protoV2.MarshalOptions{
- Deterministic: deterministic,
- AllowPartial: true,
- }.MarshalAppend(buf, mi)
- if err != nil {
- return buf, err
- }
- if len(buf) == len(nbuf) {
- if !mi.ProtoReflect().IsValid() {
- return buf, ErrNil
- }
- }
- return nbuf, checkRequiredNotSet(mi)
-}
-
-// Unmarshal parses a wire-format message in b and places the decoded results in m.
-//
-// Unmarshal resets m before starting to unmarshal, so any existing data in m is always
-// removed. Use UnmarshalMerge to preserve and append to existing data.
-func Unmarshal(b []byte, m Message) error {
- m.Reset()
- return UnmarshalMerge(b, m)
-}
-
-// UnmarshalMerge parses a wire-format message in b and places the decoded results in m.
-func UnmarshalMerge(b []byte, m Message) error {
- mi := MessageV2(m)
- out, err := protoV2.UnmarshalOptions{
- AllowPartial: true,
- Merge: true,
- }.UnmarshalState(protoiface.UnmarshalInput{
- Buf: b,
- Message: mi.ProtoReflect(),
- })
- if err != nil {
- return err
- }
- if out.Flags&protoiface.UnmarshalInitialized > 0 {
- return nil
- }
- return checkRequiredNotSet(mi)
-}
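Reviewer note: the wire helpers here are one-line wrappers over the v2 package, so Marshal/Unmarshal/Size calls move over unchanged apart from the import path. A round-trip sketch (wrapperspb is illustrative):

    package main

    import (
    	"fmt"

    	"google.golang.org/protobuf/proto"
    	"google.golang.org/protobuf/types/known/wrapperspb"
    )

    func main() {
    	in := wrapperspb.Int64(42)
    	b, err := proto.Marshal(in) // wire-format bytes, as the deleted shim produced
    	if err != nil {
    		panic(err)
    	}

    	var out wrapperspb.Int64Value
    	if err := proto.Unmarshal(b, &out); err != nil {
    		panic(err)
    	}
    	fmt.Println(out.GetValue(), proto.Size(in)) // 42 and the encoded size
    }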
diff --git a/vendor/github.com/golang/protobuf/proto/wrappers.go b/vendor/github.com/golang/protobuf/proto/wrappers.go
deleted file mode 100644
index 398e3485..00000000
--- a/vendor/github.com/golang/protobuf/proto/wrappers.go
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package proto
-
-// Bool stores v in a new bool value and returns a pointer to it.
-func Bool(v bool) *bool { return &v }
-
-// Int stores v in a new int32 value and returns a pointer to it.
-//
-// Deprecated: Use Int32 instead.
-func Int(v int) *int32 { return Int32(int32(v)) }
-
-// Int32 stores v in a new int32 value and returns a pointer to it.
-func Int32(v int32) *int32 { return &v }
-
-// Int64 stores v in a new int64 value and returns a pointer to it.
-func Int64(v int64) *int64 { return &v }
-
-// Uint32 stores v in a new uint32 value and returns a pointer to it.
-func Uint32(v uint32) *uint32 { return &v }
-
-// Uint64 stores v in a new uint64 value and returns a pointer to it.
-func Uint64(v uint64) *uint64 { return &v }
-
-// Float32 stores v in a new float32 value and returns a pointer to it.
-func Float32(v float32) *float32 { return &v }
-
-// Float64 stores v in a new float64 value and returns a pointer to it.
-func Float64(v float64) *float64 { return &v }
-
-// String stores v in a new string value and returns a pointer to it.
-func String(v string) *string { return &v }
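Reviewer note: these pointer helpers survive under the same names in google.golang.org/protobuf/proto, so again only the import changes. A sketch with a hypothetical options struct (poolParams is not a real GARM type):

    package main

    import (
    	"fmt"

    	"google.golang.org/protobuf/proto"
    )

    // poolParams is a hypothetical stand-in for any API that takes
    // optional scalars as pointers.
    type poolParams struct {
    	Enabled    *bool
    	MaxRunners *uint32
    }

    func main() {
    	p := poolParams{
    		Enabled:    proto.Bool(true), // same helper, v2 import path
    		MaxRunners: proto.Uint32(5),
    	}
    	fmt.Println(*p.Enabled, *p.MaxRunners)
    }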
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
deleted file mode 100644
index a76f8076..00000000
--- a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
+++ /dev/null
@@ -1,64 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
-
-package timestamp
-
-import (
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
- timestamppb "google.golang.org/protobuf/types/known/timestamppb"
- reflect "reflect"
-)
-
-// Symbols defined in public import of google/protobuf/timestamp.proto.
-
-type Timestamp = timestamppb.Timestamp
-
-var File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto protoreflect.FileDescriptor
-
-var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = []byte{
- 0x0a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
- 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
- 0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2f, 0x74, 0x69,
- 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74,
- 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x37,
- 0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
- 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
- 0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x3b, 0x74, 0x69,
- 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x33,
-}
-
-var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = []interface{}{}
-var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = []int32{
- 0, // [0:0] is the sub-list for method output_type
- 0, // [0:0] is the sub-list for method input_type
- 0, // [0:0] is the sub-list for extension type_name
- 0, // [0:0] is the sub-list for extension extendee
- 0, // [0:0] is the sub-list for field type_name
-}
-
-func init() { file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() }
-func file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() {
- if File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto != nil {
- return
- }
- type x struct{}
- out := protoimpl.TypeBuilder{
- File: protoimpl.DescBuilder{
- GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc,
- NumEnums: 0,
- NumMessages: 0,
- NumExtensions: 0,
- NumServices: 0,
- },
- GoTypes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes,
- DependencyIndexes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs,
- }.Build()
- File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto = out.File
- file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = nil
- file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = nil
- file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = nil
-}
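
The deleted package is a pure alias shim: Timestamp is already defined in google.golang.org/protobuf/types/known/timestamppb, so callers migrate by switching imports. A minimal sketch using only the standard timestamppb API:

package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	// timestamppb.New replaces ptypes.TimestampProto; AsTime replaces ptypes.Timestamp.
	ts := timestamppb.New(time.Now())
	fmt.Println(ts.AsTime().UTC())
}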
diff --git a/vendor/github.com/google/go-github/v53/github/actions_required_workflows.go b/vendor/github.com/google/go-github/v53/github/actions_required_workflows.go
deleted file mode 100644
index 3566eb9d..00000000
--- a/vendor/github.com/google/go-github/v53/github/actions_required_workflows.go
+++ /dev/null
@@ -1,247 +0,0 @@
-// Copyright 2023 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "context"
- "fmt"
-)
-
-// OrgRequiredWorkflow represents a required workflow object at the org level.
-type OrgRequiredWorkflow struct {
- ID *int64 `json:"id,omitempty"`
- Name *string `json:"name,omitempty"`
- Path *string `json:"path,omitempty"`
- Scope *string `json:"scope,omitempty"`
- Ref *string `json:"ref,omitempty"`
- State *string `json:"state,omitempty"`
- SelectedRepositoriesURL *string `json:"selected_repositories_url,omitempty"`
- CreatedAt *Timestamp `json:"created_at,omitempty"`
- UpdatedAt *Timestamp `json:"updated_at,omitempty"`
- Repository *Repository `json:"repository,omitempty"`
-}
-
-// OrgRequiredWorkflows represents the required workflows for the org.
-type OrgRequiredWorkflows struct {
- TotalCount *int `json:"total_count,omitempty"`
- RequiredWorkflows []*OrgRequiredWorkflow `json:"required_workflows,omitempty"`
-}
-
-// CreateUpdateRequiredWorkflowOptions represents the input object used to create or update required workflows.
-type CreateUpdateRequiredWorkflowOptions struct {
- WorkflowFilePath *string `json:"workflow_file_path,omitempty"`
- RepositoryID *int64 `json:"repository_id,omitempty"`
- Scope *string `json:"scope,omitempty"`
- SelectedRepositoryIDs *SelectedRepoIDs `json:"selected_repository_ids,omitempty"`
-}
-
-// RequiredWorkflowSelectedRepos represents the repos that a required workflow is applied to.
-type RequiredWorkflowSelectedRepos struct {
- TotalCount *int `json:"total_count,omitempty"`
- Repositories []*Repository `json:"repositories,omitempty"`
-}
-
-// RepoRequiredWorkflow represents a required workflow object at the repo level.
-type RepoRequiredWorkflow struct {
- ID *int64 `json:"id,omitempty"`
- NodeID *string `json:"node_id,omitempty"`
- Name *string `json:"name,omitempty"`
- Path *string `json:"path,omitempty"`
- State *string `json:"state,omitempty"`
- URL *string `json:"url,omitempty"`
- HTMLURL *string `json:"html_url,omitempty"`
- BadgeURL *string `json:"badge_url,omitempty"`
- CreatedAt *Timestamp `json:"created_at,omitempty"`
- UpdatedAt *Timestamp `json:"updated_at,omitempty"`
- SourceRepository *Repository `json:"source_repository,omitempty"`
-}
-
-// RepoRequiredWorkflows represents the required workflows for a repo.
-type RepoRequiredWorkflows struct {
- TotalCount *int `json:"total_count,omitempty"`
- RequiredWorkflows []*RepoRequiredWorkflow `json:"required_workflows,omitempty"`
-}
-
-// ListOrgRequiredWorkflows lists the RequiredWorkflows for an org.
-//
-// GitHub API docs: https://docs.github.com/en/rest/actions/required-workflows?apiVersion=2022-11-28#list-required-workflows
-func (s *ActionsService) ListOrgRequiredWorkflows(ctx context.Context, org string, opts *ListOptions) (*OrgRequiredWorkflows, *Response, error) {
- url := fmt.Sprintf("orgs/%v/actions/required_workflows", org)
- u, err := addOptions(url, opts)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- requiredWorkflows := new(OrgRequiredWorkflows)
- resp, err := s.client.Do(ctx, req, &requiredWorkflows)
- if err != nil {
- return nil, resp, err
- }
-
- return requiredWorkflows, resp, nil
-}
-
-// CreateRequiredWorkflow creates the required workflow in an org.
-//
-// GitHub API docs: https://docs.github.com/en/rest/actions/required-workflows?apiVersion=2022-11-28#create-a-required-workflow
-func (s *ActionsService) CreateRequiredWorkflow(ctx context.Context, org string, createRequiredWorkflowOptions *CreateUpdateRequiredWorkflowOptions) (*OrgRequiredWorkflow, *Response, error) {
- url := fmt.Sprintf("orgs/%v/actions/required_workflows", org)
- req, err := s.client.NewRequest("POST", url, createRequiredWorkflowOptions)
- if err != nil {
- return nil, nil, err
- }
-
- orgRequiredWorkflow := new(OrgRequiredWorkflow)
- resp, err := s.client.Do(ctx, req, orgRequiredWorkflow)
- if err != nil {
- return nil, resp, err
- }
-
- return orgRequiredWorkflow, resp, nil
-}
-
-// GetRequiredWorkflowByID gets a required workflow for an org by its ID.
-//
-// GitHub API docs: https://docs.github.com/en/rest/actions/required-workflows?apiVersion=2022-11-28#get-a-required-workflow
-func (s *ActionsService) GetRequiredWorkflowByID(ctx context.Context, owner string, requiredWorkflowID int64) (*OrgRequiredWorkflow, *Response, error) {
- u := fmt.Sprintf("orgs/%v/actions/required_workflows/%v", owner, requiredWorkflowID)
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- requiredWorkflow := new(OrgRequiredWorkflow)
- resp, err := s.client.Do(ctx, req, &requiredWorkflow)
- if err != nil {
- return nil, resp, err
- }
-
- return requiredWorkflow, resp, nil
-}
-
-// UpdateRequiredWorkflow updates a required workflow in an org.
-//
-// GitHub API docs: https://docs.github.com/en/rest/actions/required-workflows?apiVersion=2022-11-28#update-a-required-workflow
-func (s *ActionsService) UpdateRequiredWorkflow(ctx context.Context, org string, requiredWorkflowID int64, updateRequiredWorkflowOptions *CreateUpdateRequiredWorkflowOptions) (*OrgRequiredWorkflow, *Response, error) {
- url := fmt.Sprintf("orgs/%v/actions/required_workflows/%v", org, requiredWorkflowID)
- req, err := s.client.NewRequest("PATCH", url, updateRequiredWorkflowOptions)
- if err != nil {
- return nil, nil, err
- }
-
- orgRequiredWorkflow := new(OrgRequiredWorkflow)
- resp, err := s.client.Do(ctx, req, orgRequiredWorkflow)
- if err != nil {
- return nil, resp, err
- }
-
- return orgRequiredWorkflow, resp, nil
-}
-
-// DeleteRequiredWorkflow deletes a required workflow in an org.
-//
-// GitHub API docs: https://docs.github.com/en/rest/actions/required-workflows?apiVersion=2022-11-28#delete-a-required-workflow
-func (s *ActionsService) DeleteRequiredWorkflow(ctx context.Context, org string, requiredWorkflowID int64) (*Response, error) {
- url := fmt.Sprintf("orgs/%v/actions/required_workflows/%v", org, requiredWorkflowID)
- req, err := s.client.NewRequest("DELETE", url, nil)
- if err != nil {
- return nil, err
- }
- return s.client.Do(ctx, req, nil)
-}
-
-// ListRequiredWorkflowSelectedRepos lists the Repositories selected for a workflow.
-//
-// GitHub API docs: https://docs.github.com/en/rest/actions/required-workflows?apiVersion=2022-11-28#list-selected-repositories-for-a-required-workflow
-func (s *ActionsService) ListRequiredWorkflowSelectedRepos(ctx context.Context, org string, requiredWorkflowID int64, opts *ListOptions) (*RequiredWorkflowSelectedRepos, *Response, error) {
- url := fmt.Sprintf("orgs/%v/actions/required_workflows/%v/repositories", org, requiredWorkflowID)
- u, err := addOptions(url, opts)
- if err != nil {
- return nil, nil, err
- }
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- requiredWorkflowRepos := new(RequiredWorkflowSelectedRepos)
- resp, err := s.client.Do(ctx, req, &requiredWorkflowRepos)
- if err != nil {
- return nil, resp, err
- }
-
- return requiredWorkflowRepos, resp, nil
-}
-
-// SetRequiredWorkflowSelectedRepos sets the Repositories selected for a workflow.
-//
-// GitHub API docs: https://docs.github.com/en/rest/actions/required-workflows?apiVersion=2022-11-28#sets-repositories-for-a-required-workflow
-func (s *ActionsService) SetRequiredWorkflowSelectedRepos(ctx context.Context, org string, requiredWorkflowID int64, ids SelectedRepoIDs) (*Response, error) {
- type repoIDs struct {
- SelectedIDs SelectedRepoIDs `json:"selected_repository_ids"`
- }
- url := fmt.Sprintf("orgs/%v/actions/required_workflows/%v/repositories", org, requiredWorkflowID)
- req, err := s.client.NewRequest("PUT", url, repoIDs{SelectedIDs: ids})
- if err != nil {
- return nil, err
- }
-
- return s.client.Do(ctx, req, nil)
-}
-
-// AddRepoToRequiredWorkflow adds the Repository to a required workflow.
-//
-// GitHub API docs: https://docs.github.com/en/rest/actions/required-workflows?apiVersion=2022-11-28#add-a-repository-to-a-required-workflow
-func (s *ActionsService) AddRepoToRequiredWorkflow(ctx context.Context, org string, requiredWorkflowID, repoID int64) (*Response, error) {
- url := fmt.Sprintf("orgs/%v/actions/required_workflows/%v/repositories/%v", org, requiredWorkflowID, repoID)
- req, err := s.client.NewRequest("PUT", url, nil)
- if err != nil {
- return nil, err
- }
- return s.client.Do(ctx, req, nil)
-}
-
-// RemoveRepoFromRequiredWorkflow removes the Repository from a required workflow.
-//
-// GitHub API docs: https://docs.github.com/en/rest/actions/required-workflows?apiVersion=2022-11-28#remove-a-repository-from-a-required-workflow
-func (s *ActionsService) RemoveRepoFromRequiredWorkflow(ctx context.Context, org string, requiredWorkflowID, repoID int64) (*Response, error) {
- url := fmt.Sprintf("orgs/%v/actions/required_workflows/%v/repositories/%v", org, requiredWorkflowID, repoID)
- req, err := s.client.NewRequest("DELETE", url, nil)
- if err != nil {
- return nil, err
- }
- return s.client.Do(ctx, req, nil)
-}
-
-// ListRepoRequiredWorkflows lists the RequiredWorkflows for a repo.
-//
-// GitHub API docs: https://docs.github.com/en/rest/actions/required-workflows?apiVersion=2022-11-28#list-repository-required-workflows
-func (s *ActionsService) ListRepoRequiredWorkflows(ctx context.Context, owner, repo string, opts *ListOptions) (*RepoRequiredWorkflows, *Response, error) {
- url := fmt.Sprintf("repos/%v/%v/actions/required_workflows", owner, repo)
- u, err := addOptions(url, opts)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- requiredWorkflows := new(RepoRequiredWorkflows)
- resp, err := s.client.Do(ctx, req, &requiredWorkflows)
- if err != nil {
- return nil, resp, err
- }
-
- return requiredWorkflows, resp, nil
-}
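
For reference, a minimal sketch of how this removed surface was driven through go-github v53; the org name and page size are illustrative, and a real client would carry credentials:

package main

import (
	"context"
	"fmt"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil) // unauthenticated; wrap an *http.Client with credentials in real use

	workflows, _, err := client.Actions.ListOrgRequiredWorkflows(ctx, "cloudbase", &github.ListOptions{PerPage: 50})
	if err != nil {
		fmt.Println("list required workflows:", err)
		return
	}
	for _, wf := range workflows.RequiredWorkflows {
		fmt.Println(wf.GetID(), wf.GetName())
	}
}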
diff --git a/vendor/github.com/google/go-github/v53/github/actions_workflow_runs.go b/vendor/github.com/google/go-github/v53/github/actions_workflow_runs.go
deleted file mode 100644
index 61f736be..00000000
--- a/vendor/github.com/google/go-github/v53/github/actions_workflow_runs.go
+++ /dev/null
@@ -1,372 +0,0 @@
-// Copyright 2020 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "context"
- "fmt"
- "net/http"
- "net/url"
-)
-
-// WorkflowRun represents a repository action workflow run.
-type WorkflowRun struct {
- ID *int64 `json:"id,omitempty"`
- Name *string `json:"name,omitempty"`
- NodeID *string `json:"node_id,omitempty"`
- HeadBranch *string `json:"head_branch,omitempty"`
- HeadSHA *string `json:"head_sha,omitempty"`
- RunNumber *int `json:"run_number,omitempty"`
- RunAttempt *int `json:"run_attempt,omitempty"`
- Event *string `json:"event,omitempty"`
- DisplayTitle *string `json:"display_title,omitempty"`
- Status *string `json:"status,omitempty"`
- Conclusion *string `json:"conclusion,omitempty"`
- WorkflowID *int64 `json:"workflow_id,omitempty"`
- CheckSuiteID *int64 `json:"check_suite_id,omitempty"`
- CheckSuiteNodeID *string `json:"check_suite_node_id,omitempty"`
- URL *string `json:"url,omitempty"`
- HTMLURL *string `json:"html_url,omitempty"`
- PullRequests []*PullRequest `json:"pull_requests,omitempty"`
- CreatedAt *Timestamp `json:"created_at,omitempty"`
- UpdatedAt *Timestamp `json:"updated_at,omitempty"`
- RunStartedAt *Timestamp `json:"run_started_at,omitempty"`
- JobsURL *string `json:"jobs_url,omitempty"`
- LogsURL *string `json:"logs_url,omitempty"`
- CheckSuiteURL *string `json:"check_suite_url,omitempty"`
- ArtifactsURL *string `json:"artifacts_url,omitempty"`
- CancelURL *string `json:"cancel_url,omitempty"`
- RerunURL *string `json:"rerun_url,omitempty"`
- PreviousAttemptURL *string `json:"previous_attempt_url,omitempty"`
- HeadCommit *HeadCommit `json:"head_commit,omitempty"`
- WorkflowURL *string `json:"workflow_url,omitempty"`
- Repository *Repository `json:"repository,omitempty"`
- HeadRepository *Repository `json:"head_repository,omitempty"`
- Actor *User `json:"actor,omitempty"`
-}
-
-// WorkflowRuns represents a slice of repository action workflow runs.
-type WorkflowRuns struct {
- TotalCount *int `json:"total_count,omitempty"`
- WorkflowRuns []*WorkflowRun `json:"workflow_runs,omitempty"`
-}
-
-// ListWorkflowRunsOptions specifies optional parameters to ListWorkflowRuns.
-type ListWorkflowRunsOptions struct {
- Actor string `url:"actor,omitempty"`
- Branch string `url:"branch,omitempty"`
- Event string `url:"event,omitempty"`
- Status string `url:"status,omitempty"`
- Created string `url:"created,omitempty"`
- HeadSHA string `url:"head_sha,omitempty"`
- ExcludePullRequests bool `url:"exclude_pull_requests,omitempty"`
- CheckSuiteID int64 `url:"check_suite_id,omitempty"`
- ListOptions
-}
-
-// WorkflowRunUsage represents the usage of a specific workflow run.
-type WorkflowRunUsage struct {
- Billable *WorkflowRunBillMap `json:"billable,omitempty"`
- RunDurationMS *int64 `json:"run_duration_ms,omitempty"`
-}
-
-// WorkflowRunBillMap represents different runner environments available for a workflow run.
-// Its key is the name of its environment, e.g. "UBUNTU", "MACOS", "WINDOWS", etc.
-type WorkflowRunBillMap map[string]*WorkflowRunBill
-
-// WorkflowRunBill specifies billable time for a specific environment in a workflow run.
-type WorkflowRunBill struct {
- TotalMS *int64 `json:"total_ms,omitempty"`
- Jobs *int `json:"jobs,omitempty"`
- JobRuns []*WorkflowRunJobRun `json:"job_runs,omitempty"`
-}
-
-// WorkflowRunJobRun represents the usage of an individual job of a specific workflow run.
-type WorkflowRunJobRun struct {
- JobID *int `json:"job_id,omitempty"`
- DurationMS *int64 `json:"duration_ms,omitempty"`
-}
-
-// WorkflowRunAttemptOptions specifies optional parameters to GetWorkflowRunAttempt.
-type WorkflowRunAttemptOptions struct {
- ExcludePullRequests *bool `url:"exclude_pull_requests,omitempty"`
-}
-
-// PendingDeploymentsRequest specifies body parameters to PendingDeployments.
-type PendingDeploymentsRequest struct {
- EnvironmentIDs []int64 `json:"environment_ids"`
- // State can be one of: "approved", "rejected".
- State string `json:"state"`
- Comment string `json:"comment"`
-}
-
-func (s *ActionsService) listWorkflowRuns(ctx context.Context, endpoint string, opts *ListWorkflowRunsOptions) (*WorkflowRuns, *Response, error) {
- u, err := addOptions(endpoint, opts)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- runs := new(WorkflowRuns)
- resp, err := s.client.Do(ctx, req, &runs)
- if err != nil {
- return nil, resp, err
- }
-
- return runs, resp, nil
-}
-
-// ListWorkflowRunsByID lists all workflow runs by workflow ID.
-//
-// GitHub API docs: https://docs.github.com/en/rest/actions/workflow-runs#list-workflow-runs
-func (s *ActionsService) ListWorkflowRunsByID(ctx context.Context, owner, repo string, workflowID int64, opts *ListWorkflowRunsOptions) (*WorkflowRuns, *Response, error) {
- u := fmt.Sprintf("repos/%s/%s/actions/workflows/%v/runs", owner, repo, workflowID)
- return s.listWorkflowRuns(ctx, u, opts)
-}
-
-// ListWorkflowRunsByFileName lists all workflow runs by workflow file name.
-//
-// GitHub API docs: https://docs.github.com/en/rest/actions/workflow-runs#list-workflow-runs
-func (s *ActionsService) ListWorkflowRunsByFileName(ctx context.Context, owner, repo, workflowFileName string, opts *ListWorkflowRunsOptions) (*WorkflowRuns, *Response, error) {
- u := fmt.Sprintf("repos/%s/%s/actions/workflows/%v/runs", owner, repo, workflowFileName)
- return s.listWorkflowRuns(ctx, u, opts)
-}
-
-// ListRepositoryWorkflowRuns lists all workflow runs for a repository.
-//
-// GitHub API docs: https://docs.github.com/en/rest/actions/workflow-runs#list-workflow-runs-for-a-repository
-func (s *ActionsService) ListRepositoryWorkflowRuns(ctx context.Context, owner, repo string, opts *ListWorkflowRunsOptions) (*WorkflowRuns, *Response, error) {
- u := fmt.Sprintf("repos/%s/%s/actions/runs", owner, repo)
- u, err := addOptions(u, opts)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- runs := new(WorkflowRuns)
- resp, err := s.client.Do(ctx, req, &runs)
- if err != nil {
- return nil, resp, err
- }
-
- return runs, resp, nil
-}
-
-// GetWorkflowRunByID gets a specific workflow run by ID.
-//
-// GitHub API docs: https://docs.github.com/en/rest/actions/workflow-runs#get-a-workflow-run
-func (s *ActionsService) GetWorkflowRunByID(ctx context.Context, owner, repo string, runID int64) (*WorkflowRun, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/actions/runs/%v", owner, repo, runID)
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- run := new(WorkflowRun)
- resp, err := s.client.Do(ctx, req, run)
- if err != nil {
- return nil, resp, err
- }
-
- return run, resp, nil
-}
-
-// GetWorkflowRunAttempt gets a specific workflow run attempt.
-//
-// GitHub API docs: https://docs.github.com/en/rest/actions/workflow-runs#get-a-workflow-run-attempt
-func (s *ActionsService) GetWorkflowRunAttempt(ctx context.Context, owner, repo string, runID int64, attemptNumber int, opts *WorkflowRunAttemptOptions) (*WorkflowRun, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/actions/runs/%v/attempts/%v", owner, repo, runID, attemptNumber)
- u, err := addOptions(u, opts)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- run := new(WorkflowRun)
- resp, err := s.client.Do(ctx, req, run)
- if err != nil {
- return nil, resp, err
- }
-
- return run, resp, nil
-}
-
-// GetWorkflowRunAttemptLogs gets a redirect URL to download a plain text file of logs for a specific attempt of a workflow run.
-//
-// GitHub API docs: https://docs.github.com/en/rest/actions/workflow-runs#download-workflow-run-attempt-logs
-func (s *ActionsService) GetWorkflowRunAttemptLogs(ctx context.Context, owner, repo string, runID int64, attemptNumber int, followRedirects bool) (*url.URL, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/actions/runs/%v/attempts/%v/logs", owner, repo, runID, attemptNumber)
-
- resp, err := s.client.roundTripWithOptionalFollowRedirect(ctx, u, followRedirects)
- if err != nil {
- return nil, nil, err
- }
- defer resp.Body.Close()
-
- if resp.StatusCode != http.StatusFound {
- return nil, newResponse(resp), fmt.Errorf("unexpected status code: %s", resp.Status)
- }
-
- parsedURL, err := url.Parse(resp.Header.Get("Location"))
- return parsedURL, newResponse(resp), err
-}
-
-// RerunWorkflowByID re-runs a workflow by ID.
-//
-// GitHub API docs: https://docs.github.com/en/rest/actions/workflow-runs#re-run-a-workflow
-func (s *ActionsService) RerunWorkflowByID(ctx context.Context, owner, repo string, runID int64) (*Response, error) {
- u := fmt.Sprintf("repos/%v/%v/actions/runs/%v/rerun", owner, repo, runID)
-
- req, err := s.client.NewRequest("POST", u, nil)
- if err != nil {
- return nil, err
- }
-
- return s.client.Do(ctx, req, nil)
-}
-
-// RerunFailedJobsByID re-runs all of the failed jobs and their dependent jobs in a workflow run by ID.
-//
-// GitHub API docs: https://docs.github.com/en/rest/actions/workflow-runs#re-run-failed-jobs-from-a-workflow-run
-func (s *ActionsService) RerunFailedJobsByID(ctx context.Context, owner, repo string, runID int64) (*Response, error) {
- u := fmt.Sprintf("repos/%v/%v/actions/runs/%v/rerun-failed-jobs", owner, repo, runID)
-
- req, err := s.client.NewRequest("POST", u, nil)
- if err != nil {
- return nil, err
- }
-
- return s.client.Do(ctx, req, nil)
-}
-
-// RerunJobByID re-runs a job and its dependent jobs in a workflow run by ID.
-//
-// GitHub API docs: https://docs.github.com/en/rest/actions/workflow-runs#re-run-a-job-from-a-workflow-run
-func (s *ActionsService) RerunJobByID(ctx context.Context, owner, repo string, jobID int64) (*Response, error) {
- u := fmt.Sprintf("repos/%v/%v/actions/jobs/%v/rerun", owner, repo, jobID)
-
- req, err := s.client.NewRequest("POST", u, nil)
- if err != nil {
- return nil, err
- }
-
- return s.client.Do(ctx, req, nil)
-}
-
-// CancelWorkflowRunByID cancels a workflow run by ID.
-//
-// GitHub API docs: https://docs.github.com/en/rest/actions/workflow-runs#cancel-a-workflow-run
-func (s *ActionsService) CancelWorkflowRunByID(ctx context.Context, owner, repo string, runID int64) (*Response, error) {
- u := fmt.Sprintf("repos/%v/%v/actions/runs/%v/cancel", owner, repo, runID)
-
- req, err := s.client.NewRequest("POST", u, nil)
- if err != nil {
- return nil, err
- }
-
- return s.client.Do(ctx, req, nil)
-}
-
-// GetWorkflowRunLogs gets a redirect URL to download a plain text file of logs for a workflow run.
-//
-// GitHub API docs: https://docs.github.com/en/rest/actions/workflow-runs#download-workflow-run-logs
-func (s *ActionsService) GetWorkflowRunLogs(ctx context.Context, owner, repo string, runID int64, followRedirects bool) (*url.URL, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/actions/runs/%v/logs", owner, repo, runID)
-
- resp, err := s.client.roundTripWithOptionalFollowRedirect(ctx, u, followRedirects)
- if err != nil {
- return nil, nil, err
- }
- defer resp.Body.Close()
-
- if resp.StatusCode != http.StatusFound {
- return nil, newResponse(resp), fmt.Errorf("unexpected status code: %s", resp.Status)
- }
-
- parsedURL, err := url.Parse(resp.Header.Get("Location"))
- return parsedURL, newResponse(resp), err
-}
-
-// DeleteWorkflowRun deletes a workflow run by ID.
-//
-// GitHub API docs: https://docs.github.com/en/rest/actions/workflow-runs#delete-a-workflow-run
-func (s *ActionsService) DeleteWorkflowRun(ctx context.Context, owner, repo string, runID int64) (*Response, error) {
- u := fmt.Sprintf("repos/%v/%v/actions/runs/%v", owner, repo, runID)
-
- req, err := s.client.NewRequest("DELETE", u, nil)
- if err != nil {
- return nil, err
- }
-
- return s.client.Do(ctx, req, nil)
-}
-
-// DeleteWorkflowRunLogs deletes all logs for a workflow run.
-//
-// GitHub API docs: https://docs.github.com/en/rest/actions/workflow-runs#delete-workflow-run-logs
-func (s *ActionsService) DeleteWorkflowRunLogs(ctx context.Context, owner, repo string, runID int64) (*Response, error) {
- u := fmt.Sprintf("repos/%v/%v/actions/runs/%v/logs", owner, repo, runID)
-
- req, err := s.client.NewRequest("DELETE", u, nil)
- if err != nil {
- return nil, err
- }
-
- return s.client.Do(ctx, req, nil)
-}
-
-// GetWorkflowRunUsageByID gets the usage of a specific workflow run by run ID, in units of billable milliseconds.
-//
-// GitHub API docs: https://docs.github.com/en/rest/actions/workflow-runs#get-workflow-run-usage
-func (s *ActionsService) GetWorkflowRunUsageByID(ctx context.Context, owner, repo string, runID int64) (*WorkflowRunUsage, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/actions/runs/%v/timing", owner, repo, runID)
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- workflowRunUsage := new(WorkflowRunUsage)
- resp, err := s.client.Do(ctx, req, workflowRunUsage)
- if err != nil {
- return nil, resp, err
- }
-
- return workflowRunUsage, resp, nil
-}
-
-// PendingDeployments approves or rejects pending deployments that are waiting on approval by a required reviewer.
-//
-// GitHub API docs: https://docs.github.com/en/rest/actions/workflow-runs#review-pending-deployments-for-a-workflow-run
-func (s *ActionsService) PendingDeployments(ctx context.Context, owner, repo string, runID int64, request *PendingDeploymentsRequest) ([]*Deployment, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/actions/runs/%v/pending_deployments", owner, repo, runID)
-
- req, err := s.client.NewRequest("POST", u, request)
- if err != nil {
- return nil, nil, err
- }
-
- var deployments []*Deployment
- resp, err := s.client.Do(ctx, req, &deployments)
- if err != nil {
- return nil, resp, err
- }
-
- return deployments, resp, nil
-}
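
A minimal pagination sketch against the v53 workflow-run signatures shown above; owner, repo, and filter values are illustrative:

package main

import (
	"context"
	"fmt"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil)

	opts := &github.ListWorkflowRunsOptions{
		Status:      "completed",
		ListOptions: github.ListOptions{PerPage: 100},
	}
	for {
		runs, resp, err := client.Actions.ListRepositoryWorkflowRuns(ctx, "cloudbase", "garm", opts)
		if err != nil {
			fmt.Println("list workflow runs:", err)
			return
		}
		for _, run := range runs.WorkflowRuns {
			fmt.Println(run.GetID(), run.GetStatus(), run.GetConclusion())
		}
		if resp.NextPage == 0 {
			break
		}
		opts.Page = resp.NextPage // ListOptions is embedded, so Page is promoted
	}
}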
diff --git a/vendor/github.com/google/go-github/v53/github/apps.go b/vendor/github.com/google/go-github/v53/github/apps.go
deleted file mode 100644
index ab83d59a..00000000
--- a/vendor/github.com/google/go-github/v53/github/apps.go
+++ /dev/null
@@ -1,359 +0,0 @@
-// Copyright 2016 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "context"
- "fmt"
-)
-
-// AppsService provides access to the installation related functions
-// in the GitHub API.
-//
-// GitHub API docs: https://docs.github.com/en/rest/apps/
-type AppsService service
-
-// App represents a GitHub App.
-type App struct {
- ID *int64 `json:"id,omitempty"`
- Slug *string `json:"slug,omitempty"`
- NodeID *string `json:"node_id,omitempty"`
- Owner *User `json:"owner,omitempty"`
- Name *string `json:"name,omitempty"`
- Description *string `json:"description,omitempty"`
- ExternalURL *string `json:"external_url,omitempty"`
- HTMLURL *string `json:"html_url,omitempty"`
- CreatedAt *Timestamp `json:"created_at,omitempty"`
- UpdatedAt *Timestamp `json:"updated_at,omitempty"`
- Permissions *InstallationPermissions `json:"permissions,omitempty"`
- Events []string `json:"events,omitempty"`
- InstallationsCount *int `json:"installations_count,omitempty"`
-}
-
-// InstallationToken represents an installation token.
-type InstallationToken struct {
- Token *string `json:"token,omitempty"`
- ExpiresAt *Timestamp `json:"expires_at,omitempty"`
- Permissions *InstallationPermissions `json:"permissions,omitempty"`
- Repositories []*Repository `json:"repositories,omitempty"`
-}
-
-// InstallationTokenOptions allow restricting a token's access to specific repositories.
-type InstallationTokenOptions struct {
- // The IDs of the repositories that the installation token can access.
- // Providing repository IDs restricts the access of an installation token to specific repositories.
- RepositoryIDs []int64 `json:"repository_ids,omitempty"`
-
- // The names of the repositories that the installation token can access.
- // Providing repository names restricts the access of an installation token to specific repositories.
- Repositories []string `json:"repositories,omitempty"`
-
- // The permissions granted to the access token.
- // The permissions object includes the permission names and their access type.
- Permissions *InstallationPermissions `json:"permissions,omitempty"`
-}
-
-// InstallationPermissions lists the repository and organization permissions for an installation.
-//
-// Permission names taken from:
-//
-// https://docs.github.com/en/enterprise-server@3.0/rest/apps#create-an-installation-access-token-for-an-app
-// https://docs.github.com/en/rest/apps#create-an-installation-access-token-for-an-app
-type InstallationPermissions struct {
- Actions *string `json:"actions,omitempty"`
- Administration *string `json:"administration,omitempty"`
- Blocking *string `json:"blocking,omitempty"`
- Checks *string `json:"checks,omitempty"`
- Contents *string `json:"contents,omitempty"`
- ContentReferences *string `json:"content_references,omitempty"`
- Deployments *string `json:"deployments,omitempty"`
- Emails *string `json:"emails,omitempty"`
- Environments *string `json:"environments,omitempty"`
- Followers *string `json:"followers,omitempty"`
- Issues *string `json:"issues,omitempty"`
- Metadata *string `json:"metadata,omitempty"`
- Members *string `json:"members,omitempty"`
- OrganizationAdministration *string `json:"organization_administration,omitempty"`
- OrganizationCustomRoles *string `json:"organization_custom_roles,omitempty"`
- OrganizationHooks *string `json:"organization_hooks,omitempty"`
- OrganizationPackages *string `json:"organization_packages,omitempty"`
- OrganizationPlan *string `json:"organization_plan,omitempty"`
- OrganizationPreReceiveHooks *string `json:"organization_pre_receive_hooks,omitempty"`
- OrganizationProjects *string `json:"organization_projects,omitempty"`
- OrganizationSecrets *string `json:"organization_secrets,omitempty"`
- OrganizationSelfHostedRunners *string `json:"organization_self_hosted_runners,omitempty"`
- OrganizationUserBlocking *string `json:"organization_user_blocking,omitempty"`
- Packages *string `json:"packages,omitempty"`
- Pages *string `json:"pages,omitempty"`
- PullRequests *string `json:"pull_requests,omitempty"`
- RepositoryHooks *string `json:"repository_hooks,omitempty"`
- RepositoryProjects *string `json:"repository_projects,omitempty"`
- RepositoryPreReceiveHooks *string `json:"repository_pre_receive_hooks,omitempty"`
- Secrets *string `json:"secrets,omitempty"`
- SecretScanningAlerts *string `json:"secret_scanning_alerts,omitempty"`
- SecurityEvents *string `json:"security_events,omitempty"`
- SingleFile *string `json:"single_file,omitempty"`
- Statuses *string `json:"statuses,omitempty"`
- TeamDiscussions *string `json:"team_discussions,omitempty"`
- VulnerabilityAlerts *string `json:"vulnerability_alerts,omitempty"`
- Workflows *string `json:"workflows,omitempty"`
-}
-
-// Installation represents a GitHub Apps installation.
-type Installation struct {
- ID *int64 `json:"id,omitempty"`
- NodeID *string `json:"node_id,omitempty"`
- AppID *int64 `json:"app_id,omitempty"`
- AppSlug *string `json:"app_slug,omitempty"`
- TargetID *int64 `json:"target_id,omitempty"`
- Account *User `json:"account,omitempty"`
- AccessTokensURL *string `json:"access_tokens_url,omitempty"`
- RepositoriesURL *string `json:"repositories_url,omitempty"`
- HTMLURL *string `json:"html_url,omitempty"`
- TargetType *string `json:"target_type,omitempty"`
- SingleFileName *string `json:"single_file_name,omitempty"`
- RepositorySelection *string `json:"repository_selection,omitempty"`
- Events []string `json:"events,omitempty"`
- SingleFilePaths []string `json:"single_file_paths,omitempty"`
- Permissions *InstallationPermissions `json:"permissions,omitempty"`
- CreatedAt *Timestamp `json:"created_at,omitempty"`
- UpdatedAt *Timestamp `json:"updated_at,omitempty"`
- HasMultipleSingleFiles *bool `json:"has_multiple_single_files,omitempty"`
- SuspendedBy *User `json:"suspended_by,omitempty"`
- SuspendedAt *Timestamp `json:"suspended_at,omitempty"`
-}
-
-// Attachment represents a GitHub Apps attachment.
-type Attachment struct {
- ID *int64 `json:"id,omitempty"`
- Title *string `json:"title,omitempty"`
- Body *string `json:"body,omitempty"`
-}
-
-// ContentReference represents a reference to a URL in an issue or pull request.
-type ContentReference struct {
- ID *int64 `json:"id,omitempty"`
- NodeID *string `json:"node_id,omitempty"`
- Reference *string `json:"reference,omitempty"`
-}
-
-func (i Installation) String() string {
- return Stringify(i)
-}
-
-// Get a single GitHub App. Passing the empty string will get
-// the authenticated GitHub App.
-//
-// Note: appSlug is just the URL-friendly name of your GitHub App.
-// You can find this on the settings page for your GitHub App
-// (e.g., https://github.com/settings/apps/:app_slug).
-//
-// GitHub API docs: https://docs.github.com/en/rest/apps/apps#get-the-authenticated-app
-// GitHub API docs: https://docs.github.com/en/rest/apps/apps#get-an-app
-func (s *AppsService) Get(ctx context.Context, appSlug string) (*App, *Response, error) {
- var u string
- if appSlug != "" {
- u = fmt.Sprintf("apps/%v", appSlug)
- } else {
- u = "app"
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- app := new(App)
- resp, err := s.client.Do(ctx, req, app)
- if err != nil {
- return nil, resp, err
- }
-
- return app, resp, nil
-}
-
-// ListInstallations lists the installations that the current GitHub App has.
-//
-// GitHub API docs: https://docs.github.com/en/rest/apps/apps#list-installations-for-the-authenticated-app
-func (s *AppsService) ListInstallations(ctx context.Context, opts *ListOptions) ([]*Installation, *Response, error) {
- u, err := addOptions("app/installations", opts)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- var i []*Installation
- resp, err := s.client.Do(ctx, req, &i)
- if err != nil {
- return nil, resp, err
- }
-
- return i, resp, nil
-}
-
-// GetInstallation returns the specified installation.
-//
-// GitHub API docs: https://docs.github.com/en/rest/apps/apps#get-an-installation-for-the-authenticated-app
-func (s *AppsService) GetInstallation(ctx context.Context, id int64) (*Installation, *Response, error) {
- return s.getInstallation(ctx, fmt.Sprintf("app/installations/%v", id))
-}
-
-// ListUserInstallations lists installations that are accessible to the authenticated user.
-//
-// GitHub API docs: https://docs.github.com/en/rest/apps/installations#list-app-installations-accessible-to-the-user-access-token
-func (s *AppsService) ListUserInstallations(ctx context.Context, opts *ListOptions) ([]*Installation, *Response, error) {
- u, err := addOptions("user/installations", opts)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- var i struct {
- Installations []*Installation `json:"installations"`
- }
- resp, err := s.client.Do(ctx, req, &i)
- if err != nil {
- return nil, resp, err
- }
-
- return i.Installations, resp, nil
-}
-
-// SuspendInstallation suspends the specified installation.
-//
-// GitHub API docs: https://docs.github.com/en/rest/apps/apps#suspend-an-app-installation
-func (s *AppsService) SuspendInstallation(ctx context.Context, id int64) (*Response, error) {
- u := fmt.Sprintf("app/installations/%v/suspended", id)
-
- req, err := s.client.NewRequest("PUT", u, nil)
- if err != nil {
- return nil, err
- }
-
- return s.client.Do(ctx, req, nil)
-}
-
-// UnsuspendInstallation unsuspends the specified installation.
-//
-// GitHub API docs: https://docs.github.com/en/rest/apps/apps#unsuspend-an-app-installation
-func (s *AppsService) UnsuspendInstallation(ctx context.Context, id int64) (*Response, error) {
- u := fmt.Sprintf("app/installations/%v/suspended", id)
-
- req, err := s.client.NewRequest("DELETE", u, nil)
- if err != nil {
- return nil, err
- }
-
- return s.client.Do(ctx, req, nil)
-}
-
-// DeleteInstallation deletes the specified installation.
-//
-// GitHub API docs: https://docs.github.com/en/rest/apps/apps#delete-an-installation-for-the-authenticated-app
-func (s *AppsService) DeleteInstallation(ctx context.Context, id int64) (*Response, error) {
- u := fmt.Sprintf("app/installations/%v", id)
-
- req, err := s.client.NewRequest("DELETE", u, nil)
- if err != nil {
- return nil, err
- }
-
- return s.client.Do(ctx, req, nil)
-}
-
-// CreateInstallationToken creates a new installation token.
-//
-// GitHub API docs: https://docs.github.com/en/rest/apps/apps#create-an-installation-access-token-for-an-app
-func (s *AppsService) CreateInstallationToken(ctx context.Context, id int64, opts *InstallationTokenOptions) (*InstallationToken, *Response, error) {
- u := fmt.Sprintf("app/installations/%v/access_tokens", id)
-
- req, err := s.client.NewRequest("POST", u, opts)
- if err != nil {
- return nil, nil, err
- }
-
- t := new(InstallationToken)
- resp, err := s.client.Do(ctx, req, t)
- if err != nil {
- return nil, resp, err
- }
-
- return t, resp, nil
-}
-
-// CreateAttachment creates a new attachment on a user comment containing a URL.
-//
-// TODO: Find GitHub API docs.
-func (s *AppsService) CreateAttachment(ctx context.Context, contentReferenceID int64, title, body string) (*Attachment, *Response, error) {
- u := fmt.Sprintf("content_references/%v/attachments", contentReferenceID)
- payload := &Attachment{Title: String(title), Body: String(body)}
- req, err := s.client.NewRequest("POST", u, payload)
- if err != nil {
- return nil, nil, err
- }
-
- // TODO: remove custom Accept headers when APIs fully launch.
- req.Header.Set("Accept", mediaTypeContentAttachmentsPreview)
-
- m := &Attachment{}
- resp, err := s.client.Do(ctx, req, m)
- if err != nil {
- return nil, resp, err
- }
-
- return m, resp, nil
-}
-
-// FindOrganizationInstallation finds the organization's installation information.
-//
-// GitHub API docs: https://docs.github.com/en/rest/apps/apps#get-an-organization-installation-for-the-authenticated-app
-func (s *AppsService) FindOrganizationInstallation(ctx context.Context, org string) (*Installation, *Response, error) {
- return s.getInstallation(ctx, fmt.Sprintf("orgs/%v/installation", org))
-}
-
-// FindRepositoryInstallation finds the repository's installation information.
-//
-// GitHub API docs: https://docs.github.com/en/rest/apps/apps#get-a-repository-installation-for-the-authenticated-app
-func (s *AppsService) FindRepositoryInstallation(ctx context.Context, owner, repo string) (*Installation, *Response, error) {
- return s.getInstallation(ctx, fmt.Sprintf("repos/%v/%v/installation", owner, repo))
-}
-
-// FindRepositoryInstallationByID finds the repository's installation information.
-//
-// Note: FindRepositoryInstallationByID uses the undocumented GitHub API endpoint /repositories/:id/installation.
-func (s *AppsService) FindRepositoryInstallationByID(ctx context.Context, id int64) (*Installation, *Response, error) {
- return s.getInstallation(ctx, fmt.Sprintf("repositories/%d/installation", id))
-}
-
-// FindUserInstallation finds the user's installation information.
-//
-// GitHub API docs: https://docs.github.com/en/rest/apps/apps#get-a-user-installation-for-the-authenticated-app
-func (s *AppsService) FindUserInstallation(ctx context.Context, user string) (*Installation, *Response, error) {
- return s.getInstallation(ctx, fmt.Sprintf("users/%v/installation", user))
-}
-
-func (s *AppsService) getInstallation(ctx context.Context, url string) (*Installation, *Response, error) {
- req, err := s.client.NewRequest("GET", url, nil)
- if err != nil {
- return nil, nil, err
- }
-
- i := new(Installation)
- resp, err := s.client.Do(ctx, req, i)
- if err != nil {
- return nil, resp, err
- }
-
- return i, resp, nil
-}
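
CreateInstallationToken is the call GitHub App integrations use to mint short-lived, repository-scoped tokens. A minimal sketch, assuming appClient already authenticates as the App via a JWT transport (not shown); the installation ID parameter and repo name are illustrative:

package sketch

import (
	"context"

	"github.com/google/go-github/v53/github"
)

// newScopedToken mints an installation token restricted to a single repository.
func newScopedToken(ctx context.Context, appClient *github.Client, installationID int64) (string, error) {
	tok, _, err := appClient.Apps.CreateInstallationToken(ctx, installationID, &github.InstallationTokenOptions{
		Repositories: []string{"garm"}, // restrict the token instead of granting every installation repo
	})
	if err != nil {
		return "", err
	}
	return tok.GetToken(), nil
}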
diff --git a/vendor/github.com/google/go-github/v53/github/enterprise_actions_runners.go b/vendor/github.com/google/go-github/v53/github/enterprise_actions_runners.go
deleted file mode 100644
index daafc5e6..00000000
--- a/vendor/github.com/google/go-github/v53/github/enterprise_actions_runners.go
+++ /dev/null
@@ -1,88 +0,0 @@
-// Copyright 2020 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "context"
- "fmt"
-)
-
-// ListRunnerApplicationDownloads lists self-hosted runner application binaries that can be downloaded and run.
-//
-// GitHub API docs: https://docs.github.com/en/rest/actions/self-hosted-runners#list-runner-applications-for-an-enterprise
-func (s *EnterpriseService) ListRunnerApplicationDownloads(ctx context.Context, enterprise string) ([]*RunnerApplicationDownload, *Response, error) {
- u := fmt.Sprintf("enterprises/%v/actions/runners/downloads", enterprise)
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- var rads []*RunnerApplicationDownload
- resp, err := s.client.Do(ctx, req, &rads)
- if err != nil {
- return nil, resp, err
- }
-
- return rads, resp, nil
-}
-
-// CreateRegistrationToken creates a token that can be used to add a self-hosted runner.
-//
-// GitHub API docs: https://docs.github.com/en/rest/actions/self-hosted-runners#create-a-registration-token-for-an-enterprise
-func (s *EnterpriseService) CreateRegistrationToken(ctx context.Context, enterprise string) (*RegistrationToken, *Response, error) {
- u := fmt.Sprintf("enterprises/%v/actions/runners/registration-token", enterprise)
-
- req, err := s.client.NewRequest("POST", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- registrationToken := new(RegistrationToken)
- resp, err := s.client.Do(ctx, req, registrationToken)
- if err != nil {
- return nil, resp, err
- }
-
- return registrationToken, resp, nil
-}
-
-// ListRunners lists all the self-hosted runners for an enterprise.
-//
-// GitHub API docs: https://docs.github.com/en/rest/actions/self-hosted-runners#list-self-hosted-runners-for-an-enterprise
-func (s *EnterpriseService) ListRunners(ctx context.Context, enterprise string, opts *ListOptions) (*Runners, *Response, error) {
- u := fmt.Sprintf("enterprises/%v/actions/runners", enterprise)
- u, err := addOptions(u, opts)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- runners := &Runners{}
- resp, err := s.client.Do(ctx, req, &runners)
- if err != nil {
- return nil, resp, err
- }
-
- return runners, resp, nil
-}
-
-// RemoveRunner forces the removal of a self-hosted runner from an enterprise using the runner ID.
-//
-// GitHub API docs: https://docs.github.com/en/rest/actions/self-hosted-runners#delete-a-self-hosted-runner-from-an-enterprise
-func (s *EnterpriseService) RemoveRunner(ctx context.Context, enterprise string, runnerID int64) (*Response, error) {
- u := fmt.Sprintf("enterprises/%v/actions/runners/%v", enterprise, runnerID)
-
- req, err := s.client.NewRequest("DELETE", u, nil)
- if err != nil {
- return nil, err
- }
-
- return s.client.Do(ctx, req, nil)
-}
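
A minimal sketch of the enterprise registration-token flow above; the returned token is what a runner host passes to config.sh when registering. The enterprise slug is illustrative:

package sketch

import (
	"context"

	"github.com/google/go-github/v53/github"
)

// registrationToken asks GitHub for a short-lived token used to register
// a new self-hosted runner at the enterprise level.
func registrationToken(ctx context.Context, client *github.Client) (string, error) {
	tok, _, err := client.Enterprise.CreateRegistrationToken(ctx, "my-enterprise")
	if err != nil {
		return "", err
	}
	return tok.GetToken(), nil
}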
diff --git a/vendor/github.com/google/go-github/v53/github/event.go b/vendor/github.com/google/go-github/v53/github/event.go
deleted file mode 100644
index 4ee25603..00000000
--- a/vendor/github.com/google/go-github/v53/github/event.go
+++ /dev/null
@@ -1,167 +0,0 @@
-// Copyright 2018 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "encoding/json"
-)
-
-// Event represents a GitHub event.
-type Event struct {
- Type *string `json:"type,omitempty"`
- Public *bool `json:"public,omitempty"`
- RawPayload *json.RawMessage `json:"payload,omitempty"`
- Repo *Repository `json:"repo,omitempty"`
- Actor *User `json:"actor,omitempty"`
- Org *Organization `json:"org,omitempty"`
- CreatedAt *Timestamp `json:"created_at,omitempty"`
- ID *string `json:"id,omitempty"`
-}
-
-func (e Event) String() string {
- return Stringify(e)
-}
-
-// ParsePayload parses the event payload. For recognized event types,
-// a value of the corresponding struct type will be returned.
-func (e *Event) ParsePayload() (payload interface{}, err error) {
- switch *e.Type {
- case "BranchProtectionRuleEvent":
- payload = &BranchProtectionRuleEvent{}
- case "CheckRunEvent":
- payload = &CheckRunEvent{}
- case "CheckSuiteEvent":
- payload = &CheckSuiteEvent{}
- case "CodeScanningAlertEvent":
- payload = &CodeScanningAlertEvent{}
- case "CommitCommentEvent":
- payload = &CommitCommentEvent{}
- case "ContentReferenceEvent":
- payload = &ContentReferenceEvent{}
- case "CreateEvent":
- payload = &CreateEvent{}
- case "DeleteEvent":
- payload = &DeleteEvent{}
- case "DeployKeyEvent":
- payload = &DeployKeyEvent{}
- case "DeploymentEvent":
- payload = &DeploymentEvent{}
- case "DeploymentProtectionRuleEvent":
- payload = &DeploymentProtectionRuleEvent{}
- case "DeploymentStatusEvent":
- payload = &DeploymentStatusEvent{}
- case "DiscussionEvent":
- payload = &DiscussionEvent{}
- case "DiscussionCommentEvent":
- payload = &DiscussionCommentEvent{}
- case "ForkEvent":
- payload = &ForkEvent{}
- case "GitHubAppAuthorizationEvent":
- payload = &GitHubAppAuthorizationEvent{}
- case "GollumEvent":
- payload = &GollumEvent{}
- case "InstallationEvent":
- payload = &InstallationEvent{}
- case "InstallationRepositoriesEvent":
- payload = &InstallationRepositoriesEvent{}
- case "IssueCommentEvent":
- payload = &IssueCommentEvent{}
- case "IssuesEvent":
- payload = &IssuesEvent{}
- case "LabelEvent":
- payload = &LabelEvent{}
- case "MarketplacePurchaseEvent":
- payload = &MarketplacePurchaseEvent{}
- case "MemberEvent":
- payload = &MemberEvent{}
- case "MembershipEvent":
- payload = &MembershipEvent{}
- case "MergeGroupEvent":
- payload = &MergeGroupEvent{}
- case "MetaEvent":
- payload = &MetaEvent{}
- case "MilestoneEvent":
- payload = &MilestoneEvent{}
- case "OrganizationEvent":
- payload = &OrganizationEvent{}
- case "OrgBlockEvent":
- payload = &OrgBlockEvent{}
- case "PackageEvent":
- payload = &PackageEvent{}
- case "PageBuildEvent":
- payload = &PageBuildEvent{}
- case "PingEvent":
- payload = &PingEvent{}
- case "ProjectEvent":
- payload = &ProjectEvent{}
- case "ProjectCardEvent":
- payload = &ProjectCardEvent{}
- case "ProjectColumnEvent":
- payload = &ProjectColumnEvent{}
- case "PublicEvent":
- payload = &PublicEvent{}
- case "PullRequestEvent":
- payload = &PullRequestEvent{}
- case "PullRequestReviewEvent":
- payload = &PullRequestReviewEvent{}
- case "PullRequestReviewCommentEvent":
- payload = &PullRequestReviewCommentEvent{}
- case "PullRequestReviewThreadEvent":
- payload = &PullRequestReviewThreadEvent{}
- case "PullRequestTargetEvent":
- payload = &PullRequestTargetEvent{}
- case "PushEvent":
- payload = &PushEvent{}
- case "ReleaseEvent":
- payload = &ReleaseEvent{}
- case "RepositoryEvent":
- payload = &RepositoryEvent{}
- case "RepositoryDispatchEvent":
- payload = &RepositoryDispatchEvent{}
- case "RepositoryImportEvent":
- payload = &RepositoryImportEvent{}
- case "RepositoryVulnerabilityAlertEvent":
- payload = &RepositoryVulnerabilityAlertEvent{}
- case "SecretScanningAlertEvent":
- payload = &SecretScanningAlertEvent{}
- case "SecurityAdvisoryEvent":
- payload = &SecurityAdvisoryEvent{}
- case "StarEvent":
- payload = &StarEvent{}
- case "StatusEvent":
- payload = &StatusEvent{}
- case "TeamEvent":
- payload = &TeamEvent{}
- case "TeamAddEvent":
- payload = &TeamAddEvent{}
- case "UserEvent":
- payload = &UserEvent{}
- case "WatchEvent":
- payload = &WatchEvent{}
- case "WorkflowDispatchEvent":
- payload = &WorkflowDispatchEvent{}
- case "WorkflowJobEvent":
- payload = &WorkflowJobEvent{}
- case "WorkflowRunEvent":
- payload = &WorkflowRunEvent{}
- }
- err = json.Unmarshal(*e.RawPayload, &payload)
- return payload, err
-}
-
-// Payload returns the parsed event payload. For recognized event types,
-// a value of the corresponding struct type will be returned.
-//
-// Deprecated: Use ParsePayload instead, which returns an error
-// rather than panicking if JSON unmarshaling of the raw payload fails.
-func (e *Event) Payload() (payload interface{}) {
- var err error
- payload, err = e.ParsePayload()
- if err != nil {
- panic(err)
- }
- return payload
-}
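
ParsePayload returns an interface{}, so consumers recover the concrete event with a type switch over the cases listed above. A minimal sketch:

package sketch

import (
	"fmt"

	"github.com/google/go-github/v53/github"
)

// handleEvent dispatches on the concrete payload type of an event.
func handleEvent(e *github.Event) error {
	payload, err := e.ParsePayload()
	if err != nil {
		return err
	}
	switch p := payload.(type) {
	case *github.WorkflowJobEvent:
		fmt.Println("workflow job:", p.GetAction())
	case *github.PushEvent:
		fmt.Println("push to:", p.GetRef())
	default:
		// event types outside the switch in ParsePayload unmarshal into generic JSON
	}
	return nil
}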
diff --git a/vendor/github.com/google/go-github/v53/github/misc.go b/vendor/github.com/google/go-github/v53/github/misc.go
deleted file mode 100644
index 89615241..00000000
--- a/vendor/github.com/google/go-github/v53/github/misc.go
+++ /dev/null
@@ -1,279 +0,0 @@
-// Copyright 2014 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "bytes"
- "context"
- "fmt"
- "net/url"
-)
-
-// MarkdownOptions specifies optional parameters to the Markdown method.
-type MarkdownOptions struct {
- // Mode identifies the rendering mode. Possible values are:
- // markdown - render a document as plain Markdown, just like
- // README files are rendered.
- //
- // gfm - to render a document as user-content, e.g. like user
- // comments or issues are rendered. In GFM mode, hard line breaks are
- // always taken into account, and issue and user mentions are linked
- // accordingly.
- //
- // Default is "markdown".
- Mode string
-
- // Context identifies the repository context. Only taken into account
- // when rendering as "gfm".
- Context string
-}
-
-type markdownRequest struct {
- Text *string `json:"text,omitempty"`
- Mode *string `json:"mode,omitempty"`
- Context *string `json:"context,omitempty"`
-}
-
-// Markdown renders an arbitrary Markdown document.
-//
-// GitHub API docs: https://docs.github.com/en/rest/markdown/
-func (c *Client) Markdown(ctx context.Context, text string, opts *MarkdownOptions) (string, *Response, error) {
- request := &markdownRequest{Text: String(text)}
- if opts != nil {
- if opts.Mode != "" {
- request.Mode = String(opts.Mode)
- }
- if opts.Context != "" {
- request.Context = String(opts.Context)
- }
- }
-
- req, err := c.NewRequest("POST", "markdown", request)
- if err != nil {
- return "", nil, err
- }
-
- buf := new(bytes.Buffer)
- resp, err := c.Do(ctx, req, buf)
- if err != nil {
- return "", resp, err
- }
-
- return buf.String(), resp, nil
-}
-
-// ListEmojis returns the emojis available to use on GitHub.
-//
-// GitHub API docs: https://docs.github.com/en/rest/emojis/
-func (c *Client) ListEmojis(ctx context.Context) (map[string]string, *Response, error) {
- req, err := c.NewRequest("GET", "emojis", nil)
- if err != nil {
- return nil, nil, err
- }
-
- var emoji map[string]string
- resp, err := c.Do(ctx, req, &emoji)
- if err != nil {
- return nil, resp, err
- }
-
- return emoji, resp, nil
-}
-
-// CodeOfConduct represents a code of conduct.
-type CodeOfConduct struct {
- Name *string `json:"name,omitempty"`
- Key *string `json:"key,omitempty"`
- URL *string `json:"url,omitempty"`
- Body *string `json:"body,omitempty"`
-}
-
-func (c *CodeOfConduct) String() string {
- return Stringify(c)
-}
-
-// ListCodesOfConduct returns all codes of conduct.
-//
-// GitHub API docs: https://docs.github.com/en/rest/codes_of_conduct/#list-all-codes-of-conduct
-func (c *Client) ListCodesOfConduct(ctx context.Context) ([]*CodeOfConduct, *Response, error) {
- req, err := c.NewRequest("GET", "codes_of_conduct", nil)
- if err != nil {
- return nil, nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches.
- req.Header.Set("Accept", mediaTypeCodesOfConductPreview)
-
- var cs []*CodeOfConduct
- resp, err := c.Do(ctx, req, &cs)
- if err != nil {
- return nil, resp, err
- }
-
- return cs, resp, nil
-}
-
-// GetCodeOfConduct returns an individual code of conduct.
-//
-// https://docs.github.com/en/rest/codes_of_conduct/#get-an-individual-code-of-conduct
-func (c *Client) GetCodeOfConduct(ctx context.Context, key string) (*CodeOfConduct, *Response, error) {
- u := fmt.Sprintf("codes_of_conduct/%s", key)
- req, err := c.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches.
- req.Header.Set("Accept", mediaTypeCodesOfConductPreview)
-
- coc := new(CodeOfConduct)
- resp, err := c.Do(ctx, req, coc)
- if err != nil {
- return nil, resp, err
- }
-
- return coc, resp, nil
-}
-
-// APIMeta represents metadata about the GitHub API.
-type APIMeta struct {
- // An array of IP addresses in CIDR format specifying the addresses
- // that incoming service hooks will originate from on GitHub.com.
- Hooks []string `json:"hooks,omitempty"`
-
- // An array of IP addresses in CIDR format specifying the Git servers
- // for GitHub.com.
- Git []string `json:"git,omitempty"`
-
- // Whether authentication with username and password is supported.
- // (GitHub Enterprise instances using CAS or OAuth for authentication
- // will return false. Features like Basic Authentication with a
- // username and password, sudo mode, and two-factor authentication are
- // not supported on these servers.)
- VerifiablePasswordAuthentication *bool `json:"verifiable_password_authentication,omitempty"`
-
- // An array of IP addresses in CIDR format specifying the addresses
- // which serve GitHub Pages websites.
- Pages []string `json:"pages,omitempty"`
-
- // An array of IP addresses specifying the addresses that source imports
- // will originate from on GitHub.com.
- Importer []string `json:"importer,omitempty"`
-
- // An array of IP addresses in CIDR format specifying the IP addresses
- // GitHub Actions will originate from.
- Actions []string `json:"actions,omitempty"`
-
- // An array of IP addresses in CIDR format specifying the IP addresses
- // Dependabot will originate from.
- Dependabot []string `json:"dependabot,omitempty"`
-
- // A map of algorithms to SSH key fingerprints.
- SSHKeyFingerprints map[string]string `json:"ssh_key_fingerprints,omitempty"`
-
- // An array of SSH keys.
- SSHKeys []string `json:"ssh_keys,omitempty"`
-
- // An array of IP addresses in CIDR format specifying the addresses
- // which serve GitHub websites.
- Web []string `json:"web,omitempty"`
-
- // An array of IP addresses in CIDR format specifying the addresses
- // which serve GitHub APIs.
- API []string `json:"api,omitempty"`
-}
-
-// APIMeta returns information about GitHub.com, the service. If you access
-// this endpoint on your organization’s GitHub Enterprise installation, it
-// returns information about that installation instead.
-//
-// GitHub API docs: https://docs.github.com/en/rest/meta#get-github-meta-information
-func (c *Client) APIMeta(ctx context.Context) (*APIMeta, *Response, error) {
- req, err := c.NewRequest("GET", "meta", nil)
- if err != nil {
- return nil, nil, err
- }
-
- meta := new(APIMeta)
- resp, err := c.Do(ctx, req, meta)
- if err != nil {
- return nil, resp, err
- }
-
- return meta, resp, nil
-}
-
-// Octocat returns an ASCII art octocat with the specified message in a speech
-// bubble. If message is empty, a random zen phrase is used.
-func (c *Client) Octocat(ctx context.Context, message string) (string, *Response, error) {
- u := "octocat"
- if message != "" {
- u = fmt.Sprintf("%s?s=%s", u, url.QueryEscape(message))
- }
-
- req, err := c.NewRequest("GET", u, nil)
- if err != nil {
- return "", nil, err
- }
-
- buf := new(bytes.Buffer)
- resp, err := c.Do(ctx, req, buf)
- if err != nil {
- return "", resp, err
- }
-
- return buf.String(), resp, nil
-}
-
-// Zen returns a random line from The Zen of GitHub.
-//
-// see also: http://warpspire.com/posts/taste/
-func (c *Client) Zen(ctx context.Context) (string, *Response, error) {
- req, err := c.NewRequest("GET", "zen", nil)
- if err != nil {
- return "", nil, err
- }
-
- buf := new(bytes.Buffer)
- resp, err := c.Do(ctx, req, buf)
- if err != nil {
- return "", resp, err
- }
-
- return buf.String(), resp, nil
-}
-
-// ServiceHook represents a hook that has configuration settings, a list of
-// available events, and default events.
-type ServiceHook struct {
- Name *string `json:"name,omitempty"`
- Events []string `json:"events,omitempty"`
- SupportedEvents []string `json:"supported_events,omitempty"`
- Schema [][]string `json:"schema,omitempty"`
-}
-
-func (s *ServiceHook) String() string {
- return Stringify(s)
-}
-
-// ListServiceHooks lists all of the available service hooks.
-//
-// GitHub API docs: https://developer.github.com/webhooks/#services
-func (c *Client) ListServiceHooks(ctx context.Context) ([]*ServiceHook, *Response, error) {
- u := "hooks"
- req, err := c.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- var hooks []*ServiceHook
- resp, err := c.Do(ctx, req, &hooks)
- if err != nil {
- return nil, resp, err
- }
-
- return hooks, resp, nil
-}
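
A minimal sketch of the Markdown rendering helper above, in "gfm" mode so mentions and issue references resolve against a repository context; the text and context are illustrative:

package sketch

import (
	"context"
	"fmt"

	"github.com/google/go-github/v53/github"
)

func renderComment(ctx context.Context, client *github.Client) error {
	html, _, err := client.Markdown(ctx, "**GARM** pools are healthy", &github.MarkdownOptions{
		Mode:    "gfm",
		Context: "cloudbase/garm", // repository used to resolve #123-style references
	})
	if err != nil {
		return err
	}
	fmt.Println(html)
	return nil
}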
diff --git a/vendor/github.com/google/go-github/v53/github/orgs_actions_allowed.go b/vendor/github.com/google/go-github/v53/github/orgs_actions_allowed.go
deleted file mode 100644
index e3b35b1d..00000000
--- a/vendor/github.com/google/go-github/v53/github/orgs_actions_allowed.go
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright 2021 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "context"
- "fmt"
-)
-
-// ActionsAllowed represents selected actions that are allowed.
-//
-// GitHub API docs: https://docs.github.com/en/rest/actions/permissions
-type ActionsAllowed struct {
- GithubOwnedAllowed *bool `json:"github_owned_allowed,omitempty"`
- VerifiedAllowed *bool `json:"verified_allowed,omitempty"`
- PatternsAllowed []string `json:"patterns_allowed,omitempty"`
-}
-
-func (a ActionsAllowed) String() string {
- return Stringify(a)
-}
-
-// GetActionsAllowed gets the actions that are allowed in an organization.
-//
-// GitHub API docs: https://docs.github.com/en/rest/actions/permissions#get-allowed-actions-and-reusable-workflows-for-an-organization
-func (s *OrganizationsService) GetActionsAllowed(ctx context.Context, org string) (*ActionsAllowed, *Response, error) {
- u := fmt.Sprintf("orgs/%v/actions/permissions/selected-actions", org)
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- actionsAllowed := new(ActionsAllowed)
- resp, err := s.client.Do(ctx, req, actionsAllowed)
- if err != nil {
- return nil, resp, err
- }
-
- return actionsAllowed, resp, nil
-}
-
-// EditActionsAllowed sets the actions that are allowed in an organization.
-//
-// GitHub API docs: https://docs.github.com/en/rest/actions/permissions#set-allowed-actions-and-reusable-workflows-for-an-organization
-func (s *OrganizationsService) EditActionsAllowed(ctx context.Context, org string, actionsAllowed ActionsAllowed) (*ActionsAllowed, *Response, error) {
- u := fmt.Sprintf("orgs/%v/actions/permissions/selected-actions", org)
- req, err := s.client.NewRequest("PUT", u, actionsAllowed)
- if err != nil {
- return nil, nil, err
- }
-
- p := new(ActionsAllowed)
- resp, err := s.client.Do(ctx, req, p)
- if err != nil {
- return nil, resp, err
- }
-
- return p, resp, nil
-}
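
A sketch of driving the allowed-actions helpers above, reusing ctx and client from the earlier sketch ("my-org" is a placeholder):

// Allow only GitHub-owned and verified Marketplace actions in the org.
allowed := github.ActionsAllowed{
	GithubOwnedAllowed: github.Bool(true),
	VerifiedAllowed:    github.Bool(true),
}
if _, _, err := client.Organizations.EditActionsAllowed(ctx, "my-org", allowed); err != nil {
	log.Fatal(err)
}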
diff --git a/vendor/github.com/google/go-github/v53/github/orgs_actions_permissions.go b/vendor/github.com/google/go-github/v53/github/orgs_actions_permissions.go
deleted file mode 100644
index 6d1db2ee..00000000
--- a/vendor/github.com/google/go-github/v53/github/orgs_actions_permissions.go
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright 2021 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "context"
- "fmt"
-)
-
-// ActionsPermissions represents a policy for repositories and allowed actions in an organization.
-//
-// GitHub API docs: https://docs.github.com/en/rest/actions/permissions
-type ActionsPermissions struct {
- EnabledRepositories *string `json:"enabled_repositories,omitempty"`
- AllowedActions *string `json:"allowed_actions,omitempty"`
- SelectedActionsURL *string `json:"selected_actions_url,omitempty"`
-}
-
-func (a ActionsPermissions) String() string {
- return Stringify(a)
-}
-
-// GetActionsPermissions gets the GitHub Actions permissions policy for repositories and allowed actions in an organization.
-//
-// GitHub API docs: https://docs.github.com/en/rest/actions/permissions#get-github-actions-permissions-for-an-organization
-func (s *OrganizationsService) GetActionsPermissions(ctx context.Context, org string) (*ActionsPermissions, *Response, error) {
- u := fmt.Sprintf("orgs/%v/actions/permissions", org)
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- permissions := new(ActionsPermissions)
- resp, err := s.client.Do(ctx, req, permissions)
- if err != nil {
- return nil, resp, err
- }
-
- return permissions, resp, nil
-}
-
-// EditActionsPermissions sets the permissions policy for repositories and allowed actions in an organization.
-//
-// GitHub API docs: https://docs.github.com/en/rest/actions/permissions#set-github-actions-permissions-for-an-organization
-func (s *OrganizationsService) EditActionsPermissions(ctx context.Context, org string, actionsPermissions ActionsPermissions) (*ActionsPermissions, *Response, error) {
- u := fmt.Sprintf("orgs/%v/actions/permissions", org)
- req, err := s.client.NewRequest("PUT", u, actionsPermissions)
- if err != nil {
- return nil, nil, err
- }
-
- p := new(ActionsPermissions)
- resp, err := s.client.Do(ctx, req, p)
- if err != nil {
- return nil, resp, err
- }
-
- return p, resp, nil
-}
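
The policy endpoint pairs with the allowed-actions one: setting AllowedActions to "selected" is what makes the selected-actions URL meaningful. A sketch with the same placeholders:

perms := github.ActionsPermissions{
	EnabledRepositories: github.String("selected"),
	AllowedActions:      github.String("selected"),
}
updated, _, err := client.Organizations.EditActionsPermissions(ctx, "my-org", perms)
if err != nil {
	log.Fatal(err)
}
fmt.Println(updated.GetSelectedActionsURL())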
diff --git a/vendor/github.com/google/go-github/v53/github/orgs_audit_log.go b/vendor/github.com/google/go-github/v53/github/orgs_audit_log.go
deleted file mode 100644
index e2e4692e..00000000
--- a/vendor/github.com/google/go-github/v53/github/orgs_audit_log.go
+++ /dev/null
@@ -1,148 +0,0 @@
-// Copyright 2021 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "context"
- "fmt"
-)
-
-// GetAuditLogOptions sets up optional parameters to query audit-log endpoint.
-type GetAuditLogOptions struct {
- Phrase *string `url:"phrase,omitempty"` // A search phrase. (Optional.)
- Include *string `url:"include,omitempty"` // Event type includes. Can be one of "web", "git", "all". Default: "web". (Optional.)
- Order *string `url:"order,omitempty"` // The order of audit log events. Can be one of "asc" or "desc". Default: "desc". (Optional.)
-
- ListCursorOptions
-}
-
-// HookConfig describes metadata about a webhook configuration.
-type HookConfig struct {
- ContentType *string `json:"content_type,omitempty"`
- InsecureSSL *string `json:"insecure_ssl,omitempty"`
- URL *string `json:"url,omitempty"`
-
- // Secret is returned obfuscated by GitHub, but it can be set for outgoing requests.
- Secret *string `json:"secret,omitempty"`
-}
-
-// ActorLocation contains information about reported location for an actor.
-type ActorLocation struct {
- CountryCode *string `json:"country_code,omitempty"`
-}
-
-// PolicyOverrideReason contains user-supplied information about why a policy was overridden.
-type PolicyOverrideReason struct {
- Code *string `json:"code,omitempty"`
- Message *string `json:"message,omitempty"`
-}
-
-// AuditEntry describes the fields that may be represented by various audit-log "action" entries.
-// For a list of actions see - https://docs.github.com/en/github/setting-up-and-managing-organizations-and-teams/reviewing-the-audit-log-for-your-organization#audit-log-actions
-type AuditEntry struct {
- ActorIP *string `json:"actor_ip,omitempty"`
- Action *string `json:"action,omitempty"` // The name of the action that was performed, for example `user.login` or `repo.create`.
- Active *bool `json:"active,omitempty"`
- ActiveWas *bool `json:"active_was,omitempty"`
- Actor *string `json:"actor,omitempty"` // The actor who performed the action.
- ActorLocation *ActorLocation `json:"actor_location,omitempty"`
- BlockedUser *string `json:"blocked_user,omitempty"`
- Business *string `json:"business,omitempty"`
- CancelledAt *Timestamp `json:"cancelled_at,omitempty"`
- CompletedAt *Timestamp `json:"completed_at,omitempty"`
- Conclusion *string `json:"conclusion,omitempty"`
- Config *HookConfig `json:"config,omitempty"`
- ConfigWas *HookConfig `json:"config_was,omitempty"`
- ContentType *string `json:"content_type,omitempty"`
- CreatedAt *Timestamp `json:"created_at,omitempty"`
- DeployKeyFingerprint *string `json:"deploy_key_fingerprint,omitempty"`
- DocumentID *string `json:"_document_id,omitempty"`
- Emoji *string `json:"emoji,omitempty"`
- EnvironmentName *string `json:"environment_name,omitempty"`
- Event *string `json:"event,omitempty"`
- Events []string `json:"events,omitempty"`
- EventsWere []string `json:"events_were,omitempty"`
- Explanation *string `json:"explanation,omitempty"`
- Fingerprint *string `json:"fingerprint,omitempty"`
- HashedToken *string `json:"hashed_token,omitempty"`
- HeadBranch *string `json:"head_branch,omitempty"`
- HeadSHA *string `json:"head_sha,omitempty"`
- HookID *int64 `json:"hook_id,omitempty"`
- IsHostedRunner *bool `json:"is_hosted_runner,omitempty"`
- JobName *string `json:"job_name,omitempty"`
- JobWorkflowRef *string `json:"job_workflow_ref,omitempty"`
- LimitedAvailability *bool `json:"limited_availability,omitempty"`
- Message *string `json:"message,omitempty"`
- Name *string `json:"name,omitempty"`
- OAuthApplicationID *int64 `json:"oauth_application_id,omitempty"`
- OldUser *string `json:"old_user,omitempty"`
- OldPermission *string `json:"old_permission,omitempty"` // The permission level for membership changes, for example `admin` or `read`.
- OpenSSHPublicKey *string `json:"openssh_public_key,omitempty"`
- OperationType *string `json:"operation_type,omitempty"`
- Org *string `json:"org,omitempty"`
- OrgID *int64 `json:"org_id,omitempty"`
- OverriddenCodes []string `json:"overridden_codes,omitempty"`
- Permission *string `json:"permission,omitempty"` // The permission level for membership changes, for example `admin` or `read`.
- PreviousVisibility *string `json:"previous_visibility,omitempty"`
- ProgrammaticAccessType *string `json:"programmatic_access_type,omitempty"`
- PullRequestID *int64 `json:"pull_request_id,omitempty"`
- PullRequestTitle *string `json:"pull_request_title,omitempty"`
- PullRequestURL *string `json:"pull_request_url,omitempty"`
- ReadOnly *string `json:"read_only,omitempty"`
- Reasons []*PolicyOverrideReason `json:"reasons,omitempty"`
- Repo *string `json:"repo,omitempty"`
- Repository *string `json:"repository,omitempty"`
- RepositoryPublic *bool `json:"repository_public,omitempty"`
- RunAttempt *int64 `json:"run_attempt,omitempty"`
- RunnerGroupID *int64 `json:"runner_group_id,omitempty"`
- RunnerGroupName *string `json:"runner_group_name,omitempty"`
- RunnerID *int64 `json:"runner_id,omitempty"`
- RunnerLabels []string `json:"runner_labels,omitempty"`
- RunnerName *string `json:"runner_name,omitempty"`
- RunNumber *int64 `json:"run_number,omitempty"`
- SecretsPassed []string `json:"secrets_passed,omitempty"`
- SourceVersion *string `json:"source_version,omitempty"`
- StartedAt *Timestamp `json:"started_at,omitempty"`
- TargetLogin *string `json:"target_login,omitempty"`
- TargetVersion *string `json:"target_version,omitempty"`
- Team *string `json:"team,omitempty"`
- Timestamp *Timestamp `json:"@timestamp,omitempty"` // The time the audit log event occurred, given as a [Unix timestamp](http://en.wikipedia.org/wiki/Unix_time).
- TokenID *int64 `json:"token_id,omitempty"`
- TokenScopes *string `json:"token_scopes,omitempty"`
- Topic *string `json:"topic,omitempty"`
- TransportProtocolName *string `json:"transport_protocol_name,omitempty"` // A human readable name for the protocol (for example, HTTP or SSH) used to transfer Git data.
- TransportProtocol *int `json:"transport_protocol,omitempty"` // The type of protocol (for example, HTTP=1 or SSH=2) used to transfer Git data.
- TriggerID *int64 `json:"trigger_id,omitempty"`
- User *string `json:"user,omitempty"` // The user that was affected by the action performed (if available).
- UserAgent *string `json:"user_agent,omitempty"`
- Visibility *string `json:"visibility,omitempty"` // The repository visibility, for example `public` or `private`.
- WorkflowID *int64 `json:"workflow_id,omitempty"`
- WorkflowRunID *int64 `json:"workflow_run_id,omitempty"`
-}
-
-// GetAuditLog gets the audit-log entries for an organization.
-//
-// GitHub API docs: https://docs.github.com/en/rest/orgs/orgs#get-the-audit-log-for-an-organization
-func (s *OrganizationsService) GetAuditLog(ctx context.Context, org string, opts *GetAuditLogOptions) ([]*AuditEntry, *Response, error) {
- u := fmt.Sprintf("orgs/%v/audit-log", org)
- u, err := addOptions(u, opts)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- var auditEntries []*AuditEntry
- resp, err := s.client.Do(ctx, req, &auditEntries)
- if err != nil {
- return nil, resp, err
- }
-
- return auditEntries, resp, nil
-}
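
A sketch of querying the audit log with the options type above; the phrase syntax follows GitHub's audit-log search and the query here is hypothetical:

opts := &github.GetAuditLogOptions{
	Phrase: github.String("action:org.update_member"),
	Order:  github.String("desc"),
}
entries, _, err := client.Organizations.GetAuditLog(ctx, "my-org", opts)
if err != nil {
	log.Fatal(err)
}
for _, e := range entries {
	fmt.Println(e.GetTimestamp(), e.GetActor(), e.GetAction())
}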
diff --git a/vendor/github.com/google/go-github/v53/github/orgs_projects.go b/vendor/github.com/google/go-github/v53/github/orgs_projects.go
deleted file mode 100644
index d49eae54..00000000
--- a/vendor/github.com/google/go-github/v53/github/orgs_projects.go
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2017 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "context"
- "fmt"
-)
-
-// ListProjects lists the projects for an organization.
-//
-// GitHub API docs: https://docs.github.com/en/rest/projects/projects#list-organization-projects
-func (s *OrganizationsService) ListProjects(ctx context.Context, org string, opts *ProjectListOptions) ([]*Project, *Response, error) {
- u := fmt.Sprintf("orgs/%v/projects", org)
- u, err := addOptions(u, opts)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches.
- req.Header.Set("Accept", mediaTypeProjectsPreview)
-
- var projects []*Project
- resp, err := s.client.Do(ctx, req, &projects)
- if err != nil {
- return nil, resp, err
- }
-
- return projects, resp, nil
-}
-
-// CreateProject creates a GitHub Project for the specified organization.
-//
-// GitHub API docs: https://docs.github.com/en/rest/projects/projects#create-an-organization-project
-func (s *OrganizationsService) CreateProject(ctx context.Context, org string, opts *ProjectOptions) (*Project, *Response, error) {
- u := fmt.Sprintf("orgs/%v/projects", org)
- req, err := s.client.NewRequest("POST", u, opts)
- if err != nil {
- return nil, nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches.
- req.Header.Set("Accept", mediaTypeProjectsPreview)
-
- project := &Project{}
- resp, err := s.client.Do(ctx, req, project)
- if err != nil {
- return nil, resp, err
- }
-
- return project, resp, nil
-}
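
A sketch of listing an organization's classic projects with the helper above (ProjectListOptions is defined in repos_projects.go, removed later in this diff):

projects, _, err := client.Organizations.ListProjects(ctx, "my-org",
	&github.ProjectListOptions{State: "open"})
if err != nil {
	log.Fatal(err)
}
for _, p := range projects {
	fmt.Printf("#%d %s\n", p.GetID(), p.GetName())
}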
diff --git a/vendor/github.com/google/go-github/v53/github/orgs_rules.go b/vendor/github.com/google/go-github/v53/github/orgs_rules.go
deleted file mode 100644
index a3905af8..00000000
--- a/vendor/github.com/google/go-github/v53/github/orgs_rules.go
+++ /dev/null
@@ -1,105 +0,0 @@
-// Copyright 2023 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "context"
- "fmt"
-)
-
-// GetAllOrganizationRulesets gets all the rulesets for the specified organization.
-//
-// GitHub API docs: https://docs.github.com/en/rest/orgs/rules#get-all-organization-repository-rulesets
-func (s *OrganizationsService) GetAllOrganizationRulesets(ctx context.Context, org string) ([]*Ruleset, *Response, error) {
- u := fmt.Sprintf("orgs/%v/rulesets", org)
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- var rulesets []*Ruleset
- resp, err := s.client.Do(ctx, req, &rulesets)
- if err != nil {
- return nil, resp, err
- }
-
- return rulesets, resp, nil
-}
-
-// CreateOrganizationRuleset creates a ruleset for the specified organization.
-//
-// GitHub API docs: https://docs.github.com/en/rest/orgs/rules#create-an-organization-repository-ruleset
-func (s *OrganizationsService) CreateOrganizationRuleset(ctx context.Context, org string, rs *Ruleset) (*Ruleset, *Response, error) {
- u := fmt.Sprintf("orgs/%v/rulesets", org)
-
- req, err := s.client.NewRequest("POST", u, rs)
- if err != nil {
- return nil, nil, err
- }
-
- var ruleset *Ruleset
- resp, err := s.client.Do(ctx, req, &ruleset)
- if err != nil {
- return nil, resp, err
- }
-
- return ruleset, resp, nil
-}
-
-// GetOrganizationRuleset gets a ruleset from the specified organization.
-//
-// GitHub API docs: https://docs.github.com/en/rest/orgs/rules#get-an-organization-repository-ruleset
-func (s *OrganizationsService) GetOrganizationRuleset(ctx context.Context, org string, rulesetID int64) (*Ruleset, *Response, error) {
- u := fmt.Sprintf("orgs/%v/rulesets/%v", org, rulesetID)
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- var ruleset *Ruleset
- resp, err := s.client.Do(ctx, req, &ruleset)
- if err != nil {
- return nil, resp, err
- }
-
- return ruleset, resp, nil
-}
-
-// UpdateOrganizationRuleset updates a ruleset from the specified organization.
-//
-// GitHub API docs: https://docs.github.com/en/rest/orgs/rules#update-an-organization-repository-ruleset
-func (s *OrganizationsService) UpdateOrganizationRuleset(ctx context.Context, org string, rulesetID int64, rs *Ruleset) (*Ruleset, *Response, error) {
- u := fmt.Sprintf("orgs/%v/rulesets/%v", org, rulesetID)
-
- req, err := s.client.NewRequest("PUT", u, rs)
- if err != nil {
- return nil, nil, err
- }
-
- var ruleset *Ruleset
- resp, err := s.client.Do(ctx, req, &ruleset)
- if err != nil {
- return nil, resp, err
- }
-
- return ruleset, resp, nil
-}
-
-// DeleteOrganizationRuleset deletes a ruleset from the specified organization.
-//
-// GitHub API docs: https://docs.github.com/en/rest/orgs/rules#delete-an-organization-repository-ruleset
-func (s *OrganizationsService) DeleteOrganizationRuleset(ctx context.Context, org string, rulesetID int64) (*Response, error) {
- u := fmt.Sprintf("orgs/%v/rulesets/%v", org, rulesetID)
-
- req, err := s.client.NewRequest("DELETE", u, nil)
- if err != nil {
- return nil, err
- }
-
- return s.client.Do(ctx, req, nil)
-}
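
A sketch of creating an org-wide ruleset with the CRUD helpers above. The Ruleset type itself lives elsewhere in the package; the field shapes below follow this vendored v53 copy, and the name and rule choice are hypothetical:

rs := &github.Ruleset{
	Name:        "protect-main",
	Target:      github.String("branch"),
	Enforcement: "active",
	Rules:       []*github.RepositoryRule{github.NewNonFastForwardRule()},
}
created, _, err := client.Organizations.CreateOrganizationRuleset(ctx, "my-org", rs)
if err != nil {
	log.Fatal(err)
}
fmt.Println(created.GetID())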
diff --git a/vendor/github.com/google/go-github/v53/github/packages.go b/vendor/github.com/google/go-github/v53/github/packages.go
deleted file mode 100644
index ef7df074..00000000
--- a/vendor/github.com/google/go-github/v53/github/packages.go
+++ /dev/null
@@ -1,143 +0,0 @@
-// Copyright 2020 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-// Package represents a GitHub package.
-type Package struct {
- ID *int64 `json:"id,omitempty"`
- Name *string `json:"name,omitempty"`
- PackageType *string `json:"package_type,omitempty"`
- HTMLURL *string `json:"html_url,omitempty"`
- CreatedAt *Timestamp `json:"created_at,omitempty"`
- UpdatedAt *Timestamp `json:"updated_at,omitempty"`
- Owner *User `json:"owner,omitempty"`
- PackageVersion *PackageVersion `json:"package_version,omitempty"`
- Registry *PackageRegistry `json:"registry,omitempty"`
- URL *string `json:"url,omitempty"`
- VersionCount *int64 `json:"version_count,omitempty"`
- Visibility *string `json:"visibility,omitempty"`
- Repository *Repository `json:"repository,omitempty"`
-}
-
-func (p Package) String() string {
- return Stringify(p)
-}
-
-// PackageVersion represents a GitHub package version.
-type PackageVersion struct {
- ID *int64 `json:"id,omitempty"`
- Version *string `json:"version,omitempty"`
- Summary *string `json:"summary,omitempty"`
- Body *string `json:"body,omitempty"`
- BodyHTML *string `json:"body_html,omitempty"`
- Release *PackageRelease `json:"release,omitempty"`
- Manifest *string `json:"manifest,omitempty"`
- HTMLURL *string `json:"html_url,omitempty"`
- TagName *string `json:"tag_name,omitempty"`
- TargetCommitish *string `json:"target_commitish,omitempty"`
- TargetOID *string `json:"target_oid,omitempty"`
- Draft *bool `json:"draft,omitempty"`
- Prerelease *bool `json:"prerelease,omitempty"`
- CreatedAt *Timestamp `json:"created_at,omitempty"`
- UpdatedAt *Timestamp `json:"updated_at,omitempty"`
- PackageFiles []*PackageFile `json:"package_files,omitempty"`
- Author *User `json:"author,omitempty"`
- InstallationCommand *string `json:"installation_command,omitempty"`
- Metadata *PackageMetadata `json:"metadata,omitempty"`
- PackageHTMLURL *string `json:"package_html_url,omitempty"`
- Name *string `json:"name,omitempty"`
- URL *string `json:"url,omitempty"`
-}
-
-func (pv PackageVersion) String() string {
- return Stringify(pv)
-}
-
-// PackageRelease represents a GitHub package version release.
-type PackageRelease struct {
- URL *string `json:"url,omitempty"`
- HTMLURL *string `json:"html_url,omitempty"`
- ID *int64 `json:"id,omitempty"`
- TagName *string `json:"tag_name,omitempty"`
- TargetCommitish *string `json:"target_commitish,omitempty"`
- Name *string `json:"name,omitempty"`
- Draft *bool `json:"draft,omitempty"`
- Author *User `json:"author,omitempty"`
- Prerelease *bool `json:"prerelease,omitempty"`
- CreatedAt *Timestamp `json:"created_at,omitempty"`
- PublishedAt *Timestamp `json:"published_at,omitempty"`
-}
-
-func (r PackageRelease) String() string {
- return Stringify(r)
-}
-
-// PackageFile represents a GitHub package version release file.
-type PackageFile struct {
- DownloadURL *string `json:"download_url,omitempty"`
- ID *int64 `json:"id,omitempty"`
- Name *string `json:"name,omitempty"`
- SHA256 *string `json:"sha256,omitempty"`
- SHA1 *string `json:"sha1,omitempty"`
- MD5 *string `json:"md5,omitempty"`
- ContentType *string `json:"content_type,omitempty"`
- State *string `json:"state,omitempty"`
- Author *User `json:"author,omitempty"`
- Size *int64 `json:"size,omitempty"`
- CreatedAt *Timestamp `json:"created_at,omitempty"`
- UpdatedAt *Timestamp `json:"updated_at,omitempty"`
-}
-
-func (pf PackageFile) String() string {
- return Stringify(pf)
-}
-
-// PackageRegistry represents a GitHub package registry.
-type PackageRegistry struct {
- AboutURL *string `json:"about_url,omitempty"`
- Name *string `json:"name,omitempty"`
- Type *string `json:"type,omitempty"`
- URL *string `json:"url,omitempty"`
- Vendor *string `json:"vendor,omitempty"`
-}
-
-func (r PackageRegistry) String() string {
- return Stringify(r)
-}
-
-// PackageListOptions represents the optional list options for a package.
-type PackageListOptions struct {
- // Visibility of packages "public", "internal" or "private".
- Visibility *string `url:"visibility,omitempty"`
-
- // PackageType represents the type of package.
- // It can be one of "npm", "maven", "rubygems", "nuget", "docker", or "container".
- PackageType *string `url:"package_type,omitempty"`
-
- // State of package either "active" or "deleted".
- State *string `url:"state,omitempty"`
-
- ListOptions
-}
-
-// PackageMetadata represents metadata from a package.
-type PackageMetadata struct {
- PackageType *string `json:"package_type,omitempty"`
- Container *PackageContainerMetadata `json:"container,omitempty"`
-}
-
-func (r PackageMetadata) String() string {
- return Stringify(r)
-}
-
-// PackageContainerMetadata represents container metadata for docker container packages.
-type PackageContainerMetadata struct {
- Tags []string `json:"tags,omitempty"`
-}
-
-func (r PackageContainerMetadata) String() string {
- return Stringify(r)
-}
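
These are pure data types; the generated nil-safe Get accessors are how the nested metadata is usually walked. A small helper as a sketch (the function name is ours, not part of the library):

// containerTags returns the tags of a container package version, or nil.
func containerTags(p *github.Package) []string {
	if c := p.GetPackageVersion().GetMetadata().GetContainer(); c != nil {
		return c.Tags
	}
	return nil
}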
diff --git a/vendor/github.com/google/go-github/v53/github/projects.go b/vendor/github.com/google/go-github/v53/github/projects.go
deleted file mode 100644
index df7ad6cd..00000000
--- a/vendor/github.com/google/go-github/v53/github/projects.go
+++ /dev/null
@@ -1,596 +0,0 @@
-// Copyright 2016 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "context"
- "fmt"
-)
-
-// ProjectsService provides access to the projects functions in the
-// GitHub API.
-//
-// GitHub API docs: https://docs.github.com/en/rest/projects
-type ProjectsService service
-
-// Project represents a GitHub Project.
-type Project struct {
- ID *int64 `json:"id,omitempty"`
- URL *string `json:"url,omitempty"`
- HTMLURL *string `json:"html_url,omitempty"`
- ColumnsURL *string `json:"columns_url,omitempty"`
- OwnerURL *string `json:"owner_url,omitempty"`
- Name *string `json:"name,omitempty"`
- Body *string `json:"body,omitempty"`
- Number *int `json:"number,omitempty"`
- State *string `json:"state,omitempty"`
- CreatedAt *Timestamp `json:"created_at,omitempty"`
- UpdatedAt *Timestamp `json:"updated_at,omitempty"`
- NodeID *string `json:"node_id,omitempty"`
- OrganizationPermission *string `json:"organization_permission,omitempty"`
- Private *bool `json:"private,omitempty"`
-
- // The User object that generated the project.
- Creator *User `json:"creator,omitempty"`
-}
-
-func (p Project) String() string {
- return Stringify(p)
-}
-
-// GetProject gets a GitHub Project for a repo.
-//
-// GitHub API docs: https://docs.github.com/en/rest/projects/projects#get-a-project
-func (s *ProjectsService) GetProject(ctx context.Context, id int64) (*Project, *Response, error) {
- u := fmt.Sprintf("projects/%v", id)
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- // TODO: remove custom Accept headers when APIs fully launch.
- req.Header.Set("Accept", mediaTypeProjectsPreview)
-
- project := &Project{}
- resp, err := s.client.Do(ctx, req, project)
- if err != nil {
- return nil, resp, err
- }
-
- return project, resp, nil
-}
-
-// ProjectOptions specifies the parameters to the
-// RepositoriesService.CreateProject and
-// ProjectsService.UpdateProject methods.
-type ProjectOptions struct {
- // The name of the project. (Required for creation; optional for update.)
- Name *string `json:"name,omitempty"`
- // The body of the project. (Optional.)
- Body *string `json:"body,omitempty"`
-
- // The following field(s) are only applicable for update.
- // They should be left with zero values for creation.
-
- // State of the project. Either "open" or "closed". (Optional.)
- State *string `json:"state,omitempty"`
- // The permission level that all members of the project's organization
- // will have on this project.
- // Setting the organization permission is only available
- // for organization projects. (Optional.)
- OrganizationPermission *string `json:"organization_permission,omitempty"`
- // Sets visibility of the project within the organization.
- // Setting visibility is only available
- // for organization projects.(Optional.)
- Private *bool `json:"private,omitempty"`
-}
-
-// UpdateProject updates a repository project.
-//
-// GitHub API docs: https://docs.github.com/en/rest/projects/projects#update-a-project
-func (s *ProjectsService) UpdateProject(ctx context.Context, id int64, opts *ProjectOptions) (*Project, *Response, error) {
- u := fmt.Sprintf("projects/%v", id)
- req, err := s.client.NewRequest("PATCH", u, opts)
- if err != nil {
- return nil, nil, err
- }
-
- // TODO: remove custom Accept headers when APIs fully launch.
- req.Header.Set("Accept", mediaTypeProjectsPreview)
-
- project := &Project{}
- resp, err := s.client.Do(ctx, req, project)
- if err != nil {
- return nil, resp, err
- }
-
- return project, resp, nil
-}
-
-// DeleteProject deletes a GitHub Project from a repository.
-//
-// GitHub API docs: https://docs.github.com/en/rest/projects/projects#delete-a-project
-func (s *ProjectsService) DeleteProject(ctx context.Context, id int64) (*Response, error) {
- u := fmt.Sprintf("projects/%v", id)
- req, err := s.client.NewRequest("DELETE", u, nil)
- if err != nil {
- return nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches.
- req.Header.Set("Accept", mediaTypeProjectsPreview)
-
- return s.client.Do(ctx, req, nil)
-}
-
-// ProjectColumn represents a column of a GitHub Project.
-//
-// GitHub API docs: https://docs.github.com/en/rest/repos/projects/
-type ProjectColumn struct {
- ID *int64 `json:"id,omitempty"`
- Name *string `json:"name,omitempty"`
- URL *string `json:"url,omitempty"`
- ProjectURL *string `json:"project_url,omitempty"`
- CardsURL *string `json:"cards_url,omitempty"`
- CreatedAt *Timestamp `json:"created_at,omitempty"`
- UpdatedAt *Timestamp `json:"updated_at,omitempty"`
- NodeID *string `json:"node_id,omitempty"`
-}
-
-// ListProjectColumns lists the columns of a GitHub Project for a repo.
-//
-// GitHub API docs: https://docs.github.com/en/rest/projects/columns#list-project-columns
-func (s *ProjectsService) ListProjectColumns(ctx context.Context, projectID int64, opts *ListOptions) ([]*ProjectColumn, *Response, error) {
- u := fmt.Sprintf("projects/%v/columns", projectID)
- u, err := addOptions(u, opts)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- // TODO: remove custom Accept headers when APIs fully launch.
- req.Header.Set("Accept", mediaTypeProjectsPreview)
-
- columns := []*ProjectColumn{}
- resp, err := s.client.Do(ctx, req, &columns)
- if err != nil {
- return nil, resp, err
- }
-
- return columns, resp, nil
-}
-
-// GetProjectColumn gets a column of a GitHub Project for a repo.
-//
-// GitHub API docs: https://docs.github.com/en/rest/projects/columns#get-a-project-column
-func (s *ProjectsService) GetProjectColumn(ctx context.Context, id int64) (*ProjectColumn, *Response, error) {
- u := fmt.Sprintf("projects/columns/%v", id)
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- // TODO: remove custom Accept headers when APIs fully launch.
- req.Header.Set("Accept", mediaTypeProjectsPreview)
-
- column := &ProjectColumn{}
- resp, err := s.client.Do(ctx, req, column)
- if err != nil {
- return nil, resp, err
- }
-
- return column, resp, nil
-}
-
-// ProjectColumnOptions specifies the parameters to the
-// ProjectsService.CreateProjectColumn and
-// ProjectsService.UpdateProjectColumn methods.
-type ProjectColumnOptions struct {
- // The name of the project column. (Required for creation and update.)
- Name string `json:"name"`
-}
-
-// CreateProjectColumn creates a column for the specified (by number) project.
-//
-// GitHub API docs: https://docs.github.com/en/rest/projects/columns#create-a-project-column
-func (s *ProjectsService) CreateProjectColumn(ctx context.Context, projectID int64, opts *ProjectColumnOptions) (*ProjectColumn, *Response, error) {
- u := fmt.Sprintf("projects/%v/columns", projectID)
- req, err := s.client.NewRequest("POST", u, opts)
- if err != nil {
- return nil, nil, err
- }
-
- // TODO: remove custom Accept headers when APIs fully launch.
- req.Header.Set("Accept", mediaTypeProjectsPreview)
-
- column := &ProjectColumn{}
- resp, err := s.client.Do(ctx, req, column)
- if err != nil {
- return nil, resp, err
- }
-
- return column, resp, nil
-}
-
-// UpdateProjectColumn updates a column of a GitHub Project.
-//
-// GitHub API docs: https://docs.github.com/en/rest/projects/columns#update-an-existing-project-column
-func (s *ProjectsService) UpdateProjectColumn(ctx context.Context, columnID int64, opts *ProjectColumnOptions) (*ProjectColumn, *Response, error) {
- u := fmt.Sprintf("projects/columns/%v", columnID)
- req, err := s.client.NewRequest("PATCH", u, opts)
- if err != nil {
- return nil, nil, err
- }
-
- // TODO: remove custom Accept headers when APIs fully launch.
- req.Header.Set("Accept", mediaTypeProjectsPreview)
-
- column := &ProjectColumn{}
- resp, err := s.client.Do(ctx, req, column)
- if err != nil {
- return nil, resp, err
- }
-
- return column, resp, nil
-}
-
-// DeleteProjectColumn deletes a column from a GitHub Project.
-//
-// GitHub API docs: https://docs.github.com/en/rest/projects/columns#delete-a-project-column
-func (s *ProjectsService) DeleteProjectColumn(ctx context.Context, columnID int64) (*Response, error) {
- u := fmt.Sprintf("projects/columns/%v", columnID)
- req, err := s.client.NewRequest("DELETE", u, nil)
- if err != nil {
- return nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches.
- req.Header.Set("Accept", mediaTypeProjectsPreview)
-
- return s.client.Do(ctx, req, nil)
-}
-
-// ProjectColumnMoveOptions specifies the parameters to the
-// ProjectsService.MoveProjectColumn method.
-type ProjectColumnMoveOptions struct {
- // Position can be one of "first", "last", or "after:<column_id>", where
- // <column_id> is the ID of a column in the same project. (Required.)
- Position string `json:"position"`
-}
-
-// MoveProjectColumn moves a column within a GitHub Project.
-//
-// GitHub API docs: https://docs.github.com/en/rest/projects/columns#move-a-project-column
-func (s *ProjectsService) MoveProjectColumn(ctx context.Context, columnID int64, opts *ProjectColumnMoveOptions) (*Response, error) {
- u := fmt.Sprintf("projects/columns/%v/moves", columnID)
- req, err := s.client.NewRequest("POST", u, opts)
- if err != nil {
- return nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches.
- req.Header.Set("Accept", mediaTypeProjectsPreview)
-
- return s.client.Do(ctx, req, nil)
-}
-
-// ProjectCard represents a card in a column of a GitHub Project.
-//
-// GitHub API docs: https://docs.github.com/en/rest/projects/cards/#get-a-project-card
-type ProjectCard struct {
- URL *string `json:"url,omitempty"`
- ColumnURL *string `json:"column_url,omitempty"`
- ContentURL *string `json:"content_url,omitempty"`
- ID *int64 `json:"id,omitempty"`
- Note *string `json:"note,omitempty"`
- Creator *User `json:"creator,omitempty"`
- CreatedAt *Timestamp `json:"created_at,omitempty"`
- UpdatedAt *Timestamp `json:"updated_at,omitempty"`
- NodeID *string `json:"node_id,omitempty"`
- Archived *bool `json:"archived,omitempty"`
-
- // The following fields are only populated by Webhook events.
- ColumnID *int64 `json:"column_id,omitempty"`
-
- // The following fields are only populated by Events API.
- ProjectID *int64 `json:"project_id,omitempty"`
- ProjectURL *string `json:"project_url,omitempty"`
- ColumnName *string `json:"column_name,omitempty"`
- PreviousColumnName *string `json:"previous_column_name,omitempty"` // Populated in "moved_columns_in_project" event deliveries.
-}
-
-// ProjectCardListOptions specifies the optional parameters to the
-// ProjectsService.ListProjectCards method.
-type ProjectCardListOptions struct {
- // ArchivedState is used to list all, archived, or not_archived project cards.
- // Defaults to not_archived when you omit this parameter.
- ArchivedState *string `url:"archived_state,omitempty"`
-
- ListOptions
-}
-
-// ListProjectCards lists the cards in a column of a GitHub Project.
-//
-// GitHub API docs: https://docs.github.com/en/rest/projects/cards#list-project-cards
-func (s *ProjectsService) ListProjectCards(ctx context.Context, columnID int64, opts *ProjectCardListOptions) ([]*ProjectCard, *Response, error) {
- u := fmt.Sprintf("projects/columns/%v/cards", columnID)
- u, err := addOptions(u, opts)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- // TODO: remove custom Accept headers when APIs fully launch.
- req.Header.Set("Accept", mediaTypeProjectsPreview)
-
- cards := []*ProjectCard{}
- resp, err := s.client.Do(ctx, req, &cards)
- if err != nil {
- return nil, resp, err
- }
-
- return cards, resp, nil
-}
-
-// GetProjectCard gets a card in a column of a GitHub Project.
-//
-// GitHub API docs: https://docs.github.com/en/rest/projects/cards#get-a-project-card
-func (s *ProjectsService) GetProjectCard(ctx context.Context, cardID int64) (*ProjectCard, *Response, error) {
- u := fmt.Sprintf("projects/columns/cards/%v", cardID)
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- // TODO: remove custom Accept headers when APIs fully launch.
- req.Header.Set("Accept", mediaTypeProjectsPreview)
-
- card := &ProjectCard{}
- resp, err := s.client.Do(ctx, req, card)
- if err != nil {
- return nil, resp, err
- }
-
- return card, resp, nil
-}
-
-// ProjectCardOptions specifies the parameters to the
-// ProjectsService.CreateProjectCard and
-// ProjectsService.UpdateProjectCard methods.
-type ProjectCardOptions struct {
- // The note of the card. Note and ContentID are mutually exclusive.
- Note string `json:"note,omitempty"`
- // The ID (not Number) of the Issue to associate with this card.
- // Note and ContentID are mutually exclusive.
- ContentID int64 `json:"content_id,omitempty"`
- // The type of content to associate with this card. Possible values are: "Issue" and "PullRequest".
- ContentType string `json:"content_type,omitempty"`
- // Use true to archive a project card.
- // Specify false if you need to restore a previously archived project card.
- Archived *bool `json:"archived,omitempty"`
-}
-
-// CreateProjectCard creates a card in the specified column of a GitHub Project.
-//
-// GitHub API docs: https://docs.github.com/en/rest/projects/cards#create-a-project-card
-func (s *ProjectsService) CreateProjectCard(ctx context.Context, columnID int64, opts *ProjectCardOptions) (*ProjectCard, *Response, error) {
- u := fmt.Sprintf("projects/columns/%v/cards", columnID)
- req, err := s.client.NewRequest("POST", u, opts)
- if err != nil {
- return nil, nil, err
- }
-
- // TODO: remove custom Accept headers when APIs fully launch.
- req.Header.Set("Accept", mediaTypeProjectsPreview)
-
- card := &ProjectCard{}
- resp, err := s.client.Do(ctx, req, card)
- if err != nil {
- return nil, resp, err
- }
-
- return card, resp, nil
-}
-
-// UpdateProjectCard updates a card of a GitHub Project.
-//
-// GitHub API docs: https://docs.github.com/en/rest/projects/cards#update-an-existing-project-card
-func (s *ProjectsService) UpdateProjectCard(ctx context.Context, cardID int64, opts *ProjectCardOptions) (*ProjectCard, *Response, error) {
- u := fmt.Sprintf("projects/columns/cards/%v", cardID)
- req, err := s.client.NewRequest("PATCH", u, opts)
- if err != nil {
- return nil, nil, err
- }
-
- // TODO: remove custom Accept headers when APIs fully launch.
- req.Header.Set("Accept", mediaTypeProjectsPreview)
-
- card := &ProjectCard{}
- resp, err := s.client.Do(ctx, req, card)
- if err != nil {
- return nil, resp, err
- }
-
- return card, resp, nil
-}
-
-// DeleteProjectCard deletes a card from a GitHub Project.
-//
-// GitHub API docs: https://docs.github.com/en/rest/projects/cards#delete-a-project-card
-func (s *ProjectsService) DeleteProjectCard(ctx context.Context, cardID int64) (*Response, error) {
- u := fmt.Sprintf("projects/columns/cards/%v", cardID)
- req, err := s.client.NewRequest("DELETE", u, nil)
- if err != nil {
- return nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches.
- req.Header.Set("Accept", mediaTypeProjectsPreview)
-
- return s.client.Do(ctx, req, nil)
-}
-
-// ProjectCardMoveOptions specifies the parameters to the
-// ProjectsService.MoveProjectCard method.
-type ProjectCardMoveOptions struct {
- // Position can be one of "top", "bottom", or "after:<card_id>", where
- // <card_id> is the ID of a card in the same project.
- Position string `json:"position"`
- // ColumnID is the ID of a column in the same project. Note that ColumnID
- // is required when using Position "after:<card_id>" when that card is in
- // another column; otherwise it is optional.
- ColumnID int64 `json:"column_id,omitempty"`
-}
-
-// MoveProjectCard moves a card within a GitHub Project.
-//
-// GitHub API docs: https://docs.github.com/en/rest/projects/cards#move-a-project-card
-func (s *ProjectsService) MoveProjectCard(ctx context.Context, cardID int64, opts *ProjectCardMoveOptions) (*Response, error) {
- u := fmt.Sprintf("projects/columns/cards/%v/moves", cardID)
- req, err := s.client.NewRequest("POST", u, opts)
- if err != nil {
- return nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches.
- req.Header.Set("Accept", mediaTypeProjectsPreview)
-
- return s.client.Do(ctx, req, nil)
-}
-
-// ProjectCollaboratorOptions specifies the optional parameters to the
-// ProjectsService.AddProjectCollaborator method.
-type ProjectCollaboratorOptions struct {
- // Permission specifies the permission to grant to the collaborator.
- // Possible values are:
- // "read" - can read, but not write to or administer this project.
- // "write" - can read and write, but not administer this project.
- // "admin" - can read, write and administer this project.
- //
- // Default value is "write"
- Permission *string `json:"permission,omitempty"`
-}
-
-// AddProjectCollaborator adds a collaborator to an organization project and sets
-// their permission level. You must be an organization owner or a project admin to add a collaborator.
-//
-// GitHub API docs: https://docs.github.com/en/rest/projects/collaborators#add-project-collaborator
-func (s *ProjectsService) AddProjectCollaborator(ctx context.Context, id int64, username string, opts *ProjectCollaboratorOptions) (*Response, error) {
- u := fmt.Sprintf("projects/%v/collaborators/%v", id, username)
- req, err := s.client.NewRequest("PUT", u, opts)
- if err != nil {
- return nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches.
- req.Header.Set("Accept", mediaTypeProjectsPreview)
-
- return s.client.Do(ctx, req, nil)
-}
-
-// RemoveProjectCollaborator removes a collaborator from an organization project.
-// You must be an organization owner or a project admin to remove a collaborator.
-//
-// GitHub API docs: https://docs.github.com/en/rest/projects/collaborators#remove-user-as-a-collaborator
-func (s *ProjectsService) RemoveProjectCollaborator(ctx context.Context, id int64, username string) (*Response, error) {
- u := fmt.Sprintf("projects/%v/collaborators/%v", id, username)
- req, err := s.client.NewRequest("DELETE", u, nil)
- if err != nil {
- return nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches.
- req.Header.Set("Accept", mediaTypeProjectsPreview)
-
- return s.client.Do(ctx, req, nil)
-}
-
-// ListCollaboratorOptions specifies the optional parameters to the
-// ProjectsService.ListProjectCollaborators method.
-type ListCollaboratorOptions struct {
- // Affiliation specifies how collaborators should be filtered by their affiliation.
- // Possible values are:
- // "outside" - All outside collaborators of an organization-owned repository
- // "direct" - All collaborators with permissions to an organization-owned repository,
- // regardless of organization membership status
- // "all" - All collaborators the authenticated user can see
- //
- // Default value is "all".
- Affiliation *string `url:"affiliation,omitempty"`
-
- ListOptions
-}
-
-// ListProjectCollaborators lists the collaborators for an organization project. For a project,
-// the list of collaborators includes outside collaborators, organization members that are direct
-// collaborators, organization members with access through team memberships, organization members
-// with access through default organization permissions, and organization owners. You must be an
-// organization owner or a project admin to list collaborators.
-//
-// GitHub API docs: https://docs.github.com/en/rest/projects/collaborators#list-project-collaborators
-func (s *ProjectsService) ListProjectCollaborators(ctx context.Context, id int64, opts *ListCollaboratorOptions) ([]*User, *Response, error) {
- u := fmt.Sprintf("projects/%v/collaborators", id)
- u, err := addOptions(u, opts)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches.
- req.Header.Set("Accept", mediaTypeProjectsPreview)
-
- var users []*User
- resp, err := s.client.Do(ctx, req, &users)
- if err != nil {
- return nil, resp, err
- }
-
- return users, resp, nil
-}
-
-// ProjectPermissionLevel represents the permission level an organization
-// member has for a given project.
-type ProjectPermissionLevel struct {
- // Possible values: "admin", "write", "read", "none"
- Permission *string `json:"permission,omitempty"`
-
- User *User `json:"user,omitempty"`
-}
-
-// ReviewProjectCollaboratorPermission returns the collaborator's permission level for an organization
-// project. Possible values for the permission key: "admin", "write", "read", "none".
-// You must be an organization owner or a project admin to review a user's permission level.
-//
-// GitHub API docs: https://docs.github.com/en/rest/projects/collaborators#get-project-permission-for-a-user
-func (s *ProjectsService) ReviewProjectCollaboratorPermission(ctx context.Context, id int64, username string) (*ProjectPermissionLevel, *Response, error) {
- u := fmt.Sprintf("projects/%v/collaborators/%v/permission", id, username)
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches.
- req.Header.Set("Accept", mediaTypeProjectsPreview)
-
- ppl := new(ProjectPermissionLevel)
- resp, err := s.client.Do(ctx, req, ppl)
- if err != nil {
- return nil, resp, err
- }
- return ppl, resp, nil
-}
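
A sketch tying the column and card helpers above together, reusing ctx and client (projectID and the strings are placeholders):

col, _, err := client.Projects.CreateProjectColumn(ctx, projectID,
	&github.ProjectColumnOptions{Name: "In progress"})
if err != nil {
	log.Fatal(err)
}
card, _, err := client.Projects.CreateProjectCard(ctx, col.GetID(),
	&github.ProjectCardOptions{Note: "Investigate flaky test"})
if err != nil {
	log.Fatal(err)
}
if _, err := client.Projects.MoveProjectCard(ctx, card.GetID(),
	&github.ProjectCardMoveOptions{Position: "top"}); err != nil {
	log.Fatal(err)
}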
diff --git a/vendor/github.com/google/go-github/v53/github/repos_actions_permissions.go b/vendor/github.com/google/go-github/v53/github/repos_actions_permissions.go
deleted file mode 100644
index 45f844ce..00000000
--- a/vendor/github.com/google/go-github/v53/github/repos_actions_permissions.go
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright 2022 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "context"
- "fmt"
-)
-
-// ActionsPermissionsRepository represents a policy for repositories and allowed actions in a repository.
-//
-// GitHub API docs: https://docs.github.com/en/rest/actions/permissions
-type ActionsPermissionsRepository struct {
- Enabled *bool `json:"enabled,omitempty"`
- AllowedActions *string `json:"allowed_actions,omitempty"`
- SelectedActionsURL *string `json:"selected_actions_url,omitempty"`
-}
-
-func (a ActionsPermissionsRepository) String() string {
- return Stringify(a)
-}
-
-// GetActionsPermissions gets the GitHub Actions permissions policy for repositories and allowed actions in a repository.
-//
-// GitHub API docs: https://docs.github.com/en/rest/actions/permissions#get-github-actions-permissions-for-a-repository
-func (s *RepositoriesService) GetActionsPermissions(ctx context.Context, owner, repo string) (*ActionsPermissionsRepository, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/actions/permissions", owner, repo)
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- permissions := new(ActionsPermissionsRepository)
- resp, err := s.client.Do(ctx, req, permissions)
- if err != nil {
- return nil, resp, err
- }
-
- return permissions, resp, nil
-}
-
-// EditActionsPermissions sets the permissions policy for repositories and allowed actions in a repository.
-//
-// GitHub API docs: https://docs.github.com/en/rest/actions/permissions#set-github-actions-permissions-for-a-repository
-func (s *RepositoriesService) EditActionsPermissions(ctx context.Context, owner, repo string, actionsPermissionsRepository ActionsPermissionsRepository) (*ActionsPermissionsRepository, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/actions/permissions", owner, repo)
- req, err := s.client.NewRequest("PUT", u, actionsPermissionsRepository)
- if err != nil {
- return nil, nil, err
- }
-
- permissions := new(ActionsPermissionsRepository)
- resp, err := s.client.Do(ctx, req, permissions)
- if err != nil {
- return nil, resp, err
- }
-
- return permissions, resp, nil
-}
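
The repository-level variant mirrors the organization one, with an Enabled toggle in place of EnabledRepositories. A sketch ("my-org"/"my-repo" are placeholders):

perms, _, err := client.Repositories.GetActionsPermissions(ctx, "my-org", "my-repo")
if err != nil {
	log.Fatal(err)
}
if perms.GetEnabled() && perms.GetAllowedActions() == "selected" {
	fmt.Println("selected actions:", perms.GetSelectedActionsURL())
}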
diff --git a/vendor/github.com/google/go-github/v53/github/repos_projects.go b/vendor/github.com/google/go-github/v53/github/repos_projects.go
deleted file mode 100644
index a3001dee..00000000
--- a/vendor/github.com/google/go-github/v53/github/repos_projects.go
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright 2017 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "context"
- "fmt"
-)
-
-// ProjectListOptions specifies the optional parameters to the
-// OrganizationsService.ListProjects and RepositoriesService.ListProjects methods.
-type ProjectListOptions struct {
- // Indicates the state of the projects to return. Can be one of "open", "closed", or "all". Default: "open".
- State string `url:"state,omitempty"`
-
- ListOptions
-}
-
-// ListProjects lists the projects for a repo.
-//
-// GitHub API docs: https://docs.github.com/en/rest/projects/projects#list-repository-projects
-func (s *RepositoriesService) ListProjects(ctx context.Context, owner, repo string, opts *ProjectListOptions) ([]*Project, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/projects", owner, repo)
- u, err := addOptions(u, opts)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- // TODO: remove custom Accept headers when APIs fully launch.
- req.Header.Set("Accept", mediaTypeProjectsPreview)
-
- var projects []*Project
- resp, err := s.client.Do(ctx, req, &projects)
- if err != nil {
- return nil, resp, err
- }
-
- return projects, resp, nil
-}
-
-// CreateProject creates a GitHub Project for the specified repository.
-//
-// GitHub API docs: https://docs.github.com/en/rest/projects/projects#create-a-repository-project
-func (s *RepositoriesService) CreateProject(ctx context.Context, owner, repo string, opts *ProjectOptions) (*Project, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/projects", owner, repo)
- req, err := s.client.NewRequest("POST", u, opts)
- if err != nil {
- return nil, nil, err
- }
-
- // TODO: remove custom Accept headers when APIs fully launch.
- req.Header.Set("Accept", mediaTypeProjectsPreview)
-
- project := &Project{}
- resp, err := s.client.Do(ctx, req, project)
- if err != nil {
- return nil, resp, err
- }
-
- return project, resp, nil
-}
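
A sketch of creating a classic repository project with the helper above (placeholders as before):

project, _, err := client.Repositories.CreateProject(ctx, "my-org", "my-repo",
	&github.ProjectOptions{
		Name: github.String("Release board"),
		Body: github.String("Tracks work for the next release"),
	})
if err != nil {
	log.Fatal(err)
}
fmt.Println(project.GetHTMLURL())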
diff --git a/vendor/github.com/google/go-github/v53/github/repos_rules.go b/vendor/github.com/google/go-github/v53/github/repos_rules.go
deleted file mode 100644
index 9299d3e7..00000000
--- a/vendor/github.com/google/go-github/v53/github/repos_rules.go
+++ /dev/null
@@ -1,447 +0,0 @@
-// Copyright 2023 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "context"
- "encoding/json"
- "fmt"
-)
-
-// BypassActor represents the bypass actors from a ruleset.
-type BypassActor struct {
- ActorID *int64 `json:"actor_id,omitempty"`
- // Possible values for ActorType are: Team, Integration
- ActorType *string `json:"actor_type,omitempty"`
-}
-
-// RulesetLink represents a single link object from GitHub ruleset request _links.
-type RulesetLink struct {
- HRef *string `json:"href,omitempty"`
-}
-
-// RulesetLinks represents the "_links" object in a Ruleset.
-type RulesetLinks struct {
- Self *RulesetLink `json:"self,omitempty"`
-}
-
-// RulesetRefConditionParameters represents the conditions object for ref_names.
-type RulesetRefConditionParameters struct {
- Include []string `json:"include"`
- Exclude []string `json:"exclude"`
-}
-
-// RulesetRepositoryConditionParameters represents the conditions object for repository_names.
-type RulesetRepositoryConditionParameters struct {
- Include []string `json:"include,omitempty"`
- Exclude []string `json:"exclude,omitempty"`
- Protected *bool `json:"protected,omitempty"`
-}
-
-// RulesetConditions represents the conditions object in a ruleset.
-type RulesetConditions struct {
- RefName *RulesetRefConditionParameters `json:"ref_name,omitempty"`
- RepositoryName *RulesetRepositoryConditionParameters `json:"repository_name,omitempty"`
-}
-
-// RulePatternParameters represents the rule pattern parameters.
-type RulePatternParameters struct {
- Name *string `json:"name,omitempty"`
- // If Negate is true, the rule will fail if the pattern matches.
- Negate *bool `json:"negate,omitempty"`
- // Possible values for Operator are: starts_with, ends_with, contains, regex
- Operator string `json:"operator"`
- Pattern string `json:"pattern"`
-}
-
-// UpdateAllowsFetchAndMergeRuleParameters represents the update rule parameters.
-type UpdateAllowsFetchAndMergeRuleParameters struct {
- UpdateAllowsFetchAndMerge bool `json:"update_allows_fetch_and_merge"`
-}
-
-// RequiredDeploymentEnvironmentsRuleParameters represents the required_deployments rule parameters.
-type RequiredDeploymentEnvironmentsRuleParameters struct {
- RequiredDeploymentEnvironments []string `json:"required_deployment_environments"`
-}
-
-// PullRequestRuleParameters represents the pull_request rule parameters.
-type PullRequestRuleParameters struct {
- DismissStaleReviewsOnPush bool `json:"dismiss_stale_reviews_on_push"`
- RequireCodeOwnerReview bool `json:"require_code_owner_review"`
- RequireLastPushApproval bool `json:"require_last_push_approval"`
- RequiredApprovingReviewCount int `json:"required_approving_review_count"`
- RequiredReviewThreadResolution bool `json:"required_review_thread_resolution"`
-}
-
-// RuleRequiredStatusChecks represents the RequiredStatusChecks for the RequiredStatusChecksRuleParameters object.
-type RuleRequiredStatusChecks struct {
- Context string `json:"context"`
- IntegrationID *int64 `json:"integration_id,omitempty"`
-}
-
-// RequiredStatusChecksRuleParameters represents the required_status_checks rule parameters.
-type RequiredStatusChecksRuleParameters struct {
- RequiredStatusChecks []RuleRequiredStatusChecks `json:"required_status_checks"`
- StrictRequiredStatusChecksPolicy bool `json:"strict_required_status_checks_policy"`
-}
-
-// RepositoryRule represents a GitHub Rule.
-type RepositoryRule struct {
- Type string `json:"type"`
- Parameters *json.RawMessage `json:"parameters,omitempty"`
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-// This helps us handle the fact that the RepositoryRule Parameters field can take numerous shapes.
-func (r *RepositoryRule) UnmarshalJSON(data []byte) error {
- type rule RepositoryRule
- var RepositoryRule rule
- if err := json.Unmarshal(data, &RepositoryRule); err != nil {
- return err
- }
-
- r.Type = RepositoryRule.Type
-
- switch RepositoryRule.Type {
- case "creation", "deletion", "required_linear_history", "required_signatures", "non_fast_forward":
- r.Parameters = nil
- case "update":
- params := UpdateAllowsFetchAndMergeRuleParameters{}
- if err := json.Unmarshal(*RepositoryRule.Parameters, ¶ms); err != nil {
- return err
- }
-
- bytes, _ := json.Marshal(params)
- rawParams := json.RawMessage(bytes)
-
- r.Parameters = &rawParams
- case "required_deployments":
- params := RequiredDeploymentEnvironmentsRuleParameters{}
- if err := json.Unmarshal(*RepositoryRule.Parameters, ¶ms); err != nil {
- return err
- }
-
- bytes, _ := json.Marshal(params)
- rawParams := json.RawMessage(bytes)
-
- r.Parameters = &rawParams
- case "commit_message_pattern", "commit_author_email_pattern", "committer_email_pattern", "branch_name_pattern", "tag_name_pattern":
- params := RulePatternParameters{}
- if err := json.Unmarshal(*RepositoryRule.Parameters, ¶ms); err != nil {
- return err
- }
-
- bytes, _ := json.Marshal(params)
- rawParams := json.RawMessage(bytes)
-
- r.Parameters = &rawParams
- case "pull_request":
- params := PullRequestRuleParameters{}
- if err := json.Unmarshal(*RepositoryRule.Parameters, ¶ms); err != nil {
- return err
- }
-
- bytes, _ := json.Marshal(params)
- rawParams := json.RawMessage(bytes)
-
- r.Parameters = &rawParams
- case "required_status_checks":
- params := RequiredStatusChecksRuleParameters{}
- if err := json.Unmarshal(*RepositoryRule.Parameters, ¶ms); err != nil {
- return err
- }
-
- bytes, _ := json.Marshal(params)
- rawParams := json.RawMessage(bytes)
-
- r.Parameters = &rawParams
- default:
- r.Type = ""
- r.Parameters = nil
- return fmt.Errorf("RepositoryRule.Type %q is not yet implemented, unable to unmarshal", RepositoryRule.Type)
- }
-
- return nil
-}
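
The unmarshaler above leaves Parameters as a re-marshaled json.RawMessage keyed by Type, so callers decode it a second time into the matching typed struct. A sketch with a hypothetical pull_request payload (add encoding/json to the earlier sketch's imports):

raw := []byte(`{"type":"pull_request","parameters":{
	"dismiss_stale_reviews_on_push":true,"require_code_owner_review":false,
	"require_last_push_approval":false,"required_approving_review_count":2,
	"required_review_thread_resolution":true}}`)

var rule github.RepositoryRule
if err := json.Unmarshal(raw, &rule); err != nil {
	log.Fatal(err)
}
var params github.PullRequestRuleParameters
if err := json.Unmarshal(*rule.Parameters, &params); err != nil {
	log.Fatal(err)
}
fmt.Println(params.RequiredApprovingReviewCount) // 2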
-
-// NewCreationRule creates a rule to only allow users with bypass permission to create matching refs.
-func NewCreationRule() (rule *RepositoryRule) {
- return &RepositoryRule{
- Type: "creation",
- }
-}
-
-// NewUpdateRule creates a rule to only allow users with bypass permission to update matching refs.
-func NewUpdateRule(params *UpdateAllowsFetchAndMergeRuleParameters) (rule *RepositoryRule) {
- bytes, _ := json.Marshal(params)
-
- rawParams := json.RawMessage(bytes)
-
- return &RepositoryRule{
- Type: "update",
- Parameters: &rawParams,
- }
-}
-
-// NewDeletionRule creates a rule to only allow users with bypass permissions to delete matching refs.
-func NewDeletionRule() (rule *RepositoryRule) {
- return &RepositoryRule{
- Type: "deletion",
- }
-}
-
-// NewRequiredLinearHistoryRule creates a rule to prevent merge commits from being pushed to matching branches.
-func NewRequiredLinearHistoryRule() (rule *RepositoryRule) {
- return &RepositoryRule{
- Type: "required_linear_history",
- }
-}
-
-// NewRequiredDeploymentsRule creates a rule to require environments to be successfully deployed before they can be merged into the matching branches.
-func NewRequiredDeploymentsRule(params *RequiredDeploymentEnvironmentsRuleParameters) (rule *RepositoryRule) {
- bytes, _ := json.Marshal(params)
-
- rawParams := json.RawMessage(bytes)
-
- return &RepositoryRule{
- Type: "required_deployments",
- Parameters: &rawParams,
- }
-}
-
-// NewRequiredSignaturesRule creates a rule to require commits pushed to matching branches to have verified signatures.
-func NewRequiredSignaturesRule() (rule *RepositoryRule) {
- return &RepositoryRule{
- Type: "required_signatures",
- }
-}
-
-// NewPullRequestRule creates a rule to require all commits be made to a non-target branch and submitted via a pull request before they can be merged.
-func NewPullRequestRule(params *PullRequestRuleParameters) (rule *RepositoryRule) {
- bytes, _ := json.Marshal(params)
-
- rawParams := json.RawMessage(bytes)
-
- return &RepositoryRule{
- Type: "pull_request",
- Parameters: &rawParams,
- }
-}
-
-// NewRequiredStatusChecksRule creates a rule to require status checks to pass before branches can be merged into matching branches.
-func NewRequiredStatusChecksRule(params *RequiredStatusChecksRuleParameters) (rule *RepositoryRule) {
- bytes, _ := json.Marshal(params)
-
- rawParams := json.RawMessage(bytes)
-
- return &RepositoryRule{
- Type: "required_status_checks",
- Parameters: &rawParams,
- }
-}
-
-// NewNonFastForwardRule creates a rule to prevent users with push access from force pushing to matching branches.
-func NewNonFastForwardRule() (rule *RepositoryRule) {
- return &RepositoryRule{
- Type: "non_fast_forward",
- }
-}
-
-// NewCommitMessagePatternRule creates a rule to restrict commit message patterns being pushed to matching branches.
-func NewCommitMessagePatternRule(params *RulePatternParameters) (rule *RepositoryRule) {
- bytes, _ := json.Marshal(params)
-
- rawParams := json.RawMessage(bytes)
-
- return &RepositoryRule{
- Type: "commit_message_pattern",
- Parameters: &rawParams,
- }
-}
-
-// NewCommitAuthorEmailPatternRule creates a rule to restrict commits with author email patterns being merged into matching branches.
-func NewCommitAuthorEmailPatternRule(params *RulePatternParameters) (rule *RepositoryRule) {
- bytes, _ := json.Marshal(params)
-
- rawParams := json.RawMessage(bytes)
-
- return &RepositoryRule{
- Type: "commit_author_email_pattern",
- Parameters: &rawParams,
- }
-}
-
-// NewCommitterEmailPatternRule creates a rule to restrict commits with committer email patterns being merged into matching branches.
-func NewCommitterEmailPatternRule(params *RulePatternParameters) (rule *RepositoryRule) {
- bytes, _ := json.Marshal(params)
-
- rawParams := json.RawMessage(bytes)
-
- return &RepositoryRule{
- Type: "committer_email_pattern",
- Parameters: &rawParams,
- }
-}
-
-// NewBranchNamePatternRule creates a rule to restrict branch patterns from being merged into matching branches.
-func NewBranchNamePatternRule(params *RulePatternParameters) (rule *RepositoryRule) {
- bytes, _ := json.Marshal(params)
-
- rawParams := json.RawMessage(bytes)
-
- return &RepositoryRule{
- Type: "branch_name_pattern",
- Parameters: &rawParams,
- }
-}
-
-// NewTagNamePatternRule creates a rule to restrict tag patterns contained in non-target branches from being merged into matching branches.
-func NewTagNamePatternRule(params *RulePatternParameters) (rule *RepositoryRule) {
- bytes, _ := json.Marshal(params)
-
- rawParams := json.RawMessage(bytes)
-
- return &RepositoryRule{
- Type: "tag_name_pattern",
- Parameters: &rawParams,
- }
-}
-
-// Ruleset represents a GitHub ruleset object.
-type Ruleset struct {
- ID int64 `json:"id"`
- Name string `json:"name"`
- // Possible values for Target are branch, tag
- Target *string `json:"target,omitempty"`
- // Possible values for SourceType are: Repository, Organization
- SourceType *string `json:"source_type,omitempty"`
- Source string `json:"source"`
- // Possible values for Enforcement are: disabled, active, evaluate
- Enforcement string `json:"enforcement"`
- // Possible values for BypassMode are: none, repository, organization
- BypassMode *string `json:"bypass_mode,omitempty"`
- BypassActors []*BypassActor `json:"bypass_actors,omitempty"`
- NodeID *string `json:"node_id,omitempty"`
- Links *RulesetLinks `json:"_links,omitempty"`
- Conditions *RulesetConditions `json:"conditions,omitempty"`
- Rules []*RepositoryRule `json:"rules,omitempty"`
-}
-
-// GetRulesForBranch gets all the rules that apply to the specified branch.
-//
-// GitHub API docs: https://docs.github.com/en/rest/repos/rules#get-rules-for-a-branch
-func (s *RepositoriesService) GetRulesForBranch(ctx context.Context, owner, repo, branch string) ([]*RepositoryRule, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/rules/branches/%v", owner, repo, branch)
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- var rules []*RepositoryRule
- resp, err := s.client.Do(ctx, req, &rules)
- if err != nil {
- return nil, resp, err
- }
-
- return rules, resp, nil
-}
-
-// GetAllRulesets gets all the rules that apply to the specified repository.
-// If includesParents is true, rulesets configured at the organization level that apply to the repository will be returned.
-//
-// GitHub API docs: https://docs.github.com/en/rest/repos/rules#get-all-repository-rulesets
-func (s *RepositoriesService) GetAllRulesets(ctx context.Context, owner, repo string, includesParents bool) ([]*Ruleset, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/rulesets?includes_parents=%v", owner, repo, includesParents)
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- var ruleset []*Ruleset
- resp, err := s.client.Do(ctx, req, &ruleset)
- if err != nil {
- return nil, resp, err
- }
-
- return ruleset, resp, nil
-}
-
-// CreateRuleset creates a ruleset for the specified repository.
-//
-// GitHub API docs: https://docs.github.com/en/rest/repos/rules#create-a-repository-ruleset
-func (s *RepositoriesService) CreateRuleset(ctx context.Context, owner, repo string, rs *Ruleset) (*Ruleset, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/rulesets", owner, repo)
-
- req, err := s.client.NewRequest("POST", u, rs)
- if err != nil {
- return nil, nil, err
- }
-
- var ruleset *Ruleset
- resp, err := s.client.Do(ctx, req, &ruleset)
- if err != nil {
- return nil, resp, err
- }
-
- return ruleset, resp, nil
-}
-
-// GetRuleset gets a ruleset for the specified repository.
-// If includesParents is true, rulesets configured at the organization level that apply to the repository will be returned.
-//
-// GitHub API docs: https://docs.github.com/en/rest/repos/rules#get-a-repository-ruleset
-func (s *RepositoriesService) GetRuleset(ctx context.Context, owner, repo string, rulesetID int64, includesParents bool) (*Ruleset, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/rulesets/%v?includes_parents=%v", owner, repo, rulesetID, includesParents)
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- var ruleset *Ruleset
- resp, err := s.client.Do(ctx, req, &ruleset)
- if err != nil {
- return nil, resp, err
- }
-
- return ruleset, resp, nil
-}
-
-// UpdateRuleset updates a ruleset for the specified repository.
-//
-// GitHub API docs: https://docs.github.com/en/rest/repos/rules#update-a-repository-ruleset
-func (s *RepositoriesService) UpdateRuleset(ctx context.Context, owner, repo string, rulesetID int64, rs *Ruleset) (*Ruleset, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/rulesets/%v", owner, repo, rulesetID)
-
- req, err := s.client.NewRequest("PUT", u, rs)
- if err != nil {
- return nil, nil, err
- }
-
- var ruleset *Ruleset
- resp, err := s.client.Do(ctx, req, &ruleset)
- if err != nil {
- return nil, resp, err
- }
-
- return ruleset, resp, nil
-}
-
-// DeleteRuleset deletes a ruleset for the specified repository.
-//
-// GitHub API docs: https://docs.github.com/en/rest/repos/rules#delete-a-repository-ruleset
-func (s *RepositoriesService) DeleteRuleset(ctx context.Context, owner, repo string, rulesetID int64) (*Response, error) {
- u := fmt.Sprintf("repos/%v/%v/rulesets/%v", owner, repo, rulesetID)
-
- req, err := s.client.NewRequest("DELETE", u, nil)
- if err != nil {
- return nil, err
- }
-
- return s.client.Do(ctx, req, nil)
-}
diff --git a/vendor/github.com/google/go-github/v53/github/users_projects.go b/vendor/github.com/google/go-github/v53/github/users_projects.go
deleted file mode 100644
index 0cbd61f9..00000000
--- a/vendor/github.com/google/go-github/v53/github/users_projects.go
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2019 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "context"
- "fmt"
-)
-
-// ListProjects lists the projects for the specified user.
-//
-// GitHub API docs: https://docs.github.com/en/rest/projects/projects#list-user-projects
-func (s *UsersService) ListProjects(ctx context.Context, user string, opts *ProjectListOptions) ([]*Project, *Response, error) {
- u := fmt.Sprintf("users/%v/projects", user)
- u, err := addOptions(u, opts)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches.
- req.Header.Set("Accept", mediaTypeProjectsPreview)
-
- var projects []*Project
- resp, err := s.client.Do(ctx, req, &projects)
- if err != nil {
- return nil, resp, err
- }
-
- return projects, resp, nil
-}
-
-// CreateUserProjectOptions specifies the parameters to the UsersService.CreateProject method.
-type CreateUserProjectOptions struct {
- // The name of the project. (Required.)
- Name string `json:"name"`
- // The description of the project. (Optional.)
- Body *string `json:"body,omitempty"`
-}
-
-// CreateProject creates a GitHub Project for the current user.
-//
-// GitHub API docs: https://docs.github.com/en/rest/projects/projects#create-a-user-project
-func (s *UsersService) CreateProject(ctx context.Context, opts *CreateUserProjectOptions) (*Project, *Response, error) {
- u := "user/projects"
- req, err := s.client.NewRequest("POST", u, opts)
- if err != nil {
- return nil, nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches.
- req.Header.Set("Accept", mediaTypeProjectsPreview)
-
- project := &Project{}
- resp, err := s.client.Do(ctx, req, project)
- if err != nil {
- return nil, resp, err
- }
-
- return project, resp, nil
-}
diff --git a/vendor/github.com/google/go-github/v53/AUTHORS b/vendor/github.com/google/go-github/v72/AUTHORS
similarity index 78%
rename from vendor/github.com/google/go-github/v53/AUTHORS
rename to vendor/github.com/google/go-github/v72/AUTHORS
index 5e40cd1f..a6104ffa 100644
--- a/vendor/github.com/google/go-github/v53/AUTHORS
+++ b/vendor/github.com/google/go-github/v72/AUTHORS
@@ -11,13 +11,21 @@
178inaba
2BFL
413x
+6543 <6543@obermui.de>
Abed Kibbe
+Abhijit Hota
Abhinav Gupta
+abhishek
+Abhishek Sharma
Abhishek Veeramalla
aboy
+Adam Kohring
+Aditya Mahendrakar
adrienzieba
afdesk
+Ahmad Nurus S
Ahmed Hagy
+Aidan
Aidan Steele
Ainsley Chong
ajz01
@@ -26,23 +34,31 @@ Akhil Mohan
Alec Thomas
Aleks Clark
Alex Bramley
+Alex Ellis
Alex Orr
Alex Su
Alex Unger
Alexander Harkness
+Alexey Alekhin
+Alexis Couvreur
Alexis Gauthiez
Ali Farooq
+Alin Balutoiu
Allan Guwatudde
Allen Sun
Amey Sakhadeo
Anders Janmyr
+Andreas Deininger
Andreas Garnæs
+Andrew Gillis
Andrew Ryabchun
Andrew Svoboda
+Andriyun
Andy Grunwald
Andy Hume
Andy Lindeman
angie pinilla
+Anish Rajan
anjanashenoy
Anshuman Bhartiya
Antoine
@@ -51,8 +67,10 @@ Anton Nguyen
Anubha Kushwaha
appilon
aprp
+apurwaj2
Aravind
Arda Kuyumcu
+Ary
Arıl Bozoluk
Asier Marruedo
Austin Burdine
@@ -60,12 +78,17 @@ Austin Dizzy
Azuka Okuleye
Ben Batha
Benjamen Keroack
+Benjamin Nater
+Berkay Tacyildiz
Beshr Kayali
Beyang Liu
+billnapier
Billy Keyes
Billy Lynch
+Bingtan Lu
Bjorn Neergaard
Björn Häuser
+Bo Huang
boljen
Bracken
Brad Harris
@@ -74,6 +97,7 @@ Bradley Falzon
Bradley McAllister
Brandon Butler
Brandon Cook
+Brandon Stubbs
Brett Kuhlman
Brett Logan
Brian Egizi
@@ -83,11 +107,13 @@ Cami Diez
Carl Johnson
Carlos Alexandro Becker
Carlos Tadeu Panato Junior
+Casey
ChandanChainani
chandresh-pancholi
Charles Fenwick Elliott
Charlie Yan
Chmouel Boudjnah
+Chris Allen Lane
Chris King
Chris Mc
Chris Raborg
@@ -96,8 +122,10 @@ Chris Schaefer
chrisforrette
Christian Bargmann
Christian Muehlhaeuser
+Christoph Jerolimov
Christoph Sassenberg